From: Matthew Wilcox <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org,
	Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>,
	linux-nilfs@vger.kernel.org
Subject: [PATCH v9 12/61] xarray: Define struct xa_node
Date: Tue, 13 Mar 2018 06:25:50 -0700
Message-ID: <20180313132639.17387-13-willy@infradead.org>
In-Reply-To: <20180313132639.17387-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

This is a direct replacement for struct radix_tree_node.  A couple of
struct members have changed name (exceptional is now nr_values, and root
is now array), so convert their users.  Use a #define so that unconverted
radix tree code continues to work without change.
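As a rough sketch (illustration only, not part of the patch; the helper
node_used_slots() below is made up): with the compatibility define in
place, code that still spells the old struct name compiles against the
new structure, so only users of the renamed members need converting.

	/* Sketch, assuming the defines added to radix-tree.h:
	 *
	 *	#define radix_tree_root		xarray
	 *	#define radix_tree_node		xa_node
	 *
	 * An unconverted caller written against the old name ...
	 */
	static unsigned int node_used_slots(struct radix_tree_node *node)
	{
		/* ... really operates on struct xa_node.  Members that
		 * kept their names (shift, offset, count, slots, tags)
		 * work unchanged; only the renamed ones needed their
		 * users converted by this patch.
		 */
		return node->count;
	}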

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 include/linux/radix-tree.h            | 29 +++------------------
 include/linux/xarray.h                | 24 ++++++++++++++++++
 lib/radix-tree.c                      | 48 +++++++++++++++++------------------
 mm/workingset.c                       | 16 ++++++------
 tools/testing/radix-tree/multiorder.c | 30 +++++++++++-----------
 5 files changed, 74 insertions(+), 73 deletions(-)

diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index c8a33e9e9a3c..f64beb9ba175 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -32,6 +32,7 @@
 
 /* Keep unconverted code working */
 #define radix_tree_root		xarray
+#define radix_tree_node		xa_node
 
 /*
  * The bottom two bits of the slot determine how the remaining bits in the
@@ -60,41 +61,17 @@ static inline bool radix_tree_is_internal_node(void *ptr)
 
 /*** radix-tree API starts here ***/
 
-#define RADIX_TREE_MAX_TAGS 3
-
 #define RADIX_TREE_MAP_SHIFT	XA_CHUNK_SHIFT
 #define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)
 #define RADIX_TREE_MAP_MASK	(RADIX_TREE_MAP_SIZE-1)
 
-#define RADIX_TREE_TAG_LONGS	\
-	((RADIX_TREE_MAP_SIZE + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define RADIX_TREE_MAX_TAGS	XA_MAX_TAGS
+#define RADIX_TREE_TAG_LONGS	XA_TAG_LONGS
 
 #define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
 #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
 					  RADIX_TREE_MAP_SHIFT))
 
-/*
- * @count is the count of every non-NULL element in the ->slots array
- * whether that is a data entry, a retry entry, a user pointer,
- * a sibling entry or a pointer to the next level of the tree.
- * @exceptional is the count of every element in ->slots which is
- * either a data entry or a sibling entry for data.
- */
-struct radix_tree_node {
-	unsigned char	shift;		/* Bits remaining in each slot */
-	unsigned char	offset;		/* Slot offset in parent */
-	unsigned char	count;		/* Total entry count */
-	unsigned char	exceptional;	/* Exceptional entry count */
-	struct radix_tree_node *parent;		/* Used when ascending tree */
-	struct radix_tree_root *root;		/* The tree we belong to */
-	union {
-		struct list_head private_list;	/* For tree user */
-		struct rcu_head	rcu_head;	/* Used when freeing node */
-	};
-	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
-	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
-};
-
 /* The IDR tag is stored in the low bits of xa_flags */
 #define ROOT_IS_IDR	((__force gfp_t)4)
 /* The top bits of xa_flags are used to store the root tags */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 9b05b907062b..b51f354dfbf0 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -195,6 +195,30 @@ static inline void xa_init(struct xarray *xa)
 #endif
 #define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
 #define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
+#define XA_MAX_TAGS		3
+#define XA_TAG_LONGS		DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)
+
+/*
+ * @count is the count of every non-NULL element in the ->slots array
+ * whether that is a value entry, a retry entry, a user pointer,
+ * a sibling entry or a pointer to the next level of the tree.
+ * @nr_values is the count of every element in ->slots which is
+ * either a value entry or a sibling entry to a value entry.
+ */
+struct xa_node {
+	unsigned char	shift;		/* Bits remaining in each slot */
+	unsigned char	offset;		/* Slot offset in parent */
+	unsigned char	count;		/* Total entry count */
+	unsigned char	nr_values;	/* Value entry count */
+	struct xa_node __rcu *parent;	/* NULL at top of tree */
+	struct xarray	*array;		/* The array we belong to */
+	union {
+		struct list_head private_list;	/* For tree user */
+		struct rcu_head	rcu_head;	/* Used when freeing node */
+	};
+	void __rcu	*slots[XA_CHUNK_SIZE];
+	unsigned long	tags[XA_MAX_TAGS][XA_TAG_LONGS];
+};
 
 /* Private */
 static inline bool xa_is_node(const void *entry)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c9ae6e6579f8..e98de16b1648 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -260,11 +260,11 @@ static void dump_node(struct radix_tree_node *node, unsigned long index)
 {
 	unsigned long i;
 
-	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
+	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d nr_values %d\n",
 		node, node->offset, index, index | node_maxindex(node),
 		node->parent,
 		node->tags[0][0], node->tags[1][0], node->tags[2][0],
-		node->shift, node->count, node->exceptional);
+		node->shift, node->count, node->nr_values);
 
 	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
 		unsigned long first = index | (i << node->shift);
@@ -354,7 +354,7 @@ static struct radix_tree_node *
 radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
 			struct radix_tree_root *root,
 			unsigned int shift, unsigned int offset,
-			unsigned int count, unsigned int exceptional)
+			unsigned int count, unsigned int nr_values)
 {
 	struct radix_tree_node *ret = NULL;
 
@@ -401,9 +401,9 @@ radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
 		ret->shift = shift;
 		ret->offset = offset;
 		ret->count = count;
-		ret->exceptional = exceptional;
+		ret->nr_values = nr_values;
 		ret->parent = parent;
-		ret->root = root;
+		ret->array = root;
 	}
 	return ret;
 }
@@ -633,8 +633,8 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
 		if (radix_tree_is_internal_node(entry)) {
 			entry_to_node(entry)->parent = node;
 		} else if (xa_is_value(entry)) {
-			/* Moving an exceptional root->xa_head to a node */
-			node->exceptional = 1;
+			/* Moving a value entry root->xa_head to a node */
+			node->nr_values = 1;
 		}
 		/*
 		 * entry was already in the radix tree, so we do not need
@@ -920,12 +920,12 @@ static inline int insert_entries(struct radix_tree_node *node,
 		if (xa_is_node(old))
 			radix_tree_free_nodes(old);
 		if (xa_is_value(old))
-			node->exceptional--;
+			node->nr_values--;
 	}
 	if (node) {
 		node->count += n;
 		if (xa_is_value(item))
-			node->exceptional += n;
+			node->nr_values += n;
 	}
 	return n;
 }
@@ -939,7 +939,7 @@ static inline int insert_entries(struct radix_tree_node *node,
 	if (node) {
 		node->count++;
 		if (xa_is_value(item))
-			node->exceptional++;
+			node->nr_values++;
 	}
 	return 1;
 }
@@ -1073,7 +1073,7 @@ void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
 EXPORT_SYMBOL(radix_tree_lookup);
 
 static inline void replace_sibling_entries(struct radix_tree_node *node,
-				void __rcu **slot, int count, int exceptional)
+				void __rcu **slot, int count, int values)
 {
 #ifdef CONFIG_RADIX_TREE_MULTIORDER
 	unsigned offset = get_slot_offset(node, slot);
@@ -1086,21 +1086,21 @@ static inline void replace_sibling_entries(struct radix_tree_node *node,
 			node->slots[offset] = NULL;
 			node->count--;
 		}
-		node->exceptional += exceptional;
+		node->nr_values += values;
 	}
 #endif
 }
 
 static void replace_slot(void __rcu **slot, void *item,
-		struct radix_tree_node *node, int count, int exceptional)
+		struct radix_tree_node *node, int count, int values)
 {
 	if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
 		return;
 
-	if (node && (count || exceptional)) {
+	if (node && (count || values)) {
 		node->count += count;
-		node->exceptional += exceptional;
-		replace_sibling_entries(node, slot, count, exceptional);
+		node->nr_values += values;
+		replace_sibling_entries(node, slot, count, values);
 	}
 
 	rcu_assign_pointer(*slot, item);
@@ -1154,17 +1154,17 @@ void __radix_tree_replace(struct radix_tree_root *root,
 			  radix_tree_update_node_t update_node)
 {
 	void *old = rcu_dereference_raw(*slot);
-	int exceptional = !!xa_is_value(item) - !!xa_is_value(old);
+	int values = !!xa_is_value(item) - !!xa_is_value(old);
 	int count = calculate_count(root, node, slot, item, old);
 
 	/*
-	 * This function supports replacing exceptional entries and
+	 * This function supports replacing value entries and
 	 * deleting entries, but that needs accounting against the
 	 * node unless the slot is root->xa_head.
 	 */
 	WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
-			(count || exceptional));
-	replace_slot(slot, item, node, count, exceptional);
+			(count || values));
+	replace_slot(slot, item, node, count, values);
 
 	if (!node)
 		return;
@@ -1186,7 +1186,7 @@ void __radix_tree_replace(struct radix_tree_root *root,
  * across slot lookup and replacement.
  *
  * NOTE: This cannot be used to switch between non-entries (empty slots),
- * regular entries, and exceptional entries, as that requires accounting
+ * regular entries, and value entries, as that requires accounting
  * inside the radix tree node. When switching from one type of entry or
  * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
  * radix_tree_iter_replace().
@@ -1294,7 +1294,7 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
 		rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
 	}
 	rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
-	parent->exceptional -= (end - offset);
+	parent->nr_values -= (end - offset);
 
 	if (order == parent->shift)
 		return 0;
@@ -1954,7 +1954,7 @@ static bool __radix_tree_delete(struct radix_tree_root *root,
 				struct radix_tree_node *node, void __rcu **slot)
 {
 	void *old = rcu_dereference_raw(*slot);
-	int exceptional = xa_is_value(old) ? -1 : 0;
+	int values = xa_is_value(old) ? -1 : 0;
 	unsigned offset = get_slot_offset(node, slot);
 	int tag;
 
@@ -1964,7 +1964,7 @@ static bool __radix_tree_delete(struct radix_tree_root *root,
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
 			node_tag_clear(root, node, tag, offset);
 
-	replace_slot(slot, NULL, node, -1, exceptional);
+	replace_slot(slot, NULL, node, -1, values);
 	return node && delete_node(root, node, NULL);
 }
 
diff --git a/mm/workingset.c b/mm/workingset.c
index b5dd3936e697..bad4e58881cd 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -348,7 +348,7 @@ void workingset_update_node(struct radix_tree_node *node)
 	 * already where they should be. The list_empty() test is safe
 	 * as node->private_list is protected by the i_pages lock.
 	 */
-	if (node->count && node->count == node->exceptional) {
+	if (node->count && node->count == node->nr_values) {
 		if (list_empty(&node->private_list))
 			list_lru_add(&shadow_nodes, &node->private_list);
 	} else {
@@ -427,8 +427,8 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	 * to reclaim, take the node off-LRU, and drop the lru_lock.
 	 */
 
-	node = container_of(item, struct radix_tree_node, private_list);
-	mapping = container_of(node->root, struct address_space, i_pages);
+	node = container_of(item, struct xa_node, private_list);
+	mapping = container_of(node->array, struct address_space, i_pages);
 
 	/* Coming from the list, invert the lock order */
 	if (!xa_trylock(&mapping->i_pages)) {
@@ -445,25 +445,25 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 	 * no pages, so we expect to be able to remove them all and
 	 * delete and free the empty node afterwards.
 	 */
-	if (WARN_ON_ONCE(!node->exceptional))
+	if (WARN_ON_ONCE(!node->nr_values))
 		goto out_invalid;
-	if (WARN_ON_ONCE(node->count != node->exceptional))
+	if (WARN_ON_ONCE(node->count != node->nr_values))
 		goto out_invalid;
 	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
 		if (node->slots[i]) {
 			if (WARN_ON_ONCE(!xa_is_value(node->slots[i])))
 				goto out_invalid;
-			if (WARN_ON_ONCE(!node->exceptional))
+			if (WARN_ON_ONCE(!node->nr_values))
 				goto out_invalid;
 			if (WARN_ON_ONCE(!mapping->nrexceptional))
 				goto out_invalid;
 			node->slots[i] = NULL;
-			node->exceptional--;
+			node->nr_values--;
 			node->count--;
 			mapping->nrexceptional--;
 		}
 	}
-	if (WARN_ON_ONCE(node->exceptional))
+	if (WARN_ON_ONCE(node->nr_values))
 		goto out_invalid;
 	inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
 	__radix_tree_delete_node(&mapping->i_pages, node,
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 24293a2fd82d..ed51edc008fd 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -392,7 +392,7 @@ static void multiorder_join2(unsigned order1, unsigned order2)
 	radix_tree_insert(&tree, 1 << order2, xa_mk_value(5));
 	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
 	assert(item2 == xa_mk_value(5));
-	assert(node->exceptional == 1);
+	assert(node->nr_values == 1);
 
 	item2 = radix_tree_lookup(&tree, 0);
 	free(item2);
@@ -400,7 +400,7 @@ static void multiorder_join2(unsigned order1, unsigned order2)
 	radix_tree_join(&tree, 0, order1, item1);
 	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
 	assert(item2 == item1);
-	assert(node->exceptional == 0);
+	assert(node->nr_values == 0);
 	item_kill_tree(&tree);
 }
 
@@ -408,7 +408,7 @@ static void multiorder_join2(unsigned order1, unsigned order2)
  * This test revealed an accounting bug for inline data entries at one point.
  * Nodes were being freed back into the pool with an elevated exception count
  * by radix_tree_join() and then radix_tree_split() was failing to zero the
- * count of exceptional entries.
+ * count of value entries.
  */
 static void multiorder_join3(unsigned int order)
 {
@@ -432,7 +432,7 @@ static void multiorder_join3(unsigned int order)
 	}
 
 	__radix_tree_lookup(&tree, 0, &node, NULL);
-	assert(node->exceptional == node->count);
+	assert(node->nr_values == node->count);
 
 	item_kill_tree(&tree);
 }
@@ -519,7 +519,7 @@ static void __multiorder_split2(int old_order, int new_order)
 
 	item = __radix_tree_lookup(&tree, 0, &node, NULL);
 	assert(item == xa_mk_value(5));
-	assert(node->exceptional > 0);
+	assert(node->nr_values > 0);
 
 	radix_tree_split(&tree, 0, new_order);
 	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
@@ -529,7 +529,7 @@ static void __multiorder_split2(int old_order, int new_order)
 
 	item = __radix_tree_lookup(&tree, 0, &node, NULL);
 	assert(item != xa_mk_value(5));
-	assert(node->exceptional == 0);
+	assert(node->nr_values == 0);
 
 	item_kill_tree(&tree);
 }
@@ -546,7 +546,7 @@ static void __multiorder_split3(int old_order, int new_order)
 
 	item = __radix_tree_lookup(&tree, 0, &node, NULL);
 	assert(item == xa_mk_value(5));
-	assert(node->exceptional > 0);
+	assert(node->nr_values > 0);
 
 	radix_tree_split(&tree, 0, new_order);
 	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
@@ -555,7 +555,7 @@ static void __multiorder_split3(int old_order, int new_order)
 
 	item = __radix_tree_lookup(&tree, 0, &node, NULL);
 	assert(item == xa_mk_value(7));
-	assert(node->exceptional > 0);
+	assert(node->nr_values > 0);
 
 	item_kill_tree(&tree);
 
@@ -563,7 +563,7 @@ static void __multiorder_split3(int old_order, int new_order)
 
 	item = __radix_tree_lookup(&tree, 0, &node, NULL);
 	assert(item == xa_mk_value(5));
-	assert(node->exceptional > 0);
+	assert(node->nr_values > 0);
 
 	radix_tree_split(&tree, 0, new_order);
 	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
@@ -576,13 +576,13 @@ static void __multiorder_split3(int old_order, int new_order)
 
 	item = __radix_tree_lookup(&tree, 1 << new_order, &node, NULL);
 	assert(item == xa_mk_value(7));
-	assert(node->count == node->exceptional);
+	assert(node->count == node->nr_values);
 	do {
 		node = node->parent;
 		if (!node)
 			break;
 		assert(node->count == 1);
-		assert(node->exceptional == 0);
+		assert(node->nr_values == 0);
 	} while (1);
 
 	item_kill_tree(&tree);
@@ -610,15 +610,15 @@ static void multiorder_account(void)
 
 	__radix_tree_insert(&tree, 1 << 5, 5, xa_mk_value(5));
 	__radix_tree_lookup(&tree, 0, &node, NULL);
-	assert(node->count == node->exceptional * 2);
+	assert(node->count == node->nr_values * 2);
 	radix_tree_delete(&tree, 1 << 5);
-	assert(node->exceptional == 0);
+	assert(node->nr_values == 0);
 
 	__radix_tree_insert(&tree, 1 << 5, 5, xa_mk_value(5));
 	__radix_tree_lookup(&tree, 1 << 5, &node, &slot);
-	assert(node->count == node->exceptional * 2);
+	assert(node->count == node->nr_values * 2);
 	__radix_tree_replace(&tree, node, slot, NULL, NULL);
-	assert(node->exceptional == 0);
+	assert(node->nr_values == 0);
 
 	item_kill_tree(&tree);
 }
-- 
2.16.1
