All of lore.kernel.org
 help / color / mirror / Atom feed
From: Matthew Wilcox <willy@infradead.org>
To: linux-kernel@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>,
	linux-xfs@vger.kernel.org, linux-nilfs@vger.kernel.org,
	linux-raid@vger.kernel.org,
	Matthew Wilcox <mawilcox@microsoft.com>,
	Marc Zyngier <marc.zyngier@arm.com>,
	linux-usb@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	David Howells <dhowells@redhat.com>,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Rehas Sachdeva <aquannie@gmail.com>, Shaohua Li <shli@kernel.org>,
	linux-btrfs@vger.kernel.org
Subject: [PATCH v5 78/78] fscache: Convert to XArray
Date: Fri, 15 Dec 2017 14:04:50 -0800	[thread overview]
Message-ID: <20171215220450.7899-79-willy@infradead.org> (raw)
In-Reply-To: <20171215220450.7899-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

Removes another user of radix_tree_preload().

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 fs/fscache/cookie.c     |   6 +-
 fs/fscache/internal.h   |   2 +-
 fs/fscache/object.c     |   2 +-
 fs/fscache/page.c       | 152 +++++++++++++++++++++---------------------------
 fs/fscache/stats.c      |   6 +-
 include/linux/fscache.h |   8 +--
 6 files changed, 76 insertions(+), 100 deletions(-)

diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index e9054e0c1a49..6d45134d609e 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -109,9 +109,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
 	cookie->netfs_data	= netfs_data;
 	cookie->flags		= (1 << FSCACHE_COOKIE_NO_DATA_YET);
 
-	/* radix tree insertion won't use the preallocation pool unless it's
-	 * told it may not wait */
-	INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+	xa_init(&cookie->stores);
 
 	switch (cookie->def->type) {
 	case FSCACHE_COOKIE_TYPE_INDEX:
@@ -608,7 +606,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 	/* Clear pointers back to the netfs */
 	cookie->netfs_data	= NULL;
 	cookie->def		= NULL;
-	BUG_ON(!radix_tree_empty(&cookie->stores));
+	BUG_ON(!xa_empty(&cookie->stores));
 
 	if (cookie->parent) {
 		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 0ff4b49a0037..468d9bd7f8c3 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -200,7 +200,7 @@ extern atomic_t fscache_n_stores_oom;
 extern atomic_t fscache_n_store_ops;
 extern atomic_t fscache_n_store_calls;
 extern atomic_t fscache_n_store_pages;
-extern atomic_t fscache_n_store_radix_deletes;
+extern atomic_t fscache_n_store_xarray_deletes;
 extern atomic_t fscache_n_store_pages_over_limit;
 
 extern atomic_t fscache_n_store_vmscan_not_storing;
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index aa0e71f02c33..ed165736a358 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -956,7 +956,7 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
 	 * retire the object instead.
 	 */
 	if (!fscache_use_cookie(object)) {
-		ASSERT(radix_tree_empty(&object->cookie->stores));
+		ASSERT(xa_empty(&object->cookie->stores));
 		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
 		_leave(" [no cookie]");
 		return transit_to(KILL_OBJECT);
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 961029e04027..315e2745f822 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -22,13 +22,7 @@
  */
 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
 {
-	void *val;
-
-	rcu_read_lock();
-	val = radix_tree_lookup(&cookie->stores, page->index);
-	rcu_read_unlock();
-
-	return val != NULL;
+	return xa_load(&cookie->stores, page->index) != NULL;
 }
 EXPORT_SYMBOL(__fscache_check_page_write);
 
@@ -64,15 +58,15 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 				  struct page *page,
 				  gfp_t gfp)
 {
+	XA_STATE(xas, &cookie->stores, page->index);
 	struct page *xpage;
-	void *val;
 
 	_enter("%p,%p,%x", cookie, page, gfp);
 
 try_again:
 	rcu_read_lock();
-	val = radix_tree_lookup(&cookie->stores, page->index);
-	if (!val) {
+	xpage = xas_load(&xas);
+	if (!xpage) {
 		rcu_read_unlock();
 		fscache_stat(&fscache_n_store_vmscan_not_storing);
 		__fscache_uncache_page(cookie, page);
@@ -81,31 +75,32 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 
 	/* see if the page is actually undergoing storage - if so we can't get
 	 * rid of it till the cache has finished with it */
-	if (radix_tree_tag_get(&cookie->stores, page->index,
-			       FSCACHE_COOKIE_STORING_TAG)) {
+	if (xas_get_tag(&xas, FSCACHE_COOKIE_STORING_TAG)) {
 		rcu_read_unlock();
+		xas_retry(&xas, XA_RETRY_ENTRY);
 		goto page_busy;
 	}
 
 	/* the page is pending storage, so we attempt to cancel the store and
 	 * discard the store request so that the page can be reclaimed */
-	spin_lock(&cookie->stores_lock);
+	xas_retry(&xas, XA_RETRY_ENTRY);
+	xas_lock(&xas);
 	rcu_read_unlock();
 
-	if (radix_tree_tag_get(&cookie->stores, page->index,
-			       FSCACHE_COOKIE_STORING_TAG)) {
+	xpage = xas_load(&xas);
+	if (xas_get_tag(&xas, FSCACHE_COOKIE_STORING_TAG)) {
 		/* the page started to undergo storage whilst we were looking,
 		 * so now we can only wait or return */
 		spin_unlock(&cookie->stores_lock);
 		goto page_busy;
 	}
 
-	xpage = radix_tree_delete(&cookie->stores, page->index);
+	xas_store(&xas, NULL);
 	spin_unlock(&cookie->stores_lock);
 
 	if (xpage) {
 		fscache_stat(&fscache_n_store_vmscan_cancelled);
-		fscache_stat(&fscache_n_store_radix_deletes);
+		fscache_stat(&fscache_n_store_xarray_deletes);
 		ASSERTCMP(xpage, ==, page);
 	} else {
 		fscache_stat(&fscache_n_store_vmscan_gone);
@@ -149,17 +144,19 @@ static void fscache_end_page_write(struct fscache_object *object,
 	spin_lock(&object->lock);
 	cookie = object->cookie;
 	if (cookie) {
+		XA_STATE(xas, &cookie->stores, page->index);
 		/* delete the page from the tree if it is now no longer
 		 * pending */
-		spin_lock(&cookie->stores_lock);
-		radix_tree_tag_clear(&cookie->stores, page->index,
-				     FSCACHE_COOKIE_STORING_TAG);
-		if (!radix_tree_tag_get(&cookie->stores, page->index,
-					FSCACHE_COOKIE_PENDING_TAG)) {
-			fscache_stat(&fscache_n_store_radix_deletes);
-			xpage = radix_tree_delete(&cookie->stores, page->index);
+		xas_lock(&xas);
+		xpage = xas_load(&xas);
+		xas_clear_tag(&xas, FSCACHE_COOKIE_STORING_TAG);
+		if (xas_get_tag(&xas, FSCACHE_COOKIE_PENDING_TAG)) {
+			xpage = NULL;
+		} else {
+			fscache_stat(&fscache_n_store_xarray_deletes);
+			xas_store(&xas, NULL);
 		}
-		spin_unlock(&cookie->stores_lock);
+		xas_unlock(&xas);
 		wake_up_bit(&cookie->flags, 0);
 	}
 	spin_unlock(&object->lock);
@@ -765,13 +762,12 @@ static void fscache_release_write_op(struct fscache_operation *_op)
  */
 static void fscache_write_op(struct fscache_operation *_op)
 {
+	XA_STATE(xas, NULL, 0);
 	struct fscache_storage *op =
 		container_of(_op, struct fscache_storage, op);
 	struct fscache_object *object = op->op.object;
 	struct fscache_cookie *cookie;
 	struct page *page;
-	unsigned n;
-	void *results[1];
 	int ret;
 
 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
@@ -804,29 +800,25 @@ static void fscache_write_op(struct fscache_operation *_op)
 		return;
 	}
 
-	spin_lock(&cookie->stores_lock);
+	xas.xa = &cookie->stores;
+	xas_lock(&xas);
 
 	fscache_stat(&fscache_n_store_calls);
 
 	/* find a page to store */
-	page = NULL;
-	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
-				       FSCACHE_COOKIE_PENDING_TAG);
-	if (n != 1)
+	page = xas_find_tag(&xas, ULONG_MAX, FSCACHE_COOKIE_PENDING_TAG);
+	if (!page)
 		goto superseded;
-	page = results[0];
-	_debug("gang %d [%lx]", n, page->index);
+	_debug("found %lx", page->index);
 	if (page->index >= op->store_limit) {
 		fscache_stat(&fscache_n_store_pages_over_limit);
 		goto superseded;
 	}
 
-	radix_tree_tag_set(&cookie->stores, page->index,
-			   FSCACHE_COOKIE_STORING_TAG);
-	radix_tree_tag_clear(&cookie->stores, page->index,
-			     FSCACHE_COOKIE_PENDING_TAG);
+	xas_set_tag(&xas, FSCACHE_COOKIE_STORING_TAG);
+	xas_clear_tag(&xas, FSCACHE_COOKIE_PENDING_TAG);
+	xas_unlock(&xas);
 
-	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
 	fscache_stat(&fscache_n_store_pages);
@@ -848,7 +840,7 @@ static void fscache_write_op(struct fscache_operation *_op)
 	/* this writer is going away and there aren't any more things to
 	 * write */
 	_debug("cease");
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 	spin_unlock(&object->lock);
 	fscache_op_complete(&op->op, true);
@@ -860,32 +852,25 @@ static void fscache_write_op(struct fscache_operation *_op)
  */
 void fscache_invalidate_writes(struct fscache_cookie *cookie)
 {
+	XA_STATE(xas, &cookie->stores, 0);
+	unsigned int cleared = 0;
 	struct page *page;
-	void *results[16];
-	int n, i;
 
 	_enter("");
 
-	for (;;) {
-		spin_lock(&cookie->stores_lock);
-		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
-					       ARRAY_SIZE(results),
-					       FSCACHE_COOKIE_PENDING_TAG);
-		if (n == 0) {
-			spin_unlock(&cookie->stores_lock);
-			break;
-		}
-
-		for (i = n - 1; i >= 0; i--) {
-			page = results[i];
-			radix_tree_delete(&cookie->stores, page->index);
-		}
+	xas_lock(&xas);
+	xas_for_each_tag(&xas, page, ULONG_MAX, FSCACHE_COOKIE_PENDING_TAG) {
+		xas_store(&xas, NULL);
+		put_page(page);
+		if (++cleared % XA_CHECK_SCHED)
+			continue;
 
-		spin_unlock(&cookie->stores_lock);
-
-		for (i = n - 1; i >= 0; i--)
-			put_page(results[i]);
+		xas_pause(&xas);
+		xas_unlock(&xas);
+		cond_resched();
+		xas_lock(&xas);
 	}
+	xas_unlock(&xas);
 
 	wake_up_bit(&cookie->flags, 0);
 
@@ -925,9 +910,11 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 			 struct page *page,
 			 gfp_t gfp)
 {
+	XA_STATE(xas, &cookie->stores, page->index);
 	struct fscache_storage *op;
 	struct fscache_object *object;
 	bool wake_cookie = false;
+	struct page *xpage;
 	int ret;
 
 	_enter("%p,%x,", cookie, (u32) page->flags);
@@ -952,10 +939,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 		(1 << FSCACHE_OP_WAITING) |
 		(1 << FSCACHE_OP_UNUSE_COOKIE);
 
-	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
-	if (ret < 0)
-		goto nomem_free;
-
+retry:
 	ret = -ENOBUFS;
 	spin_lock(&cookie->lock);
 
@@ -967,23 +951,19 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
 		goto nobufs;
 
-	/* add the page to the pending-storage radix tree on the backing
-	 * object */
+	/* add the page to the pending-storage xarray on the backing object */
 	spin_lock(&object->lock);
-	spin_lock(&cookie->stores_lock);
+	xas_lock(&xas);
 
 	_debug("store limit %llx", (unsigned long long) object->store_limit);
 
-	ret = radix_tree_insert(&cookie->stores, page->index, page);
-	if (ret < 0) {
-		if (ret == -EEXIST)
-			goto already_queued;
-		_debug("insert failed %d", ret);
+	xpage = xas_create(&xas);
+	if (xpage)
+		goto already_queued;
+	if (xas_error(&xas))
 		goto nobufs_unlock_obj;
-	}
-
-	radix_tree_tag_set(&cookie->stores, page->index,
-			   FSCACHE_COOKIE_PENDING_TAG);
+	xas_store(&xas, page);
+	xas_set_tag(&xas, FSCACHE_COOKIE_PENDING_TAG);
 	get_page(page);
 
 	/* we only want one writer at a time, but we do need to queue new
@@ -991,7 +971,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
 		goto already_pending;
 
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
 
 	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
@@ -1002,7 +982,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 		goto submit_failed;
 
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
 	fscache_stat(&fscache_n_store_ops);
 	fscache_stat(&fscache_n_stores_ok);
 
@@ -1014,30 +993,31 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 already_queued:
 	fscache_stat(&fscache_n_stores_again);
 already_pending:
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
 	fscache_put_operation(&op->op);
 	fscache_stat(&fscache_n_stores_ok);
 	_leave(" = 0");
 	return 0;
 
 submit_failed:
-	spin_lock(&cookie->stores_lock);
-	radix_tree_delete(&cookie->stores, page->index);
-	spin_unlock(&cookie->stores_lock);
+	xa_erase(&cookie->stores, page->index);
 	wake_cookie = __fscache_unuse_cookie(cookie);
 	put_page(page);
 	ret = -ENOBUFS;
 	goto nobufs;
 
 nobufs_unlock_obj:
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
+	spin_unlock(&cookie->lock);
+	if (xas_nomem(&xas, gfp))
+		goto retry;
+	goto nobufs2;
 nobufs:
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
+nobufs2:
 	fscache_put_operation(&op->op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
@@ -1045,8 +1025,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	_leave(" = -ENOBUFS");
 	return -ENOBUFS;
 
-nomem_free:
-	fscache_put_operation(&op->op);
 nomem:
 	fscache_stat(&fscache_n_stores_oom);
 	_leave(" = -ENOMEM");
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 7ac6e839b065..9c012b4229cd 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -63,7 +63,7 @@ atomic_t fscache_n_stores_oom;
 atomic_t fscache_n_store_ops;
 atomic_t fscache_n_store_calls;
 atomic_t fscache_n_store_pages;
-atomic_t fscache_n_store_radix_deletes;
+atomic_t fscache_n_store_xarray_deletes;
 atomic_t fscache_n_store_pages_over_limit;
 
 atomic_t fscache_n_store_vmscan_not_storing;
@@ -232,11 +232,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_stores_again),
 		   atomic_read(&fscache_n_stores_nobufs),
 		   atomic_read(&fscache_n_stores_oom));
-	seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+	seq_printf(m, "Stores : ops=%u run=%u pgs=%u xar=%u olm=%u\n",
 		   atomic_read(&fscache_n_store_ops),
 		   atomic_read(&fscache_n_store_calls),
 		   atomic_read(&fscache_n_store_pages),
-		   atomic_read(&fscache_n_store_radix_deletes),
+		   atomic_read(&fscache_n_store_xarray_deletes),
 		   atomic_read(&fscache_n_store_pages_over_limit));
 
 	seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6a2f631a913f..74ea31368c09 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -22,7 +22,7 @@
 #include <linux/list.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 
 #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
 #define fscache_available() (1)
@@ -175,9 +175,9 @@ struct fscache_cookie {
 	const struct fscache_cookie_def	*def;		/* definition */
 	struct fscache_cookie		*parent;	/* parent of this entry */
 	void				*netfs_data;	/* back pointer to netfs */
-	struct radix_tree_root		stores;		/* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG	0		/* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG	1		/* pages tag: writing to cache */
+	struct xarray			stores;		/* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG	XA_TAG_0	/* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG	XA_TAG_1	/* pages tag: writing to cache */
 
 	unsigned long			flags;
 #define FSCACHE_COOKIE_LOOKING_UP	0	/* T if non-index cookie being looked up still */
-- 
2.15.1


------------------------------------------------------------------------------
Check out the vibrant tech community on one of the world's most
engaging tech sites, Slashdot.org! http://sdm.link/slashdot

WARNING: multiple messages have this Message-ID (diff)
From: Matthew Wilcox <willy@infradead.org>
To: linux-kernel@vger.kernel.org
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	David Howells <dhowells@redhat.com>, Shaohua Li <shli@kernel.org>,
	Jens Axboe <axboe@kernel.dk>, Rehas Sachdeva <aquannie@gmail.com>,
	Marc Zyngier <marc.zyngier@arm.com>,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-nilfs@vger.kernel.org, linux-btrfs@vger.kernel.org,
	linux-xfs@vger.kernel.org, linux-usb@vger.kernel.org,
	linux-raid@vger.kernel.org
Subject: [PATCH v5 78/78] fscache: Convert to XArray
Date: Fri, 15 Dec 2017 14:04:50 -0800	[thread overview]
Message-ID: <20171215220450.7899-79-willy@infradead.org> (raw)
In-Reply-To: <20171215220450.7899-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

Removes another user of radix_tree_preload().

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 fs/fscache/cookie.c     |   6 +-
 fs/fscache/internal.h   |   2 +-
 fs/fscache/object.c     |   2 +-
 fs/fscache/page.c       | 152 +++++++++++++++++++++---------------------------
 fs/fscache/stats.c      |   6 +-
 include/linux/fscache.h |   8 +--
 6 files changed, 76 insertions(+), 100 deletions(-)

diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index e9054e0c1a49..6d45134d609e 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -109,9 +109,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
 	cookie->netfs_data	= netfs_data;
 	cookie->flags		= (1 << FSCACHE_COOKIE_NO_DATA_YET);
 
-	/* radix tree insertion won't use the preallocation pool unless it's
-	 * told it may not wait */
-	INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+	xa_init(&cookie->stores);
 
 	switch (cookie->def->type) {
 	case FSCACHE_COOKIE_TYPE_INDEX:
@@ -608,7 +606,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 	/* Clear pointers back to the netfs */
 	cookie->netfs_data	= NULL;
 	cookie->def		= NULL;
-	BUG_ON(!radix_tree_empty(&cookie->stores));
+	BUG_ON(!xa_empty(&cookie->stores));
 
 	if (cookie->parent) {
 		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 0ff4b49a0037..468d9bd7f8c3 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -200,7 +200,7 @@ extern atomic_t fscache_n_stores_oom;
 extern atomic_t fscache_n_store_ops;
 extern atomic_t fscache_n_store_calls;
 extern atomic_t fscache_n_store_pages;
-extern atomic_t fscache_n_store_radix_deletes;
+extern atomic_t fscache_n_store_xarray_deletes;
 extern atomic_t fscache_n_store_pages_over_limit;
 
 extern atomic_t fscache_n_store_vmscan_not_storing;
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index aa0e71f02c33..ed165736a358 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -956,7 +956,7 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
 	 * retire the object instead.
 	 */
 	if (!fscache_use_cookie(object)) {
-		ASSERT(radix_tree_empty(&object->cookie->stores));
+		ASSERT(xa_empty(&object->cookie->stores));
 		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
 		_leave(" [no cookie]");
 		return transit_to(KILL_OBJECT);
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 961029e04027..315e2745f822 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -22,13 +22,7 @@
  */
 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
 {
-	void *val;
-
-	rcu_read_lock();
-	val = radix_tree_lookup(&cookie->stores, page->index);
-	rcu_read_unlock();
-
-	return val != NULL;
+	return xa_load(&cookie->stores, page->index) != NULL;
 }
 EXPORT_SYMBOL(__fscache_check_page_write);
 
@@ -64,15 +58,15 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 				  struct page *page,
 				  gfp_t gfp)
 {
+	XA_STATE(xas, &cookie->stores, page->index);
 	struct page *xpage;
-	void *val;
 
 	_enter("%p,%p,%x", cookie, page, gfp);
 
 try_again:
 	rcu_read_lock();
-	val = radix_tree_lookup(&cookie->stores, page->index);
-	if (!val) {
+	xpage = xas_load(&xas);
+	if (!xpage) {
 		rcu_read_unlock();
 		fscache_stat(&fscache_n_store_vmscan_not_storing);
 		__fscache_uncache_page(cookie, page);
@@ -81,31 +75,32 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 
 	/* see if the page is actually undergoing storage - if so we can't get
 	 * rid of it till the cache has finished with it */
-	if (radix_tree_tag_get(&cookie->stores, page->index,
-			       FSCACHE_COOKIE_STORING_TAG)) {
+	if (xas_get_tag(&xas, FSCACHE_COOKIE_STORING_TAG)) {
 		rcu_read_unlock();
+		xas_retry(&xas, XA_RETRY_ENTRY);
 		goto page_busy;
 	}
 
 	/* the page is pending storage, so we attempt to cancel the store and
 	 * discard the store request so that the page can be reclaimed */
-	spin_lock(&cookie->stores_lock);
+	xas_retry(&xas, XA_RETRY_ENTRY);
+	xas_lock(&xas);
 	rcu_read_unlock();
 
-	if (radix_tree_tag_get(&cookie->stores, page->index,
-			       FSCACHE_COOKIE_STORING_TAG)) {
+	xpage = xas_load(&xas);
+	if (xas_get_tag(&xas, FSCACHE_COOKIE_STORING_TAG)) {
 		/* the page started to undergo storage whilst we were looking,
 		 * so now we can only wait or return */
 		spin_unlock(&cookie->stores_lock);
 		goto page_busy;
 	}
 
-	xpage = radix_tree_delete(&cookie->stores, page->index);
+	xas_store(&xas, NULL);
 	spin_unlock(&cookie->stores_lock);
 
 	if (xpage) {
 		fscache_stat(&fscache_n_store_vmscan_cancelled);
-		fscache_stat(&fscache_n_store_radix_deletes);
+		fscache_stat(&fscache_n_store_xarray_deletes);
 		ASSERTCMP(xpage, ==, page);
 	} else {
 		fscache_stat(&fscache_n_store_vmscan_gone);
@@ -149,17 +144,19 @@ static void fscache_end_page_write(struct fscache_object *object,
 	spin_lock(&object->lock);
 	cookie = object->cookie;
 	if (cookie) {
+		XA_STATE(xas, &cookie->stores, page->index);
 		/* delete the page from the tree if it is now no longer
 		 * pending */
-		spin_lock(&cookie->stores_lock);
-		radix_tree_tag_clear(&cookie->stores, page->index,
-				     FSCACHE_COOKIE_STORING_TAG);
-		if (!radix_tree_tag_get(&cookie->stores, page->index,
-					FSCACHE_COOKIE_PENDING_TAG)) {
-			fscache_stat(&fscache_n_store_radix_deletes);
-			xpage = radix_tree_delete(&cookie->stores, page->index);
+		xas_lock(&xas);
+		xpage = xas_load(&xas);
+		xas_clear_tag(&xas, FSCACHE_COOKIE_STORING_TAG);
+		if (xas_get_tag(&xas, FSCACHE_COOKIE_PENDING_TAG)) {
+			xpage = NULL;
+		} else {
+			fscache_stat(&fscache_n_store_xarray_deletes);
+			xas_store(&xas, NULL);
 		}
-		spin_unlock(&cookie->stores_lock);
+		xas_unlock(&xas);
 		wake_up_bit(&cookie->flags, 0);
 	}
 	spin_unlock(&object->lock);
@@ -765,13 +762,12 @@ static void fscache_release_write_op(struct fscache_operation *_op)
  */
 static void fscache_write_op(struct fscache_operation *_op)
 {
+	XA_STATE(xas, NULL, 0);
 	struct fscache_storage *op =
 		container_of(_op, struct fscache_storage, op);
 	struct fscache_object *object = op->op.object;
 	struct fscache_cookie *cookie;
 	struct page *page;
-	unsigned n;
-	void *results[1];
 	int ret;
 
 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
@@ -804,29 +800,25 @@ static void fscache_write_op(struct fscache_operation *_op)
 		return;
 	}
 
-	spin_lock(&cookie->stores_lock);
+	xas.xa = &cookie->stores;
+	xas_lock(&xas);
 
 	fscache_stat(&fscache_n_store_calls);
 
 	/* find a page to store */
-	page = NULL;
-	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
-				       FSCACHE_COOKIE_PENDING_TAG);
-	if (n != 1)
+	page = xas_find_tag(&xas, ULONG_MAX, FSCACHE_COOKIE_PENDING_TAG);
+	if (!page)
 		goto superseded;
-	page = results[0];
-	_debug("gang %d [%lx]", n, page->index);
+	_debug("found %lx", page->index);
 	if (page->index >= op->store_limit) {
 		fscache_stat(&fscache_n_store_pages_over_limit);
 		goto superseded;
 	}
 
-	radix_tree_tag_set(&cookie->stores, page->index,
-			   FSCACHE_COOKIE_STORING_TAG);
-	radix_tree_tag_clear(&cookie->stores, page->index,
-			     FSCACHE_COOKIE_PENDING_TAG);
+	xas_set_tag(&xas, FSCACHE_COOKIE_STORING_TAG);
+	xas_clear_tag(&xas, FSCACHE_COOKIE_PENDING_TAG);
+	xas_unlock(&xas);
 
-	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
 	fscache_stat(&fscache_n_store_pages);
@@ -848,7 +840,7 @@ static void fscache_write_op(struct fscache_operation *_op)
 	/* this writer is going away and there aren't any more things to
 	 * write */
 	_debug("cease");
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 	spin_unlock(&object->lock);
 	fscache_op_complete(&op->op, true);
@@ -860,32 +852,25 @@ static void fscache_write_op(struct fscache_operation *_op)
  */
 void fscache_invalidate_writes(struct fscache_cookie *cookie)
 {
+	XA_STATE(xas, &cookie->stores, 0);
+	unsigned int cleared = 0;
 	struct page *page;
-	void *results[16];
-	int n, i;
 
 	_enter("");
 
-	for (;;) {
-		spin_lock(&cookie->stores_lock);
-		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
-					       ARRAY_SIZE(results),
-					       FSCACHE_COOKIE_PENDING_TAG);
-		if (n == 0) {
-			spin_unlock(&cookie->stores_lock);
-			break;
-		}
-
-		for (i = n - 1; i >= 0; i--) {
-			page = results[i];
-			radix_tree_delete(&cookie->stores, page->index);
-		}
+	xas_lock(&xas);
+	xas_for_each_tag(&xas, page, ULONG_MAX, FSCACHE_COOKIE_PENDING_TAG) {
+		xas_store(&xas, NULL);
+		put_page(page);
+		if (++cleared % XA_CHECK_SCHED)
+			continue;
 
-		spin_unlock(&cookie->stores_lock);
-
-		for (i = n - 1; i >= 0; i--)
-			put_page(results[i]);
+		xas_pause(&xas);
+		xas_unlock(&xas);
+		cond_resched();
+		xas_lock(&xas);
 	}
+	xas_unlock(&xas);
 
 	wake_up_bit(&cookie->flags, 0);
 
@@ -925,9 +910,11 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 			 struct page *page,
 			 gfp_t gfp)
 {
+	XA_STATE(xas, &cookie->stores, page->index);
 	struct fscache_storage *op;
 	struct fscache_object *object;
 	bool wake_cookie = false;
+	struct page *xpage;
 	int ret;
 
 	_enter("%p,%x,", cookie, (u32) page->flags);
@@ -952,10 +939,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 		(1 << FSCACHE_OP_WAITING) |
 		(1 << FSCACHE_OP_UNUSE_COOKIE);
 
-	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
-	if (ret < 0)
-		goto nomem_free;
-
+retry:
 	ret = -ENOBUFS;
 	spin_lock(&cookie->lock);
 
@@ -967,23 +951,19 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
 		goto nobufs;
 
-	/* add the page to the pending-storage radix tree on the backing
-	 * object */
+	/* add the page to the pending-storage xarray on the backing object */
 	spin_lock(&object->lock);
-	spin_lock(&cookie->stores_lock);
+	xas_lock(&xas);
 
 	_debug("store limit %llx", (unsigned long long) object->store_limit);
 
-	ret = radix_tree_insert(&cookie->stores, page->index, page);
-	if (ret < 0) {
-		if (ret == -EEXIST)
-			goto already_queued;
-		_debug("insert failed %d", ret);
+	xpage = xas_create(&xas);
+	if (xpage)
+		goto already_queued;
+	if (xas_error(&xas))
 		goto nobufs_unlock_obj;
-	}
-
-	radix_tree_tag_set(&cookie->stores, page->index,
-			   FSCACHE_COOKIE_PENDING_TAG);
+	xas_store(&xas, page);
+	xas_set_tag(&xas, FSCACHE_COOKIE_PENDING_TAG);
 	get_page(page);
 
 	/* we only want one writer at a time, but we do need to queue new
@@ -991,7 +971,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
 		goto already_pending;
 
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
 
 	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
@@ -1002,7 +982,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 		goto submit_failed;
 
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
 	fscache_stat(&fscache_n_store_ops);
 	fscache_stat(&fscache_n_stores_ok);
 
@@ -1014,30 +993,31 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 already_queued:
 	fscache_stat(&fscache_n_stores_again);
 already_pending:
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
 	fscache_put_operation(&op->op);
 	fscache_stat(&fscache_n_stores_ok);
 	_leave(" = 0");
 	return 0;
 
 submit_failed:
-	spin_lock(&cookie->stores_lock);
-	radix_tree_delete(&cookie->stores, page->index);
-	spin_unlock(&cookie->stores_lock);
+	xa_erase(&cookie->stores, page->index);
 	wake_cookie = __fscache_unuse_cookie(cookie);
 	put_page(page);
 	ret = -ENOBUFS;
 	goto nobufs;
 
 nobufs_unlock_obj:
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
+	spin_unlock(&cookie->lock);
+	if (xas_nomem(&xas, gfp))
+		goto retry;
+	goto nobufs2;
 nobufs:
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
+nobufs2:
 	fscache_put_operation(&op->op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
@@ -1045,8 +1025,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	_leave(" = -ENOBUFS");
 	return -ENOBUFS;
 
-nomem_free:
-	fscache_put_operation(&op->op);
 nomem:
 	fscache_stat(&fscache_n_stores_oom);
 	_leave(" = -ENOMEM");
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 7ac6e839b065..9c012b4229cd 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -63,7 +63,7 @@ atomic_t fscache_n_stores_oom;
 atomic_t fscache_n_store_ops;
 atomic_t fscache_n_store_calls;
 atomic_t fscache_n_store_pages;
-atomic_t fscache_n_store_radix_deletes;
+atomic_t fscache_n_store_xarray_deletes;
 atomic_t fscache_n_store_pages_over_limit;
 
 atomic_t fscache_n_store_vmscan_not_storing;
@@ -232,11 +232,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_stores_again),
 		   atomic_read(&fscache_n_stores_nobufs),
 		   atomic_read(&fscache_n_stores_oom));
-	seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+	seq_printf(m, "Stores : ops=%u run=%u pgs=%u xar=%u olm=%u\n",
 		   atomic_read(&fscache_n_store_ops),
 		   atomic_read(&fscache_n_store_calls),
 		   atomic_read(&fscache_n_store_pages),
-		   atomic_read(&fscache_n_store_radix_deletes),
+		   atomic_read(&fscache_n_store_xarray_deletes),
 		   atomic_read(&fscache_n_store_pages_over_limit));
 
 	seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6a2f631a913f..74ea31368c09 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -22,7 +22,7 @@
 #include <linux/list.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 
 #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
 #define fscache_available() (1)
@@ -175,9 +175,9 @@ struct fscache_cookie {
 	const struct fscache_cookie_def	*def;		/* definition */
 	struct fscache_cookie		*parent;	/* parent of this entry */
 	void				*netfs_data;	/* back pointer to netfs */
-	struct radix_tree_root		stores;		/* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG	0		/* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG	1		/* pages tag: writing to cache */
+	struct xarray			stores;		/* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG	XA_TAG_0	/* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG	XA_TAG_1	/* pages tag: writing to cache */
 
 	unsigned long			flags;
 #define FSCACHE_COOKIE_LOOKING_UP	0	/* T if non-index cookie being looked up still */
-- 
2.15.1

WARNING: multiple messages have this Message-ID (diff)
From: Matthew Wilcox <willy@infradead.org>
To: linux-kernel@vger.kernel.org
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	David Howells <dhowells@redhat.com>, Shaohua Li <shli@kernel.org>,
	Jens Axboe <axboe@kernel.dk>, Rehas Sachdeva <aquannie@gmail.com>,
	Marc Zyngier <marc.zyngier@arm.com>,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-nilfs@vger.kernel.org, linux-btrfs@vger.kernel.org,
	linux-xfs@vger.kernel.org, linux-usb@vger.kernel.org,
	linux-raid@vger.kernel.org
Subject: [PATCH v5 78/78] fscache: Convert to XArray
Date: Fri, 15 Dec 2017 14:04:50 -0800	[thread overview]
Message-ID: <20171215220450.7899-79-willy@infradead.org> (raw)
In-Reply-To: <20171215220450.7899-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

Removes another user of radix_tree_preload().

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 fs/fscache/cookie.c     |   6 +-
 fs/fscache/internal.h   |   2 +-
 fs/fscache/object.c     |   2 +-
 fs/fscache/page.c       | 152 +++++++++++++++++++++---------------------------
 fs/fscache/stats.c      |   6 +-
 include/linux/fscache.h |   8 +--
 6 files changed, 76 insertions(+), 100 deletions(-)

diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index e9054e0c1a49..6d45134d609e 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -109,9 +109,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
 	cookie->netfs_data	= netfs_data;
 	cookie->flags		= (1 << FSCACHE_COOKIE_NO_DATA_YET);
 
-	/* radix tree insertion won't use the preallocation pool unless it's
-	 * told it may not wait */
-	INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+	xa_init(&cookie->stores);
 
 	switch (cookie->def->type) {
 	case FSCACHE_COOKIE_TYPE_INDEX:
@@ -608,7 +606,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 	/* Clear pointers back to the netfs */
 	cookie->netfs_data	= NULL;
 	cookie->def		= NULL;
-	BUG_ON(!radix_tree_empty(&cookie->stores));
+	BUG_ON(!xa_empty(&cookie->stores));
 
 	if (cookie->parent) {
 		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 0ff4b49a0037..468d9bd7f8c3 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -200,7 +200,7 @@ extern atomic_t fscache_n_stores_oom;
 extern atomic_t fscache_n_store_ops;
 extern atomic_t fscache_n_store_calls;
 extern atomic_t fscache_n_store_pages;
-extern atomic_t fscache_n_store_radix_deletes;
+extern atomic_t fscache_n_store_xarray_deletes;
 extern atomic_t fscache_n_store_pages_over_limit;
 
 extern atomic_t fscache_n_store_vmscan_not_storing;
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index aa0e71f02c33..ed165736a358 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -956,7 +956,7 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
 	 * retire the object instead.
 	 */
 	if (!fscache_use_cookie(object)) {
-		ASSERT(radix_tree_empty(&object->cookie->stores));
+		ASSERT(xa_empty(&object->cookie->stores));
 		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
 		_leave(" [no cookie]");
 		return transit_to(KILL_OBJECT);
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 961029e04027..315e2745f822 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -22,13 +22,7 @@
  */
 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
 {
-	void *val;
-
-	rcu_read_lock();
-	val = radix_tree_lookup(&cookie->stores, page->index);
-	rcu_read_unlock();
-
-	return val != NULL;
+	return xa_load(&cookie->stores, page->index) != NULL;
 }
 EXPORT_SYMBOL(__fscache_check_page_write);
 
@@ -64,15 +58,15 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 				  struct page *page,
 				  gfp_t gfp)
 {
+	XA_STATE(xas, &cookie->stores, page->index);
 	struct page *xpage;
-	void *val;
 
 	_enter("%p,%p,%x", cookie, page, gfp);
 
 try_again:
 	rcu_read_lock();
-	val = radix_tree_lookup(&cookie->stores, page->index);
-	if (!val) {
+	xpage = xas_load(&xas);
+	if (!xpage) {
 		rcu_read_unlock();
 		fscache_stat(&fscache_n_store_vmscan_not_storing);
 		__fscache_uncache_page(cookie, page);
@@ -81,31 +75,32 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 
 	/* see if the page is actually undergoing storage - if so we can't get
 	 * rid of it till the cache has finished with it */
-	if (radix_tree_tag_get(&cookie->stores, page->index,
-			       FSCACHE_COOKIE_STORING_TAG)) {
+	if (xas_get_tag(&xas, FSCACHE_COOKIE_STORING_TAG)) {
 		rcu_read_unlock();
+		xas_retry(&xas, XA_RETRY_ENTRY);
 		goto page_busy;
 	}
 
 	/* the page is pending storage, so we attempt to cancel the store and
 	 * discard the store request so that the page can be reclaimed */
-	spin_lock(&cookie->stores_lock);
+	xas_retry(&xas, XA_RETRY_ENTRY);
+	xas_lock(&xas);
 	rcu_read_unlock();
 
-	if (radix_tree_tag_get(&cookie->stores, page->index,
-			       FSCACHE_COOKIE_STORING_TAG)) {
+	xpage = xas_load(&xas);
+	if (xas_get_tag(&xas, FSCACHE_COOKIE_STORING_TAG)) {
 		/* the page started to undergo storage whilst we were looking,
 		 * so now we can only wait or return */
 		spin_unlock(&cookie->stores_lock);
 		goto page_busy;
 	}
 
-	xpage = radix_tree_delete(&cookie->stores, page->index);
+	xas_store(&xas, NULL);
 	spin_unlock(&cookie->stores_lock);
 
 	if (xpage) {
 		fscache_stat(&fscache_n_store_vmscan_cancelled);
-		fscache_stat(&fscache_n_store_radix_deletes);
+		fscache_stat(&fscache_n_store_xarray_deletes);
 		ASSERTCMP(xpage, ==, page);
 	} else {
 		fscache_stat(&fscache_n_store_vmscan_gone);
@@ -149,17 +144,19 @@ static void fscache_end_page_write(struct fscache_object *object,
 	spin_lock(&object->lock);
 	cookie = object->cookie;
 	if (cookie) {
+		XA_STATE(xas, &cookie->stores, page->index);
 		/* delete the page from the tree if it is now no longer
 		 * pending */
-		spin_lock(&cookie->stores_lock);
-		radix_tree_tag_clear(&cookie->stores, page->index,
-				     FSCACHE_COOKIE_STORING_TAG);
-		if (!radix_tree_tag_get(&cookie->stores, page->index,
-					FSCACHE_COOKIE_PENDING_TAG)) {
-			fscache_stat(&fscache_n_store_radix_deletes);
-			xpage = radix_tree_delete(&cookie->stores, page->index);
+		xas_lock(&xas);
+		xpage = xas_load(&xas);
+		xas_clear_tag(&xas, FSCACHE_COOKIE_STORING_TAG);
+		if (xas_get_tag(&xas, FSCACHE_COOKIE_PENDING_TAG)) {
+			xpage = NULL;
+		} else {
+			fscache_stat(&fscache_n_store_xarray_deletes);
+			xas_store(&xas, NULL);
 		}
-		spin_unlock(&cookie->stores_lock);
+		xas_unlock(&xas);
 		wake_up_bit(&cookie->flags, 0);
 	}
 	spin_unlock(&object->lock);
@@ -765,13 +762,12 @@ static void fscache_release_write_op(struct fscache_operation *_op)
  */
 static void fscache_write_op(struct fscache_operation *_op)
 {
+	XA_STATE(xas, NULL, 0);
 	struct fscache_storage *op =
 		container_of(_op, struct fscache_storage, op);
 	struct fscache_object *object = op->op.object;
 	struct fscache_cookie *cookie;
 	struct page *page;
-	unsigned n;
-	void *results[1];
 	int ret;
 
 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
@@ -804,29 +800,25 @@ static void fscache_write_op(struct fscache_operation *_op)
 		return;
 	}
 
-	spin_lock(&cookie->stores_lock);
+	xas.xa = &cookie->stores;
+	xas_lock(&xas);
 
 	fscache_stat(&fscache_n_store_calls);
 
 	/* find a page to store */
-	page = NULL;
-	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
-				       FSCACHE_COOKIE_PENDING_TAG);
-	if (n != 1)
+	page = xas_find_tag(&xas, ULONG_MAX, FSCACHE_COOKIE_PENDING_TAG);
+	if (!page)
 		goto superseded;
-	page = results[0];
-	_debug("gang %d [%lx]", n, page->index);
+	_debug("found %lx", page->index);
 	if (page->index >= op->store_limit) {
 		fscache_stat(&fscache_n_store_pages_over_limit);
 		goto superseded;
 	}
 
-	radix_tree_tag_set(&cookie->stores, page->index,
-			   FSCACHE_COOKIE_STORING_TAG);
-	radix_tree_tag_clear(&cookie->stores, page->index,
-			     FSCACHE_COOKIE_PENDING_TAG);
+	xas_set_tag(&xas, FSCACHE_COOKIE_STORING_TAG);
+	xas_clear_tag(&xas, FSCACHE_COOKIE_PENDING_TAG);
+	xas_unlock(&xas);
 
-	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
 	fscache_stat(&fscache_n_store_pages);
@@ -848,7 +840,7 @@ static void fscache_write_op(struct fscache_operation *_op)
 	/* this writer is going away and there aren't any more things to
 	 * write */
 	_debug("cease");
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 	spin_unlock(&object->lock);
 	fscache_op_complete(&op->op, true);
@@ -860,32 +852,25 @@ static void fscache_write_op(struct fscache_operation *_op)
  */
 void fscache_invalidate_writes(struct fscache_cookie *cookie)
 {
+	XA_STATE(xas, &cookie->stores, 0);
+	unsigned int cleared = 0;
 	struct page *page;
-	void *results[16];
-	int n, i;
 
 	_enter("");
 
-	for (;;) {
-		spin_lock(&cookie->stores_lock);
-		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
-					       ARRAY_SIZE(results),
-					       FSCACHE_COOKIE_PENDING_TAG);
-		if (n == 0) {
-			spin_unlock(&cookie->stores_lock);
-			break;
-		}
-
-		for (i = n - 1; i >= 0; i--) {
-			page = results[i];
-			radix_tree_delete(&cookie->stores, page->index);
-		}
+	xas_lock(&xas);
+	xas_for_each_tag(&xas, page, ULONG_MAX, FSCACHE_COOKIE_PENDING_TAG) {
+		xas_store(&xas, NULL);
+		put_page(page);
+		if (++cleared % XA_CHECK_SCHED)
+			continue;
 
-		spin_unlock(&cookie->stores_lock);
-
-		for (i = n - 1; i >= 0; i--)
-			put_page(results[i]);
+		xas_pause(&xas);
+		xas_unlock(&xas);
+		cond_resched();
+		xas_lock(&xas);
 	}
+	xas_unlock(&xas);
 
 	wake_up_bit(&cookie->flags, 0);
 
@@ -925,9 +910,11 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 			 struct page *page,
 			 gfp_t gfp)
 {
+	XA_STATE(xas, &cookie->stores, page->index);
 	struct fscache_storage *op;
 	struct fscache_object *object;
 	bool wake_cookie = false;
+	struct page *xpage;
 	int ret;
 
 	_enter("%p,%x,", cookie, (u32) page->flags);
@@ -952,10 +939,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 		(1 << FSCACHE_OP_WAITING) |
 		(1 << FSCACHE_OP_UNUSE_COOKIE);
 
-	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
-	if (ret < 0)
-		goto nomem_free;
-
+retry:
 	ret = -ENOBUFS;
 	spin_lock(&cookie->lock);
 
@@ -967,23 +951,19 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
 		goto nobufs;
 
-	/* add the page to the pending-storage radix tree on the backing
-	 * object */
+	/* add the page to the pending-storage xarray on the backing object */
 	spin_lock(&object->lock);
-	spin_lock(&cookie->stores_lock);
+	xas_lock(&xas);
 
 	_debug("store limit %llx", (unsigned long long) object->store_limit);
 
-	ret = radix_tree_insert(&cookie->stores, page->index, page);
-	if (ret < 0) {
-		if (ret == -EEXIST)
-			goto already_queued;
-		_debug("insert failed %d", ret);
+	xpage = xas_create(&xas);
+	if (xpage)
+		goto already_queued;
+	if (xas_error(&xas))
 		goto nobufs_unlock_obj;
-	}
-
-	radix_tree_tag_set(&cookie->stores, page->index,
-			   FSCACHE_COOKIE_PENDING_TAG);
+	xas_store(&xas, page);
+	xas_set_tag(&xas, FSCACHE_COOKIE_PENDING_TAG);
 	get_page(page);
 
 	/* we only want one writer at a time, but we do need to queue new
@@ -991,7 +971,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
 		goto already_pending;
 
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
 
 	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
@@ -1002,7 +982,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 		goto submit_failed;
 
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
 	fscache_stat(&fscache_n_store_ops);
 	fscache_stat(&fscache_n_stores_ok);
 
@@ -1014,30 +993,31 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 already_queued:
 	fscache_stat(&fscache_n_stores_again);
 already_pending:
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
 	fscache_put_operation(&op->op);
 	fscache_stat(&fscache_n_stores_ok);
 	_leave(" = 0");
 	return 0;
 
 submit_failed:
-	spin_lock(&cookie->stores_lock);
-	radix_tree_delete(&cookie->stores, page->index);
-	spin_unlock(&cookie->stores_lock);
+	xa_erase(&cookie->stores, page->index);
 	wake_cookie = __fscache_unuse_cookie(cookie);
 	put_page(page);
 	ret = -ENOBUFS;
 	goto nobufs;
 
 nobufs_unlock_obj:
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
+	spin_unlock(&cookie->lock);
+	if (xas_nomem(&xas, gfp))
+		goto retry;
+	goto nobufs2;
 nobufs:
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
+nobufs2:
 	fscache_put_operation(&op->op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
@@ -1045,8 +1025,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	_leave(" = -ENOBUFS");
 	return -ENOBUFS;
 
-nomem_free:
-	fscache_put_operation(&op->op);
 nomem:
 	fscache_stat(&fscache_n_stores_oom);
 	_leave(" = -ENOMEM");
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 7ac6e839b065..9c012b4229cd 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -63,7 +63,7 @@ atomic_t fscache_n_stores_oom;
 atomic_t fscache_n_store_ops;
 atomic_t fscache_n_store_calls;
 atomic_t fscache_n_store_pages;
-atomic_t fscache_n_store_radix_deletes;
+atomic_t fscache_n_store_xarray_deletes;
 atomic_t fscache_n_store_pages_over_limit;
 
 atomic_t fscache_n_store_vmscan_not_storing;
@@ -232,11 +232,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_stores_again),
 		   atomic_read(&fscache_n_stores_nobufs),
 		   atomic_read(&fscache_n_stores_oom));
-	seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+	seq_printf(m, "Stores : ops=%u run=%u pgs=%u xar=%u olm=%u\n",
 		   atomic_read(&fscache_n_store_ops),
 		   atomic_read(&fscache_n_store_calls),
 		   atomic_read(&fscache_n_store_pages),
-		   atomic_read(&fscache_n_store_radix_deletes),
+		   atomic_read(&fscache_n_store_xarray_deletes),
 		   atomic_read(&fscache_n_store_pages_over_limit));
 
 	seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6a2f631a913f..74ea31368c09 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -22,7 +22,7 @@
 #include <linux/list.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 
 #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
 #define fscache_available() (1)
@@ -175,9 +175,9 @@ struct fscache_cookie {
 	const struct fscache_cookie_def	*def;		/* definition */
 	struct fscache_cookie		*parent;	/* parent of this entry */
 	void				*netfs_data;	/* back pointer to netfs */
-	struct radix_tree_root		stores;		/* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG	0		/* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG	1		/* pages tag: writing to cache */
+	struct xarray			stores;		/* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG	XA_TAG_0	/* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG	XA_TAG_1	/* pages tag: writing to cache */
 
 	unsigned long			flags;
 #define FSCACHE_COOKIE_LOOKING_UP	0	/* T if non-index cookie being looked up still */
-- 
2.15.1

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">dont@kvack.org</a>

WARNING: multiple messages have this Message-ID (diff)
From: Matthew Wilcox <willy@infradead.org>
To: linux-kernel@vger.kernel.org
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	David Howells <dhowells@redhat.com>, Shaohua Li <shli@kernel.org>,
	Jens Axboe <axboe@kernel.dk>, Rehas Sachdeva <aquannie@gmail.com>,
	Marc Zyngier <marc.zyngier@arm.com>,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-nilfs@vger.kernel.org, linux-btrfs@vger.kernel.org,
	linux-xfs@vger.kernel.org, linux-usb@vger.kernel.org,
	linux-raid@vger.kernel.org
Subject: [v5,78/78] fscache: Convert to XArray
Date: Fri, 15 Dec 2017 14:04:50 -0800	[thread overview]
Message-ID: <20171215220450.7899-79-willy@infradead.org> (raw)

From: Matthew Wilcox <mawilcox@microsoft.com>

Removes another user of radix_tree_preload().

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 fs/fscache/cookie.c     |   6 +-
 fs/fscache/internal.h   |   2 +-
 fs/fscache/object.c     |   2 +-
 fs/fscache/page.c       | 152 +++++++++++++++++++++---------------------------
 fs/fscache/stats.c      |   6 +-
 include/linux/fscache.h |   8 +--
 6 files changed, 76 insertions(+), 100 deletions(-)

diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index e9054e0c1a49..6d45134d609e 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -109,9 +109,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
 	cookie->netfs_data	= netfs_data;
 	cookie->flags		= (1 << FSCACHE_COOKIE_NO_DATA_YET);
 
-	/* radix tree insertion won't use the preallocation pool unless it's
-	 * told it may not wait */
-	INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+	xa_init(&cookie->stores);
 
 	switch (cookie->def->type) {
 	case FSCACHE_COOKIE_TYPE_INDEX:
@@ -608,7 +606,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 	/* Clear pointers back to the netfs */
 	cookie->netfs_data	= NULL;
 	cookie->def		= NULL;
-	BUG_ON(!radix_tree_empty(&cookie->stores));
+	BUG_ON(!xa_empty(&cookie->stores));
 
 	if (cookie->parent) {
 		ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index 0ff4b49a0037..468d9bd7f8c3 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -200,7 +200,7 @@ extern atomic_t fscache_n_stores_oom;
 extern atomic_t fscache_n_store_ops;
 extern atomic_t fscache_n_store_calls;
 extern atomic_t fscache_n_store_pages;
-extern atomic_t fscache_n_store_radix_deletes;
+extern atomic_t fscache_n_store_xarray_deletes;
 extern atomic_t fscache_n_store_pages_over_limit;
 
 extern atomic_t fscache_n_store_vmscan_not_storing;
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index aa0e71f02c33..ed165736a358 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -956,7 +956,7 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
 	 * retire the object instead.
 	 */
 	if (!fscache_use_cookie(object)) {
-		ASSERT(radix_tree_empty(&object->cookie->stores));
+		ASSERT(xa_empty(&object->cookie->stores));
 		set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
 		_leave(" [no cookie]");
 		return transit_to(KILL_OBJECT);
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 961029e04027..315e2745f822 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -22,13 +22,7 @@
  */
 bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
 {
-	void *val;
-
-	rcu_read_lock();
-	val = radix_tree_lookup(&cookie->stores, page->index);
-	rcu_read_unlock();
-
-	return val != NULL;
+	return xa_load(&cookie->stores, page->index) != NULL;
 }
 EXPORT_SYMBOL(__fscache_check_page_write);
 
@@ -64,15 +58,15 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 				  struct page *page,
 				  gfp_t gfp)
 {
+	XA_STATE(xas, &cookie->stores, page->index);
 	struct page *xpage;
-	void *val;
 
 	_enter("%p,%p,%x", cookie, page, gfp);
 
 try_again:
 	rcu_read_lock();
-	val = radix_tree_lookup(&cookie->stores, page->index);
-	if (!val) {
+	xpage = xas_load(&xas);
+	if (!xpage) {
 		rcu_read_unlock();
 		fscache_stat(&fscache_n_store_vmscan_not_storing);
 		__fscache_uncache_page(cookie, page);
@@ -81,31 +75,32 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 
 	/* see if the page is actually undergoing storage - if so we can't get
 	 * rid of it till the cache has finished with it */
-	if (radix_tree_tag_get(&cookie->stores, page->index,
-			       FSCACHE_COOKIE_STORING_TAG)) {
+	if (xas_get_tag(&xas, FSCACHE_COOKIE_STORING_TAG)) {
 		rcu_read_unlock();
+		xas_retry(&xas, XA_RETRY_ENTRY);
 		goto page_busy;
 	}
 
 	/* the page is pending storage, so we attempt to cancel the store and
 	 * discard the store request so that the page can be reclaimed */
-	spin_lock(&cookie->stores_lock);
+	xas_retry(&xas, XA_RETRY_ENTRY);
+	xas_lock(&xas);
 	rcu_read_unlock();
 
-	if (radix_tree_tag_get(&cookie->stores, page->index,
-			       FSCACHE_COOKIE_STORING_TAG)) {
+	xpage = xas_load(&xas);
+	if (xas_get_tag(&xas, FSCACHE_COOKIE_STORING_TAG)) {
 		/* the page started to undergo storage whilst we were looking,
 		 * so now we can only wait or return */
 		spin_unlock(&cookie->stores_lock);
 		goto page_busy;
 	}
 
-	xpage = radix_tree_delete(&cookie->stores, page->index);
+	xas_store(&xas, NULL);
 	spin_unlock(&cookie->stores_lock);
 
 	if (xpage) {
 		fscache_stat(&fscache_n_store_vmscan_cancelled);
-		fscache_stat(&fscache_n_store_radix_deletes);
+		fscache_stat(&fscache_n_store_xarray_deletes);
 		ASSERTCMP(xpage, ==, page);
 	} else {
 		fscache_stat(&fscache_n_store_vmscan_gone);
@@ -149,17 +144,19 @@ static void fscache_end_page_write(struct fscache_object *object,
 	spin_lock(&object->lock);
 	cookie = object->cookie;
 	if (cookie) {
+		XA_STATE(xas, &cookie->stores, page->index);
 		/* delete the page from the tree if it is now no longer
 		 * pending */
-		spin_lock(&cookie->stores_lock);
-		radix_tree_tag_clear(&cookie->stores, page->index,
-				     FSCACHE_COOKIE_STORING_TAG);
-		if (!radix_tree_tag_get(&cookie->stores, page->index,
-					FSCACHE_COOKIE_PENDING_TAG)) {
-			fscache_stat(&fscache_n_store_radix_deletes);
-			xpage = radix_tree_delete(&cookie->stores, page->index);
+		xas_lock(&xas);
+		xpage = xas_load(&xas);
+		xas_clear_tag(&xas, FSCACHE_COOKIE_STORING_TAG);
+		if (xas_get_tag(&xas, FSCACHE_COOKIE_PENDING_TAG)) {
+			xpage = NULL;
+		} else {
+			fscache_stat(&fscache_n_store_xarray_deletes);
+			xas_store(&xas, NULL);
 		}
-		spin_unlock(&cookie->stores_lock);
+		xas_unlock(&xas);
 		wake_up_bit(&cookie->flags, 0);
 	}
 	spin_unlock(&object->lock);
@@ -765,13 +762,12 @@ static void fscache_release_write_op(struct fscache_operation *_op)
  */
 static void fscache_write_op(struct fscache_operation *_op)
 {
+	XA_STATE(xas, NULL, 0);
 	struct fscache_storage *op =
 		container_of(_op, struct fscache_storage, op);
 	struct fscache_object *object = op->op.object;
 	struct fscache_cookie *cookie;
 	struct page *page;
-	unsigned n;
-	void *results[1];
 	int ret;
 
 	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
@@ -804,29 +800,25 @@ static void fscache_write_op(struct fscache_operation *_op)
 		return;
 	}
 
-	spin_lock(&cookie->stores_lock);
+	xas.xa = &cookie->stores;
+	xas_lock(&xas);
 
 	fscache_stat(&fscache_n_store_calls);
 
 	/* find a page to store */
-	page = NULL;
-	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
-				       FSCACHE_COOKIE_PENDING_TAG);
-	if (n != 1)
+	page = xas_find_tag(&xas, ULONG_MAX, FSCACHE_COOKIE_PENDING_TAG);
+	if (!page)
 		goto superseded;
-	page = results[0];
-	_debug("gang %d [%lx]", n, page->index);
+	_debug("found %lx", page->index);
 	if (page->index >= op->store_limit) {
 		fscache_stat(&fscache_n_store_pages_over_limit);
 		goto superseded;
 	}
 
-	radix_tree_tag_set(&cookie->stores, page->index,
-			   FSCACHE_COOKIE_STORING_TAG);
-	radix_tree_tag_clear(&cookie->stores, page->index,
-			     FSCACHE_COOKIE_PENDING_TAG);
+	xas_set_tag(&xas, FSCACHE_COOKIE_STORING_TAG);
+	xas_clear_tag(&xas, FSCACHE_COOKIE_PENDING_TAG);
+	xas_unlock(&xas);
 
-	spin_unlock(&cookie->stores_lock);
 	spin_unlock(&object->lock);
 
 	fscache_stat(&fscache_n_store_pages);
@@ -848,7 +840,7 @@ static void fscache_write_op(struct fscache_operation *_op)
 	/* this writer is going away and there aren't any more things to
 	 * write */
 	_debug("cease");
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
 	spin_unlock(&object->lock);
 	fscache_op_complete(&op->op, true);
@@ -860,32 +852,25 @@ static void fscache_write_op(struct fscache_operation *_op)
  */
 void fscache_invalidate_writes(struct fscache_cookie *cookie)
 {
+	XA_STATE(xas, &cookie->stores, 0);
+	unsigned int cleared = 0;
 	struct page *page;
-	void *results[16];
-	int n, i;
 
 	_enter("");
 
-	for (;;) {
-		spin_lock(&cookie->stores_lock);
-		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
-					       ARRAY_SIZE(results),
-					       FSCACHE_COOKIE_PENDING_TAG);
-		if (n == 0) {
-			spin_unlock(&cookie->stores_lock);
-			break;
-		}
-
-		for (i = n - 1; i >= 0; i--) {
-			page = results[i];
-			radix_tree_delete(&cookie->stores, page->index);
-		}
+	xas_lock(&xas);
+	xas_for_each_tag(&xas, page, ULONG_MAX, FSCACHE_COOKIE_PENDING_TAG) {
+		xas_store(&xas, NULL);
+		put_page(page);
+		if (++cleared % XA_CHECK_SCHED)
+			continue;
 
-		spin_unlock(&cookie->stores_lock);
-
-		for (i = n - 1; i >= 0; i--)
-			put_page(results[i]);
+		xas_pause(&xas);
+		xas_unlock(&xas);
+		cond_resched();
+		xas_lock(&xas);
 	}
+	xas_unlock(&xas);
 
 	wake_up_bit(&cookie->flags, 0);
 
@@ -925,9 +910,11 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 			 struct page *page,
 			 gfp_t gfp)
 {
+	XA_STATE(xas, &cookie->stores, page->index);
 	struct fscache_storage *op;
 	struct fscache_object *object;
 	bool wake_cookie = false;
+	struct page *xpage;
 	int ret;
 
 	_enter("%p,%x,", cookie, (u32) page->flags);
@@ -952,10 +939,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 		(1 << FSCACHE_OP_WAITING) |
 		(1 << FSCACHE_OP_UNUSE_COOKIE);
 
-	ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
-	if (ret < 0)
-		goto nomem_free;
-
+retry:
 	ret = -ENOBUFS;
 	spin_lock(&cookie->lock);
 
@@ -967,23 +951,19 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
 		goto nobufs;
 
-	/* add the page to the pending-storage radix tree on the backing
-	 * object */
+	/* add the page to the pending-storage xarray on the backing object */
 	spin_lock(&object->lock);
-	spin_lock(&cookie->stores_lock);
+	xas_lock(&xas);
 
 	_debug("store limit %llx", (unsigned long long) object->store_limit);
 
-	ret = radix_tree_insert(&cookie->stores, page->index, page);
-	if (ret < 0) {
-		if (ret == -EEXIST)
-			goto already_queued;
-		_debug("insert failed %d", ret);
+	xpage = xas_create(&xas);
+	if (xpage)
+		goto already_queued;
+	if (xas_error(&xas))
 		goto nobufs_unlock_obj;
-	}
-
-	radix_tree_tag_set(&cookie->stores, page->index,
-			   FSCACHE_COOKIE_PENDING_TAG);
+	xas_store(&xas, page);
+	xas_set_tag(&xas, FSCACHE_COOKIE_PENDING_TAG);
 	get_page(page);
 
 	/* we only want one writer at a time, but we do need to queue new
@@ -991,7 +971,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
 		goto already_pending;
 
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
 
 	op->op.debug_id	= atomic_inc_return(&fscache_op_debug_id);
@@ -1002,7 +982,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 		goto submit_failed;
 
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
 	fscache_stat(&fscache_n_store_ops);
 	fscache_stat(&fscache_n_stores_ok);
 
@@ -1014,30 +993,31 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 already_queued:
 	fscache_stat(&fscache_n_stores_again);
 already_pending:
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
 	fscache_put_operation(&op->op);
 	fscache_stat(&fscache_n_stores_ok);
 	_leave(" = 0");
 	return 0;
 
 submit_failed:
-	spin_lock(&cookie->stores_lock);
-	radix_tree_delete(&cookie->stores, page->index);
-	spin_unlock(&cookie->stores_lock);
+	xa_erase(&cookie->stores, page->index);
 	wake_cookie = __fscache_unuse_cookie(cookie);
 	put_page(page);
 	ret = -ENOBUFS;
 	goto nobufs;
 
 nobufs_unlock_obj:
-	spin_unlock(&cookie->stores_lock);
+	xas_unlock(&xas);
 	spin_unlock(&object->lock);
+	spin_unlock(&cookie->lock);
+	if (xas_nomem(&xas, gfp))
+		goto retry;
+	goto nobufs2;
 nobufs:
 	spin_unlock(&cookie->lock);
-	radix_tree_preload_end();
+nobufs2:
 	fscache_put_operation(&op->op);
 	if (wake_cookie)
 		__fscache_wake_unused_cookie(cookie);
@@ -1045,8 +1025,6 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	_leave(" = -ENOBUFS");
 	return -ENOBUFS;
 
-nomem_free:
-	fscache_put_operation(&op->op);
 nomem:
 	fscache_stat(&fscache_n_stores_oom);
 	_leave(" = -ENOMEM");
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 7ac6e839b065..9c012b4229cd 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -63,7 +63,7 @@ atomic_t fscache_n_stores_oom;
 atomic_t fscache_n_store_ops;
 atomic_t fscache_n_store_calls;
 atomic_t fscache_n_store_pages;
-atomic_t fscache_n_store_radix_deletes;
+atomic_t fscache_n_store_xarray_deletes;
 atomic_t fscache_n_store_pages_over_limit;
 
 atomic_t fscache_n_store_vmscan_not_storing;
@@ -232,11 +232,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
 		   atomic_read(&fscache_n_stores_again),
 		   atomic_read(&fscache_n_stores_nobufs),
 		   atomic_read(&fscache_n_stores_oom));
-	seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
+	seq_printf(m, "Stores : ops=%u run=%u pgs=%u xar=%u olm=%u\n",
 		   atomic_read(&fscache_n_store_ops),
 		   atomic_read(&fscache_n_store_calls),
 		   atomic_read(&fscache_n_store_pages),
-		   atomic_read(&fscache_n_store_radix_deletes),
+		   atomic_read(&fscache_n_store_xarray_deletes),
 		   atomic_read(&fscache_n_store_pages_over_limit));
 
 	seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 6a2f631a913f..74ea31368c09 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -22,7 +22,7 @@
 #include <linux/list.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 
 #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
 #define fscache_available() (1)
@@ -175,9 +175,9 @@ struct fscache_cookie {
 	const struct fscache_cookie_def	*def;		/* definition */
 	struct fscache_cookie		*parent;	/* parent of this entry */
 	void				*netfs_data;	/* back pointer to netfs */
-	struct radix_tree_root		stores;		/* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG	0		/* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG	1		/* pages tag: writing to cache */
+	struct xarray			stores;		/* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG	XA_TAG_0	/* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG	XA_TAG_1	/* pages tag: writing to cache */
 
 	unsigned long			flags;
 #define FSCACHE_COOKIE_LOOKING_UP	0	/* T if non-index cookie being looked up still */

  parent reply	other threads:[~2017-12-15 22:04 UTC|newest]

Thread overview: 348+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-12-15 22:03 [PATCH v5 00/78] XArray v5 Matthew Wilcox
2017-12-15 22:03 ` Matthew Wilcox
2017-12-15 22:03 ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 01/78] xfs: Rename xa_ elements to ail_ Matthew Wilcox
2017-12-15 22:03   ` [v5,01/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 01/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2018-01-03  1:01   ` Darrick J. Wong
2018-01-03  1:01     ` [v5,01/78] " Darrick J. Wong
2018-01-03  1:01     ` [PATCH v5 01/78] " Darrick J. Wong
2018-01-03  1:01     ` Darrick J. Wong
2017-12-15 22:03 ` [PATCH v5 02/78] fscache: Use appropriate radix tree accessors Matthew Wilcox
2017-12-15 22:03   ` [v5,02/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 02/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 03/78] xarray: Add the xa_lock to the radix_tree_root Matthew Wilcox
2017-12-15 22:03   ` [v5,03/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 03/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-26 16:54   ` Kirill A. Shutemov
2017-12-26 16:54     ` [v5,03/78] " Kirill A. Shutemov
2017-12-26 16:54     ` [PATCH v5 03/78] " Kirill A. Shutemov
2017-12-27  3:43     ` Matthew Wilcox
2017-12-27  3:43       ` [v5,03/78] " Matthew Wilcox
2017-12-27  3:43       ` [PATCH v5 03/78] " Matthew Wilcox
2017-12-27  3:58       ` Matthew Wilcox
2017-12-27  3:58         ` [v5,03/78] " Matthew Wilcox
2017-12-27  3:58         ` [PATCH v5 03/78] " Matthew Wilcox
2017-12-27 10:18         ` Kirill A. Shutemov
2017-12-27 10:18           ` [v5,03/78] " Kirill A. Shutemov
2017-12-27 10:18           ` [PATCH v5 03/78] " Kirill A. Shutemov
2018-01-02 18:01         ` Darrick J. Wong
2018-01-02 18:01           ` [v5,03/78] " Darrick J. Wong
2018-01-02 18:01           ` [PATCH v5 03/78] " Darrick J. Wong
2018-01-02 22:41           ` Matthew Wilcox
2018-01-02 22:41             ` [v5,03/78] " Matthew Wilcox
2018-01-02 22:41             ` [PATCH v5 03/78] " Matthew Wilcox
2018-01-02 22:41             ` Matthew Wilcox
2017-12-27 10:17       ` Kirill A. Shutemov
2017-12-27 10:17         ` [v5,03/78] " Kirill A. Shutemov
2017-12-27 10:17         ` [PATCH v5 03/78] " Kirill A. Shutemov
2017-12-15 22:03 ` [PATCH v5 04/78] page cache: Use xa_lock Matthew Wilcox
2017-12-15 22:03   ` [v5,04/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 04/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-26 16:56   ` Kirill A. Shutemov
2017-12-26 16:56     ` [v5,04/78] " Kirill A. Shutemov
2017-12-26 16:56     ` [PATCH v5 04/78] " Kirill A. Shutemov
2017-12-15 22:03 ` [PATCH v5 05/78] xarray: Replace exceptional entries Matthew Wilcox
2017-12-15 22:03   ` [v5,05/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 05/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-26 17:15   ` Kirill A. Shutemov
2017-12-26 17:15     ` [v5,05/78] " Kirill A. Shutemov
2017-12-26 17:15     ` [PATCH v5 05/78] " Kirill A. Shutemov
     [not found]     ` <20171226171542.v25xieedd46y5peu-sVvlyX1904swdBt8bTSxpkEMvNT87kid@public.gmane.org>
2017-12-27  3:05       ` Matthew Wilcox
2017-12-27  3:05         ` [v5,05/78] " Matthew Wilcox
2017-12-27  3:05         ` [PATCH v5 05/78] " Matthew Wilcox
2017-12-27  3:05         ` Matthew Wilcox
2017-12-27 10:24         ` Kirill A. Shutemov
2017-12-27 10:24           ` [v5,05/78] " Kirill A. Shutemov
2017-12-27 10:24           ` [PATCH v5 05/78] " Kirill A. Shutemov
2017-12-15 22:03 ` [PATCH v5 06/78] xarray: Change definition of sibling entries Matthew Wilcox
2017-12-15 22:03   ` [v5,06/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 06/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-26 17:21   ` Kirill A. Shutemov
2017-12-26 17:21     ` [v5,06/78] " Kirill A. Shutemov
2017-12-26 17:21     ` [PATCH v5 06/78] " Kirill A. Shutemov
     [not found]     ` <20171226172153.pylgdefajcrthe3b-sVvlyX1904swdBt8bTSxpkEMvNT87kid@public.gmane.org>
2017-12-27  3:13       ` Matthew Wilcox
2017-12-27  3:13         ` [v5,06/78] " Matthew Wilcox
2017-12-27  3:13         ` [PATCH v5 06/78] " Matthew Wilcox
2017-12-27  3:13         ` Matthew Wilcox
2017-12-27 10:26         ` Kirill A. Shutemov
2017-12-27 10:26           ` [v5,06/78] " Kirill A. Shutemov
2017-12-27 10:26           ` [PATCH v5 06/78] " Kirill A. Shutemov
2017-12-15 22:03 ` [PATCH v5 07/78] xarray: Add definition of struct xarray Matthew Wilcox
2017-12-15 22:03   ` [v5,07/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 07/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 08/78] xarray: Define struct xa_node Matthew Wilcox
2017-12-15 22:03   ` [v5,08/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 08/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 09/78] xarray: Add documentation Matthew Wilcox
2017-12-15 22:03   ` [v5,09/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 09/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 10/78] xarray: Add xa_load Matthew Wilcox
2017-12-15 22:03   ` [v5,10/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 10/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 11/78] xarray: Add xa_get_tag, xa_set_tag and xa_clear_tag Matthew Wilcox
2017-12-15 22:03   ` [v5,11/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 11/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 12/78] xarray: Add xa_store Matthew Wilcox
2017-12-15 22:03   ` [v5,12/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 12/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 13/78] xarray: Add xa_cmpxchg Matthew Wilcox
2017-12-15 22:03   ` [v5,13/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 13/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 14/78] xarray: Add xa_for_each Matthew Wilcox
2017-12-15 22:03   ` [v5,14/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 14/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 15/78] xarray: Add xas_for_each_tag Matthew Wilcox
2017-12-15 22:03   ` [v5,15/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 15/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 16/78] xarray: Add xa_get_entries, xa_get_tagged and xa_get_maybe_tag Matthew Wilcox
2017-12-15 22:03   ` [v5,16/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 16/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 17/78] xarray: Add xa_destroy Matthew Wilcox
2017-12-15 22:03   ` [v5,17/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 17/78] " Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 18/78] xarray: Add xas_next and xas_prev Matthew Wilcox
2017-12-15 22:03   ` [v5,18/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 18/78] " Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 19/78] xarray: Add xas_create_range Matthew Wilcox
2017-12-15 22:03   ` [v5,19/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 19/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 20/78] xarray: Add MAINTAINERS entry Matthew Wilcox
2017-12-15 22:03   ` [v5,20/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 20/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 21/78] xarray: Add ability to store errno values Matthew Wilcox
2017-12-15 22:03   ` [v5,21/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 21/78] " Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 22/78] idr: Convert to XArray Matthew Wilcox
2017-12-15 22:03   ` [v5,22/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 22/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 23/78] ida: " Matthew Wilcox
2017-12-15 22:03   ` [v5,23/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 23/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 24/78] page cache: Convert hole search " Matthew Wilcox
2017-12-15 22:03   ` [v5,24/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 24/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 25/78] page cache: Add page_cache_range_empty function Matthew Wilcox
2017-12-15 22:03   ` [v5,25/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 25/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 26/78] page cache: Add and replace pages using the XArray Matthew Wilcox
2017-12-15 22:03   ` [v5,26/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 26/78] " Matthew Wilcox
2017-12-15 22:03 ` [PATCH v5 27/78] page cache: Convert page deletion to XArray Matthew Wilcox
2017-12-15 22:03   ` [v5,27/78] " Matthew Wilcox
2017-12-15 22:03   ` [PATCH v5 27/78] " Matthew Wilcox
2017-12-15 22:03   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 28/78] page cache: Convert page cache lookups " Matthew Wilcox
2017-12-15 22:04   ` [v5,28/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 28/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 29/78] page cache: Convert delete_batch " Matthew Wilcox
2017-12-15 22:04   ` [v5,29/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 29/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 30/78] page cache: Remove stray radix comment Matthew Wilcox
2017-12-15 22:04   ` [v5,30/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 30/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 31/78] mm: Convert page-writeback to XArray Matthew Wilcox
2017-12-15 22:04   ` [v5,31/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 31/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 32/78] mm: Convert workingset " Matthew Wilcox
2017-12-15 22:04   ` [v5,32/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 32/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 33/78] mm: Convert truncate " Matthew Wilcox
2017-12-15 22:04   ` [v5,33/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 33/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 34/78] mm: Convert add_to_swap_cache " Matthew Wilcox
2017-12-15 22:04   ` [v5,34/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 34/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 35/78] mm: Convert delete_from_swap_cache " Matthew Wilcox
2017-12-15 22:04   ` [v5,35/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 35/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 36/78] mm: Convert __do_page_cache_readahead " Matthew Wilcox
2017-12-15 22:04   ` [v5,36/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 36/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 37/78] mm: Convert page migration " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04   ` [v5,37/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 38/78] mm: Convert huge_memory " Matthew Wilcox
2017-12-15 22:04   ` [v5,38/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 38/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 39/78] mm: Convert collapse_shmem " Matthew Wilcox
2017-12-15 22:04   ` [v5,39/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 39/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 40/78] mm: Convert khugepaged_scan_shmem " Matthew Wilcox
2017-12-15 22:04   ` [v5,40/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 40/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 41/78] pagevec: Use xa_tag_t Matthew Wilcox
2017-12-15 22:04   ` [v5,41/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 41/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 42/78] shmem: Convert replace to XArray Matthew Wilcox
2017-12-15 22:04   ` [v5,42/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 42/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 43/78] shmem: Convert shmem_confirm_swap " Matthew Wilcox
2017-12-15 22:04   ` [v5,43/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 43/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 44/78] shmem: Convert find_swap_entry " Matthew Wilcox
2017-12-15 22:04   ` [v5,44/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 44/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 45/78] shmem: Convert shmem_tag_pins " Matthew Wilcox
2017-12-15 22:04   ` [v5,45/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 45/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 46/78] shmem: Convert shmem_wait_for_pins " Matthew Wilcox
2017-12-15 22:04   ` [v5,46/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 46/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 47/78] shmem: Convert shmem_add_to_page_cache " Matthew Wilcox
2017-12-15 22:04   ` [v5,47/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 47/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 48/78] shmem: Convert shmem_alloc_hugepage " Matthew Wilcox
2017-12-15 22:04   ` [v5,48/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 48/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 49/78] shmem: Convert shmem_free_swap " Matthew Wilcox
2017-12-15 22:04   ` [v5,49/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 49/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 50/78] shmem: Convert shmem_partial_swap_usage " Matthew Wilcox
2017-12-15 22:04   ` [v5,50/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 50/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 51/78] shmem: Comment fixups Matthew Wilcox
2017-12-15 22:04   ` [v5,51/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 51/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 52/78] btrfs: Convert page cache to XArray Matthew Wilcox
2017-12-15 22:04   ` [v5,52/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 52/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 53/78] fs: Convert buffer " Matthew Wilcox
2017-12-15 22:04   ` [v5,53/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 53/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 54/78] fs: Convert writeback " Matthew Wilcox
2017-12-15 22:04   ` [v5,54/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 54/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 55/78] nilfs2: Convert " Matthew Wilcox
2017-12-15 22:04   ` [v5,55/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 55/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 56/78] f2fs: " Matthew Wilcox
2017-12-15 22:04   ` [v5,56/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 56/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 57/78] lustre: " Matthew Wilcox
2017-12-15 22:04   ` [v5,57/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 57/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 58/78] dax: Convert dax_unlock_mapping_entry " Matthew Wilcox
2017-12-15 22:04   ` [v5,58/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 58/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 59/78] dax: Convert lock_slot " Matthew Wilcox
2017-12-15 22:04   ` [v5,59/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 59/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 60/78] dax: More XArray conversion Matthew Wilcox
2017-12-15 22:04   ` [v5,60/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 60/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 61/78] dax: Convert __dax_invalidate_mapping_entry to XArray Matthew Wilcox
2017-12-15 22:04   ` [v5,61/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 61/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 62/78] dax: Convert dax_writeback_one " Matthew Wilcox
2017-12-15 22:04   ` [v5,62/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 62/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 63/78] dax: Convert dax_insert_pfn_mkwrite " Matthew Wilcox
2017-12-15 22:04   ` [v5,63/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 63/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 64/78] dax: Convert dax_insert_mapping_entry " Matthew Wilcox
2017-12-15 22:04   ` [v5,64/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 64/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 65/78] dax: Convert grab_mapping_entry " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04   ` [v5,65/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 66/78] dax: Fix sparse warning Matthew Wilcox
2017-12-15 22:04   ` [v5,66/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 66/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 67/78] page cache: Finish XArray conversion Matthew Wilcox
2017-12-15 22:04   ` [v5,67/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 67/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 68/78] mm: Convert cgroup writeback to XArray Matthew Wilcox
2017-12-15 22:04   ` [v5,68/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 68/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 69/78] vmalloc: Convert " Matthew Wilcox
2017-12-15 22:04   ` [v5,69/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 69/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 70/78] brd: " Matthew Wilcox
2017-12-15 22:04   ` [v5,70/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 70/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 71/78] xfs: Convert m_perag_tree " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04   ` [v5,71/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 72/78] xfs: Convert pag_ici_root " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04   ` [v5,72/78] " Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 73/78] xfs: Convert xfs dquot " Matthew Wilcox
2017-12-15 22:04   ` [v5,73/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 73/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 74/78] xfs: Convert mru cache " Matthew Wilcox
2017-12-15 22:04   ` [v5,74/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 74/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 75/78] usb: Convert xhci-mem " Matthew Wilcox
2017-12-15 22:04   ` [v5,75/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 75/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 76/78] md: Convert raid5-cache " Matthew Wilcox
2017-12-15 22:04   ` [v5,76/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 76/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-15 22:04 ` [PATCH v5 77/78] irqdomain: Convert " Matthew Wilcox
2017-12-15 22:04   ` [v5,77/78] " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 77/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox
2017-12-16 10:51   ` Marc Zyngier
2017-12-16 10:51     ` [v5,77/78] " Marc Zyngier
2017-12-16 10:51     ` [PATCH v5 77/78] " Marc Zyngier
2017-12-15 22:04 ` Matthew Wilcox [this message]
2017-12-15 22:04   ` [v5,78/78] fscache: " Matthew Wilcox
2017-12-15 22:04   ` [PATCH v5 78/78] " Matthew Wilcox
2017-12-15 22:04   ` Matthew Wilcox

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20171215220450.7899-79-willy@infradead.org \
    --to=willy@infradead.org \
    --cc=aquannie@gmail.com \
    --cc=axboe@kernel.dk \
    --cc=dhowells@redhat.com \
    --cc=linux-btrfs@vger.kernel.org \
    --cc=linux-f2fs-devel@lists.sourceforge.net \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-nilfs@vger.kernel.org \
    --cc=linux-raid@vger.kernel.org \
    --cc=linux-usb@vger.kernel.org \
    --cc=linux-xfs@vger.kernel.org \
    --cc=marc.zyngier@arm.com \
    --cc=mawilcox@microsoft.com \
    --cc=ross.zwisler@linux.intel.com \
    --cc=shli@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.