From: Matthew Wilcox <willy@infradead.org>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org,
	Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>,
	linux-nilfs@vger.kernel.org
Subject: [PATCH v9 28/61] page cache: Convert page cache lookups to XArray
Date: Tue, 13 Mar 2018 06:26:06 -0700
Message-ID: <20180313132639.17387-29-willy@infradead.org>
In-Reply-To: <20180313132639.17387-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

Introduce page_cache_pin() to factor out the logic common to the
various lookup routines:

find_get_entry
find_get_entries
find_get_pages_range
find_get_pages_contig
find_get_pages_range_tag
find_get_entries_tag
filemap_map_pages

By using the xa_state to control the iteration, we can remove most of
the gotos and just use the normal break/continue loop control flow.
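
For reference, every converted loop ends up with the same shape; a
condensed sketch (modelled on the find_get_pages_range() conversion
below, not a verbatim hunk):

	XA_STATE(xas, &mapping->i_pages, *start);
	struct page *page;

	rcu_read_lock();
	xas_for_each(&xas, page, end) {
		if (xas_retry(&xas, page))
			continue;	/* raced with an entry moving */
		if (xa_is_value(page))
			continue;	/* shadow or swap entry */
		if (!page_cache_pin(&xas, page))
			continue;	/* lost the speculative-get race */
		/* ... record page; break once enough are found ... */
	}
	rcu_read_unlock();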

Also convert the regression1 read-side to XArray since that simulates
the functions being modified here.
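
Callers still have to distinguish value entries (shadow, swap or DAX)
from real pages; an illustrative caller-side check, following the
pattern find_lock_entry() uses below:

	page = find_get_entry(mapping, offset);
	if (page && !xa_is_value(page)) {
		/* a real page with an elevated refcount; put_page() later */
	} else {
		/* NULL, or a value entry returned without a reference */
	}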

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 include/linux/pagemap.h                |   6 +-
 mm/filemap.c                           | 380 +++++++++------------------------
 tools/testing/radix-tree/regression1.c |  68 +++---
 3 files changed, 129 insertions(+), 325 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2f5d2d3ebaac..442977811b59 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -363,17 +363,17 @@ static inline unsigned find_get_pages(struct address_space *mapping,
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
 			       unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
-			pgoff_t end, int tag, unsigned int nr_pages,
+			pgoff_t end, xa_tag_t tag, unsigned int nr_pages,
 			struct page **pages);
 static inline unsigned find_get_pages_tag(struct address_space *mapping,
-			pgoff_t *index, int tag, unsigned int nr_pages,
+			pgoff_t *index, xa_tag_t tag, unsigned int nr_pages,
 			struct page **pages)
 {
 	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
 					nr_pages, pages);
 }
 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
-			int tag, unsigned int nr_entries,
+			xa_tag_t tag, unsigned int nr_entries,
 			struct page **entries, pgoff_t *indices);
 
 struct page *grab_cache_page_write_begin(struct address_space *mapping,
diff --git a/mm/filemap.c b/mm/filemap.c
index bdda1beda932..5a6c7c874d45 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1374,6 +1374,32 @@ pgoff_t page_cache_prev_gap(struct address_space *mapping,
 }
 EXPORT_SYMBOL(page_cache_prev_gap);
 
+/*
+ * page_cache_pin() - Try to pin a page in the page cache.
+ * @xas: The XArray operation state.
+ * @page: The page which has been previously found at this location.
+ *
+ * On success, the page has an elevated refcount, but is not locked.
+ * This implements the lockless pagecache protocol as described in
+ * include/linux/pagemap.h; see page_cache_get_speculative().
+ *
+ * Return: True if the page is still in the cache.
+ */
+static bool page_cache_pin(struct xa_state *xas, struct page *page)
+{
+	struct page *head = compound_head(page);
+	bool got = page_cache_get_speculative(head);
+
+	if (likely(got && (xas_reload(xas) == page) &&
+						(compound_head(page) == head)))
+		return true;
+
+	if (got)
+		put_page(head);
+	xas_reset(xas);
+	return false;
+}
+
 /**
  * find_get_entry - find and get a page cache entry
  * @mapping: the address_space to search
@@ -1389,51 +1415,21 @@ EXPORT_SYMBOL(page_cache_prev_gap);
  */
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 {
-	void **pagep;
-	struct page *head, *page;
+	XA_STATE(xas, &mapping->i_pages, offset);
+	struct page *page;
 
 	rcu_read_lock();
-repeat:
-	page = NULL;
-	pagep = radix_tree_lookup_slot(&mapping->i_pages, offset);
-	if (pagep) {
-		page = radix_tree_deref_slot(pagep);
-		if (unlikely(!page))
-			goto out;
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page))
-				goto repeat;
-			/*
-			 * A shadow entry of a recently evicted page,
-			 * or a swap entry from shmem/tmpfs.  Return
-			 * it without attempting to raise page count.
-			 */
-			goto out;
-		}
-
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
-			goto repeat;
-
-		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
+	do {
+		page = xas_load(&xas);
+		if (xas_retry(&xas, page))
+			continue;
+		if (!page || xa_is_value(page))
+			break;
+		if (!page_cache_pin(&xas, page))
+			continue;
+	} while (0);
 
-		/*
-		 * Has the page moved?
-		 * This is part of the lockless pagecache protocol. See
-		 * include/linux/pagemap.h for details.
-		 */
-		if (unlikely(page != *pagep)) {
-			put_page(head);
-			goto repeat;
-		}
-	}
-out:
 	rcu_read_unlock();
-
 	return page;
 }
 EXPORT_SYMBOL(find_get_entry);
@@ -1460,7 +1456,7 @@ struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
 
 repeat:
 	page = find_get_entry(mapping, offset);
-	if (page && !radix_tree_exception(page)) {
+	if (page && !xa_is_value(page)) {
 		lock_page(page);
 		/* Has the page been truncated? */
 		if (unlikely(page_mapping(page) != mapping)) {
@@ -1593,50 +1589,21 @@ unsigned find_get_entries(struct address_space *mapping,
 			  pgoff_t start, unsigned int nr_entries,
 			  struct page **entries, pgoff_t *indices)
 {
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, start);
+	struct page *page;
 	unsigned int ret = 0;
-	struct radix_tree_iter iter;
 
 	if (!nr_entries)
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
-		struct page *head, *page;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
+	xas_for_each(&xas, page, ULONG_MAX) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (!xa_is_value(page) && !page_cache_pin(&xas, page))
 			continue;
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-			/*
-			 * A shadow entry of a recently evicted page, a swap
-			 * entry from shmem/tmpfs or a DAX entry.  Return it
-			 * without attempting to raise page count.
-			 */
-			goto export;
-		}
-
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
-			goto repeat;
-
-		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
 
-		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
-export:
-		indices[ret] = iter.index;
+		indices[ret] = xas.xa_index;
 		entries[ret] = page;
 		if (++ret == nr_entries)
 			break;
@@ -1670,56 +1637,26 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 			      pgoff_t end, unsigned int nr_pages,
 			      struct page **pages)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, *start);
+	struct page *page;
 	unsigned ret = 0;
 
 	if (unlikely(!nr_pages))
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, *start) {
-		struct page *head, *page;
-
-		if (iter.index > end)
-			break;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
+	xas_for_each(&xas, page, end) {
+		if (xas_retry(&xas, page))
 			continue;
-
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-			/*
-			 * A shadow entry of a recently evicted page,
-			 * or a swap entry from shmem/tmpfs.  Skip
-			 * over it.
-			 */
+		/* Skip over shadow or swap entries */
+		if (xa_is_value(page))
+			continue;
+		if (!page_cache_pin(&xas, page))
 			continue;
-		}
-
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
-			goto repeat;
-
-		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
-
-		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
 
 		pages[ret] = page;
 		if (++ret == nr_pages) {
-			*start = pages[ret - 1]->index + 1;
+			*start = page->index + 1;
 			goto out;
 		}
 	}
@@ -1727,7 +1664,7 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 	/*
 	 * We come here when there is no page beyond @end. We take care to not
 	 * overflow the index @start as it confuses some of the callers. This
-	 * breaks the iteration when there is page at index -1 but that is
+	 * breaks the iteration when there is a page at index -1 but that is
 	 * already broken anyway.
 	 */
 	if (end == (pgoff_t)-1)
@@ -1755,57 +1692,28 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 			       unsigned int nr_pages, struct page **pages)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, index);
+	struct page *page;
 	unsigned int ret = 0;
 
 	if (unlikely(!nr_pages))
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_contig(slot, &mapping->i_pages, &iter, index) {
-		struct page *head, *page;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		/* The hole, there no reason to continue */
-		if (unlikely(!page))
-			break;
-
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-			/*
-			 * A shadow entry of a recently evicted page,
-			 * or a swap entry from shmem/tmpfs.  Stop
-			 * looking for contiguous pages.
-			 */
+	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (xa_is_value(page))
 			break;
-		}
-
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
-			goto repeat;
-
-		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
-
-		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
+		if (!page_cache_pin(&xas, page))
+			continue;
 
 		/*
 		 * must check mapping and index after taking the ref.
 		 * otherwise we can get both false positives and false
 		 * negatives, which is just confusing to the caller.
 		 */
-		if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
+		if (!page->mapping || page_to_pgoff(page) != xas.xa_index) {
 			put_page(page);
 			break;
 		}
@@ -1832,74 +1740,42 @@ EXPORT_SYMBOL(find_get_pages_contig);
  * @tag.   We update @index to index the next page for the traversal.
  */
 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
-			pgoff_t end, int tag, unsigned int nr_pages,
+			pgoff_t end, xa_tag_t tag, unsigned int nr_pages,
 			struct page **pages)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, *index);
+	struct page *page;
 	unsigned ret = 0;
 
 	if (unlikely(!nr_pages))
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, *index, tag) {
-		struct page *head, *page;
-
-		if (iter.index > end)
-			break;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
+	xas_for_each_tag(&xas, page, end, tag) {
+		if (xas_retry(&xas, page))
 			continue;
-
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-			/*
-			 * A shadow entry of a recently evicted page.
-			 *
-			 * Those entries should never be tagged, but
-			 * this tree walk is lockless and the tags are
-			 * looked up in bulk, one radix tree node at a
-			 * time, so there is a sizable window for page
-			 * reclaim to evict a page we saw tagged.
-			 *
-			 * Skip over it.
-			 */
+		/*
+		 * Shadow entries should never be tagged, but this iteration
+		 * is lockless so there is a window for page reclaim to evict
+		 * a page we saw tagged.  Skip over it.
+		 */
+		if (xa_is_value(page))
+			continue;
+		if (!page_cache_pin(&xas, page))
 			continue;
-		}
-
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
-			goto repeat;
-
-		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
-
-		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
 
 		pages[ret] = page;
 		if (++ret == nr_pages) {
-			*index = pages[ret - 1]->index + 1;
+			*index = page->index + 1;
 			goto out;
 		}
 	}
 
 	/*
-	 * We come here when we got at @end. We take care to not overflow the
+	 * We come here when we got to @end. We take care to not overflow the
 	 * index @index as it confuses some of the callers. This breaks the
-	 * iteration when there is page at index -1 but that is already broken
-	 * anyway.
+	 * iteration when there is a page at index -1 but that is already
+	 * broken anyway.
 	 */
 	if (end == (pgoff_t)-1)
 		*index = (pgoff_t)-1;
@@ -1925,54 +1801,24 @@ EXPORT_SYMBOL(find_get_pages_range_tag);
  * @tag.
  */
 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
-			int tag, unsigned int nr_entries,
+			xa_tag_t tag, unsigned int nr_entries,
 			struct page **entries, pgoff_t *indices)
 {
-	void **slot;
+	XA_STATE(xas, &mapping->i_pages, start);
+	struct page *page;
 	unsigned int ret = 0;
-	struct radix_tree_iter iter;
 
 	if (!nr_entries)
 		return 0;
 
 	rcu_read_lock();
-	radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start, tag) {
-		struct page *head, *page;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
+	xas_for_each_tag(&xas, page, ULONG_MAX, tag) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (!xa_is_value(page) && !page_cache_pin(&xas, page))
 			continue;
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-
-			/*
-			 * A shadow entry of a recently evicted page, a swap
-			 * entry from shmem/tmpfs or a DAX entry.  Return it
-			 * without attempting to raise page count.
-			 */
-			goto export;
-		}
-
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
-			goto repeat;
-
-		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
 
-		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
-export:
-		indices[ret] = iter.index;
+		indices[ret] = xas.xa_index;
 		entries[ret] = page;
 		if (++ret == nr_entries)
 			break;
@@ -2581,45 +2427,21 @@ EXPORT_SYMBOL(filemap_fault);
 void filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff)
 {
-	struct radix_tree_iter iter;
-	void **slot;
 	struct file *file = vmf->vma->vm_file;
 	struct address_space *mapping = file->f_mapping;
 	pgoff_t last_pgoff = start_pgoff;
 	unsigned long max_idx;
-	struct page *head, *page;
+	XA_STATE(xas, &mapping->i_pages, start_pgoff);
+	struct page *page;
 
 	rcu_read_lock();
-	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start_pgoff) {
-		if (iter.index > end_pgoff)
-			break;
-repeat:
-		page = radix_tree_deref_slot(slot);
-		if (unlikely(!page))
-			goto next;
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
+	xas_for_each(&xas, page, end_pgoff) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (xa_is_value(page))
 			goto next;
-		}
-
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
-			goto repeat;
-
-		/* The page was split under us? */
-		if (compound_head(page) != head) {
-			put_page(head);
-			goto repeat;
-		}
-
-		/* Has the page moved? */
-		if (unlikely(page != *slot)) {
-			put_page(head);
-			goto repeat;
-		}
+		if (!page_cache_pin(&xas, page))
+			continue;
 
 		if (!PageUptodate(page) ||
 				PageReadahead(page) ||
@@ -2638,10 +2460,10 @@ void filemap_map_pages(struct vm_fault *vmf,
 		if (file->f_ra.mmap_miss > 0)
 			file->f_ra.mmap_miss--;
 
-		vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
+		vmf->address += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
 		if (vmf->pte)
-			vmf->pte += iter.index - last_pgoff;
-		last_pgoff = iter.index;
+			vmf->pte += xas.xa_index - last_pgoff;
+		last_pgoff = xas.xa_index;
 		if (alloc_set_pte(vmf, NULL, page))
 			goto unlock;
 		unlock_page(page);
@@ -2654,8 +2476,6 @@ void filemap_map_pages(struct vm_fault *vmf,
 		/* Huge page is mapped? No need to proceed. */
 		if (pmd_trans_huge(*vmf->pmd))
 			break;
-		if (iter.index == end_pgoff)
-			break;
 	}
 	rcu_read_unlock();
 }
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index 0aece092f40e..cd84d7de45e6 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -58,7 +58,7 @@ static struct page *page_alloc(void)
 	struct page *p;
 	p = malloc(sizeof(struct page));
 	p->count = 1;
-	p->index = 1;
+	p->index = (unsigned long)p;
 	pthread_mutex_init(&p->lock, NULL);
 
 	return p;
@@ -77,53 +77,37 @@ static void page_free(struct page *p)
 	call_rcu(&p->rcu, page_rcu_free);
 }
 
+static bool page_cache_pin(struct xa_state *xas, struct page *page)
+{
+	pthread_mutex_lock(&page->lock);
+	if (!page->count) {
+		pthread_mutex_unlock(&page->lock);
+		goto fail;
+	}
+	/* don't actually update page refcount */
+	pthread_mutex_unlock(&page->lock);
+
+	/* Has the page moved? */
+	if (xas_reload(xas) == page)
+		return true;
+fail:
+	xas_reset(xas);
+	return false;
+}
+
 static unsigned find_get_pages(unsigned long start,
 			    unsigned int nr_pages, struct page **pages)
 {
-	unsigned int i;
-	unsigned int ret;
-	unsigned int nr_found;
+	XA_STATE(xas, &mt_tree, start);
+	struct page *page;
+	unsigned int ret = 0;
 
 	rcu_read_lock();
-restart:
-	nr_found = radix_tree_gang_lookup_slot(&mt_tree,
-				(void ***)pages, NULL, start, nr_pages);
-	ret = 0;
-	for (i = 0; i < nr_found; i++) {
-		struct page *page;
-repeat:
-		page = radix_tree_deref_slot((void **)pages[i]);
-		if (unlikely(!page))
+	xas_for_each(&xas, page, ULONG_MAX) {
+		if (xas_retry(&xas, page))
+			continue;
+		if (!page_cache_pin(&xas, page))
 			continue;
-
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				/*
-				 * Transient condition which can only trigger
-				 * when entry at index 0 moves out of or back
-				 * to root: none yet gotten, safe to restart.
-				 */
-				assert((start | i) == 0);
-				goto restart;
-			}
-			/*
-			 * No exceptional entries are inserted in this test.
-			 */
-			assert(0);
-		}
-
-		pthread_mutex_lock(&page->lock);
-		if (!page->count) {
-			pthread_mutex_unlock(&page->lock);
-			goto repeat;
-		}
-		/* don't actually update page refcount */
-		pthread_mutex_unlock(&page->lock);
-
-		/* Has the page moved? */
-		if (unlikely(page != *((void **)pages[i]))) {
-			goto repeat;
-		}
 
 		pages[ret] = page;
 		ret++;
-- 
2.16.1
