From: Matthew Wilcox <willy@infradead.org>
To: unlisted-recipients:; (no To-header on input)
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Jens Axboe <axboe@kernel.dk>, Rehas Sachdeva <aquannie@gmail.com>,
	linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-f2fs-devel@lists.sourceforge.net,
	linux-nilfs@vger.kernel.org, linux-btrfs@vger.kernel.org,
	linux-xfs@vger.kernel.org, linux-usb@vger.kernel.org,
	linux-kernel@vger.kernel.org
Subject: [PATCH v4 38/73] mm: Convert collapse_shmem to XArray
Date: Tue,  5 Dec 2017 16:41:24 -0800
Message-ID: <20171206004159.3755-39-willy@infradead.org>
In-Reply-To: <20171206004159.3755-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

I found another victim of the radix tree being hard to use.  Because
there was no call to radix_tree_preload(), khugepaged was allocating
radix_tree_nodes using GFP_ATOMIC.

I also converted a local_irq_save()/restore() pair to
local_irq_disable()/enable(); this path always runs with interrupts
enabled, so there are no flags worth saving.
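
For reference, here is the shape of the allocation loop this patch
adopts, as it appears in the diff below (a minimal sketch; the
surrounding khugepaged setup is elided).  Instead of allocating nodes
GFP_ATOMIC under the spinlock, it pre-creates every slot in
[start, end) up front, dropping the lock to allocate with GFP_KERNEL
whenever xas_create_range() runs out of memory:

	XA_STATE(xas, &mapping->pages, start);

	do {
		xas_lock_irq(&xas);
		/* Try to create nodes covering every index up to end - 1 */
		xas_create_range(&xas, end - 1);
		if (!xas_error(&xas))
			break;		/* all slots exist; keep the lock */
		xas_unlock_irq(&xas);
		/* Lock dropped: allocate a node with GFP_KERNEL and retry */
		if (!xas_nomem(&xas, GFP_KERNEL))
			goto out;	/* genuine allocation failure */
	} while (1);

Once this loop succeeds, the per-page scan that follows never needs to
allocate memory with the lock held.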

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 include/linux/swap.h |   4 +-
 mm/khugepaged.c      | 158 +++++++++++++++++++++------------------------------
 2 files changed, 67 insertions(+), 95 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 569a8ac4fe3f..9774f43d3e4f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -300,12 +300,12 @@ bool workingset_refault(void *shadow);
 void workingset_activation(struct page *page);
 
 /* Do not use directly, use workingset_lookup_update */
-void workingset_update_node(struct xa_node *node);
+void workingset_update_node(struct radix_tree_node *node);
 
 /* Returns workingset_update_node() if the mapping has shadow entries. */
 #define workingset_lookup_update(mapping)				\
 ({									\
-	xa_update_node_t __helper = workingset_update_node;		\
+	radix_tree_update_node_t __helper = workingset_update_node;	\
 	if (dax_mapping(mapping) || shmem_mapping(mapping))		\
 		__helper = NULL;					\
 	__helper;							\
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 55ade70c33bb..9f49d0cd61c2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1282,17 +1282,17 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *
  * Basic scheme is simple, details are more complex:
  *  - allocate and freeze a new huge page;
- *  - scan over radix tree replacing old pages the new one
+ *  - scan page cache replacing old pages with the new one
  *    + swap in pages if necessary;
  *    + fill in gaps;
- *    + keep old pages around in case if rollback is required;
- *  - if replacing succeed:
+ *    + keep old pages around in case rollback is required;
+ *  - if replacing succeeds:
  *    + copy data over;
  *    + free old pages;
  *    + unfreeze huge page;
  *  - if replacing failed;
  *    + put all pages back and unfreeze them;
- *    + restore gaps in the radix-tree;
+ *    + restore gaps in the page cache;
  *    + free huge page;
  */
 static void collapse_shmem(struct mm_struct *mm,
@@ -1300,12 +1300,11 @@ static void collapse_shmem(struct mm_struct *mm,
 		struct page **hpage, int node)
 {
 	gfp_t gfp;
-	struct page *page, *new_page, *tmp;
+	struct page *new_page;
 	struct mem_cgroup *memcg;
 	pgoff_t index, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &mapping->pages, start);
 	int nr_none = 0, result = SCAN_SUCCEED;
 
 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
@@ -1330,48 +1329,48 @@ static void collapse_shmem(struct mm_struct *mm,
 	__SetPageLocked(new_page);
 	BUG_ON(!page_ref_freeze(new_page, 1));
 
-
 	/*
-	 * At this point the new_page is 'frozen' (page_count() is zero), locked
-	 * and not up-to-date. It's safe to insert it into radix tree, because
-	 * nobody would be able to map it or use it in other way until we
-	 * unfreeze it.
+	 * At this point the new_page is 'frozen' (page_count() is zero),
+	 * locked and not up-to-date. It's safe to insert it into the page
+	 * cache, because nobody would be able to map it or use it in other
+	 * way until we unfreeze it.
 	 */
 
-	index = start;
-	xa_lock_irq(&mapping->pages);
-	radix_tree_for_each_slot(slot, &mapping->pages, &iter, start) {
-		int n = min(iter.index, end) - index;
-
-		/*
-		 * Handle holes in the radix tree: charge it from shmem and
-		 * insert relevant subpage of new_page into the radix-tree.
-		 */
-		if (n && !shmem_charge(mapping->host, n)) {
-			result = SCAN_FAIL;
+	/* This will be less messy when we use multi-index entries */
+	do {
+		xas_lock_irq(&xas);
+		xas_create_range(&xas, end - 1);
+		if (!xas_error(&xas))
 			break;
-		}
-		nr_none += n;
-		for (; index < min(iter.index, end); index++) {
-			radix_tree_insert(&mapping->pages, index,
-					new_page + (index % HPAGE_PMD_NR));
-		}
+		xas_unlock_irq(&xas);
+		if (!xas_nomem(&xas, GFP_KERNEL))
+			goto out;
+	} while (1);
 
-		/* We are done. */
-		if (index >= end)
-			break;
+	for (index = start; index < end; index++) {
+		struct page *page = xas_next(&xas);
+
+		VM_BUG_ON(index != xas.xa_index);
+		if (!page) {
+			if (!shmem_charge(mapping->host, 1)) {
+				result = SCAN_FAIL;
+				break;
+			}
+			xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
+			nr_none++;
+			continue;
+		}
 
-		page = radix_tree_deref_slot_protected(slot,
-				&mapping->pages.xa_lock);
 		if (xa_is_value(page) || !PageUptodate(page)) {
-			xa_unlock_irq(&mapping->pages);
+			xas_unlock_irq(&xas);
 			/* swap in or instantiate fallocated page */
 			if (shmem_getpage(mapping->host, index, &page,
 						SGP_NOHUGE)) {
 				result = SCAN_FAIL;
-				goto tree_unlocked;
+				goto xa_unlocked;
 			}
-			xa_lock_irq(&mapping->pages);
+			xas_lock_irq(&xas);
+			xas_set(&xas, index);
 		} else if (trylock_page(page)) {
 			get_page(page);
 		} else {
@@ -1391,7 +1390,7 @@ static void collapse_shmem(struct mm_struct *mm,
 			result = SCAN_TRUNCATED;
 			goto out_unlock;
 		}
-		xa_unlock_irq(&mapping->pages);
+		xas_unlock_irq(&xas);
 
 		if (isolate_lru_page(page)) {
 			result = SCAN_DEL_PAGE_LRU;
@@ -1402,17 +1401,16 @@ static void collapse_shmem(struct mm_struct *mm,
 			unmap_mapping_range(mapping, index << PAGE_SHIFT,
 					PAGE_SIZE, 0);
 
-		xa_lock_irq(&mapping->pages);
+		xas_lock(&xas);
+		xas_set(&xas, index);
 
-		slot = radix_tree_lookup_slot(&mapping->pages, index);
-		VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
-					&mapping->pages.xa_lock), page);
+		VM_BUG_ON_PAGE(page != xas_load(&xas), page);
 		VM_BUG_ON_PAGE(page_mapped(page), page);
 
 		/*
 		 * The page is expected to have page_count() == 3:
 		 *  - we hold a pin on it;
-		 *  - one reference from radix tree;
+		 *  - one reference from page cache;
 		 *  - one from isolate_lru_page;
 		 */
 		if (!page_ref_freeze(page, 3)) {
@@ -1427,56 +1425,30 @@ static void collapse_shmem(struct mm_struct *mm,
 		list_add_tail(&page->lru, &pagelist);
 
 		/* Finally, replace with the new page. */
-		radix_tree_replace_slot(&mapping->pages, slot,
-				new_page + (index % HPAGE_PMD_NR));
-
-		slot = radix_tree_iter_resume(slot, &iter);
-		index++;
+		xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
 		continue;
 out_lru:
-		xa_unlock_irq(&mapping->pages);
+		xas_unlock_irq(&xas);
 		putback_lru_page(page);
 out_isolate_failed:
 		unlock_page(page);
 		put_page(page);
-		goto tree_unlocked;
+		goto xa_unlocked;
 out_unlock:
 		unlock_page(page);
 		put_page(page);
 		break;
 	}
+	xas_unlock_irq(&xas);
 
-	/*
-	 * Handle hole in radix tree at the end of the range.
-	 * This code only triggers if there's nothing in radix tree
-	 * beyond 'end'.
-	 */
-	if (result == SCAN_SUCCEED && index < end) {
-		int n = end - index;
-
-		if (!shmem_charge(mapping->host, n)) {
-			result = SCAN_FAIL;
-			goto tree_locked;
-		}
-
-		for (; index < end; index++) {
-			radix_tree_insert(&mapping->pages, index,
-					new_page + (index % HPAGE_PMD_NR));
-		}
-		nr_none += n;
-	}
-
-tree_locked:
-	xa_unlock_irq(&mapping->pages);
-tree_unlocked:
-
+xa_unlocked:
 	if (result == SCAN_SUCCEED) {
-		unsigned long flags;
+		struct page *page, *tmp;
 		struct zone *zone = page_zone(new_page);
 
 		/*
-		 * Replacing old pages with new one has succeed, now we need to
-		 * copy the content and free old pages.
+		 * Replacing old pages with new one has succeeded, now we
+		 * need to copy the content and free the old pages.
 		 */
 		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
 			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
@@ -1490,16 +1462,16 @@ static void collapse_shmem(struct mm_struct *mm,
 			put_page(page);
 		}
 
-		local_irq_save(flags);
+		local_irq_disable();
 		__inc_node_page_state(new_page, NR_SHMEM_THPS);
 		if (nr_none) {
 			__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
 			__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
 		}
-		local_irq_restore(flags);
+		local_irq_enable();
 
 		/*
-		 * Remove pte page tables, so we can re-faulti
+		 * Remove pte page tables, so we can re-fault
 		 * the page as huge.
 		 */
 		retract_page_tables(mapping, start);
@@ -1514,37 +1486,37 @@ static void collapse_shmem(struct mm_struct *mm,
 
 		*hpage = NULL;
 	} else {
-		/* Something went wrong: rollback changes to the radix-tree */
+		struct page *page;
+		/* Something went wrong: roll back page cache changes */
 		shmem_uncharge(mapping->host, nr_none);
-		xa_lock_irq(&mapping->pages);
-		radix_tree_for_each_slot(slot, &mapping->pages, &iter, start) {
-			if (iter.index >= end)
-				break;
+		xas_lock_irq(&xas);
+		xas_set(&xas, start);
+		xas_for_each(&xas, page, end - 1) {
 			page = list_first_entry_or_null(&pagelist,
 					struct page, lru);
-			if (!page || iter.index < page->index) {
+			if (!page || xas.xa_index < page->index) {
 				if (!nr_none)
 					break;
 				nr_none--;
 				/* Put holes back where they were */
-				radix_tree_delete(&mapping->pages, iter.index);
+				xas_store(&xas, NULL);
 				continue;
 			}
 
-			VM_BUG_ON_PAGE(page->index != iter.index, page);
+			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
 
 			/* Unfreeze the page. */
 			list_del(&page->lru);
 			page_ref_unfreeze(page, 2);
-			radix_tree_replace_slot(&mapping->pages, slot, page);
-			slot = radix_tree_iter_resume(slot, &iter);
-			xa_unlock_irq(&mapping->pages);
+			xas_store(&xas, page);
+			xas_pause(&xas);
+			xas_unlock_irq(&xas);
 			putback_lru_page(page);
 			unlock_page(page);
-			xa_lock_irq(&mapping->pages);
+			xas_lock_irq(&xas);
 		}
 		VM_BUG_ON(nr_none);
-		xa_unlock_irq(&mapping->pages);
+		xas_unlock_irq(&xas);
 
 		/* Unfreeze new_page, caller would take care about freeing it */
 		page_ref_unfreeze(new_page, 1);
-- 
2.15.0


Thread overview: 533+ messages
2017-12-06  0:40 [PATCH v4 00/73] XArray version 4 Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 01/73] xfs: Rename xa_ elements to ail_ Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 02/73] xarray: Add the xa_lock to the radix_tree_root Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 03/73] page cache: Use xa_lock Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 04/73] xarray: Replace exceptional entries Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 05/73] xarray: Change definition of sibling entries Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 06/73] xarray: Add definition of struct xarray Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 07/73] xarray: Define struct xa_node Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 08/73] xarray: Add documentation Matthew Wilcox
2017-12-11 23:10   ` Randy Dunlap
2017-12-15  4:22     ` Matthew Wilcox
2017-12-15 12:34       ` Naming of tag operations in the XArray Matthew Wilcox
2017-12-19  0:16         ` Randy Dunlap
2017-12-15 17:10     ` Storing errors " Matthew Wilcox
2017-12-19  0:27       ` Randy Dunlap
2017-12-06  0:40 ` [PATCH v4 09/73] xarray: Add xa_load Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 10/73] xarray: Add xa_get_tag, xa_set_tag and xa_clear_tag Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 11/73] xarray: Add xa_store Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 12/73] xarray: Add xa_cmpxchg Matthew Wilcox
2017-12-06  0:40 ` [PATCH v4 13/73] xarray: Add xa_for_each Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 14/73] xarray: Add xas_for_each_tag Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 15/73] xarray: Add xa_get_entries, xa_get_tagged and xa_get_maybe_tag Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 16/73] xarray: Add xa_destroy Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 17/73] xarray: Add xas_next and xas_prev Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 18/73] xarray: Add xas_create_range Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 19/73] xarray: Add MAINTAINERS entry Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 20/73] idr: Convert to XArray Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 21/73] ida: " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 22/73] page cache: Convert hole search " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 23/73] page cache: Add page_cache_range_empty function Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 24/73] page cache: Add and replace pages using the XArray Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 25/73] page cache: Convert page deletion to XArray Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 26/73] page cache: Convert page cache lookups " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 27/73] page cache: Convert delete_batch " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 28/73] page cache: Remove stray radix comment Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 29/73] mm: Convert page-writeback to XArray Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 30/73] mm: Convert workingset " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 31/73] mm: Convert truncate " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 32/73] mm: Convert add_to_swap_cache " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 33/73] mm: Convert delete_from_swap_cache " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 34/73] mm: Convert cgroup writeback " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 35/73] mm: Convert __do_page_cache_readahead " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 36/73] mm: Convert page migration " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 37/73] mm: Convert huge_memory " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 38/73] mm: Convert collapse_shmem " Matthew Wilcox [this message]
2017-12-06  0:41 ` [PATCH v4 39/73] mm: Convert khugepaged_scan_shmem " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 40/73] pagevec: Use xa_tag_t Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 41/73] shmem: Convert replace to XArray Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 42/73] shmem: Convert shmem_confirm_swap " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 43/73] shmem: Convert find_swap_entry " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 44/73] shmem: Convert shmem_tag_pins " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 45/73] shmem: Convert shmem_wait_for_pins " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 46/73] shmem: Convert shmem_add_to_page_cache " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 47/73] shmem: Convert shmem_alloc_hugepage " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 48/73] shmem: Convert shmem_free_swap " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 49/73] shmem: Convert shmem_partial_swap_usage " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 50/73] shmem: Comment fixups Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 51/73] btrfs: Convert page cache to XArray Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 52/73] fs: Convert buffer " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 53/73] fs: Convert writeback " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 54/73] nilfs2: Convert " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 55/73] f2fs: " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 56/73] lustre: " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 57/73] dax: Convert dax_unlock_mapping_entry " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 58/73] dax: Convert lock_slot " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 59/73] dax: More XArray conversion Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 60/73] dax: Convert __dax_invalidate_mapping_entry to XArray Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 61/73] dax: Convert dax_writeback_one " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 62/73] dax: Convert dax_insert_pfn_mkwrite " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 63/73] dax: Convert dax_insert_mapping_entry " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 64/73] dax: Convert grab_mapping_entry " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 65/73] dax: Fix sparse warning Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 66/73] page cache: Finish XArray conversion Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 67/73] vmalloc: Convert to XArray Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 68/73] brd: " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 69/73] xfs: Convert m_perag_tree " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 70/73] xfs: Convert pag_ici_root " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 71/73] xfs: Convert xfs dquot " Matthew Wilcox
2017-12-06  0:41 ` [PATCH v4 72/73] xfs: Convert mru cache " Matthew Wilcox
2017-12-06  1:36   ` Dave Chinner
2017-12-06  2:02     ` Matthew Wilcox
2017-12-06  3:14       ` Dave Chinner
2017-12-06  4:45         ` Matthew Wilcox
2017-12-06  4:52           ` Matthew Wilcox
2017-12-06  8:44           ` Dave Chinner
2017-12-06 14:06             ` Matthew Wilcox
2017-12-07  0:38               ` Dave Chinner
2017-12-08 23:01                 ` Matthew Wilcox
2017-12-10 23:57                   ` Dave Chinner
2017-12-11  4:23                     ` Matthew Wilcox
2017-12-11 21:55                       ` Dave Chinner
2017-12-07 16:06               ` Theodore Ts'o
2017-12-07 22:22                 ` Dave Chinner
2017-12-08  4:45                   ` Byungchul Park
2017-12-08  7:25                     ` Dave Chinner
2017-12-08  9:27                       ` Byungchul Park
2017-12-08 17:35                         ` Alan Stern
2017-12-08 22:36                           ` Dave Chinner
2017-12-09 17:00                             ` Joe Perches
2017-12-11 21:43                               ` Dave Chinner
2017-12-11 22:12                                 ` Joe Perches
2017-12-11 22:43                                   ` Matthew Wilcox
2017-12-11 23:46                                     ` Joe Perches
2017-12-12 15:51                                       ` Alan Stern
2017-12-14 18:23                                     ` Joe Perches
2017-12-17  1:26                                     ` [RFC patch] checkpatch: Add a test for long function definitions (>200 lines) Joe Perches
2017-12-17 21:46                                       ` Linus Torvalds
2017-12-17 22:22                                         ` Joe Perches
2017-12-17 22:33                                         ` Luc Van Oostenryck
2017-12-11 23:38                                   ` [PATCH v4 72/73] xfs: Convert mru cache to XArray Dave Chinner
2017-12-21 12:05                                   ` Knut Omang
2017-12-07 22:38                 ` Lockdep is less useful than it was Matthew Wilcox
2017-12-07 22:39                   ` Matthew Wilcox
2017-12-08  0:14                   ` Dave Chinner
2017-12-08 15:27                   ` Theodore Ts'o
2017-12-08 18:14                     ` Matthew Wilcox
2017-12-08 22:47                       ` Dave Chinner
2017-12-06  0:41 ` [PATCH v4 73/73] usb: Convert xhci-mem to XArray Matthew Wilcox
2017-12-06  1:45 ` [PATCH v4 00/73] XArray version 4 Dave Chinner
2017-12-06  1:51   ` Dave Chinner
2017-12-06  1:53     ` Matthew Wilcox
2017-12-06  2:17       ` Dave Chinner
2017-12-06  2:27         ` Matthew Wilcox
2017-12-06  2:05   ` Matthew Wilcox
2017-12-06  2:38     ` Dave Chinner
2017-12-06 23:58 ` Ross Zwisler
2017-12-07  0:13   ` Matthew Wilcox
