From: Matthew Wilcox <willy@infradead.org>
To: linux-mm@kvack.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org
Cc: Matthew Wilcox <mawilcox@microsoft.com>, Jan Kara <jack@suse.cz>,
	Jeff Layton <jlayton@redhat.com>,
	Lukas Czerner <lczerner@redhat.com>,
	Ross Zwisler <ross.zwisler@linux.intel.com>,
	Christoph Hellwig <hch@lst.de>,
	Goldwyn Rodrigues <rgoldwyn@suse.com>,
	Nicholas Piggin <npiggin@gmail.com>,
	Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>,
	linux-nilfs@vger.kernel.org, Jaegeuk Kim <jaegeuk@kernel.org>,
	Chao Yu <yuchao0@huawei.com>,
	linux-f2fs-devel@lists.sourceforge.net
Subject: [PATCH v13 67/72] dax: Convert page fault handlers to XArray
Date: Mon, 11 Jun 2018 07:06:34 -0700
Message-ID: <20180611140639.17215-68-willy@infradead.org>
In-Reply-To: <20180611140639.17215-1-willy@infradead.org>

From: Matthew Wilcox <mawilcox@microsoft.com>

This is the last part of DAX to be converted to the XArray, so
remove all the old helper functions.
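
The fault handlers now construct an xa_state and pass it down to
grab_mapping_entry(), dax_insert_entry() and the hole-loading helpers
instead of passing a (mapping, index) pair around.  As a rough sketch
of the new lookup pattern (simplified; the real code in this patch
also handles PMD/PTE conflicts and the PMD downgrade path):

	XA_STATE(xas, &mapping->i_pages, index);
	void *entry;

retry:
	xas_lock_irq(&xas);
	xas_for_each_conflict(&xas, entry) {
		/* wait for locked entries, reject incompatible sizes */
	}
	if (!entry)
		xas_store(&xas, dax_make_entry(pfn_to_pfn_t(0),
					DAX_EMPTY | DAX_LOCKED));
	xas_unlock_irq(&xas);
	if (xas_nomem(&xas, GFP_NOIO))
		goto retry;	/* memory was allocated; try again */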

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 fs/dax.c | 391 ++++++++++++++++---------------------------------------
 1 file changed, 113 insertions(+), 278 deletions(-)
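
Note for reviewers: grab_mapping_entry() no longer returns an ERR_PTR
on failure; it encodes a VM_FAULT code as an XArray internal entry.
The callers in the diff below handle it roughly like this (simplified
sketch, using only names that appear in the diff):

	entry = grab_mapping_entry(&xas, mapping);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);	/* a vm_fault_t value */
		goto out;
	}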

diff --git a/fs/dax.c b/fs/dax.c
index f35913b8c5bb..de738e0ea456 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -93,10 +93,9 @@ static unsigned long dax_to_pfn(void *entry)
 	return xa_to_value(entry) >> DAX_SHIFT;
 }
 
-static void *dax_make_locked(unsigned long pfn, unsigned long flags)
+static void *dax_make_entry(pfn_t pfn, unsigned long flags)
 {
-	return xa_mk_value(flags | ((unsigned long)pfn << DAX_SHIFT) |
-			DAX_LOCKED);
+	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
 }
 
 static bool dax_is_locked(void *entry)
@@ -144,10 +143,11 @@ struct wait_exceptional_entry_queue {
 	struct exceptional_entry_key key;
 };
 
-static wait_queue_head_t *dax_entry_waitqueue(struct xarray *xa,
-		pgoff_t index, void *entry, struct exceptional_entry_key *key)
+static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
+		void *entry, struct exceptional_entry_key *key)
 {
 	unsigned long hash;
+	unsigned long index = xas->xa_index;
 
 	/*
 	 * If 'entry' is a PMD, align the 'index' that we use for the wait
@@ -156,11 +156,10 @@ static wait_queue_head_t *dax_entry_waitqueue(struct xarray *xa,
 	 */
 	if (dax_is_pmd_entry(entry))
 		index &= ~PG_PMD_COLOUR;
-
-	key->xa = xa;
+	key->xa = xas->xa;
 	key->entry_start = index;
 
-	hash = hash_long((unsigned long)xa ^ index, DAX_WAIT_TABLE_BITS);
+	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
 	return wait_table + hash;
 }
 
@@ -182,13 +181,12 @@ static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
  * The important information it's conveying is whether the entry at
  * this index used to be a PMD entry.
  */
-static void dax_wake_mapping_entry_waiter(struct xarray *xa,
-		pgoff_t index, void *entry, bool wake_all)
+static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
 {
 	struct exceptional_entry_key key;
 	wait_queue_head_t *wq;
 
-	wq = dax_entry_waitqueue(xa, index, entry, &key);
+	wq = dax_entry_waitqueue(xas, entry, &key);
 
 	/*
 	 * Checking for locked entry and prepare_to_wait_exclusive() happens
@@ -200,12 +198,6 @@ static void dax_wake_mapping_entry_waiter(struct xarray *xa,
 		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 }
 
-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
-{
-	return dax_wake_mapping_entry_waiter(xas->xa, xas->xa_index, entry,
-								wake_all);
-}
-
 /*
  * Look up entry in page cache, wait for it to become unlocked if it
  * is a DAX entry and return it.  The caller must subsequently call
@@ -229,8 +221,7 @@ static void *get_unlocked_entry(struct xa_state *xas)
 				!dax_is_locked(entry))
 			return entry;
 
-		wq = dax_entry_waitqueue(xas->xa, xas->xa_index, entry,
-				&ewait.key);
+		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
 		prepare_to_wait_exclusive(wq, &ewait.wait,
 					  TASK_UNINTERRUPTIBLE);
 		xas_unlock_irq(xas);
@@ -271,119 +262,6 @@ static void dax_lock_entry(struct xa_state *xas, void *entry)
 	xas_store(xas, xa_mk_value(v | DAX_LOCKED));
 }
 
-/*
- * Check whether the given slot is locked.  Must be called with the i_pages
- * lock held.
- */
-static inline int slot_locked(struct address_space *mapping, void **slot)
-{
-	unsigned long entry = xa_to_value(
-		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock));
-	return entry & DAX_LOCKED;
-}
-
-/*
- * Mark the given slot as locked.  Must be called with the i_pages lock held.
- */
-static inline void *lock_slot(struct address_space *mapping, void **slot)
-{
-	unsigned long v = xa_to_value(
-		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock));
-	void *entry = xa_mk_value(v | DAX_LOCKED);
-	radix_tree_replace_slot(&mapping->i_pages, slot, entry);
-	return entry;
-}
-
-/*
- * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
- */
-static inline void *unlock_slot(struct address_space *mapping, void **slot)
-{
-	unsigned long v = xa_to_value(
-		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock));
-	void *entry = xa_mk_value(v & ~DAX_LOCKED);
-	radix_tree_replace_slot(&mapping->i_pages, slot, entry);
-	return entry;
-}
-
-/*
- * Lookup entry in page cache, wait for it to become unlocked if it is
- * a DAX entry and return it. The caller must call
- * put_unlocked_mapping_entry() when he decided not to lock the entry or
- * put_locked_mapping_entry() when he locked the entry and now wants to
- * unlock it.
- *
- * Must be called with the i_pages lock held.
- */
-static void *get_unlocked_mapping_entry(struct address_space *mapping,
-					pgoff_t index, void ***slotp)
-{
-	void *entry, **slot;
-	struct wait_exceptional_entry_queue ewait;
-	wait_queue_head_t *wq;
-
-	init_wait(&ewait.wait);
-	ewait.wait.func = wake_exceptional_entry_func;
-
-	for (;;) {
-		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
-					  &slot);
-		if (!entry ||
-		    WARN_ON_ONCE(!xa_is_value(entry)) ||
-		    !slot_locked(mapping, slot)) {
-			if (slotp)
-				*slotp = slot;
-			return entry;
-		}
-
-		wq = dax_entry_waitqueue(&mapping->i_pages, index, entry,
-				&ewait.key);
-		prepare_to_wait_exclusive(wq, &ewait.wait,
-					  TASK_UNINTERRUPTIBLE);
-		xa_unlock_irq(&mapping->i_pages);
-		schedule();
-		finish_wait(wq, &ewait.wait);
-		xa_lock_irq(&mapping->i_pages);
-	}
-}
-
-static void dax_unlock_mapping_entry(struct address_space *mapping,
-				     pgoff_t index)
-{
-	void *entry, **slot;
-
-	xa_lock_irq(&mapping->i_pages);
-	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
-	if (WARN_ON_ONCE(!entry || !xa_is_value(entry) ||
-			 !slot_locked(mapping, slot))) {
-		xa_unlock_irq(&mapping->i_pages);
-		return;
-	}
-	unlock_slot(mapping, slot);
-	xa_unlock_irq(&mapping->i_pages);
-	dax_wake_mapping_entry_waiter(&mapping->i_pages, index, entry, false);
-}
-
-static void put_locked_mapping_entry(struct address_space *mapping,
-		pgoff_t index)
-{
-	dax_unlock_mapping_entry(mapping, index);
-}
-
-/*
- * Called when we are done with page cache entry we looked up via
- * get_unlocked_mapping_entry() and which we didn't lock in the end.
- */
-static void put_unlocked_mapping_entry(struct address_space *mapping,
-				       pgoff_t index, void *entry)
-{
-	if (!entry)
-		return;
-
-	/* We have to wake up next waiter for the page cache entry lock */
-	dax_wake_mapping_entry_waiter(&mapping->i_pages, index, entry, false);
-}
-
 static unsigned long dax_entry_size(void *entry)
 {
 	if (dax_is_zero_entry(entry))
@@ -470,9 +348,9 @@ static struct page *dax_busy_page(void *entry)
  * that index, add a locked empty entry.
  *
  * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
- * either return that locked entry or will return an error.  This error will
- * happen if there are any 4k entries within the 2MiB range that we are
- * requesting.
+ * either return that locked entry or will return VM_FAULT_FALLBACK.
+ * This will happen if there are any 4k entries within the 2MiB range
+ * that we are requesting.
  *
  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
@@ -488,29 +366,31 @@ static struct page *dax_busy_page(void *entry)
  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
  * persistent memory the benefit is doubtful. We can add that later if we can
  * show it helps.
+ *
+ * On error, this function does not return an ERR_PTR.  Instead it returns
+ * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
+ * overlap with xarray value entries.
  */
-static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
-		unsigned long size_flag)
+static
+void *grab_mapping_entry(struct xa_state *xas, struct address_space *mapping)
 {
 	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
-	void *entry, **slot;
-
-restart:
-	xa_lock_irq(&mapping->i_pages);
-	entry = get_unlocked_mapping_entry(mapping, index, &slot);
-
-	if (WARN_ON_ONCE(entry && !xa_is_value(entry))) {
-		entry = ERR_PTR(-EIO);
-		goto out_unlock;
-	}
+	void *locked = dax_make_entry(pfn_to_pfn_t(0),
+						DAX_EMPTY | DAX_LOCKED);
+	void *unlocked = dax_make_entry(pfn_to_pfn_t(0), DAX_EMPTY);
+	void *entry;
 
-	if (entry) {
-		if (size_flag & DAX_PMD) {
+retry:
+	xas_lock_irq(xas);
+	xas_for_each_conflict(xas, entry) {
+		if (dax_is_locked(entry))
+			entry = get_unlocked_entry(xas);
+		if (!xa_is_value(entry))
+			goto error;
+		if (xas->xa_shift) {
 			if (dax_is_pte_entry(entry)) {
-				put_unlocked_mapping_entry(mapping, index,
-						entry);
-				entry = ERR_PTR(-EEXIST);
-				goto out_unlock;
+				put_unlocked_entry(xas, entry);
+				goto fallback;
 			}
 		} else { /* trying to grab a PTE entry */
 			if (dax_is_pmd_entry(entry) &&
@@ -519,89 +399,56 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 				pmd_downgrade = true;
 			}
 		}
+		dax_lock_entry(xas, entry);
+		break;	/* We don't want to overwrite an existing entry */
 	}
 
-	/* No entry for given index? Make sure radix tree is big enough. */
-	if (!entry || pmd_downgrade) {
-		int err;
-
-		if (pmd_downgrade) {
-			/*
-			 * Make sure 'entry' remains valid while we drop
-			 * the i_pages lock.
-			 */
-			entry = lock_slot(mapping, slot);
-		}
+	if (!entry) {
+		xas_store(xas, locked);
+		if (xas_error(xas))
+			goto error;
+		entry = unlocked;
+		mapping->nrexceptional++;
+	}
 
-		xa_unlock_irq(&mapping->i_pages);
+	if (pmd_downgrade) {
 		/*
 		 * Besides huge zero pages the only other thing that gets
 		 * downgraded are empty entries which don't need to be
 		 * unmapped.
 		 */
-		if (pmd_downgrade && dax_is_zero_entry(entry))
-			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+		if (dax_is_zero_entry(entry)) {
+			xas_unlock_irq(xas);
+			unmap_mapping_pages(mapping, xas->xa_index,
 							PG_PMD_NR, false);
-
-		err = radix_tree_preload(
-				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
-		if (err) {
-			if (pmd_downgrade)
-				put_locked_mapping_entry(mapping, index);
-			return ERR_PTR(err);
-		}
-		xa_lock_irq(&mapping->i_pages);
-
-		if (!entry) {
-			/*
-			 * We needed to drop the i_pages lock while calling
-			 * radix_tree_preload() and we didn't have an entry to
-			 * lock.  See if another thread inserted an entry at
-			 * our index during this time.
-			 */
-			entry = __radix_tree_lookup(&mapping->i_pages, index,
-					NULL, &slot);
-			if (entry) {
-				radix_tree_preload_end();
-				xa_unlock_irq(&mapping->i_pages);
-				goto restart;
-			}
+			xas_reset(xas);
+			xas_lock_irq(xas);
 		}
 
-		if (pmd_downgrade) {
-			dax_disassociate_entry(entry, mapping, false);
-			radix_tree_delete(&mapping->i_pages, index);
+		dax_disassociate_entry(entry, mapping, false);
+		xas_store(xas, NULL);	/* undo the PMD join */
+		xas_store(xas, locked);	/* may fail with ENOMEM */
+		dax_wake_entry(xas, entry, true);
+		if (xas_error(xas)) {
 			mapping->nrexceptional--;
-			dax_wake_mapping_entry_waiter(&mapping->i_pages,
-					index, entry, true);
-		}
-
-		entry = dax_make_locked(0, size_flag | DAX_EMPTY);
-
-		err = __radix_tree_insert(&mapping->i_pages, index,
-				dax_entry_order(entry), entry);
-		radix_tree_preload_end();
-		if (err) {
-			xa_unlock_irq(&mapping->i_pages);
-			/*
-			 * Our insertion of a DAX entry failed, most likely
-			 * because we were inserting a PMD entry and it
-			 * collided with a PTE sized entry at a different
-			 * index in the PMD range.  We haven't inserted
-			 * anything into the radix tree and have no waiters to
-			 * wake.
-			 */
-			return ERR_PTR(err);
+			goto error;
 		}
-		/* Good, we have inserted empty locked entry into the tree. */
-		mapping->nrexceptional++;
-		xa_unlock_irq(&mapping->i_pages);
-		return entry;
+		entry = unlocked;
 	}
-	entry = lock_slot(mapping, slot);
- out_unlock:
-	xa_unlock_irq(&mapping->i_pages);
+
+	xas_unlock_irq(xas);
 	return entry;
+
+fallback:
+	xas_unlock_irq(xas);
+	return xa_mk_internal(VM_FAULT_FALLBACK);
+error:
+	xas_unlock_irq(xas);
+	if (xas_nomem(xas, GFP_NOIO))
+		goto retry;
+	if (xas->xa_node == XA_ERROR(-ENOMEM))
+		return xa_mk_internal(VM_FAULT_OOM);
+	return xa_mk_internal(VM_FAULT_SIGBUS);
 }
 
 /**
@@ -760,29 +607,25 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
  * already in the tree, we will skip the insertion and just dirty the PMD as
  * appropriate.
  */
-static void *dax_insert_entry(struct address_space *mapping,
-		struct vm_fault *vmf,
+static void *dax_insert_entry(struct xa_state *xas,
+		struct address_space *mapping, struct vm_fault *vmf,
 		void *entry, pfn_t pfn_t, unsigned long flags, bool dirty)
 {
-	struct radix_tree_root *pages = &mapping->i_pages;
-	unsigned long pfn = pfn_t_to_pfn(pfn_t);
-	pgoff_t index = vmf->pgoff;
-	void *new_entry;
-
+	void *new_entry = dax_make_entry(pfn_t, flags);
 	if (dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
 	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
+		unsigned long index = xas->xa_index;
 		/* we are replacing a zero page with block mapping */
 		if (dax_is_pmd_entry(entry))
-			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
-							PG_PMD_NR, false);
+			unmap_mapping_pages(mapping, index, PG_PMD_NR, false);
 		else /* pte entry */
-			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
+			unmap_mapping_pages(mapping, index, 1, false);
 	}
 
-	xa_lock_irq(pages);
-	new_entry = dax_make_locked(pfn, flags);
+	xas_reset(xas);
+	xas_lock_irq(xas);
 	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
 		dax_disassociate_entry(entry, mapping, false);
 		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
@@ -797,21 +640,16 @@ static void *dax_insert_entry(struct address_space *mapping,
 		 * existing entry is a PMD, we will just leave the PMD in the
 		 * tree and dirty it if necessary.
 		 */
-		struct radix_tree_node *node;
-		void **slot;
-		void *ret;
-
-		ret = __radix_tree_lookup(pages, index, &node, &slot);
-		WARN_ON_ONCE(ret != entry);
-		__radix_tree_replace(pages, node, slot,
-				     new_entry, NULL);
+		dax_lock_entry(xas, new_entry);
 		entry = new_entry;
 	}
 
-	if (dirty)
-		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);
+	if (dirty) {
+		xas_load(xas);	/* Walk the xa_state */
+		xas_set_tag(xas, PAGECACHE_TAG_DIRTY);
+	}
 
-	xa_unlock_irq(pages);
+	xas_unlock_irq(xas);
 	return entry;
 }
 
@@ -1079,15 +917,16 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
  * If this page is ever written to we will re-fault and change the mapping to
  * point to real DAX storage instead.
  */
-static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
-			 struct vm_fault *vmf)
+static vm_fault_t dax_load_hole(struct xa_state *xas,
+		struct address_space *mapping, void **entry,
+		struct vm_fault *vmf)
 {
 	struct inode *inode = mapping->host;
 	unsigned long vaddr = vmf->address;
 	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
 	vm_fault_t ret;
 
-	dax_insert_entry(mapping, vmf, entry, pfn,
+	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
 			DAX_ZERO_PAGE, false);
 
 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
@@ -1300,6 +1139,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct address_space *mapping = vma->vm_file->f_mapping;
+	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
 	struct inode *inode = mapping->host;
 	unsigned long vaddr = vmf->address;
 	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
@@ -1326,9 +1166,9 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	if (write && !vmf->cow_page)
 		flags |= IOMAP_WRITE;
 
-	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
-	if (IS_ERR(entry)) {
-		ret = dax_fault_return(PTR_ERR(entry));
+	entry = grab_mapping_entry(&xas, mapping);
+	if (xa_is_internal(entry)) {
+		ret = xa_to_internal(entry);
 		goto out;
 	}
 
@@ -1401,7 +1241,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		if (error < 0)
 			goto error_finish_iomap;
 
-		entry = dax_insert_entry(mapping, vmf, entry, pfn,
+		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
 						 0, write && !sync);
 
 		/*
@@ -1429,7 +1269,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (!write) {
-			ret = dax_load_hole(mapping, entry, vmf);
+			ret = dax_load_hole(&xas, mapping, &entry, vmf);
 			goto finish_iomap;
 		}
 		/*FALLTHRU*/
@@ -1456,21 +1296,20 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
 	}
  unlock_entry:
-	put_locked_mapping_entry(mapping, vmf->pgoff);
+	put_locked_entry(&xas, entry);
  out:
 	trace_dax_pte_fault_done(inode, vmf, ret);
 	return ret | major;
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
-		void *entry)
+static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
+		struct iomap *iomap, void **entry)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	struct inode *inode = mapping->host;
 	struct page *zero_page;
-	void *ret = NULL;
 	spinlock_t *ptl;
 	pmd_t pmd_entry;
 	pfn_t pfn;
@@ -1481,7 +1320,7 @@ static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 		goto fallback;
 
 	pfn = page_to_pfn_t(zero_page);
-	ret = dax_insert_entry(mapping, vmf, entry, pfn,
+	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
 			DAX_PMD | DAX_ZERO_PAGE, false);
 
 	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
@@ -1494,11 +1333,11 @@ static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 	pmd_entry = pmd_mkhuge(pmd_entry);
 	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
 	spin_unlock(ptl);
-	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
+	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
 	return VM_FAULT_NOPAGE;
 
 fallback:
-	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
+	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
 	return VM_FAULT_FALLBACK;
 }
 
@@ -1507,6 +1346,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct address_space *mapping = vma->vm_file->f_mapping;
+	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
 	unsigned long pmd_addr = vmf->address & PMD_MASK;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	bool sync;
@@ -1514,7 +1354,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	struct inode *inode = mapping->host;
 	vm_fault_t result = VM_FAULT_FALLBACK;
 	struct iomap iomap = { 0 };
-	pgoff_t max_pgoff, pgoff;
+	pgoff_t max_pgoff;
 	void *entry;
 	loff_t pos;
 	int error;
@@ -1525,7 +1365,6 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 * supposed to hold locks serializing us with truncate / punch hole so
 	 * this is a reliable test.
 	 */
-	pgoff = linear_page_index(vma, pmd_addr);
 	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
 
 	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
@@ -1550,24 +1389,21 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
 		goto fallback;
 
-	if (pgoff >= max_pgoff) {
-		result = VM_FAULT_SIGBUS;
-		goto out;
-	}
-
 	/* If the PMD would extend beyond the file size */
-	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
+	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
 		goto fallback;
 
 	/*
-	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
-	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
-	 * is already in the tree, for instance), it will return -EEXIST and
-	 * we just fall back to 4k entries.
+	 * grab_mapping_entry() will make sure we get a 2MiB empty entry,
+	 * a 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k
+	 * page is already in the tree, for instance), it will return
+	 * VM_FAULT_FALLBACK.
 	 */
-	entry = grab_mapping_entry(mapping, pgoff, DAX_PMD);
-	if (IS_ERR(entry))
+	entry = grab_mapping_entry(&xas, mapping);
+	if (xa_is_internal(entry)) {
+		result = xa_to_internal(entry);
 		goto fallback;
+	}
 
 	/*
 	 * It is possible, particularly with mixed reads & writes to private
@@ -1586,7 +1422,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 * setting up a mapping, so really we're using iomap_begin() as a way
 	 * to look up our filesystem block.
 	 */
-	pos = (loff_t)pgoff << PAGE_SHIFT;
+	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
 	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
 	if (error)
 		goto unlock_entry;
@@ -1602,7 +1438,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		if (error < 0)
 			goto finish_iomap;
 
-		entry = dax_insert_entry(mapping, vmf, entry, pfn,
+		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
 						DAX_PMD, write && !sync);
 
 		/*
@@ -1627,7 +1463,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	case IOMAP_HOLE:
 		if (WARN_ON_ONCE(write))
 			break;
-		result = dax_pmd_load_hole(vmf, &iomap, entry);
+		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -1650,13 +1486,12 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 				&iomap);
 	}
  unlock_entry:
-	put_locked_mapping_entry(mapping, pgoff);
+	put_locked_entry(&xas, entry);
  fallback:
 	if (result == VM_FAULT_FALLBACK) {
 		split_huge_pmd(vma, vmf->pmd, vmf->address);
 		count_vm_event(THP_FAULT_FALLBACK);
 	}
-out:
 	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
 	return result;
 }
-- 
2.17.1
