* [PATCH] fs: dax: Adding new return type vm_fault_t
From: Souptick Joarder @ 2018-04-17 15:35 UTC
  To: mawilcox, ross.zwisler, viro; +Cc: linux-fsdevel

Use the new return type vm_fault_t for fault handlers. For now
this just documents that these functions return a VM_FAULT value
rather than an errno; once all instances are converted, vm_fault_t
will become a distinct type.

See commit 1c8f422059ae ("mm: change return type to vm_fault_t").

There was an existing bug inside dax_load_hole(): if
vm_insert_mixed() failed to allocate a page table, we would return
VM_FAULT_NOPAGE instead of VM_FAULT_OOM. Switching to
vmf_insert_mixed() fixes that.

vmf_insert_mixed_mkwrite() is a new wrapper that converts the errno
returned by vm_insert_mixed_mkwrite() into a vm_fault_t.
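
The errno-to-VM_FAULT conversion itself is done by the existing
dax_fault_return() helper, shown below only for context and
approximately as it reads in fs/dax.c after this patch;
vmf_insert_mixed() in mm performs an equivalent mapping, which is how
dax_load_hole() now reports VM_FAULT_OOM on a failed page table
allocation:

	static vm_fault_t dax_fault_return(int error)
	{
		if (error == 0)
			return VM_FAULT_NOPAGE;
		if (error == -ENOMEM)
			return VM_FAULT_OOM;	/* e.g. page table allocation failed */
		return VM_FAULT_SIGBUS;
	}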

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
---
 fs/dax.c            | 56 +++++++++++++++++++++++++++++++----------------------
 include/linux/dax.h |  4 ++--
 2 files changed, 35 insertions(+), 25 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 0276df9..d7112ad 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -869,12 +869,12 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
  * If this page is ever written to we will re-fault and change the mapping to
  * point to real DAX storage instead.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
 			 struct vm_fault *vmf)
 {
 	struct inode *inode = mapping->host;
 	unsigned long vaddr = vmf->address;
-	int ret = VM_FAULT_NOPAGE;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
 	struct page *zero_page;
 	void *entry2;
 
@@ -891,7 +891,7 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 		goto out;
 	}
 
-	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
+	ret = vmf_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
 out:
 	trace_dax_load_hole(inode, vmf, ret);
 	return ret;
@@ -1074,7 +1074,7 @@ int __dax_zero_page_range(struct block_device *bdev,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
-static int dax_fault_return(int error)
+static vm_fault_t dax_fault_return(int error)
 {
 	if (error == 0)
 		return VM_FAULT_NOPAGE;
@@ -1094,6 +1094,18 @@ static bool dax_fault_is_synchronous(unsigned long flags,
 		&& (iomap->flags & IOMAP_F_DIRTY);
 }
 
+static vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
+			unsigned long addr, pfn_t pfn)
+{
+	int error;
+	vm_fault_t vmf_ret;
+
+	error = vm_insert_mixed_mkwrite(vma, addr, pfn);
+	vmf_ret = dax_fault_return(error);
+
+	return vmf_ret;
+}
+
 static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			       int *iomap_errp, const struct iomap_ops *ops)
 {
@@ -1107,7 +1119,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	int error, major = 0;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	bool sync;
-	int vmf_ret = 0;
+	vm_fault_t vmf_ret = 0;
 	void *entry;
 	pfn_t pfn;
 
@@ -1225,14 +1237,12 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		}
 		trace_dax_insert_mapping(inode, vmf, entry);
 		if (write)
-			error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
+			vmf_ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
 		else
-			error = vm_insert_mixed(vma, vaddr, pfn);
+			vmf_ret = vmf_insert_mixed(vma, vaddr, pfn);
+
+		goto finish_iomap;
 
-		/* -EBUSY is fine, somebody else faulted on the same PTE */
-		if (error == -EBUSY)
-			error = 0;
-		break;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (!write) {
@@ -1270,7 +1280,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
+static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 		void *entry)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1309,7 +1319,7 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 	return VM_FAULT_FALLBACK;
 }
 
-static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			       const struct iomap_ops *ops)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -1319,7 +1329,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	bool sync;
 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
 	struct inode *inode = mapping->host;
-	int result = VM_FAULT_FALLBACK;
+	vm_fault_t result = VM_FAULT_FALLBACK;
 	struct iomap iomap = { 0 };
 	pgoff_t max_pgoff, pgoff;
 	void *entry;
@@ -1471,7 +1481,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	return result;
 }
 #else
-static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			       const struct iomap_ops *ops)
 {
 	return VM_FAULT_FALLBACK;
@@ -1491,7 +1501,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
  * has done all the necessary locking for page fault to proceed
  * successfully.
  */
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
 {
 	switch (pe_size) {
@@ -1515,14 +1525,14 @@ int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
  * DAX file.  It takes care of marking corresponding radix tree entry as dirty
  * as well.
  */
-static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
+static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
 				  enum page_entry_size pe_size,
 				  pfn_t pfn)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	void *entry, **slot;
 	pgoff_t index = vmf->pgoff;
-	int vmf_ret, error;
+	vm_fault_t vmf_ret;
 
 	spin_lock_irq(&mapping->tree_lock);
 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
@@ -1541,8 +1551,7 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
 	spin_unlock_irq(&mapping->tree_lock);
 	switch (pe_size) {
 	case PE_SIZE_PTE:
-		error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
-		vmf_ret = dax_fault_return(error);
+		vmf_ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
 		break;
 #ifdef CONFIG_FS_DAX_PMD
 	case PE_SIZE_PMD:
@@ -1568,8 +1577,8 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
  * stored persistently on the media and handles inserting of appropriate page
  * table entry.
  */
-int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
-			  pfn_t pfn)
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+		enum page_entry_size pe_size, pfn_t pfn)
 {
 	int err;
 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
@@ -1581,7 +1590,8 @@ int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 		len = PMD_SIZE;
 	else
 		WARN_ON_ONCE(1);
-	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
+	err = vfs_fsync_range(vmf->vma->vm_file,
+			start, start + len - 1, 1);
 	if (err)
 		return VM_FAULT_SIGBUS;
 	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 0185ecd..5b3a5ff 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -97,8 +97,8 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		const struct iomap_ops *ops);
 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 		    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
-int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
-			  pfn_t pfn);
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+		enum page_entry_size pe_size, pfn_t pfn);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 				      pgoff_t index);
-- 
1.9.1

