* [PATCH v2] fs: dax: Adding new return type vm_fault_t
@ 2018-04-21 17:14 Souptick Joarder
From: Souptick Joarder @ 2018-04-21 17:14 UTC (permalink / raw)
  To: mawilcox, ross.zwisler, viro; +Cc: linux-fsdevel, linux-kernel, linux-mm

Use the new return type vm_fault_t for fault handlers. For
now, this is just documenting that the function returns
a VM_FAULT value rather than an errno. Once all instances
are converted, vm_fault_t will become a distinct type.

See commit 1c8f422059ae ("mm: change return type to vm_fault_t").

There was an existing bug inside dax_load_hole(): if
vm_insert_mixed() failed to allocate a page table, we'd
return VM_FAULT_NOPAGE instead of VM_FAULT_OOM. With the
new vmf_insert_mixed() this issue is addressed.

vm_insert_mixed_mkwrite() has an inefficiency: when it
returns an error value, the driver has to convert it to a
vm_fault_t by hand. The new vmf_insert_mixed_mkwrite()
removes that limitation.
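
As an illustrative sketch (not part of this patch, with the call site
simplified for illustration), the difference at a typical insertion
point is:

	/* before: caller has to convert the errno by hand */
	int err = vm_insert_mixed(vma, vaddr, pfn);
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;

	/* after: the vmf_ variant already returns a vm_fault_t */
	return vmf_insert_mixed(vma, vaddr, pfn);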

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
---
 fs/dax.c            | 78 +++++++++++++++++++++++++----------------------------
 include/linux/dax.h |  4 +--
 2 files changed, 39 insertions(+), 43 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index aaec72de..3483237 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -905,12 +905,12 @@ static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
  * If this page is ever written to we will re-fault and change the mapping to
  * point to real DAX storage instead.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
 			 struct vm_fault *vmf)
 {
 	struct inode *inode = mapping->host;
 	unsigned long vaddr = vmf->address;
-	int ret = VM_FAULT_NOPAGE;
+	vm_fault_t ret = VM_FAULT_NOPAGE;
 	struct page *zero_page;
 	void *entry2;
 	pfn_t pfn;
@@ -929,7 +929,7 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 		goto out;
 	}
 
-	vm_insert_mixed(vmf->vma, vaddr, pfn);
+	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
 out:
 	trace_dax_load_hole(inode, vmf, ret);
 	return ret;
@@ -1112,7 +1112,7 @@ int __dax_zero_page_range(struct block_device *bdev,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
-static int dax_fault_return(int error)
+static vm_fault_t dax_fault_return(int error)
 {
 	if (error == 0)
 		return VM_FAULT_NOPAGE;
@@ -1132,7 +1132,7 @@ static bool dax_fault_is_synchronous(unsigned long flags,
 		&& (iomap->flags & IOMAP_F_DIRTY);
 }
 
-static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			       int *iomap_errp, const struct iomap_ops *ops)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -1145,18 +1145,18 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	int error, major = 0;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	bool sync;
-	int vmf_ret = 0;
+	vm_fault_t ret = 0;
 	void *entry;
 	pfn_t pfn;
 
-	trace_dax_pte_fault(inode, vmf, vmf_ret);
+	trace_dax_pte_fault(inode, vmf, ret);
 	/*
 	 * Check whether offset isn't beyond end of file now. Caller is supposed
 	 * to hold locks serializing us with truncate / punch hole so this is
 	 * a reliable test.
 	 */
 	if (pos >= i_size_read(inode)) {
-		vmf_ret = VM_FAULT_SIGBUS;
+		ret = VM_FAULT_SIGBUS;
 		goto out;
 	}
 
@@ -1165,7 +1165,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 
 	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
 	if (IS_ERR(entry)) {
-		vmf_ret = dax_fault_return(PTR_ERR(entry));
+		ret = dax_fault_return(PTR_ERR(entry));
 		goto out;
 	}
 
@@ -1176,7 +1176,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	 * retried.
 	 */
 	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
-		vmf_ret = VM_FAULT_NOPAGE;
+		ret = VM_FAULT_NOPAGE;
 		goto unlock_entry;
 	}
 
@@ -1189,7 +1189,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	if (iomap_errp)
 		*iomap_errp = error;
 	if (error) {
-		vmf_ret = dax_fault_return(error);
+		ret = dax_fault_return(error);
 		goto unlock_entry;
 	}
 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
@@ -1219,9 +1219,9 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			goto error_finish_iomap;
 
 		__SetPageUptodate(vmf->cow_page);
-		vmf_ret = finish_fault(vmf);
-		if (!vmf_ret)
-			vmf_ret = VM_FAULT_DONE_COW;
+		ret = finish_fault(vmf);
+		if (!ret)
+			ret = VM_FAULT_DONE_COW;
 		goto finish_iomap;
 	}
 
@@ -1257,23 +1257,20 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 				goto error_finish_iomap;
 			}
 			*pfnp = pfn;
-			vmf_ret = VM_FAULT_NEEDDSYNC | major;
+			ret = VM_FAULT_NEEDDSYNC | major;
 			goto finish_iomap;
 		}
 		trace_dax_insert_mapping(inode, vmf, entry);
 		if (write)
-			error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
+			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
 		else
-			error = vm_insert_mixed(vma, vaddr, pfn);
+			ret = vmf_insert_mixed(vma, vaddr, pfn);
 
-		/* -EBUSY is fine, somebody else faulted on the same PTE */
-		if (error == -EBUSY)
-			error = 0;
-		break;
+		goto finish_iomap;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
 		if (!write) {
-			vmf_ret = dax_load_hole(mapping, entry, vmf);
+			ret = dax_load_hole(mapping, entry, vmf);
 			goto finish_iomap;
 		}
 		/*FALLTHRU*/
@@ -1284,12 +1281,12 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	}
 
  error_finish_iomap:
-	vmf_ret = dax_fault_return(error) | major;
+	ret = dax_fault_return(error) | major;
  finish_iomap:
 	if (ops->iomap_end) {
 		int copied = PAGE_SIZE;
 
-		if (vmf_ret & VM_FAULT_ERROR)
+		if (ret & VM_FAULT_ERROR)
 			copied = 0;
 		/*
 		 * The fault is done by now and there's no way back (other
@@ -1302,12 +1299,12 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
  unlock_entry:
 	put_locked_mapping_entry(mapping, vmf->pgoff);
  out:
-	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
-	return vmf_ret;
+	trace_dax_pte_fault_done(inode, vmf, ret);
+	return ret;
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
+static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 		void *entry)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
@@ -1348,7 +1345,7 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 	return VM_FAULT_FALLBACK;
 }
 
-static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			       const struct iomap_ops *ops)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -1358,7 +1355,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	bool sync;
 	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
 	struct inode *inode = mapping->host;
-	int result = VM_FAULT_FALLBACK;
+	vm_fault_t result = VM_FAULT_FALLBACK;
 	struct iomap iomap = { 0 };
 	pgoff_t max_pgoff, pgoff;
 	void *entry;
@@ -1509,7 +1506,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 	return result;
 }
 #else
-static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
+static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 			       const struct iomap_ops *ops)
 {
 	return VM_FAULT_FALLBACK;
@@ -1529,7 +1526,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
  * has done all the necessary locking for page fault to proceed
  * successfully.
  */
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
 {
 	switch (pe_size) {
@@ -1553,14 +1550,14 @@ int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
  * DAX file.  It takes care of marking corresponding radix tree entry as dirty
  * as well.
  */
-static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
+static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
 				  enum page_entry_size pe_size,
 				  pfn_t pfn)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	void *entry, **slot;
 	pgoff_t index = vmf->pgoff;
-	int vmf_ret, error;
+	vm_fault_t ret;
 
 	xa_lock_irq(&mapping->i_pages);
 	entry = get_unlocked_mapping_entry(mapping, index, &slot);
@@ -1579,21 +1576,20 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
 	xa_unlock_irq(&mapping->i_pages);
 	switch (pe_size) {
 	case PE_SIZE_PTE:
-		error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
-		vmf_ret = dax_fault_return(error);
+		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
 		break;
 #ifdef CONFIG_FS_DAX_PMD
 	case PE_SIZE_PMD:
-		vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
+		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
 			pfn, true);
 		break;
 #endif
 	default:
-		vmf_ret = VM_FAULT_FALLBACK;
+		ret = VM_FAULT_FALLBACK;
 	}
 	put_locked_mapping_entry(mapping, index);
-	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret);
-	return vmf_ret;
+	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
+	return ret;
 }
 
 /**
@@ -1606,8 +1602,8 @@ static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
  * stored persistently on the media and handles inserting of appropriate page
  * table entry.
  */
-int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
-			  pfn_t pfn)
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+		enum page_entry_size pe_size, pfn_t pfn)
 {
 	int err;
 	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
diff --git a/include/linux/dax.h b/include/linux/dax.h
index f9eb22a..7fddea8 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -124,8 +124,8 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		const struct iomap_ops *ops);
 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
 		    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
-int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
-			  pfn_t pfn);
+vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
+		enum page_entry_size pe_size, pfn_t pfn);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 				      pgoff_t index);
-- 
1.9.1


* Re: [PATCH v2] fs: dax: Adding new return type vm_fault_t
From: Matthew Wilcox @ 2018-04-21 20:17 UTC (permalink / raw)
  To: Souptick Joarder
  Cc: mawilcox, ross.zwisler, viro, linux-fsdevel, linux-kernel, linux-mm

On Sat, Apr 21, 2018 at 10:44:42PM +0530, Souptick Joarder wrote:
> @@ -1112,7 +1112,7 @@ int __dax_zero_page_range(struct block_device *bdev,
>  }
>  EXPORT_SYMBOL_GPL(dax_iomap_rw);
>  
> -static int dax_fault_return(int error)
> +static vm_fault_t dax_fault_return(int error)
>  {
>  	if (error == 0)
>  		return VM_FAULT_NOPAGE;

At some point, we'll want to get rid of dax_fault_return, but that can be
a follow-on patch after vmf_error is in.
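
(As a rough sketch, and assuming vmf_error() keeps its usual mapping of
-ENOMEM to VM_FAULT_OOM and everything else to VM_FAULT_SIGBUS, that
would leave dax_fault_return() as little more than:

	static vm_fault_t dax_fault_return(int error)
	{
		if (error == 0)
			return VM_FAULT_NOPAGE;
		return vmf_error(error);
	}

at which point folding it into its callers becomes trivial.)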

>  		if (write)
> -			error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
> +			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
>  		else
> -			error = vm_insert_mixed(vma, vaddr, pfn);
> +			ret = vmf_insert_mixed(vma, vaddr, pfn);
>  
> -		/* -EBUSY is fine, somebody else faulted on the same PTE */
> -		if (error == -EBUSY)
> -			error = 0;
> -		break;
> +		goto finish_iomap;

> @@ -1284,12 +1281,12 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
>  	}
>  
>   error_finish_iomap:
> -	vmf_ret = dax_fault_return(error) | major;
> +	ret = dax_fault_return(error) | major;
>   finish_iomap:

I think we lose VM_FAULT_MAJOR with this change: on success the IOMAP_MAPPED
path now jumps straight to finish_iomap, so it never reaches the
"dax_fault_return(error) | major" at error_finish_iomap that the old break
fell through to.

I would suggest fixing this with ...

  error_finish_iomap:
-	vmf_ret = dax_fault_return(error) | major;
+	ret = dax_fault_return(error);
  finish_iomap:

[...]

  out:
-	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
-	return vmf_ret;
+	trace_dax_pte_fault_done(inode, vmf, ret);
+	return ret | major;


* Re: [PATCH v2] fs: dax: Adding new return type vm_fault_t
From: kbuild test robot @ 2018-04-23  5:28 UTC (permalink / raw)
  To: Souptick Joarder
  Cc: kbuild-all, mawilcox, ross.zwisler, viro, linux-fsdevel,
	linux-kernel, linux-mm


Hi Souptick,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v4.17-rc2 next-20180420]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Souptick-Joarder/fs-dax-Adding-new-return-type-vm_fault_t/20180423-102814
config: i386-randconfig-x006-201816 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-16) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All errors (new ones prefixed by >>):

   fs/dax.c: In function 'dax_iomap_pte_fault':
>> fs/dax.c:1265:10: error: implicit declaration of function 'vmf_insert_mixed_mkwrite'; did you mean 'vm_insert_mixed_mkwrite'? [-Werror=implicit-function-declaration]
       ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
             ^~~~~~~~~~~~~~~~~~~~~~~~
             vm_insert_mixed_mkwrite
   cc1: some warnings being treated as errors

vim +1265 fs/dax.c

  1134	
  1135	static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
  1136				       int *iomap_errp, const struct iomap_ops *ops)
  1137	{
  1138		struct vm_area_struct *vma = vmf->vma;
  1139		struct address_space *mapping = vma->vm_file->f_mapping;
  1140		struct inode *inode = mapping->host;
  1141		unsigned long vaddr = vmf->address;
  1142		loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
  1143		struct iomap iomap = { 0 };
  1144		unsigned flags = IOMAP_FAULT;
  1145		int error, major = 0;
  1146		bool write = vmf->flags & FAULT_FLAG_WRITE;
  1147		bool sync;
  1148		vm_fault_t ret = 0;
  1149		void *entry;
  1150		pfn_t pfn;
  1151	
  1152		trace_dax_pte_fault(inode, vmf, ret);
  1153		/*
  1154		 * Check whether offset isn't beyond end of file now. Caller is supposed
  1155		 * to hold locks serializing us with truncate / punch hole so this is
  1156		 * a reliable test.
  1157		 */
  1158		if (pos >= i_size_read(inode)) {
  1159			ret = VM_FAULT_SIGBUS;
  1160			goto out;
  1161		}
  1162	
  1163		if (write && !vmf->cow_page)
  1164			flags |= IOMAP_WRITE;
  1165	
  1166		entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
  1167		if (IS_ERR(entry)) {
  1168			ret = dax_fault_return(PTR_ERR(entry));
  1169			goto out;
  1170		}
  1171	
  1172		/*
  1173		 * It is possible, particularly with mixed reads & writes to private
  1174		 * mappings, that we have raced with a PMD fault that overlaps with
  1175		 * the PTE we need to set up.  If so just return and the fault will be
  1176		 * retried.
  1177		 */
  1178		if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
  1179			ret = VM_FAULT_NOPAGE;
  1180			goto unlock_entry;
  1181		}
  1182	
  1183		/*
  1184		 * Note that we don't bother to use iomap_apply here: DAX required
  1185		 * the file system block size to be equal the page size, which means
  1186		 * that we never have to deal with more than a single extent here.
  1187		 */
  1188		error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
  1189		if (iomap_errp)
  1190			*iomap_errp = error;
  1191		if (error) {
  1192			ret = dax_fault_return(error);
  1193			goto unlock_entry;
  1194		}
  1195		if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
  1196			error = -EIO;	/* fs corruption? */
  1197			goto error_finish_iomap;
  1198		}
  1199	
  1200		if (vmf->cow_page) {
  1201			sector_t sector = dax_iomap_sector(&iomap, pos);
  1202	
  1203			switch (iomap.type) {
  1204			case IOMAP_HOLE:
  1205			case IOMAP_UNWRITTEN:
  1206				clear_user_highpage(vmf->cow_page, vaddr);
  1207				break;
  1208			case IOMAP_MAPPED:
  1209				error = copy_user_dax(iomap.bdev, iomap.dax_dev,
  1210						sector, PAGE_SIZE, vmf->cow_page, vaddr);
  1211				break;
  1212			default:
  1213				WARN_ON_ONCE(1);
  1214				error = -EIO;
  1215				break;
  1216			}
  1217	
  1218			if (error)
  1219				goto error_finish_iomap;
  1220	
  1221			__SetPageUptodate(vmf->cow_page);
  1222			ret = finish_fault(vmf);
  1223			if (!ret)
  1224				ret = VM_FAULT_DONE_COW;
  1225			goto finish_iomap;
  1226		}
  1227	
  1228		sync = dax_fault_is_synchronous(flags, vma, &iomap);
  1229	
  1230		switch (iomap.type) {
  1231		case IOMAP_MAPPED:
  1232			if (iomap.flags & IOMAP_F_NEW) {
  1233				count_vm_event(PGMAJFAULT);
  1234				count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
  1235				major = VM_FAULT_MAJOR;
  1236			}
  1237			error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
  1238			if (error < 0)
  1239				goto error_finish_iomap;
  1240	
  1241			entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
  1242							 0, write && !sync);
  1243			if (IS_ERR(entry)) {
  1244				error = PTR_ERR(entry);
  1245				goto error_finish_iomap;
  1246			}
  1247	
  1248			/*
  1249			 * If we are doing synchronous page fault and inode needs fsync,
  1250			 * we can insert PTE into page tables only after that happens.
  1251			 * Skip insertion for now and return the pfn so that caller can
  1252			 * insert it after fsync is done.
  1253			 */
  1254			if (sync) {
  1255				if (WARN_ON_ONCE(!pfnp)) {
  1256					error = -EIO;
  1257					goto error_finish_iomap;
  1258				}
  1259				*pfnp = pfn;
  1260				ret = VM_FAULT_NEEDDSYNC | major;
  1261				goto finish_iomap;
  1262			}
  1263			trace_dax_insert_mapping(inode, vmf, entry);
  1264			if (write)
> 1265				ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
  1266			else
  1267				ret = vmf_insert_mixed(vma, vaddr, pfn);
  1268	
  1269			goto finish_iomap;
  1270		case IOMAP_UNWRITTEN:
  1271		case IOMAP_HOLE:
  1272			if (!write) {
  1273				ret = dax_load_hole(mapping, entry, vmf);
  1274				goto finish_iomap;
  1275			}
  1276			/*FALLTHRU*/
  1277		default:
  1278			WARN_ON_ONCE(1);
  1279			error = -EIO;
  1280			break;
  1281		}
  1282	
  1283	 error_finish_iomap:
  1284		ret = dax_fault_return(error) | major;
  1285	 finish_iomap:
  1286		if (ops->iomap_end) {
  1287			int copied = PAGE_SIZE;
  1288	
  1289			if (ret & VM_FAULT_ERROR)
  1290				copied = 0;
  1291			/*
  1292			 * The fault is done by now and there's no way back (other
  1293			 * thread may be already happily using PTE we have installed).
  1294			 * Just ignore error from ->iomap_end since we cannot do much
  1295			 * with it.
  1296			 */
  1297			ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
  1298		}
  1299	 unlock_entry:
  1300		put_locked_mapping_entry(mapping, vmf->pgoff);
  1301	 out:
  1302		trace_dax_pte_fault_done(inode, vmf, ret);
  1303		return ret;
  1304	}
  1305	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation



* Re: [PATCH v2] fs: dax: Adding new return type vm_fault_t
From: Souptick Joarder @ 2018-04-23  6:23 UTC (permalink / raw)
  To: kbuild test robot
  Cc: linux-fsdevel, kbuild-all, Ross Zwisler, Al Viro, Linux-MM,
	linux-kernel, mawilcox


Patch v3 is the latest one which needs to be tested. Please ignore v2.

On 23-Apr-2018 10:59 AM, "kbuild test robot" <lkp@intel.com> wrote:
>
> Hi Souptick,
>
> Thank you for the patch! Yet something to improve:
>
> [auto build test ERROR on linus/master]
> [also build test ERROR on v4.17-rc2 next-20180420]
> [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
>
> url:    https://github.com/0day-ci/linux/commits/Souptick-Joarder/fs-dax-Adding-new-return-type-vm_fault_t/20180423-102814
> config: i386-randconfig-x006-201816 (attached as .config)
> compiler: gcc-7 (Debian 7.3.0-16) 7.3.0
> reproduce:
>         # save the attached .config to linux build tree
>         make ARCH=i386
>
> All errors (new ones prefixed by >>):
>
>    fs/dax.c: In function 'dax_iomap_pte_fault':
> >> fs/dax.c:1265:10: error: implicit declaration of function 'vmf_insert_mixed_mkwrite'; did you mean 'vm_insert_mixed_mkwrite'? [-Werror=implicit-function-declaration]
>        ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
>              ^~~~~~~~~~~~~~~~~~~~~~~~
>              vm_insert_mixed_mkwrite
>    cc1: some warnings being treated as errors
>


* Re: [PATCH v2] fs: dax: Adding new return type vm_fault_t
From: kbuild test robot @ 2018-04-23  7:40 UTC (permalink / raw)
  To: Souptick Joarder
  Cc: kbuild-all, mawilcox, ross.zwisler, viro, linux-fsdevel,
	linux-kernel, linux-mm


Hi Souptick,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v4.17-rc2 next-20180420]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Souptick-Joarder/fs-dax-Adding-new-return-type-vm_fault_t/20180423-102814
config: x86_64-randconfig-u0-04230854 (attached as .config)
compiler: gcc-5 (Debian 5.5.0-3) 5.4.1 20171010
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

All errors (new ones prefixed by >>):

   fs/dax.c: In function 'dax_iomap_pte_fault':
>> fs/dax.c:1265:10: error: implicit declaration of function 'vmf_insert_mixed_mkwrite' [-Werror=implicit-function-declaration]
       ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
             ^
   cc1: some warnings being treated as errors

vim +/vmf_insert_mixed_mkwrite +1265 fs/dax.c

  1134	
  1135	static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
  1136				       int *iomap_errp, const struct iomap_ops *ops)
  1137	{
  1138		struct vm_area_struct *vma = vmf->vma;
  1139		struct address_space *mapping = vma->vm_file->f_mapping;
  1140		struct inode *inode = mapping->host;
  1141		unsigned long vaddr = vmf->address;
  1142		loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
  1143		struct iomap iomap = { 0 };
  1144		unsigned flags = IOMAP_FAULT;
  1145		int error, major = 0;
  1146		bool write = vmf->flags & FAULT_FLAG_WRITE;
  1147		bool sync;
  1148		vm_fault_t ret = 0;
  1149		void *entry;
  1150		pfn_t pfn;
  1151	
  1152		trace_dax_pte_fault(inode, vmf, ret);
  1153		/*
  1154		 * Check whether offset isn't beyond end of file now. Caller is supposed
  1155		 * to hold locks serializing us with truncate / punch hole so this is
  1156		 * a reliable test.
  1157		 */
  1158		if (pos >= i_size_read(inode)) {
  1159			ret = VM_FAULT_SIGBUS;
  1160			goto out;
  1161		}
  1162	
  1163		if (write && !vmf->cow_page)
  1164			flags |= IOMAP_WRITE;
  1165	
  1166		entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
  1167		if (IS_ERR(entry)) {
  1168			ret = dax_fault_return(PTR_ERR(entry));
  1169			goto out;
  1170		}
  1171	
  1172		/*
  1173		 * It is possible, particularly with mixed reads & writes to private
  1174		 * mappings, that we have raced with a PMD fault that overlaps with
  1175		 * the PTE we need to set up.  If so just return and the fault will be
  1176		 * retried.
  1177		 */
  1178		if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
  1179			ret = VM_FAULT_NOPAGE;
  1180			goto unlock_entry;
  1181		}
  1182	
  1183		/*
  1184		 * Note that we don't bother to use iomap_apply here: DAX required
  1185		 * the file system block size to be equal the page size, which means
  1186		 * that we never have to deal with more than a single extent here.
  1187		 */
  1188		error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
  1189		if (iomap_errp)
  1190			*iomap_errp = error;
  1191		if (error) {
  1192			ret = dax_fault_return(error);
  1193			goto unlock_entry;
  1194		}
  1195		if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
  1196			error = -EIO;	/* fs corruption? */
  1197			goto error_finish_iomap;
  1198		}
  1199	
  1200		if (vmf->cow_page) {
  1201			sector_t sector = dax_iomap_sector(&iomap, pos);
  1202	
  1203			switch (iomap.type) {
  1204			case IOMAP_HOLE:
  1205			case IOMAP_UNWRITTEN:
  1206				clear_user_highpage(vmf->cow_page, vaddr);
  1207				break;
  1208			case IOMAP_MAPPED:
  1209				error = copy_user_dax(iomap.bdev, iomap.dax_dev,
  1210						sector, PAGE_SIZE, vmf->cow_page, vaddr);
  1211				break;
  1212			default:
  1213				WARN_ON_ONCE(1);
  1214				error = -EIO;
  1215				break;
  1216			}
  1217	
  1218			if (error)
  1219				goto error_finish_iomap;
  1220	
  1221			__SetPageUptodate(vmf->cow_page);
  1222			ret = finish_fault(vmf);
  1223			if (!ret)
  1224				ret = VM_FAULT_DONE_COW;
  1225			goto finish_iomap;
  1226		}
  1227	
  1228		sync = dax_fault_is_synchronous(flags, vma, &iomap);
  1229	
  1230		switch (iomap.type) {
  1231		case IOMAP_MAPPED:
  1232			if (iomap.flags & IOMAP_F_NEW) {
  1233				count_vm_event(PGMAJFAULT);
  1234				count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
  1235				major = VM_FAULT_MAJOR;
  1236			}
  1237			error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
  1238			if (error < 0)
  1239				goto error_finish_iomap;
  1240	
  1241			entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
  1242							 0, write && !sync);
  1243			if (IS_ERR(entry)) {
  1244				error = PTR_ERR(entry);
  1245				goto error_finish_iomap;
  1246			}
  1247	
  1248			/*
  1249			 * If we are doing synchronous page fault and inode needs fsync,
  1250			 * we can insert PTE into page tables only after that happens.
  1251			 * Skip insertion for now and return the pfn so that caller can
  1252			 * insert it after fsync is done.
  1253			 */
  1254			if (sync) {
  1255				if (WARN_ON_ONCE(!pfnp)) {
  1256					error = -EIO;
  1257					goto error_finish_iomap;
  1258				}
  1259				*pfnp = pfn;
  1260				ret = VM_FAULT_NEEDDSYNC | major;
  1261				goto finish_iomap;
  1262			}
  1263			trace_dax_insert_mapping(inode, vmf, entry);
  1264			if (write)
> 1265				ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
  1266			else
  1267				ret = vmf_insert_mixed(vma, vaddr, pfn);
  1268	
  1269			goto finish_iomap;
  1270		case IOMAP_UNWRITTEN:
  1271		case IOMAP_HOLE:
  1272			if (!write) {
  1273				ret = dax_load_hole(mapping, entry, vmf);
  1274				goto finish_iomap;
  1275			}
  1276			/*FALLTHRU*/
  1277		default:
  1278			WARN_ON_ONCE(1);
  1279			error = -EIO;
  1280			break;
  1281		}
  1282	
  1283	 error_finish_iomap:
  1284		ret = dax_fault_return(error) | major;
  1285	 finish_iomap:
  1286		if (ops->iomap_end) {
  1287			int copied = PAGE_SIZE;
  1288	
  1289			if (ret & VM_FAULT_ERROR)
  1290				copied = 0;
  1291			/*
  1292			 * The fault is done by now and there's no way back (other
  1293			 * thread may be already happily using PTE we have installed).
  1294			 * Just ignore error from ->iomap_end since we cannot do much
  1295			 * with it.
  1296			 */
  1297			ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
  1298		}
  1299	 unlock_entry:
  1300		put_locked_mapping_entry(mapping, vmf->pgoff);
  1301	 out:
  1302		trace_dax_pte_fault_done(inode, vmf, ret);
  1303		return ret;
  1304	}
  1305	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation


