From: Jan Kara <jack@suse.cz>
To: linux-mm@kvack.org
Cc: Jan Kara <jack@suse.cz>,
	linux-nvdimm@lists.01.org, linux-fsdevel@vger.kernel.org,
	Andrew Morton <akpm@linux-foundation.org>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH 05/21] mm: Trim __do_fault() arguments
Date: Fri,  4 Nov 2016 05:25:01 +0100
Message-ID: <1478233517-3571-6-git-send-email-jack@suse.cz>
In-Reply-To: <1478233517-3571-1-git-send-email-jack@suse.cz>

Use the vm_fault structure to pass cow_page, page, and entry in and out of
the function. This reduces the number of __do_fault() arguments from 4 to 1.
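
As a rough illustration of the pattern (a standalone sketch, not kernel code;
the names fault_ctx and handle_fault_* are invented for the example and are
not kernel symbols), the change has the following shape:

    struct page;                        /* opaque stand-in for the kernel's struct page */

    struct fault_ctx {
            struct page *cow_page;      /* input: set by the caller before the call */
            struct page *page;          /* output: set by the callee on success */
    };

    /* Before: one context argument plus parameters that only shuttle state. */
    int handle_fault_old(struct fault_ctx *ctx, struct page *cow_page,
                         struct page **page, void **entry);

    /* After: a single argument; inputs and results travel on the context. */
    int handle_fault_new(struct fault_ctx *ctx)
    {
            /* ... do the work, then publish the result on the context ... */
            ctx->page = 0;
            return 0;
    }

The callers already build and pass the context structure, so hanging the
in/out state off it removes the extra parameters without adding any new
allocations or lifetimes.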

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
---
 mm/memory.c | 53 +++++++++++++++++++++++------------------------------
 1 file changed, 23 insertions(+), 30 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 8145dadb2645..f5ef7b8a30c5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2848,26 +2848,22 @@ static int do_anonymous_page(struct vm_fault *vmf)
  * released depending on flags and vma->vm_ops->fault() return value.
  * See filemap_fault() and __lock_page_retry().
  */
-static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
-		      struct page **page, void **entry)
+static int __do_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	int ret;
 
-	vmf->cow_page = cow_page;
-
 	ret = vma->vm_ops->fault(vma, vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED) {
-		*entry = vmf->entry;
+	if (ret & VM_FAULT_DAX_LOCKED)
 		return ret;
-	}
 
 	if (unlikely(PageHWPoison(vmf->page))) {
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf->page);
 		put_page(vmf->page);
+		vmf->page = NULL;
 		return VM_FAULT_HWPOISON;
 	}
 
@@ -2876,7 +2872,6 @@ static int __do_fault(struct vm_fault *vmf, struct page *cow_page,
 	else
 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
 
-	*page = vmf->page;
 	return ret;
 }
 
@@ -3173,7 +3168,6 @@ static int do_fault_around(struct vm_fault *vmf)
 static int do_read_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	int ret = 0;
 
 	/*
@@ -3187,24 +3181,23 @@ static int do_read_fault(struct vm_fault *vmf)
 			return ret;
 	}
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-	unlock_page(fault_page);
+	unlock_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		put_page(fault_page);
+		put_page(vmf->page);
 	return ret;
 }
 
 static int do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page, *new_page;
-	void *fault_entry;
+	struct page *new_page;
 	struct mem_cgroup *memcg;
 	int ret;
 
@@ -3221,20 +3214,21 @@ static int do_cow_fault(struct vm_fault *vmf)
 		return VM_FAULT_OOM;
 	}
 
-	ret = __do_fault(vmf, new_page, &fault_page, &fault_entry);
+	vmf->cow_page = new_page;
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 
 	if (!(ret & VM_FAULT_DAX_LOCKED))
-		copy_user_highpage(new_page, fault_page, vmf->address, vma);
+		copy_user_highpage(new_page, vmf->page, vmf->address, vma);
 	__SetPageUptodate(new_page);
 
 	ret |= alloc_set_pte(vmf, memcg, new_page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (!(ret & VM_FAULT_DAX_LOCKED)) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 	} else {
 		dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
 	}
@@ -3250,12 +3244,11 @@ static int do_cow_fault(struct vm_fault *vmf)
 static int do_shared_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *fault_page;
 	struct address_space *mapping;
 	int dirtied = 0;
 	int ret, tmp;
 
-	ret = __do_fault(vmf, NULL, &fault_page, NULL);
+	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
 
@@ -3264,26 +3257,26 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * about to become writable
 	 */
 	if (vma->vm_ops->page_mkwrite) {
-		unlock_page(fault_page);
-		tmp = do_page_mkwrite(vma, fault_page, vmf->address);
+		unlock_page(vmf->page);
+		tmp = do_page_mkwrite(vma, vmf->page, vmf->address);
 		if (unlikely(!tmp ||
 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(fault_page);
+			put_page(vmf->page);
 			return tmp;
 		}
 	}
 
-	ret |= alloc_set_pte(vmf, NULL, fault_page);
+	ret |= alloc_set_pte(vmf, NULL, vmf->page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
 					VM_FAULT_RETRY))) {
-		unlock_page(fault_page);
-		put_page(fault_page);
+		unlock_page(vmf->page);
+		put_page(vmf->page);
 		return ret;
 	}
 
-	if (set_page_dirty(fault_page))
+	if (set_page_dirty(vmf->page))
 		dirtied = 1;
 	/*
 	 * Take a local copy of the address_space - page.mapping may be zeroed
@@ -3291,8 +3284,8 @@ static int do_shared_fault(struct vm_fault *vmf)
 	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
 	 * release semantics to prevent the compiler from undoing this copying.
 	 */
-	mapping = page_rmapping(fault_page);
-	unlock_page(fault_page);
+	mapping = page_rmapping(vmf->page);
+	unlock_page(vmf->page);
 	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
 		/*
 		 * Some device drivers do not set page.mapping but still
-- 
2.6.6
