From: Laurent Dufour <ldufour@linux.vnet.ibm.com>
To: akpm@linux-foundation.org, mhocko@kernel.org,
	peterz@infradead.org, kirill@shutemov.name, ak@linux.intel.com,
	dave@stgolabs.net, jack@suse.cz,
	Matthew Wilcox <willy@infradead.org>,
	khandual@linux.vnet.ibm.com, aneesh.kumar@linux.vnet.ibm.com,
	benh@kernel.crashing.org, mpe@ellerman.id.au, paulus@samba.org,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	hpa@zytor.com, Will Deacon <will.deacon@arm.com>,
	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>,
	sergey.senozhatsky.work@gmail.com,
	Andrea Arcangeli <aarcange@redhat.com>,
	Alexei Starovoitov <alexei.starovoitov@gmail.com>,
	kemi.wang@intel.com, Daniel Jordan <daniel.m.jordan@oracle.com>,
	David Rientjes <rientjes@google.com>,
	Jerome Glisse <jglisse@redhat.com>,
	Ganesh Mahendran <opensource.ganesh@gmail.com>,
	Minchan Kim <minchan@kernel.org>,
	Punit Agrawal <punitagrawal@gmail.com>,
	vinayak menon <vinayakm.list@gmail.com>,
	Yang Shi <yang.shi@linux.alibaba.com>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	haren@linux.vnet.ibm.com, npiggin@gmail.com,
	bsingharora@gmail.com, paulmck@linux.vnet.ibm.com,
	Tim Chen <tim.c.chen@linux.intel.com>,
	linuxppc-dev@lists.ozlabs.org, x86@kernel.org
Subject: [PATCH v11 05/26] mm: prepare for FAULT_FLAG_SPECULATIVE
Date: Thu, 17 May 2018 13:06:12 +0200
Message-ID: <1526555193-7242-6-git-send-email-ldufour@linux.vnet.ibm.com>
In-Reply-To: <1526555193-7242-1-git-send-email-ldufour@linux.vnet.ibm.com>

From: Peter Zijlstra <peterz@infradead.org>

When speculating faults (without holding mmap_sem) we need to validate
that the VMA against which we loaded pages is still valid when we're
ready to install the new PTE.

Therefore, replace the pte_offset_map_lock() calls that (re)take the
PTL with pte_map_lock(), which can fail if the VMA has changed since we
started the fault.
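
As an illustrative sketch (not taken verbatim from the diff below):
every converted call site follows the same pattern. In this patch
pte_map_lock() still always succeeds; a later patch in the series makes
it return false when a speculative fault sees the VMA change, and the
caller then backs out with VM_FAULT_RETRY after releasing whatever it
already holds, e.g.:

	if (!pte_map_lock(vmf))
		return VM_FAULT_RETRY;	/* VMA changed under us, redo the fault */

	/* ... re-check vmf->orig_pte and install the new entry ... */

	pte_unmap_unlock(vmf->pte, vmf->ptl);
	return 0;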

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

[Port to 4.12 kernel]
[Remove the comment about the fault_env structure which has been
 implemented as the vm_fault structure in the kernel]
[move pte_map_lock()'s definition earlier in the file]
[move the define of FAULT_FLAG_SPECULATIVE later in the series]
[review error path in do_swap_page(), do_anonymous_page() and
 wp_page_copy()]
Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
---
 mm/memory.c | 87 ++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 58 insertions(+), 29 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 14578158ed20..a55e72c8e469 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2298,6 +2298,13 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
 
+static inline bool pte_map_lock(struct vm_fault *vmf)
+{
+	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
+				       vmf->address, &vmf->ptl);
+	return true;
+}
+
 /*
  * handle_pte_fault chooses page fault handler according to an entry which was
  * read non-atomically.  Before making any commitment, on those architectures
@@ -2487,25 +2494,26 @@ static int wp_page_copy(struct vm_fault *vmf)
 	const unsigned long mmun_start = vmf->address & PAGE_MASK;
 	const unsigned long mmun_end = mmun_start + PAGE_SIZE;
 	struct mem_cgroup *memcg;
+	int ret = VM_FAULT_OOM;
 
 	if (unlikely(anon_vma_prepare(vma)))
-		goto oom;
+		goto out;
 
 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
 		new_page = alloc_zeroed_user_highpage_movable(vma,
 							      vmf->address);
 		if (!new_page)
-			goto oom;
+			goto out;
 	} else {
 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
 				vmf->address);
 		if (!new_page)
-			goto oom;
+			goto out;
 		cow_user_page(new_page, old_page, vmf->address, vma);
 	}
 
 	if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false))
-		goto oom_free_new;
+		goto out_free_new;
 
 	__SetPageUptodate(new_page);
 
@@ -2514,7 +2522,10 @@ static int wp_page_copy(struct vm_fault *vmf)
 	/*
 	 * Re-check the pte - we dropped the lock
 	 */
-	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
+		goto out_uncharge;
+	}
 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
 		if (old_page) {
 			if (!PageAnon(old_page)) {
@@ -2601,12 +2612,14 @@ static int wp_page_copy(struct vm_fault *vmf)
 		put_page(old_page);
 	}
 	return page_copied ? VM_FAULT_WRITE : 0;
-oom_free_new:
+out_uncharge:
+	mem_cgroup_cancel_charge(new_page, memcg, false);
+out_free_new:
 	put_page(new_page);
-oom:
+out:
 	if (old_page)
 		put_page(old_page);
-	return VM_FAULT_OOM;
+	return ret;
 }
 
 /**
@@ -2627,8 +2640,8 @@ static int wp_page_copy(struct vm_fault *vmf)
 int finish_mkwrite_fault(struct vm_fault *vmf)
 {
 	WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
-	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
-				       &vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
 	/*
 	 * We might have raced with another page fault while we released the
 	 * pte_offset_map_lock.
@@ -2746,8 +2759,11 @@ static int do_wp_page(struct vm_fault *vmf)
 			get_page(vmf->page);
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
 			lock_page(vmf->page);
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
+			if (!pte_map_lock(vmf)) {
+				unlock_page(vmf->page);
+				put_page(vmf->page);
+				return VM_FAULT_RETRY;
+			}
 			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
 				unlock_page(vmf->page);
 				pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2954,11 +2970,15 @@ int do_swap_page(struct vm_fault *vmf)
 
 		if (!page) {
 			/*
-			 * Back out if somebody else faulted in this pte
-			 * while we released the pte lock.
+			 * Back out if the VMA has changed in our back during
+			 * a speculative page fault or if somebody else
+			 * faulted in this pte while we released the pte lock.
 			 */
-			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-					vmf->address, &vmf->ptl);
+			if (!pte_map_lock(vmf)) {
+				delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+				ret = VM_FAULT_RETRY;
+				goto out;
+			}
 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
 				ret = VM_FAULT_OOM;
 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -3011,10 +3031,13 @@ int do_swap_page(struct vm_fault *vmf)
 	}
 
 	/*
-	 * Back out if somebody else already faulted in this pte.
+	 * Back out if the VMA has changed in our back during a speculative
+	 * page fault or if somebody else already faulted in this pte.
 	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
+		goto out_cancel_cgroup;
+	}
 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
 		goto out_nomap;
 
@@ -3092,8 +3115,9 @@ int do_swap_page(struct vm_fault *vmf)
 out:
 	return ret;
 out_nomap:
-	mem_cgroup_cancel_charge(page, memcg, false);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
+out_cancel_cgroup:
+	mem_cgroup_cancel_charge(page, memcg, false);
 out_page:
 	unlock_page(page);
 out_release:
@@ -3144,8 +3168,8 @@ static int do_anonymous_page(struct vm_fault *vmf)
 			!mm_forbids_zeropage(vma->vm_mm)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
 						vma->vm_page_prot));
-		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
-				vmf->address, &vmf->ptl);
+		if (!pte_map_lock(vmf))
+			return VM_FAULT_RETRY;
 		if (!pte_none(*vmf->pte))
 			goto unlock;
 		ret = check_stable_address_space(vma->vm_mm);
@@ -3180,14 +3204,16 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	if (vma->vm_flags & VM_WRITE)
 		entry = pte_mkwrite(pte_mkdirty(entry));
 
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
-	if (!pte_none(*vmf->pte))
+	if (!pte_map_lock(vmf)) {
+		ret = VM_FAULT_RETRY;
 		goto release;
+	}
+	if (!pte_none(*vmf->pte))
+		goto unlock_and_release;
 
 	ret = check_stable_address_space(vma->vm_mm);
 	if (ret)
-		goto release;
+		goto unlock_and_release;
 
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
@@ -3209,10 +3235,12 @@ static int do_anonymous_page(struct vm_fault *vmf)
 unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
+unlock_and_release:
+	pte_unmap_unlock(vmf->pte, vmf->ptl);
 release:
 	mem_cgroup_cancel_charge(page, memcg, false);
 	put_page(page);
-	goto unlock;
+	return ret;
 oom_free_page:
 	put_page(page);
 oom:
@@ -3305,8 +3333,9 @@ static int pte_alloc_one_map(struct vm_fault *vmf)
 	 * pte_none() under vmf->ptl protection when we return to
 	 * alloc_set_pte().
 	 */
-	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
-			&vmf->ptl);
+	if (!pte_map_lock(vmf))
+		return VM_FAULT_RETRY;
+
 	return 0;
 }
 
-- 
2.7.4
