From: Laurent Dufour <ldufour@linux.vnet.ibm.com>
To: paulmck@linux.vnet.ibm.com, peterz@infradead.org,
	akpm@linux-foundation.org, kirill@shutemov.name,
	ak@linux.intel.com, mhocko@kernel.org, dave@stgolabs.net,
	jack@suse.cz, Matthew Wilcox <willy@infradead.org>,
	benh@kernel.crashing.org, mpe@ellerman.id.au, paulus@samba.org,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	hpa@zytor.com, Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	haren@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com,
	npiggin@gmail.com, bsingharora@gmail.com,
	Tim Chen <tim.c.chen@linux.intel.com>,
	linuxppc-dev@lists.ozlabs.org, x86@kernel.org
Subject: [PATCH v2 16/20] mm: Adding speculative page fault failure trace events
Date: Fri, 18 Aug 2017 00:05:15 +0200	[thread overview]
Message-ID: <1503007519-26777-17-git-send-email-ldufour@linux.vnet.ibm.com> (raw)
In-Reply-To: <1503007519-26777-1-git-send-email-ldufour@linux.vnet.ibm.com>

This patch adds a set of new trace events to collect the speculative page
fault failure events.

Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
---
 include/trace/events/pagefault.h | 87 ++++++++++++++++++++++++++++++++++++++++
 mm/memory.c                      | 68 ++++++++++++++++++++++++-------
 2 files changed, 141 insertions(+), 14 deletions(-)
 create mode 100644 include/trace/events/pagefault.h
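
A usage sketch, not part of the change itself: once applied, these tracepoints
should show up under the "pagefault" trace system and can be enabled like any
other trace event (the tracefs mount point below is the common default and may
differ on a given system):

  # cd /sys/kernel/debug/tracing
  # echo 1 > events/pagefault/enable
  # cat trace_pipe

Each event records the caller address (_RET_IP_), the VMA boundaries and the
faulting address, which should be enough to tell which check aborted the
speculative path.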

diff --git a/include/trace/events/pagefault.h b/include/trace/events/pagefault.h
new file mode 100644
index 000000000000..d7d56f8102d1
--- /dev/null
+++ b/include/trace/events/pagefault.h
@@ -0,0 +1,87 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pagefault
+
+#if !defined(_TRACE_PAGEFAULT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PAGEFAULT_H
+
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+
+DECLARE_EVENT_CLASS(spf,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, caller)
+		__field(unsigned long, vm_start)
+		__field(unsigned long, vm_end)
+		__field(unsigned long, address)
+	),
+
+	TP_fast_assign(
+		__entry->caller		= caller;
+		__entry->vm_start	= vma->vm_start;
+		__entry->vm_end		= vma->vm_end;
+		__entry->address	= address;
+	),
+
+	TP_printk("ip:%lx vma:%lx-%lx address:%lx",
+		  __entry->caller, __entry->vm_start, __entry->vm_end,
+		  __entry->address)
+);
+
+DEFINE_EVENT(spf, spf_pte_lock,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_vma_changed,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_vma_dead,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_vma_noanon,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_vma_notsup,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+DEFINE_EVENT(spf, spf_vma_access,
+
+	TP_PROTO(unsigned long caller,
+		 struct vm_area_struct *vma, unsigned long address),
+
+	TP_ARGS(caller, vma, address)
+);
+
+#endif /* _TRACE_PAGEFAULT_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/mm/memory.c b/mm/memory.c
index 8c701e4f59d3..549d23583f53 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -79,6 +79,9 @@
 
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/pagefault.h>
+
 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 #endif
@@ -2296,15 +2299,20 @@ static bool pte_spinlock(struct vm_fault *vmf)
 	}
 
 	local_irq_disable();
-	if (vma_has_changed(vmf))
+	if (vma_has_changed(vmf)) {
+		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
+	}
 
 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
-	if (unlikely(!spin_trylock(vmf->ptl)))
+	if (unlikely(!spin_trylock(vmf->ptl))) {
+		trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
+	}
 
 	if (vma_has_changed(vmf)) {
 		spin_unlock(vmf->ptl);
+		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
 	}
 
@@ -2334,8 +2342,10 @@ static bool pte_map_lock(struct vm_fault *vmf)
 	 * block on the PTL and thus we're safe.
 	 */
 	local_irq_disable();
-	if (vma_has_changed(vmf))
+	if (vma_has_changed(vmf)) {
+		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
+	}
 
 	/*
 	 * Same as pte_offset_map_lock() except that we call
@@ -2348,11 +2358,13 @@ static bool pte_map_lock(struct vm_fault *vmf)
 	pte = pte_offset_map(vmf->pmd, vmf->address);
 	if (unlikely(!spin_trylock(ptl))) {
 		pte_unmap(pte);
+		trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
 	}
 
 	if (vma_has_changed(vmf)) {
 		pte_unmap_unlock(pte, ptl);
+		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
 		goto out;
 	}
 
@@ -3989,27 +4001,40 @@ int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 	 * Validate the VMA found by the lockless lookup.
 	 */
 	dead = RB_EMPTY_NODE(&vma->vm_rb);
+	if (dead) {
+		trace_spf_vma_dead(_RET_IP_, vma, address);
+		goto unlock;
+	}
+
 	seq = raw_read_seqcount(&vma->vm_sequence); /* rmb <-> seqlock,vma_rb_erase() */
-	if ((seq & 1) || dead)
+	if (seq & 1) {
+		trace_spf_vma_changed(_RET_IP_, vma, address);
 		goto unlock;
+	}
 
 	/*
 	 * Can't call vm_ops service has we don't know what they would do
 	 * with the VMA.
 	 * This include huge page from hugetlbfs.
 	 */
-	if (vma->vm_ops)
+	if (vma->vm_ops) {
+		trace_spf_vma_notsup(_RET_IP_, vma, address);
 		goto unlock;
+	}
 
-	if (unlikely(!vma->anon_vma))
+	if (unlikely(!vma->anon_vma)) {
+		trace_spf_vma_notsup(_RET_IP_, vma, address);
 		goto unlock;
+	}
 
 	vmf.vma_flags = READ_ONCE(vma->vm_flags);
 	vmf.vma_page_prot = READ_ONCE(vma->vm_page_prot);
 
 	/* Can't call userland page fault handler in the speculative path */
-	if (unlikely(vmf.vma_flags & VM_UFFD_MISSING))
+	if (unlikely(vmf.vma_flags & VM_UFFD_MISSING)) {
+		trace_spf_vma_notsup(_RET_IP_, vma, address);
 		goto unlock;
+	}
 
 	/*
 	 * MPOL_INTERLEAVE implies additional check in mpol_misplaced() which
@@ -4018,20 +4043,26 @@ int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 	pol = __get_vma_policy(vma, address);
 	if (!pol)
 		pol = get_task_policy(current);
-	if (pol && pol->mode == MPOL_INTERLEAVE)
+	if (pol && pol->mode == MPOL_INTERLEAVE) {
+		trace_spf_vma_notsup(_RET_IP_, vma, address);
 		goto unlock;
+	}
 
-	if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP)
+	if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) {
 		/*
 		 * This could be detected by the check address against VMA's
 		 * boundaries but we want to trace it as not supported instead
 		 * of changed.
 		 */
+		trace_spf_vma_notsup(_RET_IP_, vma, address);
 		goto unlock;
+	}
 
 	if (address < READ_ONCE(vma->vm_start)
-	    || READ_ONCE(vma->vm_end) <= address)
+	    || READ_ONCE(vma->vm_end) <= address) {
+		trace_spf_vma_changed(_RET_IP_, vma, address);
 		goto unlock;
+	}
 
 	/*
 	 * The three following checks are copied from access_error from
@@ -4039,16 +4070,22 @@ int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 	 */
 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
 				       flags & FAULT_FLAG_INSTRUCTION,
-				       flags & FAULT_FLAG_REMOTE))
+				       flags & FAULT_FLAG_REMOTE)) {
+		trace_spf_vma_access(_RET_IP_, vma, address);
 		goto unlock;
+	}
 
 	/* This is one is required to check that the VMA has write access set */
 	if (flags & FAULT_FLAG_WRITE) {
-		if (unlikely(!(vmf.vma_flags & VM_WRITE)))
+		if (unlikely(!(vmf.vma_flags & VM_WRITE))) {
+			trace_spf_vma_access(_RET_IP_, vma, address);
 			goto unlock;
+		}
 	} else {
-		if (unlikely(!(vmf.vma_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+		if (unlikely(!(vmf.vma_flags & (VM_READ | VM_EXEC | VM_WRITE)))) {
+			trace_spf_vma_access(_RET_IP_, vma, address);
 			goto unlock;
+		}
 	}
 
 	/*
@@ -4097,8 +4134,10 @@ int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 	 * We need to re-validate the VMA after checking the bounds, otherwise
 	 * we might have a false positive on the bounds.
 	 */
-	if (read_seqcount_retry(&vma->vm_sequence, seq))
+	if (read_seqcount_retry(&vma->vm_sequence, seq)) {
+		trace_spf_vma_changed(_RET_IP_, vma, address);
 		goto unlock;
+	}
 
 	ret = handle_pte_fault(&vmf);
 
@@ -4107,6 +4146,7 @@ int handle_speculative_fault(struct mm_struct *mm, unsigned long address,
 	return ret;
 
 out_walk:
+	trace_spf_vma_notsup(_RET_IP_, vma, address);
 	local_irq_enable();
 	goto unlock;
 }
-- 
2.7.4


Thread overview: 122+ messages
2017-08-17 22:04 [PATCH v2 00/20] Speculative page faults Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 01/20] mm: Dont assume page-table invariance during faults Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 02/20] mm: Prepare for FAULT_FLAG_SPECULATIVE Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 03/20] mm: Introduce pte_spinlock " Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 04/20] mm: VMA sequence count Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 05/20] mm: Protect VMA modifications using " Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 06/20] mm: RCU free VMAs Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 07/20] mm: Cache some VMA fields in the vm_fault structure Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 08/20] mm: Protect SPF handler against anon_vma changes Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 09/20] mm/migrate: Pass vm_fault pointer to migrate_misplaced_page() Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 10/20] mm: Introduce __lru_cache_add_active_or_unevictable Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 11/20] mm: Introduce __maybe_mkwrite() Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 12/20] mm: Introduce __vm_normal_page() Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 13/20] mm: Introduce __page_add_new_anon_rmap() Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 14/20] mm: Provide speculative fault infrastructure Laurent Dufour
2017-08-20 12:11   ` Sergey Senozhatsky
2017-08-25  8:52     ` Laurent Dufour
2017-08-27  0:18   ` Kirill A. Shutemov
2017-08-28  9:37     ` Peter Zijlstra
2017-08-28 21:14       ` Benjamin Herrenschmidt
2017-08-28 22:35         ` Andi Kleen
2017-08-29  8:15           ` Peter Zijlstra
2017-08-29  8:33         ` Peter Zijlstra
2017-08-29 11:27           ` Peter Zijlstra
2017-08-29 21:19             ` Benjamin Herrenschmidt
2017-08-30  6:13               ` Peter Zijlstra
2017-08-29  7:59     ` Laurent Dufour
2017-08-29 12:04       ` Peter Zijlstra
2017-08-29 13:18         ` Laurent Dufour
2017-08-29 13:45           ` Peter Zijlstra
2017-08-30  5:03             ` Anshuman Khandual
2017-08-30  5:58               ` Peter Zijlstra
2017-08-30  9:32                 ` Laurent Dufour
2017-08-31  6:55                   ` Anshuman Khandual
2017-08-31  7:31                     ` Peter Zijlstra
2017-08-30  9:53               ` Laurent Dufour
2017-08-30  3:48         ` Anshuman Khandual
2017-08-30  5:25     ` Anshuman Khandual
2017-08-30  8:56     ` Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 15/20] mm: Try spin lock in speculative path Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 16/20] mm: Adding speculative page fault failure trace events Laurent Dufour [this message]
2017-08-17 22:05 ` [PATCH v2 17/20] perf: Add a speculative page fault sw event Laurent Dufour
2017-08-21  8:55   ` Anshuman Khandual
2017-08-22  1:46     ` Michael Ellerman
2017-08-17 22:05 ` [PATCH v2 18/20] perf tools: Add support for the SPF perf event Laurent Dufour
2017-08-21  8:48   ` Anshuman Khandual
2017-08-25  8:53     ` Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 19/20] x86/mm: Add speculative pagefault handling Laurent Dufour
2017-08-21  7:29   ` Anshuman Khandual
2017-08-29 14:50     ` Laurent Dufour
2017-08-29 14:58       ` Laurent Dufour
2017-08-17 22:05 ` [PATCH v2 20/20] powerpc/mm: Add speculative page fault Laurent Dufour
2017-08-21  6:58   ` Anshuman Khandual
2017-08-29 15:13     ` Laurent Dufour
2017-08-21  2:26 ` [PATCH v2 00/20] Speculative page faults Sergey Senozhatsky
2017-09-08  9:24   ` Laurent Dufour
2017-09-11  0:45     ` Sergey Senozhatsky
2017-09-11  6:28       ` Laurent Dufour
2017-08-21  6:28 ` Anshuman Khandual
2017-08-22  0:41   ` Paul E. McKenney
2017-08-25  9:41   ` Laurent Dufour
