From: Laurent Dufour <ldufour@linux.vnet.ibm.com>
To: paulmck@linux.vnet.ibm.com, peterz@infradead.org,
	akpm@linux-foundation.org, kirill@shutemov.name,
	ak@linux.intel.com, mhocko@kernel.org, dave@stgolabs.net,
	jack@suse.cz, Matthew Wilcox <willy@infradead.org>,
	benh@kernel.crashing.org, mpe@ellerman.id.au, paulus@samba.org,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	hpa@zytor.com, Will Deacon <will.deacon@arm.com>,
	Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	haren@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com,
	npiggin@gmail.com, bsingharora@gmail.com,
	Tim Chen <tim.c.chen@linux.intel.com>,
	linuxppc-dev@lists.ozlabs.org, x86@kernel.org
Subject: [PATCH v3 10/20] mm: Introduce __lru_cache_add_active_or_unevictable
Date: Fri,  8 Sep 2017 20:06:54 +0200
Message-ID: <1504894024-2750-11-git-send-email-ldufour@linux.vnet.ibm.com>
In-Reply-To: <1504894024-2750-1-git-send-email-ldufour@linux.vnet.ibm.com>

The speculative page fault handler, which runs without holding the
mmap_sem, calls lru_cache_add_active_or_unevictable(), but in that
context the vma's vm_flags are not guaranteed to remain constant.
Introduce __lru_cache_add_active_or_unevictable(), which takes the vma
flags value as a parameter instead of the vma pointer.
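
As a minimal sketch of the intended calling convention (illustrative
only, not part of the patch), a caller on the speculative path is
expected to pass a snapshot of the vma flags, such as the vmf->vma_flags
value cached when the vm_fault structure is set up, rather than
dereferencing the vma at LRU-insertion time. The helper name below is
hypothetical:

	/*
	 * Sketch only: how the new variant is meant to be called on the
	 * speculative path, using the vm_flags snapshot cached in the
	 * vm_fault structure instead of vma->vm_flags.
	 */
	static void spf_lru_add_example(struct vm_fault *vmf, struct page *page)
	{
		__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
	}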

Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
---
 include/linux/swap.h | 11 +++++++++--
 mm/memory.c          |  8 ++++----
 mm/swap.c            | 12 ++++++------
 3 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8a807292037f..9b4dbb98af89 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -323,8 +323,15 @@ extern void swap_setup(void);
 
 extern void add_page_to_unevictable_list(struct page *page);
 
-extern void lru_cache_add_active_or_unevictable(struct page *page,
-						struct vm_area_struct *vma);
+extern void __lru_cache_add_active_or_unevictable(struct page *page,
+						unsigned long vma_flags);
+
+static inline void lru_cache_add_active_or_unevictable(struct page *page,
+						struct vm_area_struct *vma)
+{
+	return __lru_cache_add_active_or_unevictable(page, vma->vm_flags);
+}
+
 
 /* linux/mm/vmscan.c */
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
diff --git a/mm/memory.c b/mm/memory.c
index a4982917c16e..4583f354be94 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2509,7 +2509,7 @@ static int wp_page_copy(struct vm_fault *vmf)
 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
 		page_add_new_anon_rmap(new_page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(new_page, memcg, false, false);
-		lru_cache_add_active_or_unevictable(new_page, vma);
+		__lru_cache_add_active_or_unevictable(new_page, vmf->vma_flags);
 		/*
 		 * We call the notify macro here because, when using secondary
 		 * mmu page tables (such as kvm shadow page tables), we want the
@@ -2998,7 +2998,7 @@ int do_swap_page(struct vm_fault *vmf)
 	} else { /* ksm created a completely new copy */
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
-		lru_cache_add_active_or_unevictable(page, vma);
+		__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
 	}
 
 	swap_free(entry);
@@ -3144,7 +3144,7 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, vmf->address, false);
 	mem_cgroup_commit_charge(page, memcg, false, false);
-	lru_cache_add_active_or_unevictable(page, vma);
+	__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
 setpte:
 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 
@@ -3396,7 +3396,7 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, vmf->address, false);
 		mem_cgroup_commit_charge(page, memcg, false, false);
-		lru_cache_add_active_or_unevictable(page, vma);
+		__lru_cache_add_active_or_unevictable(page, vmf->vma_flags);
 	} else {
 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
 		page_add_file_rmap(page, false);
diff --git a/mm/swap.c b/mm/swap.c
index 9295ae960d66..b084bb16d769 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -470,21 +470,21 @@ void add_page_to_unevictable_list(struct page *page)
 }
 
 /**
- * lru_cache_add_active_or_unevictable
- * @page:  the page to be added to LRU
- * @vma:   vma in which page is mapped for determining reclaimability
+ * __lru_cache_add_active_or_unevictable
+ * @page:	the page to be added to LRU
+ * @vma_flags:  vm_flags of the vma in which the page is mapped, used to determine reclaimability
  *
  * Place @page on the active or unevictable LRU list, depending on its
  * evictability.  Note that if the page is not evictable, it goes
  * directly back onto it's zone's unevictable list, it does NOT use a
  * per cpu pagevec.
  */
-void lru_cache_add_active_or_unevictable(struct page *page,
-					 struct vm_area_struct *vma)
+void __lru_cache_add_active_or_unevictable(struct page *page,
+					   unsigned long vma_flags)
 {
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
+	if (likely((vma_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
 		SetPageActive(page);
 		lru_cache_add(page);
 		return;
-- 
2.7.4


Thread overview: 66+ messages in thread
2017-09-08 18:06 [PATCH v3 00/20] Speculative page faults Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 01/20] mm: Dont assume page-table invariance during faults Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 02/20] mm: Prepare for FAULT_FLAG_SPECULATIVE Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 03/20] mm: Introduce pte_spinlock " Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 04/20] mm: VMA sequence count Laurent Dufour
2017-09-13 11:53   ` Sergey Senozhatsky
2017-09-13 16:56     ` Laurent Dufour
2017-09-14  0:31       ` Sergey Senozhatsky
2017-09-14  7:55         ` Laurent Dufour
2017-09-14  8:13           ` Sergey Senozhatsky
2017-09-14  8:58             ` Laurent Dufour
2017-09-14  9:11               ` Sergey Senozhatsky
2017-09-14  9:15                 ` Laurent Dufour
2017-09-14  9:40                   ` Sergey Senozhatsky
2017-09-15 12:38                     ` Laurent Dufour
2017-09-25 12:22                       ` Peter Zijlstra
2017-09-08 18:06 ` [PATCH v3 05/20] mm: Protect VMA modifications using " Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 06/20] mm: RCU free VMAs Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 07/20] mm: Cache some VMA fields in the vm_fault structure Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 08/20] mm: Protect SPF handler against anon_vma changes Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 09/20] mm/migrate: Pass vm_fault pointer to migrate_misplaced_page() Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 10/20] mm: Introduce __lru_cache_add_active_or_unevictable Laurent Dufour [this message]
2017-09-08 18:06 ` [PATCH v3 11/20] mm: Introduce __maybe_mkwrite() Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 12/20] mm: Introduce __vm_normal_page() Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 13/20] mm: Introduce __page_add_new_anon_rmap() Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 14/20] mm: Provide speculative fault infrastructure Laurent Dufour
2017-09-08 18:06 ` [PATCH v3 15/20] mm: Try spin lock in speculative path Laurent Dufour
2017-09-08 18:07 ` [PATCH v3 16/20] mm: Adding speculative page fault failure trace events Laurent Dufour
2017-09-08 18:07 ` [PATCH v3 17/20] perf: Add a speculative page fault sw event Laurent Dufour
2017-09-08 18:07 ` [PATCH v3 18/20] perf tools: Add support for the SPF perf event Laurent Dufour
2017-09-08 18:07 ` [PATCH v3 19/20] x86/mm: Add speculative pagefault handling Laurent Dufour
2017-09-08 18:07 ` [PATCH v3 20/20] powerpc/mm: Add speculative page fault Laurent Dufour
2017-09-18  7:15 ` [PATCH v3 00/20] Speculative page faults Laurent Dufour
