From: Laurent Dufour <ldufour@linux.vnet.ibm.com>
To: paulmck@linux.vnet.ibm.com, peterz@infradead.org,
	akpm@linux-foundation.org, kirill@shutemov.name,
	ak@linux.intel.com, mhocko@kernel.org, dave@stgolabs.net,
	jack@suse.cz
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	haren@linux.vnet.ibm.com, khandual@linux.vnet.ibm.com,
	npiggin@gmail.com, bsingharora@gmail.com
Subject: [RFC v3 12/17] mm/spf: Protect changes to vm_flags
Date: Thu, 27 Apr 2017 17:52:51 +0200	[thread overview]
Message-ID: <1493308376-23851-13-git-send-email-ldufour@linux.vnet.ibm.com> (raw)
In-Reply-To: <1493308376-23851-1-git-send-email-ldufour@linux.vnet.ibm.com>

Protect changes to a VMA's flags against the speculative page fault handler.
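
The write side brackets each vm_flags update (and the vm_page_prot
recomputation that depends on it) with vma->vm_sequence, the seqcount
added to struct vm_area_struct earlier in this series, so the
speculative handler can detect a concurrent modification and fall back
to the regular mmap_sem-protected path. A minimal sketch of both sides
of the protocol follows; it assumes the vm_sequence field from patch
04/17, and the helper names are illustrative, not part of this series:

	/* Writer: called with mmap_sem held for write, as in the hunks
	 * below. The seqcount makes the two-field update (vm_flags plus
	 * the derived vm_page_prot) appear atomic to a lockless reader.
	 */
	static void vm_flags_update(struct vm_area_struct *vma,
				    unsigned long newflags)
	{
		write_seqcount_begin(&vma->vm_sequence);
		vma->vm_flags = newflags;
		vma_set_page_prot(vma);
		write_seqcount_end(&vma->vm_sequence);
	}

	/* Reader: a speculative fault must not block, so rather than
	 * spinning in read_seqcount_begin() it samples the count once
	 * and gives up at any sign of a writer. Returns true when
	 * *flags holds a consistent snapshot.
	 */
	static bool vm_flags_snapshot(struct vm_area_struct *vma,
				      unsigned long *flags)
	{
		unsigned seq = raw_read_seqcount(&vma->vm_sequence);

		if (seq & 1)	/* writer inside its critical section */
			return false;
		*flags = READ_ONCE(vma->vm_flags);
		return !read_seqcount_retry(&vma->vm_sequence, seq);
	}

The hunks below apply this write-side bracketing to each remaining
place where vma->vm_flags is modified.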

Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
---
 fs/proc/task_mmu.c | 2 ++
 mm/mempolicy.c     | 2 ++
 mm/mlock.c         | 9 ++++++---
 mm/mmap.c          | 2 ++
 mm/mprotect.c      | 2 ++
 5 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8f96a49178d0..54c9a87530cb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1055,8 +1055,10 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 					goto out_mm;
 				}
 				for (vma = mm->mmap; vma; vma = vma->vm_next) {
+					write_seqcount_begin(&vma->vm_sequence);
 					vma->vm_flags &= ~VM_SOFTDIRTY;
 					vma_set_page_prot(vma);
+					write_seqcount_end(&vma->vm_sequence);
 				}
 				downgrade_write(&mm->mmap_sem);
 				break;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1e7873e40c9a..1518b022927d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -603,9 +603,11 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 {
 	int nr_updated;
 
+	write_seqcount_begin(&vma->vm_sequence);
 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
 	if (nr_updated)
 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
+	write_seqcount_end(&vma->vm_sequence);
 
 	return nr_updated;
 }
diff --git a/mm/mlock.c b/mm/mlock.c
index cdbed8aaa426..44cf70413530 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -437,7 +437,9 @@ static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
 void munlock_vma_pages_range(struct vm_area_struct *vma,
 			     unsigned long start, unsigned long end)
 {
+	write_seqcount_begin(&vma->vm_sequence);
 	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+	write_seqcount_end(&vma->vm_sequence);
 
 	while (start < end) {
 		struct page *page;
@@ -563,10 +565,11 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	 * It's okay if try_to_unmap_one unmaps a page just after we
 	 * set VM_LOCKED, populate_vma_page_range will bring it back.
 	 */
-
-	if (lock)
+	if (lock) {
+		write_seqcount_begin(&vma->vm_sequence);
 		vma->vm_flags = newflags;
-	else
+		write_seqcount_end(&vma->vm_sequence);
+	} else
 		munlock_vma_pages_range(vma, start, end);
 
 out:
diff --git a/mm/mmap.c b/mm/mmap.c
index 27f407d8f7d7..815065d740c4 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1742,6 +1742,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 out:
 	perf_event_mmap(vma);
 
+	write_seqcount_begin(&vma->vm_sequence);
 	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
@@ -1764,6 +1765,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	vma->vm_flags |= VM_SOFTDIRTY;
 
 	vma_set_page_prot(vma);
+	write_seqcount_end(&vma->vm_sequence);
 
 	return addr;
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index f9c07f54dd62..646347faf4d5 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -341,6 +341,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
+	write_seqcount_begin(&vma->vm_sequence);
 	vma->vm_flags = newflags;
 	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
 	vma_set_page_prot(vma);
@@ -356,6 +357,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 			(newflags & VM_WRITE)) {
 		populate_vma_page_range(vma, start, end, NULL);
 	}
+	write_seqcount_end(&vma->vm_sequence);
 
 	vm_stat_account(mm, oldflags, -nrpages);
 	vm_stat_account(mm, newflags, nrpages);
-- 
2.7.4

Thread overview: 48+ messages

2017-04-27 15:52 [RFC v3 00/17] Speculative page faults Laurent Dufour
2017-04-27 15:52 ` [RFC v3 01/17] mm: Dont assume page-table invariance during faults Laurent Dufour
2017-04-27 15:52 ` [RFC v3 02/17] mm: Prepare for FAULT_FLAG_SPECULATIVE Laurent Dufour
2017-04-27 15:52 ` [RFC v3 03/17] mm: Introduce pte_spinlock Laurent Dufour
2017-04-30  4:47   ` Matthew Wilcox
2017-05-03 13:01     ` Laurent Dufour
2017-04-27 15:52 ` [RFC v3 04/17] mm: VMA sequence count Laurent Dufour
2017-04-27 15:52 ` [RFC v3 05/17] RCU free VMAs Laurent Dufour
2017-04-27 18:28   ` Paul E. McKenney
2017-04-30  4:57   ` Matthew Wilcox
2017-04-30  5:05   ` Matthew Wilcox
2017-05-03  7:23     ` Laurent Dufour
2017-04-27 15:52 ` [RFC v3 06/17] mm: Provide speculative fault infrastructure Laurent Dufour
2017-04-27 15:52 ` [RFC v3 07/17] mm,x86: Add speculative pagefault handling Laurent Dufour
2017-04-27 15:52 ` [RFC v3 08/17] mm/spf: Try spin lock in speculative path Laurent Dufour
2017-04-27 15:52 ` [RFC v3 09/17] mm/spf: Fix fe.sequence init in __handle_mm_fault() Laurent Dufour
2017-04-27 15:52 ` [RFC v3 10/17] mm/spf: don't set fault entry's fields if locking failed Laurent Dufour
2017-04-27 15:52 ` [RFC v3 11/17] mm/spf; fix lock dependency against mapping->i_mmap_rwsem Laurent Dufour
2017-04-27 15:52 ` [RFC v3 12/17] mm/spf: Protect changes to vm_flags Laurent Dufour [this message]
2017-04-27 15:52 ` [RFC v3 13/17] mm/spf Protect vm_policy's changes against speculative pf Laurent Dufour
2017-04-27 15:52 ` [RFC v3 14/17] x86/mm: Update the handle_speculative_fault's path Laurent Dufour
2017-04-27 15:52 ` [RFC v3 15/17] mm/spf: Add check on the VMA's flags Laurent Dufour
2017-04-27 15:52 ` [RFC v3 16/17] mm: protect madvise vs speculative pf Laurent Dufour
2017-04-27 15:52 ` [RFC v3 17/17] mm/spf: protect mremap() against speculative pf Laurent Dufour
