From: Yu-cheng Yu <yu-cheng.yu@intel.com>
To: x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org,
	linux-mm@kvack.org, linux-arch@vger.kernel.org,
	linux-api@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>,
	Andy Lutomirski <luto@kernel.org>,
	Balbir Singh <bsingharora@gmail.com>,
	Borislav Petkov <bp@alien8.de>,
	Cyrill Gorcunov <gorcunov@gmail.com>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Eugene Syromiatnikov <esyr@redhat.com>,
	Florian Weimer <fweimer@redhat.com>,
	"H.J. Lu" <hjl.tools@gmail.com>, Jann Horn <jannh@google.com>,
	Jonathan Corbet <corbet@lwn.net>,
	Kees Cook <keescook@chromium.org>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Nadav Amit <nadav.amit@gmail.com>,
	Oleg Nesterov <oleg@redhat.com>, Pavel Machek <pavel@ucw.cz>,
	Peter Zijlstra <peterz@infradead.org>,
	Randy Dunlap <rdunlap@infradead.org>,
	"Ravi V. Shankar" <ravi.v.shankar@intel.com>,
	Vedvyas Shanbhogue <vedvyas.shanbhogue@intel.com>,
	Dave Martin <Dave.Martin@arm.com>,
	x86-patch-review@intel.com
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Subject: [RFC PATCH v9 16/27] mm: Update can_follow_write_pte() for Shadow Stack
Date: Wed,  5 Feb 2020 10:19:24 -0800	[thread overview]
Message-ID: <20200205181935.3712-17-yu-cheng.yu@intel.com> (raw)
In-Reply-To: <20200205181935.3712-1-yu-cheng.yu@intel.com>

can_follow_write_pte() verifies that a read-only page is the task's own
copy by checking that the page has gone through faultin_page() and that the
PTE is Dirty.

A Shadow Stack (SHSTK) PTE must be (read-only + _PAGE_DIRTY_HW).  When a
task does fork(), its SHSTK PTEs become (read-only + _PAGE_DIRTY_SW).  This
causes the next SHSTK access (e.g. CALL, RET, or INCSSP) to trigger a fault;
the page is then copied, and (read-only + _PAGE_DIRTY_HW) is restored.
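
Below is a minimal, illustrative sketch of that fork()-time transition.  It
is not part of this patch (the real work is done by the wrprotect changes
earlier in this series), and the function name here is made up; only the
generic x86 flag helpers (pte_flags()/pte_set_flags()/pte_clear_flags()) and
the series' dirty bits are real:

	/* Sketch only: move the HW dirty bit to the SW dirty bit on wrprotect. */
	static inline pte_t shstk_fork_wrprotect_sketch(pte_t pte)
	{
		if (pte_flags(pte) & _PAGE_DIRTY_HW) {
			pte = pte_clear_flags(pte, _PAGE_DIRTY_HW);
			pte = pte_set_flags(pte, _PAGE_DIRTY_SW);
		}
		return pte_clear_flags(pte, _PAGE_RW);
	}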

To update can_follow_write_pte() for SHSTK, introduce pte_exclusive().  It
verifies that a data PTE is Dirty and that a SHSTK PTE has _PAGE_DIRTY_HW
set.

Also rename can_follow_write_pte() to can_follow_write() to make its
meaning clear; i.e. "Can we write to the page?", not "Is the PTE writable?"
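
To make the combined effect concrete, here is a small stand-alone model
(plain C, not kernel code; the bit values and the two-argument helpers are
simplified stand-ins for the real pte/vma interfaces) of the check after
this patch:

	#include <stdbool.h>

	#define PTE_RW        0x1
	#define PTE_DIRTY_HW  0x2	/* hardware-managed dirty bit */
	#define PTE_DIRTY_SW  0x4	/* software dirty bit set at fork() */
	#define VM_SHSTK      0x1
	#define FOLL_FORCE    0x1
	#define FOLL_COW      0x2

	/*
	 * pte_exclusive(): a SHSTK PTE must carry the HW dirty bit; a data
	 * PTE counts as dirty with either bit, matching pte_dirty() in this
	 * series.
	 */
	static bool pte_exclusive(unsigned long pte, unsigned long vm_flags)
	{
		if (vm_flags & VM_SHSTK)
			return pte & PTE_DIRTY_HW;
		return pte & (PTE_DIRTY_HW | PTE_DIRTY_SW);
	}

	static bool can_follow_write(unsigned long pte, unsigned int flags,
				     unsigned long vm_flags)
	{
		return (pte & PTE_RW) ||
		       ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
			pte_exclusive(pte, vm_flags));
	}

With this, a SHSTK PTE that only has _PAGE_DIRTY_SW set, i.e. one inherited
from fork() and not yet re-copied through the fault path, is not treated as
the task's own writable copy, while data PTEs keep the existing pte_dirty()
behavior.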

Also apply the same changes to the huge memory (PMD) case.

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
---
 arch/x86/mm/pgtable.c         | 18 ++++++++++++++++++
 include/asm-generic/pgtable.h | 12 ++++++++++++
 mm/gup.c                      |  8 +++++---
 mm/huge_memory.c              |  8 +++++---
 4 files changed, 40 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 3340b1d4e9da..fa8133f37918 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -887,6 +887,15 @@ inline pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma)
 		return pte;
 }
 
+inline bool pte_exclusive(pte_t pte, struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_SHSTK)
+		return pte_dirty_hw(pte);
+	else
+		return pte_dirty(pte);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 inline pmd_t pmd_set_vma_features(pmd_t pmd, struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & VM_SHSTK)
@@ -894,4 +903,13 @@ inline pmd_t pmd_set_vma_features(pmd_t pmd, struct vm_area_struct *vma)
 	else
 		return pmd;
 }
+
+inline bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_SHSTK)
+		return pmd_dirty_hw(pmd);
+	else
+		return pmd_dirty(pmd);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_X86_INTEL_SHADOW_STACK_USER */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index a9df093fdf45..ae9a84fffc25 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1202,18 +1202,30 @@ static inline pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
+static inline bool pte_exclusive(pte_t pte, struct vm_area_struct *vma)
+{
+	return pte_dirty(pte);
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline pmd_t pmd_set_vma_features(pmd_t pmd, struct vm_area_struct *vma)
 {
 	return pmd;
 }
+
+static inline bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma)
+{
+	return pmd_dirty(pmd);
+}
 #endif
 #else
 bool arch_copy_pte_mapping(vm_flags_t vm_flags);
 pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma);
+bool pte_exclusive(pte_t pte, struct vm_area_struct *vma);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 pmd_t pmd_set_vma_features(pmd_t pmd, struct vm_area_struct *vma);
+bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma);
 #endif
 #endif
 #endif /* CONFIG_MMU */
diff --git a/mm/gup.c b/mm/gup.c
index 7646bf993b25..d1dbfbde8443 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -164,10 +164,12 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  * FOLL_FORCE can write to even unwritable pte's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+static inline bool can_follow_write(pte_t pte, unsigned int flags,
+				    struct vm_area_struct *vma)
 {
 	return pte_write(pte) ||
-		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+		 pte_exclusive(pte, vma));
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -205,7 +207,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	}
 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
+	if ((flags & FOLL_WRITE) && !can_follow_write(pte, flags, vma)) {
 		pte_unmap_unlock(ptep, ptl);
 		return NULL;
 	}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 93ef368df2dd..baad346e9f4a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1469,10 +1469,12 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
  * FOLL_FORCE can write to even unwritable pmd's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+static inline bool can_follow_write(pmd_t pmd, unsigned int flags,
+				    struct vm_area_struct *vma)
 {
 	return pmd_write(pmd) ||
-	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+		pmd_exclusive(pmd, vma));
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1485,7 +1487,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
-	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
+	if (flags & FOLL_WRITE && !can_follow_write(*pmd, flags, vma))
 		goto out;
 
 	/* Avoid dumping huge zero page */
-- 
2.21.0


Thread overview: 264+ messages  [newest: ~2020-02-05 18:21 UTC]
2020-02-05 18:19 [RFC PATCH v9 00/27] Control-flow Enforcement: Shadow Stack Yu-cheng Yu
2020-02-05 18:19 ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 01/27] Documentation/x86: Add CET description Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-06  0:16   ` Randy Dunlap
2020-02-06  0:16     ` Randy Dunlap
2020-02-06 20:17     ` Yu-cheng Yu
2020-02-06 20:17       ` Yu-cheng Yu
2020-02-06 20:17       ` Yu-cheng Yu
2020-02-25 20:02   ` Kees Cook
2020-02-25 20:02     ` Kees Cook
2020-02-28 15:55     ` Yu-cheng Yu
2020-02-28 15:55       ` Yu-cheng Yu
2020-02-28 15:55       ` Yu-cheng Yu
2020-02-26 17:57   ` Dave Hansen
2020-02-26 17:57     ` Dave Hansen
2020-03-09 17:00     ` Yu-cheng Yu
2020-03-09 17:00       ` Yu-cheng Yu
2020-03-09 17:00       ` Yu-cheng Yu
2020-03-09 17:21       ` Dave Hansen
2020-03-09 17:21         ` Dave Hansen
2020-03-09 19:27         ` Yu-cheng Yu
2020-03-09 19:27           ` Yu-cheng Yu
2020-03-09 19:27           ` Yu-cheng Yu
2020-03-09 19:35           ` Dave Hansen
2020-03-09 19:35             ` Dave Hansen
2020-03-09 19:50             ` H.J. Lu
2020-03-09 19:50               ` H.J. Lu
2020-03-09 19:50               ` H.J. Lu
2020-03-09 20:16               ` Andy Lutomirski
2020-03-09 20:16                 ` Andy Lutomirski
2020-03-09 20:54                 ` H.J. Lu
2020-03-09 20:54                   ` H.J. Lu
2020-03-09 20:54                   ` H.J. Lu
2020-03-09 20:59                   ` Dave Hansen
2020-03-09 20:59                     ` Dave Hansen
2020-03-09 21:12                     ` H.J. Lu
2020-03-09 21:12                       ` H.J. Lu
2020-03-09 21:12                       ` H.J. Lu
2020-03-09 22:02                       ` Andy Lutomirski
2020-03-09 22:02                         ` Andy Lutomirski
2020-03-09 22:19                       ` Dave Hansen
2020-03-09 22:19                         ` Dave Hansen
2020-03-09 23:11                         ` H.J. Lu
2020-03-09 23:11                           ` H.J. Lu
2020-03-09 23:11                           ` H.J. Lu
2020-03-09 23:20                           ` Dave Hansen
2020-03-09 23:20                             ` Dave Hansen
2020-03-09 23:51                             ` H.J. Lu
2020-03-09 23:51                               ` H.J. Lu
2020-03-09 23:51                               ` H.J. Lu
2020-03-09 23:59                               ` Andy Lutomirski
2020-03-09 23:59                                 ` Andy Lutomirski
2020-03-09 23:59                                 ` Andy Lutomirski
2020-03-10  0:08                                 ` H.J. Lu
2020-03-10  0:08                                   ` H.J. Lu
2020-03-10  0:08                                   ` H.J. Lu
2020-03-10  1:21                                   ` Andy Lutomirski
2020-03-10  1:21                                     ` Andy Lutomirski
2020-03-10  2:13                                     ` H.J. Lu
2020-03-10  2:13                                       ` H.J. Lu
2020-03-10  2:13                                       ` H.J. Lu
2020-02-05 18:19 ` [RFC PATCH v9 02/27] x86/cpufeatures: Add CET CPU feature flags for Control-flow Enforcement Technology (CET) Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:02   ` Kees Cook
2020-02-25 20:02     ` Kees Cook
2020-02-05 18:19 ` [RFC PATCH v9 03/27] x86/fpu/xstate: Introduce CET MSR XSAVES supervisor states Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:04   ` Kees Cook
2020-02-25 20:04     ` Kees Cook
2020-02-05 18:19 ` [RFC PATCH v9 04/27] x86/cet: Add control-protection fault handler Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:06   ` Kees Cook
2020-02-25 20:06     ` Kees Cook
2020-02-26 17:10   ` Dave Hansen
2020-02-26 17:10     ` Dave Hansen
2020-03-05 20:44     ` Yu-cheng Yu
2020-03-05 20:44       ` Yu-cheng Yu
2020-03-05 20:44       ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 05/27] x86/cet/shstk: Add Kconfig option for user-mode Shadow Stack protection Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:07   ` Kees Cook
2020-02-25 20:07     ` Kees Cook
2020-02-26 17:03   ` Dave Hansen
2020-02-26 17:03     ` Dave Hansen
2020-02-26 19:57     ` Pavel Machek
2020-02-26 19:57       ` Pavel Machek
2020-03-05 20:38     ` Yu-cheng Yu
2020-03-05 20:38       ` Yu-cheng Yu
2020-03-05 20:38       ` Yu-cheng Yu
2020-02-26 18:05   ` Dave Hansen
2020-02-26 18:05     ` Dave Hansen
2020-02-27  1:02     ` H.J. Lu
2020-02-27  1:02       ` H.J. Lu
2020-02-27  1:02       ` H.J. Lu
2020-02-27  1:16       ` Dave Hansen
2020-02-27  1:16         ` Dave Hansen
2020-02-27  2:11         ` H.J. Lu
2020-02-27  2:11           ` H.J. Lu
2020-02-27  2:11           ` H.J. Lu
2020-02-27  3:57           ` Andy Lutomirski
2020-02-27  3:57             ` Andy Lutomirski
2020-02-27 18:03             ` Dave Hansen
2020-02-27 18:03               ` Dave Hansen
2020-03-06 18:37     ` Yu-cheng Yu
2020-03-06 18:37       ` Yu-cheng Yu
2020-03-06 18:37       ` Yu-cheng Yu
2020-03-06 19:02       ` Dave Hansen
2020-03-06 19:02         ` Dave Hansen
2020-03-06 21:16         ` Yu-cheng Yu
2020-03-06 21:16           ` Yu-cheng Yu
2020-03-06 21:16           ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 06/27] mm: Introduce VM_SHSTK for Shadow Stack memory Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:07   ` Kees Cook
2020-02-25 20:07     ` Kees Cook
2020-02-26 18:07   ` Dave Hansen
2020-02-26 18:07     ` Dave Hansen
2020-02-05 18:19 ` [RFC PATCH v9 07/27] Add guard pages around a Shadow Stack Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:11   ` Kees Cook
2020-02-25 20:11     ` Kees Cook
2020-02-26 18:17   ` Dave Hansen
2020-02-26 18:17     ` Dave Hansen
2020-02-05 18:19 ` [RFC PATCH v9 08/27] x86/mm: Change _PAGE_DIRTY to _PAGE_DIRTY_HW Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:12   ` Kees Cook
2020-02-25 20:12     ` Kees Cook
2020-02-26 18:20   ` Dave Hansen
2020-02-26 18:20     ` Dave Hansen
2020-02-05 18:19 ` [RFC PATCH v9 09/27] x86/mm: Introduce _PAGE_DIRTY_SW Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:12   ` Kees Cook
2020-02-25 20:12     ` Kees Cook
2020-02-26 21:35   ` Dave Hansen
2020-02-26 21:35     ` Dave Hansen
2020-04-01 19:08     ` Yu-cheng Yu
2020-04-01 19:08       ` Yu-cheng Yu
2020-04-01 19:08       ` Yu-cheng Yu
2020-04-01 19:22       ` Dave Hansen
2020-04-01 19:22         ` Dave Hansen
2020-02-05 18:19 ` [RFC PATCH v9 10/27] x86/mm: Update pte_modify, pmd_modify, and _PAGE_CHG_MASK for _PAGE_DIRTY_SW Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-26 22:02   ` Dave Hansen
2020-02-26 22:02     ` Dave Hansen
2020-02-05 18:19 ` [RFC PATCH v9 11/27] drm/i915/gvt: Change _PAGE_DIRTY to _PAGE_DIRTY_BITS Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:13   ` Kees Cook
2020-02-25 20:13     ` Kees Cook
2020-02-26 22:04   ` Dave Hansen
2020-02-26 22:04     ` Dave Hansen
2020-04-03 15:42     ` Yu-cheng Yu
2020-04-03 15:42       ` Yu-cheng Yu
2020-04-03 15:42       ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 12/27] x86/mm: Modify ptep_set_wrprotect and pmdp_set_wrprotect for _PAGE_DIRTY_SW Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:14   ` Kees Cook
2020-02-25 20:14     ` Kees Cook
2020-02-26 22:20   ` Dave Hansen
2020-02-26 22:20     ` Dave Hansen
2020-02-05 18:19 ` [RFC PATCH v9 13/27] x86/mm: Shadow Stack page fault error checking Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:16   ` Kees Cook
2020-02-25 20:16     ` Kees Cook
2020-02-26 22:47   ` Dave Hansen
2020-02-26 22:47     ` Dave Hansen
2020-02-05 18:19 ` [RFC PATCH v9 14/27] mm: Handle Shadow Stack page fault Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:20   ` Kees Cook
2020-02-25 20:20     ` Kees Cook
2020-03-05 18:30     ` Yu-cheng Yu
2020-03-05 18:30       ` Yu-cheng Yu
2020-03-05 18:30       ` Yu-cheng Yu
2020-02-27  0:08   ` Dave Hansen
2020-02-27  0:08     ` Dave Hansen
2020-04-07 18:14     ` Yu-cheng Yu
2020-04-07 18:14       ` Yu-cheng Yu
2020-04-07 18:14       ` Yu-cheng Yu
2020-04-07 22:21       ` Dave Hansen
2020-04-07 22:21         ` Dave Hansen
2020-04-08 18:18         ` Yu-cheng Yu
2020-04-08 18:18           ` Yu-cheng Yu
2020-04-08 18:18           ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 15/27] mm: Handle THP/HugeTLB " Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 20:59   ` Kees Cook
2020-02-25 20:59     ` Kees Cook
2020-03-13 22:00     ` Yu-cheng Yu
2020-03-13 22:00       ` Yu-cheng Yu
2020-03-13 22:00       ` Yu-cheng Yu
2020-02-05 18:19 ` Yu-cheng Yu [this message]
2020-02-05 18:19   ` [RFC PATCH v9 16/27] mm: Update can_follow_write_pte() for Shadow Stack Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-27  0:34   ` Dave Hansen
2020-02-27  0:34     ` Dave Hansen
2020-02-05 18:19 ` [RFC PATCH v9 17/27] x86/cet/shstk: User-mode Shadow Stack support Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 21:07   ` Kees Cook
2020-02-25 21:07     ` Kees Cook
2020-02-27  0:55   ` Dave Hansen
2020-02-27  0:55     ` Dave Hansen
2020-02-05 18:19 ` [RFC PATCH v9 18/27] x86/cet/shstk: Introduce WRUSS instruction Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 21:10   ` Kees Cook
2020-02-25 21:10     ` Kees Cook
2020-03-05 18:39     ` Yu-cheng Yu
2020-03-05 18:39       ` Yu-cheng Yu
2020-03-05 18:39       ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 19/27] x86/cet/shstk: Handle signals for Shadow Stack Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 21:17   ` Kees Cook
2020-02-25 21:17     ` Kees Cook
2020-02-05 18:19 ` [RFC PATCH v9 20/27] ELF: UAPI and Kconfig additions for ELF program properties Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 21/27] binfmt_elf: Define GNU_PROPERTY_X86_FEATURE_1_AND Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 21:18   ` Kees Cook
2020-02-25 21:18     ` Kees Cook
2020-02-05 18:19 ` [RFC PATCH v9 22/27] ELF: Add ELF program property parsing support Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 21:20   ` Kees Cook
2020-02-25 21:20     ` Kees Cook
2020-02-05 18:19 ` [RFC PATCH v9 23/27] ELF: Introduce arch_setup_elf_property() Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 24/27] x86/cet/shstk: ELF header parsing for Shadow Stack Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 21:22   ` Kees Cook
2020-02-25 21:22     ` Kees Cook
2020-02-05 18:19 ` [RFC PATCH v9 25/27] x86/cet/shstk: Handle thread " Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 21:29   ` Kees Cook
2020-02-25 21:29     ` Kees Cook
2020-03-25 21:51     ` Yu-cheng Yu
2020-03-25 21:51       ` Yu-cheng Yu
2020-03-25 21:51       ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 26/27] mm/mmap: Add Shadow Stack pages to memory accounting Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19 ` [RFC PATCH v9 27/27] x86/cet/shstk: Add arch_prctl functions for Shadow Stack Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-05 18:19   ` Yu-cheng Yu
2020-02-25 21:31 ` [RFC PATCH v9 00/27] Control-flow Enforcement: " Kees Cook
2020-02-25 21:31   ` Kees Cook
