From: Yu-cheng Yu <yu-cheng.yu@intel.com>
To: x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
Thomas Gleixner <tglx@linutronix.de>,
Ingo Molnar <mingo@redhat.com>,
linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org,
linux-mm@kvack.org, linux-arch@vger.kernel.org,
linux-api@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>,
Andy Lutomirski <luto@amacapital.net>,
Balbir Singh <bsingharora@gmail.com>,
Borislav Petkov <bp@alien8.de>,
Cyrill Gorcunov <gorcunov@gmail.com>,
Dave Hansen <dave.hansen@linux.intel.com>,
Eugene Syromiatnikov <esyr@redhat.com>,
Florian Weimer <fweimer@redhat.com>,
"H.J. Lu" <hjl.tools@gmail.com>, Jann Horn <jannh@google.com>,
Jonathan Corbet <corbet@lwn.net>,
Kees Cook <keescook@chromium.org>,
Mike Kravetz <mike.kravetz@oracle.com>,
Nadav Amit <nadav.amit@gmail.com>,
Oleg Nesterov <oleg@redhat.com>, Pavel Machek <pavel@ucw.cz>,
Peter Zijlstra <peterz@infradead.org>,
Randy Dunlap <rdunlap@infradead.org>,
"Ravi V. Shankar" <ravi.v.shankar@intel.com>,
Vedvyas Shanbhogue <vedvyas.shanbhogue@intel.com>,
Dave Martin <Dave.Martin@arm.com>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Subject: [PATCH v8 17/27] mm: Update can_follow_write_pte/pmd for shadow stack
Date: Tue, 13 Aug 2019 13:52:15 -0700
Message-ID: <20190813205225.12032-18-yu-cheng.yu@intel.com>
In-Reply-To: <20190813205225.12032-1-yu-cheng.yu@intel.com>

can_follow_write_pte/pmd() look for the (RO & DIRTY) combination in a
PTE/PMD to verify that an exclusive RO page still exists after a COW
has been broken.

A shadow stack PTE is RO & _PAGE_DIRTY_SW when it is shared, and
RO & _PAGE_DIRTY_HW otherwise.

Introduce pte_exclusive() and pmd_exclusive() so that the check also
verifies a shadow stack PTE/PMD is exclusive.

Also rename can_follow_write_pte/pmd() to can_follow_write() to make
their meaning clear; i.e. "Can we write to the page?", not "Is the PTE
writable?"

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
---
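Note (illustration only, not part of the commit): below is a minimal
sketch of the x86 dirty-bit helpers the new pte_exclusive()/
pmd_exclusive() checks rely on, assuming the _PAGE_DIRTY_HW/
_PAGE_DIRTY_SW split introduced earlier in this series. The exact
definitions live in the x86 pgtable headers and may differ in detail.

	/*
	 * Hardware-dirty only: the CPU sets _PAGE_DIRTY_HW on an actual
	 * write, so a RO & _PAGE_DIRTY_HW shadow stack PTE must map an
	 * exclusive (non-shared) page.
	 */
	static inline bool pte_dirty_hw(pte_t pte)
	{
		return pte_flags(pte) & _PAGE_DIRTY_HW;
	}

	/*
	 * Either dirty bit: sufficient for ordinary VMAs, where the
	 * software bit is never used to mark a shared RO mapping.
	 */
	static inline bool pte_dirty(pte_t pte)
	{
		return pte_flags(pte) & _PAGE_DIRTY_BITS;
	}
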
 arch/x86/mm/pgtable.c         | 18 ++++++++++++++++++
 include/asm-generic/pgtable.h | 12 ++++++++++++
 mm/gup.c                      |  8 +++++---
 mm/huge_memory.c              |  8 +++++---
 4 files changed, 40 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 6f3959ca2a08..326715fd0c50 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -898,4 +898,22 @@ inline bool arch_copy_pte_mapping(vm_flags_t vm_flags)
 {
 	return (vm_flags & VM_SHSTK);
 }
+
+inline bool pte_exclusive(pte_t pte, struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_SHSTK)
+		return pte_dirty_hw(pte);
+	else
+		return pte_dirty(pte);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+inline bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_SHSTK)
+		return pmd_dirty_hw(pmd);
+	else
+		return pmd_dirty(pmd);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_X86_INTEL_SHADOW_STACK_USER */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 438ce73b57ea..b58f40525ebc 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1203,10 +1203,22 @@ static inline bool arch_copy_pte_mapping(vm_flags_t vm_flags)
 {
 	return false;
 }
+
+static inline bool pte_exclusive(pte_t pte, struct vm_area_struct *vma)
+{
+	return pte_dirty(pte);
+}
+
+static inline bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma)
+{
+	return pmd_dirty(pmd);
+}
 #else
 pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma);
 pmd_t pmd_set_vma_features(pmd_t pmd, struct vm_area_struct *vma);
 bool arch_copy_pte_mapping(vm_flags_t vm_flags);
+bool pte_exclusive(pte_t pte, struct vm_area_struct *vma);
+bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma);
 #endif
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/mm/gup.c b/mm/gup.c
index 98f13ab37bac..d7b298c5f6cb 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -179,10 +179,12 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  * FOLL_FORCE can write to even unwritable pte's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+static inline bool can_follow_write(pte_t pte, unsigned int flags,
+				    struct vm_area_struct *vma)
 {
 	return pte_write(pte) ||
-	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+		pte_exclusive(pte, vma));
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -220,7 +222,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	}
 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
+	if ((flags & FOLL_WRITE) && !can_follow_write(pte, flags, vma)) {
 		pte_unmap_unlock(ptep, ptl);
 		return NULL;
 	}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 39d66c628121..947eb0121671 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1444,10 +1444,12 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
  * FOLL_FORCE can write to even unwritable pmd's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+static inline bool can_follow_write(pmd_t pmd, unsigned int flags,
+				    struct vm_area_struct *vma)
 {
 	return pmd_write(pmd) ||
-	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+		pmd_exclusive(pmd, vma));
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1460,7 +1462,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
-	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
+	if (flags & FOLL_WRITE && !can_follow_write(*pmd, flags, vma))
 		goto out;
 
 	/* Avoid dumping huge zero page */
--
2.17.1