From: Yu-cheng Yu <yu-cheng.yu@intel.com>
To: x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org,
	linux-mm@kvack.org, linux-arch@vger.kernel.org,
	linux-api@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>,
	Andy Lutomirski <luto@amacapital.net>,
	Balbir Singh <bsingharora@gmail.com>,
	Borislav Petkov <bp@alien8.de>,
	Cyrill Gorcunov <gorcunov@gmail.com>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Eugene Syromiatnikov <esyr@redhat.com>,
	Florian Weimer <fweimer@redhat.com>,
	"H.J. Lu" <hjl.tools@gmail.com>, Jann Horn <jannh@google.com>,
	Jonathan Corbet <corbet@lwn.net>,
	Kees Cook <keescook@chromium.org>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Nadav Amit <nadav.amit@gmail.com>,
	Oleg Nesterov <oleg@redhat.com>, Pavel Machek <pavel@ucw.cz>,
	Peter Zijlstra <peterz@infradead.org>,
	Randy Dunlap <rdunlap@infradead.org>,
	"Ravi V. Shankar" <ravi.v.shankar@intel.com>,
	Vedvyas Shanbhogue <vedvyas.shanbhogue@intel.com>,
	Dave Martin <Dave.Martin@arm.com>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Subject: [PATCH v7 17/27] mm: Update can_follow_write_pte/pmd for shadow stack
Date: Thu,  6 Jun 2019 13:06:36 -0700	[thread overview]
Message-ID: <20190606200646.3951-18-yu-cheng.yu@intel.com> (raw)
In-Reply-To: <20190606200646.3951-1-yu-cheng.yu@intel.com>

can_follow_write_pte()/can_follow_write_pmd() check for a
(RO & DIRTY) PTE/PMD to verify that an exclusive RO page still
exists after the COW has been broken.

A shadow stack PTE is RO & _PAGE_DIRTY_SW while it is shared,
and RO & _PAGE_DIRTY_HW otherwise.

Introduce pte_exclusive() and pmd_exclusive() so that the check
can also verify that a shadow stack PTE/PMD is exclusive.
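
As an illustration, the intended semantics can be modeled in a
few lines of userspace C (a sketch only; the bit values and the
VM_SHSTK stand-in below are illustrative, not the kernel's
definitions):

  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative stand-ins for the kernel's PTE/VMA bits. */
  #define DIRTY_HW  (1u << 0)  /* hardware-set dirty bit */
  #define DIRTY_SW  (1u << 1)  /* software dirty bit from this series */
  #define VM_SHSTK  (1u << 2)  /* shadow stack VMA flag */

  static bool pte_exclusive(unsigned int pte, unsigned int vm_flags)
  {
          /* Shadow stack pages are exclusive only when hardware-dirty. */
          if (vm_flags & VM_SHSTK)
                  return pte & DIRTY_HW;
          /* For other pages, either dirty bit indicates a broken COW. */
          return pte & (DIRTY_HW | DIRTY_SW);
  }

  int main(void)
  {
          printf("%d\n", pte_exclusive(DIRTY_SW, VM_SHSTK)); /* 0: shared */
          printf("%d\n", pte_exclusive(DIRTY_HW, VM_SHSTK)); /* 1: exclusive */
          printf("%d\n", pte_exclusive(DIRTY_SW, 0));        /* 1: normal COW */
          return 0;
  }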

Also rename can_follow_write_pte/pmd() to can_follow_write() to
make their meaning clear, i.e. "Can we write to the page?", not
"Is the PTE writable?".

Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
---
 arch/x86/mm/pgtable.c         | 18 ++++++++++++++++++
 include/asm-generic/pgtable.h |  4 ++++
 mm/gup.c                      |  8 +++++---
 mm/huge_memory.c              |  8 +++++---
 4 files changed, 32 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8ff54bd978f3..2a89c168df7b 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -913,4 +913,22 @@ inline bool arch_copy_pte_mapping(vm_flags_t vm_flags)
 {
 	return (vm_flags & VM_SHSTK);
 }
+
+inline bool pte_exclusive(pte_t pte, struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_SHSTK)
+		return pte_dirty_hw(pte);
+	else
+		return pte_dirty(pte);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+inline bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_SHSTK)
+		return pmd_dirty_hw(pmd);
+	else
+		return pmd_dirty(pmd);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_X86_INTEL_SHADOW_STACK_USER */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 4940411b8e1c..3324e30bb07f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1192,10 +1192,14 @@ static inline bool arch_has_pfn_modify_check(void)
 #define pte_set_vma_features(pte, vma) pte
 #define pmd_set_vma_features(pmd, vma) pmd
 #define arch_copy_pte_mapping(vma_flags) false
+#define pte_exclusive(pte, vma) pte_dirty(pte)
+#define pmd_exclusive(pmd, vma) pmd_dirty(pmd)
 #else
 pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma);
 pmd_t pmd_set_vma_features(pmd_t pmd, struct vm_area_struct *vma);
 bool arch_copy_pte_mapping(vm_flags_t vm_flags);
+bool pte_exclusive(pte_t pte, struct vm_area_struct *vma);
+bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma);
 #endif
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/mm/gup.c b/mm/gup.c
index ddde097cf9e4..7d11fff1e8c3 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -178,10 +178,12 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  * FOLL_FORCE can write to even unwritable pte's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+static inline bool can_follow_write(pte_t pte, unsigned int flags,
+				    struct vm_area_struct *vma)
 {
 	return pte_write(pte) ||
-		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+		 pte_exclusive(pte, vma));
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -219,7 +221,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	}
 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
+	if ((flags & FOLL_WRITE) && !can_follow_write(pte, flags, vma)) {
 		pte_unmap_unlock(ptep, ptl);
 		return NULL;
 	}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index eac1ee2f8985..d65970b9ece6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1441,10 +1441,12 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
  * FOLL_FORCE can write to even unwritable pmd's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+static inline bool can_follow_write(pmd_t pmd, unsigned int flags,
+				    struct vm_area_struct *vma)
 {
 	return pmd_write(pmd) ||
-	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+		pmd_exclusive(pmd, vma));
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1457,7 +1459,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
 	assert_spin_locked(pmd_lockptr(mm, pmd));
 
-	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
+	if (flags & FOLL_WRITE && !can_follow_write(*pmd, flags, vma))
 		goto out;
 
 	/* Avoid dumping huge zero page */
-- 
2.17.1

