All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] x86/pti: Make X86_BUG_CPU_SECURE_MODE_PTI an X86_FEATURE
@ 2017-12-12 19:00 Borislav Petkov
  0 siblings, 0 replies; only message in thread
From: Borislav Petkov @ 2017-12-12 19:00 UTC (permalink / raw)
  To: X86 ML
  Cc: LKML, Linus Torvalds, Andy Lutomirski, Peter Zijlstra,
	Dave Hansen, Greg KH, keescook, hughd, Brian Gerst,
	Josh Poimboeuf, Denys Vlasenko, Boris Ostrovsky, Juergen Gross,
	David Laight, Eduardo Valentin, aliguori, Will Deacon,
	daniel.gruss

From: Borislav Petkov <bp@suse.de>

... to denote that we have enabled page table isolation. Which is not a
bug but a feature! :-)

Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: keescook@google.com
Cc: hughd@google.com
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: aliguori@amazon.com
Cc: Will Deacon <will.deacon@arm.com>
Cc: daniel.gruss@iaik.tugraz.at
---
 arch/x86/entry/calling.h           | 8 ++++----
 arch/x86/include/asm/cpufeatures.h | 3 +--
 arch/x86/include/asm/tlbflush.h    | 6 +++---
 arch/x86/kernel/cpu/common.c       | 2 +-
 arch/x86/kernel/ldt.c              | 2 +-
 arch/x86/mm/dump_pagetables.c      | 4 ++--
 arch/x86/mm/init.c                 | 2 +-
 arch/x86/mm/pti.c                  | 6 +++---
 arch/x86/mm/tlb.c                  | 2 +-
 9 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 393a5bf190b7..45a63e00a6af 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -212,7 +212,7 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 
 .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
-	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_CPU_SECURE_MODE_PTI
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
 	mov	%cr3, \scratch_reg
 	ADJUST_KERNEL_CR3 \scratch_reg
 	mov	\scratch_reg, %cr3
@@ -223,7 +223,7 @@ For 32-bit we have the following conventions - kernel is built with
 	PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask
 
 .macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req
-	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_CPU_SECURE_MODE_PTI
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
 	mov	%cr3, \scratch_reg
 
 	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
@@ -259,7 +259,7 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 
 .macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
-	ALTERNATIVE "jmp .Ldone_\@", "", X86_BUG_CPU_SECURE_MODE_PTI
+	ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
 	movq	%cr3, \scratch_reg
 	movq	\scratch_reg, \save_reg
 	/*
@@ -282,7 +282,7 @@ For 32-bit we have the following conventions - kernel is built with
 .endm
 
 .macro RESTORE_CR3 scratch_reg:req save_reg:req
-	ALTERNATIVE "jmp .Lend_\@", "", X86_BUG_CPU_SECURE_MODE_PTI
+	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
 
 	ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID
 
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f8c2bd42ec48..df7e474b470b 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -198,10 +198,10 @@
 #define X86_FEATURE_CAT_L2		( 7*32+ 5) /* Cache Allocation Technology L2 */
 #define X86_FEATURE_CDP_L3		( 7*32+ 6) /* Code and Data Prioritization L3 */
 #define X86_FEATURE_INVPCID_SINGLE	( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
-
 #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_PTI		( 7*32+11) /* Kernel Page Table Isolation enabled */
 
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_INTEL_PT		( 7*32+15) /* Intel Processor Trace */
@@ -342,6 +342,5 @@
 #define X86_BUG_MONITOR			X86_BUG(12) /* IPI required to wake up remote CPU */
 #define X86_BUG_AMD_E400		X86_BUG(13) /* CPU is among the affected by Erratum 400 */
 #define X86_BUG_CPU_INSECURE		X86_BUG(14) /* CPU is insecure and needs kernel page table isolation */
-#define X86_BUG_CPU_SECURE_MODE_PTI	X86_BUG(15) /* Kernel Page Table Isolation enabled*/
 
 #endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index a97f000caf2f..8590aa954a1d 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -333,7 +333,7 @@ static inline void invalidate_user_asid(u16 asid)
 	if (!cpu_feature_enabled(X86_FEATURE_PCID))
 		return;
 
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI))
+	if (!static_cpu_has(X86_FEATURE_PTI))
 		return;
 
 	__set_bit(kern_pcid(asid),
@@ -399,7 +399,7 @@ static inline void __native_flush_tlb_single(unsigned long addr)
 
 	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
 
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI))
+	if (!static_cpu_has(X86_FEATURE_PTI))
 		return;
 
 	/*
@@ -435,7 +435,7 @@ static inline void __flush_tlb_one(unsigned long addr)
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
 
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI))
+	if (!static_cpu_has(X86_FEATURE_PTI))
 		return;
 
 	/*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 7b9d87107950..7b2988e521a7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1456,7 +1456,7 @@ void syscall_init(void)
 		(entry_SYSCALL_64_trampoline - _entry_trampoline);
 
 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
-	if (static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI))
+	if (static_cpu_has(X86_FEATURE_PTI))
 		wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
 	else
 		wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index a10286342e05..b88dcaf7249e 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -53,7 +53,7 @@ static void set_ldt_and_map(struct ldt_struct *ldt)
 	void *fixva;
 	int idx, i;
 
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI)) {
+	if (!static_cpu_has(X86_FEATURE_PTI)) {
 		set_ldt(ldt->entries, ldt->nr_entries);
 		return;
 	}
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index e5a2df886130..b740c2b100fe 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -504,7 +504,7 @@ void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
 void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
 {
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
-	if (user && static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI))
+	if (user && static_cpu_has(X86_FEATURE_PTI))
 		pgd = kernel_to_user_pgdp(pgd);
 #endif
 	ptdump_walk_pgd_level_core(m, pgd, false, false);
@@ -516,7 +516,7 @@ static void ptdump_walk_user_pgd_level_checkwx(void)
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
 	pgd_t *pgd = (pgd_t *) &init_top_pgt;
 
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI))
+	if (!static_cpu_has(X86_FEATURE_PTI))
 		return;
 
 	pr_info("x86/mm: Checking user space page tables\n");
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 3fb7806c6806..80259ad8c386 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -164,7 +164,7 @@ static int page_size_mask;
 
 static void enable_global_pages(void)
 {
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI))
+	if (!static_cpu_has(X86_FEATURE_PTI))
 		__supported_pte_mask |= _PAGE_GLOBAL;
 }
 
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 1f8114689528..6294c44a60cf 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -79,7 +79,7 @@ void __init pti_check_boottime_disable(void)
 
 enable:
 	if (enable)
-		setup_force_cpu_bug(X86_BUG_CPU_SECURE_MODE_PTI);
+		setup_force_cpu_cap(X86_FEATURE_PTI);
 }
 
 /*
@@ -89,7 +89,7 @@ void __init pti_check_boottime_disable(void)
  */
 pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI))
+	if (!static_cpu_has(X86_FEATURE_PTI))
 		return pgd;
 
 	if (pgdp_maps_userspace(pgdp)) {
@@ -272,7 +272,7 @@ static void __init pti_clone_entry_text(void)
  */
 void __init pti_init(void)
 {
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI))
+	if (!static_cpu_has(X86_FEATURE_PTI))
 		return;
 
 	pr_info("enabled\n");
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 7982d873a6ac..a1561957dccb 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -42,7 +42,7 @@ void clear_asid_other(void)
 	 * This is only expected to be set if we have disabled
 	 * kernel _PAGE_GLOBAL pages.
 	 */
-	if (!static_cpu_has_bug(X86_BUG_CPU_SECURE_MODE_PTI)) {
+	if (!static_cpu_has(X86_FEATURE_PTI)) {
 		WARN_ON_ONCE(1);
 		return;
 	}
-- 
2.13.0

^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2017-12-12 19:00 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-12-12 19:00 [PATCH] x86/pti: Make X86_BUG_CPU_SECURE_MODE_PTI an X86_FEATURE Borislav Petkov

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.