From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
        id S932552AbeARPKF (ORCPT );
        Thu, 18 Jan 2018 10:10:05 -0500
Received: from bombadil.infradead.org ([65.50.211.133]:51399 "EHLO
        bombadil.infradead.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
        with ESMTP id S1754513AbeARO56 (ORCPT );
        Thu, 18 Jan 2018 09:57:58 -0500
Message-Id: <20180118140153.076225139@infradead.org>
User-Agent: quilt/0.63-1
Date: Thu, 18 Jan 2018 14:48:27 +0100
From: Peter Zijlstra
To: David Woodhouse, Thomas Gleixner, Josh Poimboeuf
Cc: linux-kernel@vger.kernel.org, Dave Hansen, Ashok Raj, Tim Chen,
    Andy Lutomirski, Linus Torvalds, Greg KH, Andrea Arcangeli, Andi Kleen,
    Arjan Van De Ven, Dan Williams, Paolo Bonzini, Jun Nakajima,
    Asit Mallick, Jason Baron, Peter Zijlstra, David Woodhouse
Subject: [PATCH 27/35] x86/enter: Use IBRS on syscall and interrupts
References: <20180118134800.711245485@infradead.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Disposition: inline; filename=x86-enter--Use_IBRS_on_syscall_and_interrupts.patch
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

From: Tim Chen

Stop Indirect Branch Speculation on every user space to kernel space
transition and re-enable it when returning to user space.

The NMI interrupt save/restore of IBRS state was based on Andrea
Arcangeli's implementation. Here's an explanation by Dave Hansen on why
we save IBRS state for NMI:

The normal interrupt code uses the 'error_entry' path which uses the
Code Segment (CS) of the instruction that was interrupted to tell
whether it interrupted the kernel or userspace and thus has to switch
IBRS, or leave it alone.

The NMI code is different. It uses 'paranoid_entry' because it can
interrupt the kernel while it is running with a userspace IBRS (and %GS
and CR3) value, but has a kernel CS. If we used the same approach as
the normal interrupt code, we might do the following:

	SYSENTER_entry
	<-------------- NMI HERE
	IBRS=1
		do_something()
	IBRS=0
	SYSRET

The NMI code might notice that we are running in the kernel and decide
that it is OK to skip the IBRS=1. This would leave it running
unprotected with IBRS=0, which is bad.

However, if we unconditionally set IBRS=1 in the NMI, we might get the
following case:

	SYSENTER_entry
	IBRS=1
		do_something()
	IBRS=0
	<-------------- NMI HERE (set IBRS=1)
	SYSRET

and we would return to userspace with IBRS=1. Userspace would run
slowly until we entered and exited the kernel again.

Instead of those two approaches, we chose a third one where we simply
save the IBRS value in a scratch register (%r13) and then restore that
value, verbatim.
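
For illustration only, here is a minimal sketch of what that save/restore
pair on the paranoid/NMI path could look like. It assumes the
IA32_SPEC_CTRL MSR (0x48) with IBRS in bit 0, and it leaves out the CPU
feature checks and alternatives that the real STOP_IB_SPEC_SAVE_AND_CLOBBER
and RESTORE_IB_SPEC_CLOBBER macros (introduced earlier in this series)
carry; it only shows the "save to a scratch register, restore verbatim"
scheme described above:

	/*
	 * Illustrative sketch, not the macros from this series: save the
	 * current SPEC_CTRL value into \save_reg, then force IBRS on.
	 */
	.macro STOP_IB_SPEC_SAVE_AND_CLOBBER save_reg:req
	movl	$0x48, %ecx		/* MSR_IA32_SPEC_CTRL */
	rdmsr				/* current value into %edx:%eax */
	movl	%eax, \save_reg		/* remember the interrupted state */
	movl	$1, %eax		/* SPEC_CTRL_IBRS (bit 0) */
	xorl	%edx, %edx
	wrmsr
	.endm

	/*
	 * Write back exactly what was saved above, so the interrupted
	 * context (kernel or user IBRS value) is left as it was found.
	 */
	.macro RESTORE_IB_SPEC_CLOBBER save_reg:req
	movl	$0x48, %ecx		/* MSR_IA32_SPEC_CTRL */
	movl	\save_reg, %eax
	xorl	%edx, %edx
	wrmsr
	.endm

With such a pair, paranoid_entry/NMI entry would use
STOP_IB_SPEC_SAVE_AND_CLOBBER save_reg=%r13d and the matching exit paths
RESTORE_IB_SPEC_CLOBBER save_reg=%r13d, as in the hunks below.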
Co-developed-by: Andrea Arcangeli
Signed-off-by: Andrea Arcangeli
Signed-off-by: Tim Chen
Signed-off-by: Thomas Gleixner
Cc: Andi Kleen
Cc: Peter Zijlstra
Cc: Greg KH
Cc: Dave Hansen
Cc: Andy Lutomirski
Cc: Paolo Bonzini
Cc: Dan Williams
Cc: Arjan Van De Ven
Cc: Linus Torvalds
Cc: David Woodhouse
Cc: Ashok Raj
---
 arch/x86/entry/entry_64.S        |   35 ++++++++++++++++++++++++++++++++++-
 arch/x86/entry/entry_64_compat.S |   21 +++++++++++++++++++--
 2 files changed, 53 insertions(+), 3 deletions(-)

--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -171,6 +171,8 @@ ENTRY(entry_SYSCALL_64_trampoline)
 	/* Load the top of the task stack into RSP */
 	movq	CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
 
+	/* Stop indirect branch speculation */
+	STOP_IB_SPEC
 	/* Start building the simulated IRET frame. */
 	pushq	$__USER_DS			/* pt_regs->ss */
@@ -214,6 +216,8 @@ ENTRY(entry_SYSCALL_64)
 	 */
 	movq	%rsp, PER_CPU_VAR(rsp_scratch)
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+	/* Stop Indirect Branch Speculation */
+	STOP_IB_SPEC
 
 	TRACE_IRQS_OFF
@@ -409,6 +413,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
 	pushq	RSP-RDI(%rdi)	/* RSP */
 	pushq	(%rdi)		/* RDI */
 
+	/* Restart Indirect Branch Speculation */
+	RESTART_IB_SPEC
 	/*
 	 * We are on the trampoline stack. All regs except RDI are live.
 	 * We can do future final exit work right here.
@@ -757,11 +763,12 @@ GLOBAL(swapgs_restore_regs_and_return_to
 	/* Push user RDI on the trampoline stack. */
 	pushq	(%rdi)
 
+	/* Restart Indirect Branch Speculation */
+	RESTART_IB_SPEC
 	/*
 	 * We are on the trampoline stack. All regs except RDI are live.
 	 * We can do future final exit work right here.
 	 */
-
 	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 
 	/* Restore RDI. */
@@ -849,6 +856,13 @@ ENTRY(native_iret)
 	SWAPGS					/* to kernel GS */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */
 
+	/*
+	 * There is no point in disabling Indirect Branch Speculation
+	 * here as this is going to return to user space immediately
+	 * after fixing ESPFIX stack. There is no vulnerable code
+	 * to protect so spare two MSR writes.
+	 */
+
 	movq	PER_CPU_VAR(espfix_waddr), %rdi
 	movq	%rax, (0*8)(%rdi)		/* user RAX */
 	movq	(1*8)(%rsp), %rax		/* user RIP */
@@ -982,6 +996,8 @@ ENTRY(switch_to_thread_stack)
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
 	movq	%rsp, %rdi
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+	/* Stop Indirect Branch Speculation */
+	STOP_IB_SPEC
 	UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
 
 	pushq	7*8(%rdi)		/* regs->ss */
@@ -1282,6 +1298,8 @@ ENTRY(paranoid_entry)
 1:
 	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
 
+	/* Stop Indirect Branch speculation */
+	STOP_IB_SPEC_SAVE_AND_CLOBBER save_reg=%r13d
 	ret
 END(paranoid_entry)
@@ -1305,6 +1323,8 @@ ENTRY(paranoid_exit)
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	.Lparanoid_exit_no_swapgs
 	TRACE_IRQS_IRETQ
+	/* Restore Indirect Branch Speculation to the previous state */
+	RESTORE_IB_SPEC_CLOBBER save_reg=%r13d
 	RESTORE_CR3	scratch_reg=%rbx save_reg=%r14
 	SWAPGS_UNSAFE_STACK
 	jmp	.Lparanoid_exit_restore
@@ -1335,6 +1355,8 @@ ENTRY(error_entry)
 	SWAPGS
 
 	/* We have user CR3. Change to kernel CR3. */
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	/* Stop Indirect Branch Speculation */
+	STOP_IB_SPEC_CLOBBER
 
 .Lerror_entry_from_usermode_after_swapgs:
 	/* Put us onto the real thread stack. */
@@ -1382,6 +1404,8 @@ ENTRY(error_entry)
 	 */
 	SWAPGS
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	/* Stop Indirect Branch Speculation */
+	STOP_IB_SPEC_CLOBBER
 	jmp	.Lerror_entry_done
 
 .Lbstep_iret:
@@ -1396,6 +1420,8 @@ ENTRY(error_entry)
 	 */
 	SWAPGS
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+	/* Stop Indirect Branch Speculation */
+	STOP_IB_SPEC
 
 	/*
 	 * Pretend that the exception came from user mode: set up pt_regs
@@ -1497,6 +1523,10 @@ ENTRY(nmi)
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+	/* Stop Indirect Branch Speculation */
+	STOP_IB_SPEC
+
 	UNWIND_HINT_IRET_REGS base=%rdx offset=8
 	pushq	5*8(%rdx)	/* pt_regs->ss */
 	pushq	4*8(%rdx)	/* pt_regs->rsp */
@@ -1747,6 +1777,9 @@ ENTRY(nmi)
 	movq	$-1, %rsi
 	call	do_nmi
 
+	/* Restore Indirect Branch speculation to the previous state */
+	RESTORE_IB_SPEC_CLOBBER save_reg=%r13d
+
 	RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
 
 	testl	%ebx, %ebx			/* swapgs needed? */
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -54,6 +54,8 @@ ENTRY(entry_SYSENTER_compat)
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+	/* Stop Indirect Branch Speculation */
+	STOP_IB_SPEC
 
 	/*
 	 * User tracing code (ptrace or signal handlers) might assume that
@@ -224,12 +226,18 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram
 	pushq	$0			/* pt_regs->r14 = 0 */
 	pushq	$0			/* pt_regs->r15 = 0 */
 
-	/*
-	 * User mode is traced as though IRQs are on, and SYSENTER
+	/* Stop Indirect Branch Speculation. All registers are saved already */
+	STOP_IB_SPEC_CLOBBER
+
+	/* User mode is traced as though IRQs are on, and SYSENTER
 	 * turned them off.
 	 */
 	TRACE_IRQS_OFF
 
+	/*
+	 * We just saved %rdi so it is safe to clobber. It is not
+	 * preserved during the C calls inside TRACE_IRQS_OFF anyway.
+	 */
 	movq	%rsp, %rdi
 	call	do_fast_syscall_32
 	/* XEN PV guests always use IRET path */
@@ -239,6 +247,15 @@ GLOBAL(entry_SYSCALL_compat_after_hwfram
 	/* Opportunistic SYSRET */
 sysret32_from_system_call:
 	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
+
+	/*
+	 * Restart Indirect Branch Speculation. This is safe to do here
+	 * because there are no indirect branches between here and the
+	 * return to userspace (sysretl).
+	 * Clobber of %rax, %rcx, %rdx is OK before register restoring.
+	 */
+	RESTART_IB_SPEC_CLOBBER
+
 	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
 	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
 	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */