From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: linux-kernel@vger.kernel.org, stable@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Josh Poimboeuf <jpoimboe@kernel.org>,
	"Peter Zijlstra (Intel)" <peterz@infradead.org>,
	Borislav Petkov <bp@suse.de>,
	Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Subject: [PATCH 5.4 28/51] KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
Date: Wed,  5 Oct 2022 13:32:16 +0200
Message-ID: <20221005113211.571486799@linuxfoundation.org>
In-Reply-To: <20221005113210.255710920@linuxfoundation.org>

From: Josh Poimboeuf <jpoimboe@kernel.org>

commit fc02735b14fff8c6678b521d324ade27b1a3d4cf upstream.

On eIBRS systems, the returns in the vmexit return path from
__vmx_vcpu_run() to vmx_vcpu_run() are exposed to RSB poisoning attacks.

Fix that by moving the post-vmexit spec_ctrl handling to immediately
after the vmexit.
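
In short, the exit path in vmenter.S now fills the RSB and calls a new
helper to restore the host SPEC_CTRL value before the first unbalanced
RET after VM-Exit. For readers skimming the diff, the helper below is
condensed from the vmx.c hunk further down, with an added comment noting
where it is called from:

	/*
	 * Called from the __vmx_vcpu_run() exit path in vmenter.S, after
	 * FILL_RETURN_BUFFER and before the first unbalanced RET after
	 * VM-Exit.
	 */
	void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
						unsigned int flags)
	{
		u64 hostval = this_cpu_read(x86_spec_ctrl_current);

		if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
			return;

		/* Save the guest value if SPEC_CTRL writes are not intercepted. */
		if (flags & VMX_RUN_SAVE_SPEC_CTRL)
			vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);

		/*
		 * If the guest/host SPEC_CTRL values differ, restore the host value.
		 */
		if (vmx->spec_ctrl != hostval)
			native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);

		barrier_nospec();
	}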

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/nospec-branch.h |    1 
 arch/x86/kernel/cpu/bugs.c           |    4 ++
 arch/x86/kvm/vmx/run_flags.h         |    1 
 arch/x86/kvm/vmx/vmenter.S           |   49 +++++++++++++++++++++++++++--------
 arch/x86/kvm/vmx/vmx.c               |   48 ++++++++++++++++++++--------------
 arch/x86/kvm/vmx/vmx.h               |    1 
 6 files changed, 73 insertions(+), 31 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -296,6 +296,7 @@ static inline void indirect_branch_predi
 
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
+extern u64 x86_spec_ctrl_current;
 extern void write_spec_ctrl_current(u64 val, bool force);
 extern u64 spec_ctrl_current(void);
 
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -185,6 +185,10 @@ void __init check_bugs(void)
 #endif
 }
 
+/*
+ * NOTE: For VMX, this function is not called in the vmexit path.
+ * It uses vmx_spec_ctrl_restore_host() instead.
+ */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -3,5 +3,6 @@
 #define __KVM_X86_VMX_RUN_FLAGS_H
 
 #define VMX_RUN_VMRESUME	(1 << 0)
+#define VMX_RUN_SAVE_SPEC_CTRL	(1 << 1)
 
 #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -32,9 +32,10 @@
 
 /**
  * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
- * @vmx:	struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
+ * @vmx:	struct vcpu_vmx *
  * @regs:	unsigned long * (to guest registers)
- * @flags:	VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
+ * @flags:	VMX_RUN_VMRESUME:	use VMRESUME instead of VMLAUNCH
+ *		VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
  *
  * Returns:
  *	0 on VM-Exit, 1 on VM-Fail
@@ -53,6 +54,12 @@ ENTRY(__vmx_vcpu_run)
 #endif
 	push %_ASM_BX
 
+	/* Save @vmx for SPEC_CTRL handling */
+	push %_ASM_ARG1
+
+	/* Save @flags for SPEC_CTRL handling */
+	push %_ASM_ARG3
+
 	/*
 	 * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
 	 * @regs is needed after VM-Exit to save the guest's register values.
@@ -136,23 +143,21 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
-	/* IMPORTANT: RSB must be stuffed before the first return. */
-	FILL_RETURN_BUFFER %_ASM_BX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-
-	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
-	xor %eax, %eax
+	/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
+	xor %ebx, %ebx
 
 .Lclear_regs:
 	/*
-	 * Clear all general purpose registers except RSP and RAX to prevent
+	 * Clear all general purpose registers except RSP and RBX to prevent
 	 * speculative use of the guest's values, even those that are reloaded
 	 * via the stack.  In theory, an L1 cache miss when restoring registers
 	 * could lead to speculative execution with the guest's values.
 	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
 	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
-	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
+	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
+	 * value.
 	 */
-	xor %ebx, %ebx
+	xor %eax, %eax
 	xor %ecx, %ecx
 	xor %edx, %edx
 	xor %esi, %esi
@@ -172,6 +177,28 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL
 	/* "POP" @regs. */
 	add $WORD_SIZE, %_ASM_SP
 
+	/*
+	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
+	 * the first unbalanced RET after vmexit!
+	 *
+	 * For retpoline, RSB filling is needed to prevent poisoned RSB entries
+	 * and (in some cases) RSB underflow.
+	 *
+	 * eIBRS has its own protection against poisoned RSB, so it doesn't
+	 * need the RSB filling sequence.  But it does need to be enabled
+	 * before the first unbalanced RET.
+         */
+
+	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+
+	pop %_ASM_ARG2	/* @flags */
+	pop %_ASM_ARG1	/* @vmx */
+
+	call vmx_spec_ctrl_restore_host
+
+	/* Put return value in AX */
+	mov %_ASM_BX, %_ASM_AX
+
 	pop %_ASM_BX
 #ifdef CONFIG_X86_64
 	pop %r12
@@ -191,7 +218,7 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL
 	ud2
 .Lvmfail:
 	/* VM-Fail: set return value to 1 */
-	mov $1, %eax
+	mov $1, %_ASM_BX
 	jmp .Lclear_regs
 
 ENDPROC(__vmx_vcpu_run)
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -870,6 +870,14 @@ unsigned int __vmx_vcpu_run_flags(struct
 	if (vmx->loaded_vmcs->launched)
 		flags |= VMX_RUN_VMRESUME;
 
+	/*
+	 * If writes to the SPEC_CTRL MSR aren't intercepted, the guest is free
+	 * to change it directly without causing a vmexit.  In that case read
+	 * it after vmexit and store it in vmx->spec_ctrl.
+	 */
+	if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
+		flags |= VMX_RUN_SAVE_SPEC_CTRL;
+
 	return flags;
 }
 
@@ -6550,6 +6558,26 @@ void vmx_update_host_rsp(struct vcpu_vmx
 	}
 }
 
+void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
+					unsigned int flags)
+{
+	u64 hostval = this_cpu_read(x86_spec_ctrl_current);
+
+	if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
+		return;
+
+	if (flags & VMX_RUN_SAVE_SPEC_CTRL)
+		vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
+
+	/*
+	 * If the guest/host SPEC_CTRL values differ, restore the host value.
+	 */
+	if (vmx->spec_ctrl != hostval)
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
+
+	barrier_nospec();
+}
+
 static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6643,26 +6671,6 @@ static void vmx_vcpu_run(struct kvm_vcpu
 
 	vmx_enable_fb_clear(vmx);
 
-	/*
-	 * We do not use IBRS in the kernel. If this vCPU has used the
-	 * SPEC_CTRL MSR it may have left it on; save the value and
-	 * turn it off. This is much more efficient than blindly adding
-	 * it to the atomic save/restore list. Especially as the former
-	 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
-	 *
-	 * For non-nested case:
-	 * If the L01 MSR bitmap does not intercept the MSR, then we need to
-	 * save it.
-	 *
-	 * For nested case:
-	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
-	 * save it.
-	 */
-	if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
-		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
-
-	x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
-
 	/* All fields are clean at this point */
 	if (static_branch_unlikely(&enable_evmcs))
 		current_evmcs->hv_clean_fields |=
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -337,6 +337,7 @@ void vmx_set_virtual_apic_mode(struct kv
 struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
+void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx);
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
 		    unsigned int flags);



