From: Sean Christopherson <seanjc@google.com>
To: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Josh Poimboeuf <jpoimboe@kernel.org>,
	Andy Lutomirski <luto@kernel.org>,
	Jonathan Corbet <corbet@lwn.net>,
	Paolo Bonzini <pbonzini@redhat.com>,
	tony.luck@intel.com, ak@linux.intel.com,
	tim.c.chen@linux.intel.com, linux-kernel@vger.kernel.org,
	linux-doc@vger.kernel.org, kvm@vger.kernel.org,
	Alyssa Milburn <alyssa.milburn@linux.intel.com>,
	Daniel Sneddon <daniel.sneddon@linux.intel.com>,
	antonio.gomez.iglesias@linux.intel.com
Subject: Re: [PATCH  6/6] KVM: VMX: Move VERW closer to VMentry for MDS mitigation
Date: Mon, 23 Oct 2023 07:58:57 -0700	[thread overview]
Message-ID: <ZTaKMdZqq2R1xXFh@google.com> (raw)
In-Reply-To: <20231021004633.ymughma7zijosku5@desk>

On Fri, Oct 20, 2023, Pawan Gupta wrote:
> On Fri, Oct 20, 2023 at 03:55:07PM -0700, Sean Christopherson wrote:
> > On Fri, Oct 20, 2023, Pawan Gupta wrote:
> > > During VMentry, VERW is executed to mitigate MDS. After VERW, any memory
> > > access, such as a register push onto the stack, may put host data back into
> > > MDS-affected CPU buffers. A guest can then use MDS to sample that host data.
> > > 
> > > Although the likelihood of secrets surviving in registers at the current
> > > VERW call site is low, it can't be ruled out. Harden the MDS mitigation
> > > by moving VERW later in the VMentry path.
> > > 
> > > Note that the VERW for the MMIO Stale Data mitigation is unchanged: the
> > > per-guest conditional VERW is too complex to handle that late in asm with
> > > no GPRs available. If the CPU is also affected by MDS, VERW is executed
> > > unconditionally late in asm, regardless of whether the guest has MMIO
> > > access.
> > > 
> > > Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
> > > ---
> > >  arch/x86/kvm/vmx/vmenter.S |  9 +++++++++
> > >  arch/x86/kvm/vmx/vmx.c     | 10 +++++++---
> > >  2 files changed, 16 insertions(+), 3 deletions(-)
> > > 
> > > diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> > > index be275a0410a8..efa716cf4727 100644
> > > --- a/arch/x86/kvm/vmx/vmenter.S
> > > +++ b/arch/x86/kvm/vmx/vmenter.S
> > > @@ -1,6 +1,7 @@
> > >  /* SPDX-License-Identifier: GPL-2.0 */
> > >  #include <linux/linkage.h>
> > >  #include <asm/asm.h>
> > > +#include <asm/segment.h>
> > >  #include <asm/bitsperlong.h>
> > >  #include <asm/kvm_vcpu_regs.h>
> > >  #include <asm/nospec-branch.h>
> > > @@ -31,6 +32,8 @@
> > >  #define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
> > >  #endif
> > >  
> > > +#define GUEST_CLEAR_CPU_BUFFERS		USER_CLEAR_CPU_BUFFERS
> > > +
> > >  .macro VMX_DO_EVENT_IRQOFF call_insn call_target
> > >  	/*
> > >  	 * Unconditionally create a stack frame, getting the correct RSP on the
> > > @@ -177,10 +180,16 @@ SYM_FUNC_START(__vmx_vcpu_run)
> > >   * the 'vmx_vmexit' label below.
> > >   */
> > >  .Lvmresume:
> > > +	/* Mitigate CPU data sampling attacks, e.g. MDS */
> > > +	GUEST_CLEAR_CPU_BUFFERS
> > 
> > I have a very hard time believing that it's worth duplicating the mitigation
> > for VMRESUME vs. VMLAUNCH just to land it after a Jcc.
> 
> VERW modifies the flags, so it either needs to come after the Jcc, or we
> have to push/pop flags, which adds two extra memory operations. Please let
> me know if there is a better option.

Ugh, I assumed that piggybacking VERW overrode the original behavior entirely;
I didn't realize it sacrifices EFLAGS.ZF on the altar of mitigations.
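
For anyone following along, the collision is roughly this (illustrative sketch
using the label and macro names from the patch above, not verbatim code):

	/* Check if vmlaunch or vmresume is needed */
	test $VMX_RUN_VMRESUME, %ebx	/* sets EFLAGS.ZF from the flags bit */

	/* ... load guest GPRs, carefully preserving EFLAGS ... */

	GUEST_CLEAR_CPU_BUFFERS		/* VERW: rewrites ZF with segment writability */

	jz .Lvmlaunch			/* BROKEN: ZF now reflects VERW, not TEST */

VERW's architectural job is to report whether a segment is writable, so it
always overwrites ZF (and touches no other flag).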

Luckily, this is easy to solve now that VMRESUME vs. VMLAUNCH uses a flag instead
of a dedicated bool.

From: Sean Christopherson <seanjc@google.com>
Date: Mon, 23 Oct 2023 07:44:35 -0700
Subject: [PATCH] KVM: VMX: Use BT+JNC, i.e. EFLAGS.CF to select VMRESUME vs.
 VMLAUNCH

Use EFLAGS.CF instead of EFLAGS.ZF to track whether to use VMRESUME versus
VMLAUNCH.  Freeing up EFLAGS.ZF will allow doing VERW, which clobbers ZF,
for MDS mitigations as late as possible without needing to duplicate VERW
for both paths.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/vmx/run_flags.h | 7 +++++--
 arch/x86/kvm/vmx/vmenter.S   | 6 +++---
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/run_flags.h b/arch/x86/kvm/vmx/run_flags.h
index edc3f16cc189..6a9bfdfbb6e5 100644
--- a/arch/x86/kvm/vmx/run_flags.h
+++ b/arch/x86/kvm/vmx/run_flags.h
@@ -2,7 +2,10 @@
 #ifndef __KVM_X86_VMX_RUN_FLAGS_H
 #define __KVM_X86_VMX_RUN_FLAGS_H
 
-#define VMX_RUN_VMRESUME	(1 << 0)
-#define VMX_RUN_SAVE_SPEC_CTRL	(1 << 1)
+#define VMX_RUN_VMRESUME_SHIFT		0
+#define VMX_RUN_SAVE_SPEC_CTRL_SHIFT	1
+
+#define VMX_RUN_VMRESUME		BIT(VMX_RUN_VMRESUME_SHIFT)
+#define VMX_RUN_SAVE_SPEC_CTRL		BIT(VMX_RUN_SAVE_SPEC_CTRL_SHIFT)
 
 #endif /* __KVM_X86_VMX_RUN_FLAGS_H */
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index be275a0410a8..b3b13ec04bac 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -139,7 +139,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	mov (%_ASM_SP), %_ASM_AX
 
 	/* Check if vmlaunch or vmresume is needed */
-	test $VMX_RUN_VMRESUME, %ebx
+	bt   $VMX_RUN_VMRESUME_SHIFT, %ebx
 
 	/* Load guest registers.  Don't clobber flags. */
 	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -161,8 +161,8 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	/* Load guest RAX.  This kills the @regs pointer! */
 	mov VCPU_RAX(%_ASM_AX), %_ASM_AX
 
-	/* Check EFLAGS.ZF from 'test VMX_RUN_VMRESUME' above */
-	jz .Lvmlaunch
+	/* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
+	jnc .Lvmlaunch
 
 	/*
 	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"

base-commit: ec2f1daad460c6201338dae606466220ccaa96d5
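
(Aside, for the archives: per my reading of the SDM this composes cleanly
because BT writes the tested bit into CF and leaves ZF untouched, while VERW
writes ZF and nothing else. A rough sketch of the VMentry tail with both
patches applied, not verbatim code:

	bt   $VMX_RUN_VMRESUME_SHIFT, %ebx	/* CF = VMRESUME bit, ZF unused */

	/* ... load guest GPRs, CF is preserved ... */

	GUEST_CLEAR_CPU_BUFFERS			/* VERW clobbers only ZF; CF survives */

	jnc .Lvmlaunch				/* branch on CF from the BT above */
)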
-- 


Thread overview: 42+ messages
2023-10-20 20:44 [PATCH 0/6] Delay VERW Pawan Gupta
2023-10-20 20:44 ` [PATCH 1/6] x86/bugs: Add asm helpers for executing VERW Pawan Gupta
2023-10-20 23:13   ` Sean Christopherson
2023-10-21  1:00     ` Pawan Gupta
2023-10-20 23:55   ` [RESEND][PATCH " Andrew Cooper
2023-10-21  1:18     ` Pawan Gupta
2023-10-21  1:33       ` Andrew Cooper
2023-10-21  2:21         ` Pawan Gupta
2023-10-23 18:08           ` Josh Poimboeuf
2023-10-23 19:09             ` Pawan Gupta
2023-10-25  6:28     ` Pawan Gupta
2023-10-25  7:22       ` Peter Zijlstra
2023-10-25  7:52         ` Andrew Cooper
2023-10-25  8:02           ` Peter Zijlstra
2023-10-25 15:27         ` Pawan Gupta
     [not found]   ` <6439a094-23a6-4de3-aa41-bd033163e044@citrix.com>
2023-10-22 16:16     ` [PATCH " Peter Zijlstra
2023-10-20 20:45 ` [PATCH 2/6] x86/entry_64: Add VERW just before userspace transition Pawan Gupta
2023-10-23 18:22   ` Josh Poimboeuf
2023-10-23 19:13     ` Pawan Gupta
2023-10-23 19:17     ` Dave Hansen
2023-10-23 18:35   ` Josh Poimboeuf
2023-10-23 21:04     ` Pawan Gupta
2023-10-23 21:47       ` Josh Poimboeuf
2023-10-23 22:30         ` Pawan Gupta
2023-10-23 22:45           ` Dave Hansen
2023-10-24  0:00             ` Pawan Gupta
2023-10-20 20:45 ` [PATCH 3/6] x86/entry_32: " Pawan Gupta
2023-10-20 23:49   ` Andi Kleen
2023-10-21  1:28     ` Pawan Gupta
2023-10-20 20:45 ` [PATCH 4/6] x86/bugs: Use ALTERNATIVE() instead of mds_user_clear static key Pawan Gupta
2023-10-23 18:48   ` Josh Poimboeuf
2023-10-23 21:09     ` Pawan Gupta
2023-10-20 20:45 ` [PATCH 5/6] x86/bugs: Cleanup mds_user_clear Pawan Gupta
2023-10-23  8:51   ` Nikolay Borisov
2023-10-23 16:06     ` Pawan Gupta
2023-10-20 20:45 ` [PATCH 6/6] KVM: VMX: Move VERW closer to VMentry for MDS mitigation Pawan Gupta
2023-10-20 22:55   ` Sean Christopherson
2023-10-21  0:46     ` Pawan Gupta
2023-10-23 14:58       ` Sean Christopherson [this message]
2023-10-23 17:05         ` Pawan Gupta
2023-10-23 18:56   ` Josh Poimboeuf
2023-10-23 21:17     ` Pawan Gupta
