From: Chao Gao <chao.gao@intel.com>
To: kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: daniel.sneddon@linux.intel.com,
	pawan.kumar.gupta@linux.intel.com, Chao Gao <chao.gao@intel.com>,
	Sean Christopherson <seanjc@google.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	x86@kernel.org, "H. Peter Anvin" <hpa@zytor.com>
Subject: [RFC PATCH v3 06/10] KVM: VMX: Cache force_spec_ctrl_value/mask for each vCPU
Date: Wed, 10 Apr 2024 22:34:34 +0800
Message-ID: <20240410143446.797262-7-chao.gao@intel.com>
In-Reply-To: <20240410143446.797262-1-chao.gao@intel.com>

Cache force_spec_ctrl_value/mask in struct vcpu_vmx so that KVM can adjust
the mask/value for each vCPU according to the software mitigations that
vCPU is using.

KVM_CAP_FORCE_SPEC_CTRL allows the userspace VMM to proactively enable
hardware mitigations (by forcing some bits in the IA32_SPEC_CTRL MSR) to
keep the guest from becoming vulnerable to certain security issues after
live migration. E.g., a guest using the short BHB-clearing sequence for
BHI becomes vulnerable to BHI when it is migrated from a pre-SPR part to
an SPR part. The current solution is for the userspace VMM to deploy
BHI_DIS_S for all guests migrated from pre-SPR parts to SPR parts.
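
For illustration only, a userspace VMM could request this per VM roughly
as below. The argument layout (mask in args[0], forced value in args[1])
is an assumption made for this sketch, not the ABI defined earlier in
this series:

	/*
	 * Hypothetical userspace sketch: force BHI_DIS_S (IA32_SPEC_CTRL
	 * bit 10) to be set for the whole VM.  The capability number and
	 * the args[] layout are assumptions for illustration.
	 */
	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	#define SPEC_CTRL_BHI_DIS_S	(1ULL << 10)

	static int force_bhi_dis_s(int vm_fd, unsigned int cap_force_spec_ctrl)
	{
		struct kvm_enable_cap cap = {
			.cap = cap_force_spec_ctrl,
			.args = {
				[0] = SPEC_CTRL_BHI_DIS_S,	/* mask: bits owned by the VMM */
				[1] = SPEC_CTRL_BHI_DIS_S,	/* value: force them to 1 */
			},
		};

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}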

But KVM_CAP_FORCE_SPEC_CTRL isn't flexible: the userspace VMM may end up
configuring KVM to enable BHI_DIS_S for guests that don't care about BHI
at all, or that use other mitigations (e.g., the TSX abort sequence) for
BHI. This causes unnecessary overhead for those guests.

To reduce this overhead, the idea is to let the guest communicate to the
VMM which software mitigations it is using, via Intel-defined virtual
MSRs [1]. This information from the guest is much more accurate, so KVM
can adjust the hardware mitigations accordingly and keep the performance
impact on the guest as small as possible.

The Intel-defined virtual MSRs are per-thread scope, so vCPUs _can_
program different values into them. This means KVM may need to apply a
different mask/value to the IA32_SPEC_CTRL MSR for each vCPU. So, cache
force_spec_ctrl_value/mask per vCPU in preparation for adding support
for the Intel-defined virtual MSRs.
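
To make the intended use concrete, here is a rough sketch, not part of
this patch, of how a later change could fold a vCPU's report into these
fields. MITI_CTRL_BHB_CLEAR_SEQ_S_USED is a placeholder name, not the
actual virtual MSR bit definition; everything else is taken from this
patch or from existing code:

	static void vmx_adjust_forced_spec_ctrl(struct vcpu_vmx *vmx, u64 miti_ctrl)
	{
		if (miti_ctrl & MITI_CTRL_BHB_CLEAR_SEQ_S_USED) {
			/* Short BHB-clearing sequence isn't enough; force BHI_DIS_S. */
			vmx->force_spec_ctrl_mask  |= SPEC_CTRL_BHI_DIS_S;
			vmx->force_spec_ctrl_value |= SPEC_CTRL_BHI_DIS_S;
		} else {
			/* Guest uses some other mitigation; leave the bit to the guest. */
			vmx->force_spec_ctrl_mask  &= ~SPEC_CTRL_BHI_DIS_S;
			vmx->force_spec_ctrl_value &= ~SPEC_CTRL_BHI_DIS_S;
		}

		if (cpu_has_spec_ctrl_shadow())
			vmcs_write64(IA32_SPEC_CTRL_MASK, vmx->force_spec_ctrl_mask);

		/*
		 * The effective value then follows the same formula as the
		 * vmx_spec_ctrl_restore_host() hunk below:
		 *   spec_ctrl = (shadow & ~force_spec_ctrl_mask) | force_spec_ctrl_value
		 */
	}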

[1]: https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html

Signed-off-by: Chao Gao <chao.gao@intel.com>
---
 arch/x86/kvm/vmx/nested.c |  2 +-
 arch/x86/kvm/vmx/vmx.c    | 11 +++++++----
 arch/x86/kvm/vmx/vmx.h    |  7 +++++++
 3 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 174790b2ffbc..efbc871d0466 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2390,7 +2390,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
 		exec_control &= TERTIARY_EXEC_SPEC_CTRL_SHADOW;
 		if (exec_control & TERTIARY_EXEC_SPEC_CTRL_SHADOW)
 			vmcs_write64(IA32_SPEC_CTRL_MASK,
-				     vmx->vcpu.kvm->arch.force_spec_ctrl_mask);
+				     vmx->force_spec_ctrl_mask);
 
 		tertiary_exec_controls_set(vmx, exec_control);
 	}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 93c208f009cf..cdfcc1290d82 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2161,7 +2161,7 @@ static void vmx_set_spec_ctrl(struct kvm_vcpu *vcpu, u64 val)
 		vmx->spec_ctrl_shadow = val;
 		vmcs_write64(IA32_SPEC_CTRL_SHADOW, val);
 
-		vmx->spec_ctrl |= vcpu->kvm->arch.force_spec_ctrl_value;
+		vmx->spec_ctrl |= vmx->force_spec_ctrl_value;
 	}
 }
 
@@ -4803,6 +4803,9 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 	if (cpu_has_vmx_xsaves())
 		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
 
+	vmx->force_spec_ctrl_mask = kvm->arch.force_spec_ctrl_mask;
+	vmx->force_spec_ctrl_value = kvm->arch.force_spec_ctrl_value;
+
 	if (cpu_has_spec_ctrl_shadow()) {
 		vmx->spec_ctrl_shadow = 0;
 		vmcs_write64(IA32_SPEC_CTRL_SHADOW, 0);
@@ -4816,7 +4819,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 		 * guest modify other bits at will, without triggering VM-Exits.
 		 */
 		if (kvm->arch.force_spec_ctrl_mask)
-			vmcs_write64(IA32_SPEC_CTRL_MASK, kvm->arch.force_spec_ctrl_mask);
+			vmcs_write64(IA32_SPEC_CTRL_MASK, vmx->force_spec_ctrl_mask);
 		else
 			vmcs_write64(IA32_SPEC_CTRL_MASK, 0);
 	}
@@ -7251,8 +7254,8 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
 		if (cpu_has_spec_ctrl_shadow()) {
 			vmx->spec_ctrl_shadow = vmcs_read64(IA32_SPEC_CTRL_SHADOW);
 			vmx->spec_ctrl = (vmx->spec_ctrl_shadow &
-					~vmx->vcpu.kvm->arch.force_spec_ctrl_mask) |
-					 vmx->vcpu.kvm->arch.force_spec_ctrl_value;
+					~vmx->force_spec_ctrl_mask) |
+					 vmx->force_spec_ctrl_value;
 		} else {
 			vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
 		}
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 97324f6ee01c..a4dfe538e5a8 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -287,6 +287,13 @@ struct vcpu_vmx {
 	 */
 	u64		      spec_ctrl_shadow;
 
+	/*
+	 * Mask and value of SPEC_CTRL MSR bits which the guest is not allowed to
+	 * change.
+	 */
+	u64		      force_spec_ctrl_mask;
+	u64		      force_spec_ctrl_value;
+
 	u32		      msr_ia32_umwait_control;
 
 	/*
-- 
2.39.3


Thread overview: 14+ messages
2024-04-10 14:34 [RFC PATCH v3 00/10] Virtualize Intel IA32_SPEC_CTRL Chao Gao
2024-04-10 14:34 ` [RFC PATCH v3 01/10] KVM: VMX: " Chao Gao
2024-04-12  4:07   ` Jim Mattson
2024-04-12 10:18     ` Chao Gao
2024-04-10 14:34 ` [RFC PATCH v3 02/10] KVM: VMX: Cache IA32_SPEC_CTRL_SHADOW field of VMCS Chao Gao
2024-04-10 14:34 ` [RFC PATCH v3 03/10] KVM: nVMX: Enable SPEC_CTRL virtualizaton for vmcs02 Chao Gao
2024-04-10 14:34 ` [RFC PATCH v3 04/10] x86/bugs: Use Virtual MSRs to request BHI_DIS_S Chao Gao
2024-04-10 14:34 ` [RFC PATCH v3 05/10] x86/bugs: Use Virtual MSRs to request RRSBA_DIS_S Chao Gao
2024-04-10 14:34 ` Chao Gao [this message]
2024-04-10 14:34 ` [RFC PATCH v3 07/10] KVM: x86: Advertise ARCH_CAP_VIRTUAL_ENUM support Chao Gao
2024-04-12  4:22   ` Jim Mattson
2024-04-10 14:34 ` [RFC PATCH v3 08/10] KVM: VMX: Advertise MITIGATION_CTRL support Chao Gao
2024-04-10 14:34 ` [RFC PATCH v3 09/10] KVM: VMX: Advertise MITI_CTRL_BHB_CLEAR_SEQ_S_SUPPORT Chao Gao
2024-04-10 14:34 ` [RFC PATCH v3 10/10] KVM: VMX: Advertise MITI_ENUM_RETPOLINE_S_SUPPORT Chao Gao
