From: Yang Weijiang <weijiang.yang@intel.com>
To: pbonzini@redhat.com, seanjc@google.com, kvm@vger.kernel.org
Cc: like.xu.linux@gmail.com, kan.liang@linux.intel.com,
	wei.w.wang@intel.com, linux-kernel@vger.kernel.org
Subject: [PATCH 10/15] KVM: x86/vmx: Check Arch LBR config when returning perf capabilities
Date: Wed, 31 Aug 2022 18:34:33 -0400	[thread overview]
Message-ID: <20220831223438.413090-11-weijiang.yang@intel.com> (raw)
In-Reply-To: <20220831223438.413090-1-weijiang.yang@intel.com>

Two new bit fields (VM_EXIT_CLEAR_IA32_LBR_CTL and VM_ENTRY_LOAD_IA32_LBR_CTL)
are added to support guest Arch LBR. Both bits must be set for Arch LBR to
work in the guest as well as the host. Since Arch LBR is not supported in
nested guests, clear the two bits before running an L2 VM.
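For readers skimming the diff below, here is a standalone, userspace-style
sketch of the three pieces of bit logic this patch introduces (the VMX
capability check, the perf-capabilities filtering, and stripping the
controls before entering L2). It is only an illustration, not kernel code;
the PMU_CAP_LBR_FMT value is assumed here (0x3f, the LBR-format field of
IA32_PERF_CAPABILITIES), while the two VMCS control values are the ones
defined in the patch.

/* arch_lbr_bits_sketch.c: illustrative only, mirrors the patch's logic. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VM_ENTRY_LOAD_IA32_LBR_CTL	0x00200000ULL
#define VM_EXIT_CLEAR_IA32_LBR_CTL	0x04000000ULL
#define PMU_CAP_LBR_FMT			0x3fULL		/* assumed value */

struct fake_vmcs_config {
	uint64_t vmentry_ctrl;
	uint64_t vmexit_ctrl;
};

/* Mirrors cpu_has_vmx_arch_lbr(): Arch LBR is usable only if the "load
 * IA32_LBR_CTL" VM-entry control was accepted during VMCS config setup. */
static bool has_vmx_arch_lbr(const struct fake_vmcs_config *cfg)
{
	return cfg->vmentry_ctrl & VM_ENTRY_LOAD_IA32_LBR_CTL;
}

/* Mirrors the vmx_get_perf_capabilities() hunk: if the CPU has Arch LBR
 * but VMX cannot context-switch IA32_LBR_CTL, hide the LBR format bits. */
static uint64_t filter_perf_cap(uint64_t perf_cap, bool cpu_has_arch_lbr,
				const struct fake_vmcs_config *cfg)
{
	if (cpu_has_arch_lbr && !has_vmx_arch_lbr(cfg))
		perf_cap &= ~PMU_CAP_LBR_FMT;
	return perf_cap;
}

/* Mirrors prepare_vmcs02_early(): Arch LBR is not exposed to nested (L2)
 * guests, so both controls are cleared before entering L2. */
static void strip_arch_lbr_for_l2(const struct fake_vmcs_config *cfg,
				  uint64_t *entry_ctl, uint64_t *exit_ctl)
{
	if (has_vmx_arch_lbr(cfg)) {
		*entry_ctl &= ~VM_ENTRY_LOAD_IA32_LBR_CTL;
		*exit_ctl &= ~VM_EXIT_CLEAR_IA32_LBR_CTL;
	}
}

int main(void)
{
	struct fake_vmcs_config cfg = {
		.vmentry_ctrl = VM_ENTRY_LOAD_IA32_LBR_CTL,
		.vmexit_ctrl  = VM_EXIT_CLEAR_IA32_LBR_CTL,
	};
	uint64_t l2_entry = VM_ENTRY_LOAD_IA32_LBR_CTL;
	uint64_t l2_exit  = VM_EXIT_CLEAR_IA32_LBR_CTL;

	printf("filtered perf_cap = 0x%llx\n",
	       (unsigned long long)filter_perf_cap(PMU_CAP_LBR_FMT, true, &cfg));
	strip_arch_lbr_for_l2(&cfg, &l2_entry, &l2_exit);
	printf("L2 entry=0x%llx exit=0x%llx\n",
	       (unsigned long long)l2_entry, (unsigned long long)l2_exit);
	return 0;
}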

Co-developed-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/include/asm/vmx.h      |  2 ++
 arch/x86/kvm/vmx/capabilities.h |  8 ++++++++
 arch/x86/kvm/vmx/nested.c       |  8 ++++++++
 arch/x86/kvm/vmx/vmx.c          | 16 ++++++++++++++--
 4 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 50c6f36daaea..e8781df8cc2e 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -102,6 +102,7 @@
 #define VM_EXIT_CLEAR_BNDCFGS                   0x00800000
 #define VM_EXIT_PT_CONCEAL_PIP			0x01000000
 #define VM_EXIT_CLEAR_IA32_RTIT_CTL		0x02000000
+#define VM_EXIT_CLEAR_IA32_LBR_CTL		0x04000000
 
 #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR	0x00036dff
 
@@ -115,6 +116,7 @@
 #define VM_ENTRY_LOAD_BNDCFGS                   0x00010000
 #define VM_ENTRY_PT_CONCEAL_PIP			0x00020000
 #define VM_ENTRY_LOAD_IA32_RTIT_CTL		0x00040000
+#define VM_ENTRY_LOAD_IA32_LBR_CTL		0x00200000
 
 #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR	0x000011ff
 
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index c5e5dfef69c7..b7147698ad82 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -401,6 +401,11 @@ static inline bool vmx_pebs_supported(void)
 	return boot_cpu_has(X86_FEATURE_PEBS) && kvm_pmu_cap.pebs_ept;
 }
 
+static inline bool cpu_has_vmx_arch_lbr(void)
+{
+	return vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_LBR_CTL;
+}
+
 static inline u64 vmx_get_perf_capabilities(void)
 {
 	u64 perf_cap = PMU_CAP_FW_WRITES;
@@ -420,6 +425,9 @@ static inline u64 vmx_get_perf_capabilities(void)
 			perf_cap &= ~PERF_CAP_PEBS_BASELINE;
 	}
 
+	if (boot_cpu_has(X86_FEATURE_ARCH_LBR) && !cpu_has_vmx_arch_lbr())
+		perf_cap &= ~PMU_CAP_LBR_FMT;
+
 	return perf_cap;
 }
 
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ddd4367d4826..d4b05354e7ab 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2338,6 +2338,10 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
 		if (guest_efer != host_efer)
 			exec_control |= VM_ENTRY_LOAD_IA32_EFER;
 	}
+
+	if (cpu_has_vmx_arch_lbr())
+		exec_control &= ~VM_ENTRY_LOAD_IA32_LBR_CTL;
+
 	vm_entry_controls_set(vmx, exec_control);
 
 	/*
@@ -2352,6 +2356,10 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
 		exec_control |= VM_EXIT_LOAD_IA32_EFER;
 	else
 		exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
+
+	if (cpu_has_vmx_arch_lbr())
+		exec_control &= ~VM_EXIT_CLEAR_IA32_LBR_CTL;
+
 	vm_exit_controls_set(vmx, exec_control);
 
 	/*
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 32e41bd217e3..cdf65cdcb45a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2559,6 +2559,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 		{ VM_ENTRY_LOAD_IA32_EFER,		VM_EXIT_LOAD_IA32_EFER },
 		{ VM_ENTRY_LOAD_BNDCFGS,		VM_EXIT_CLEAR_BNDCFGS },
 		{ VM_ENTRY_LOAD_IA32_RTIT_CTL,		VM_EXIT_CLEAR_IA32_RTIT_CTL },
+		{ VM_ENTRY_LOAD_IA32_LBR_CTL,		VM_EXIT_CLEAR_IA32_LBR_CTL },
 	};
 
 	memset(vmcs_conf, 0, sizeof(*vmcs_conf));
@@ -2679,7 +2680,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 	      VM_EXIT_LOAD_IA32_EFER |
 	      VM_EXIT_CLEAR_BNDCFGS |
 	      VM_EXIT_PT_CONCEAL_PIP |
-	      VM_EXIT_CLEAR_IA32_RTIT_CTL;
+	      VM_EXIT_CLEAR_IA32_RTIT_CTL |
+	      VM_EXIT_CLEAR_IA32_LBR_CTL;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
 				&_vmexit_control) < 0)
 		return -EIO;
@@ -2703,7 +2705,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 	      VM_ENTRY_LOAD_IA32_EFER |
 	      VM_ENTRY_LOAD_BNDCFGS |
 	      VM_ENTRY_PT_CONCEAL_PIP |
-	      VM_ENTRY_LOAD_IA32_RTIT_CTL;
+	      VM_ENTRY_LOAD_IA32_RTIT_CTL |
+	      VM_ENTRY_LOAD_IA32_LBR_CTL;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
 				&_vmentry_control) < 0)
 		return -EIO;
@@ -4803,6 +4806,11 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vpid_sync_context(vmx->vpid);
 
 	vmx_update_fb_clear_dis(vcpu, vmx);
+
+	if (!init_event) {
+		if (cpu_has_vmx_arch_lbr())
+			vmcs_write64(GUEST_IA32_LBR_CTL, 0);
+	}
 }
 
 static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
@@ -6198,6 +6206,10 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
 	    vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
 		pr_err("PerfGlobCtl = 0x%016llx\n",
 		       vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
+	if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR) &&
+	    vmentry_ctl & VM_ENTRY_LOAD_IA32_LBR_CTL)
+		pr_err("ArchLBRCtl = 0x%016llx\n",
+		       vmcs_read64(GUEST_IA32_LBR_CTL));
 	if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
 		pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
 	pr_err("Interruptibility = %08x  ActivityState = %08x\n",
-- 
2.27.0


Thread overview: 24+ messages
2022-08-31 22:34 [PATCH 00/15] Introduce Architectural LBR for vPMU Yang Weijiang
2022-08-31 22:34 ` [PATCH 01/15] perf/x86/lbr: Simplify the exposure check for the LBR_INFO registers Yang Weijiang
2022-09-01 14:19   ` Sean Christopherson
2022-09-02  3:05     ` Yang, Weijiang
2022-08-31 22:34 ` [PATCH 02/15] KVM: x86: Report XSS as an MSR to be saved if there are supported features Yang Weijiang
2022-08-31 22:34 ` [PATCH 03/15] KVM: x86: Refresh CPUID on writes to MSR_IA32_XSS Yang Weijiang
2022-08-31 22:34 ` [PATCH 04/15] KVM: PMU: disable LBR handling if architectural LBR is available Yang Weijiang
2022-08-31 22:34 ` [PATCH 05/15] KVM: vmx/pmu: Emulate MSR_ARCH_LBR_DEPTH for guest Arch LBR Yang Weijiang
2022-08-31 22:34 ` [PATCH 06/15] KVM: vmx/pmu: Emulate MSR_ARCH_LBR_CTL " Yang Weijiang
2022-08-31 22:34 ` [PATCH 07/15] KVM: VMX: Support passthrough of architectural LBRs Yang Weijiang
2022-09-01 14:20   ` Sean Christopherson
2022-09-02  3:04     ` Yang, Weijiang
2022-08-31 22:34 ` [PATCH 08/15] KVM: x86: Add Arch LBR MSRs to msrs_to_save_all list Yang Weijiang
2022-08-31 22:34 ` [PATCH 09/15] KVM: x86: Refine the matching and clearing logic for supported_xss Yang Weijiang
2022-08-31 22:34 ` [PATCH 10/15] KVM: x86/vmx: Check Arch LBR config when returning perf capabilities Yang Weijiang [this message]
2022-08-31 22:34 ` [PATCH 11/15] KVM: x86: Add XSAVE Support for Architectural LBR Yang Weijiang
2022-08-31 22:34 ` [PATCH 12/15] KVM: x86/vmx: Clear Arch LBREn bit before inject #DB to guest Yang Weijiang
2022-08-31 22:34 ` [PATCH 13/15] KVM: x86/vmx: Flip Arch LBREn bit on guest state change Yang Weijiang
2022-08-31 22:34 ` [PATCH 14/15] KVM: x86: Add Arch LBR data MSR access interface Yang Weijiang
2022-08-31 22:34 ` [PATCH 15/15] KVM: x86/cpuid: Advertise Arch LBR feature in CPUID Yang Weijiang
2022-09-01 14:23 ` [PATCH 00/15] Introduce Architectural LBR for vPMU Sean Christopherson
2022-09-02  3:44   ` Yang, Weijiang
2022-10-21  2:14     ` Yang, Weijiang
2022-10-30  6:06       ` Yang, Weijiang
