From: Yang Weijiang <weijiang.yang@intel.com>
To: pbonzini@redhat.com, seanjc@google.com, vkuznets@redhat.com,
	jmattson@google.com, wei.w.wang@intel.com,
	like.xu.linux@gmail.com, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org
Cc: Like Xu <like.xu@linux.intel.com>,
	Yang Weijiang <weijiang.yang@intel.com>
Subject: [PATCH v5 04/13] KVM: vmx/pmu: Emulate MSR_ARCH_LBR_DEPTH for guest Arch LBR
Date: Fri,  9 Jul 2021 18:05:02 +0800
Message-ID: <1625825111-6604-5-git-send-email-weijiang.yang@intel.com>
In-Reply-To: <1625825111-6604-1-git-send-email-weijiang.yang@intel.com>

From: Like Xu <like.xu@linux.intel.com>

The number of Arch LBR entries available is determined by the value
in host MSR_ARCH_LBR_DEPTH.DEPTH. The supported LBR depth values are
enumerated in CPUID.(EAX=1CH, ECX=0):EAX[7:0]. For each bit "n" set
in this field, an MSR_ARCH_LBR_DEPTH.DEPTH value of "8*(n+1)" is
supported.
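
For illustration, the enumeration boils down to the following check
(a standalone sketch with a hypothetical helper name; it mirrors the
arch_lbr_depth_is_valid() added by this patch):

	/* Bits 7:0 of CPUID.(EAX=1CH, ECX=0):EAX enumerate the supported depths. */
	static bool depth_is_supported(u32 cpuid_1c_eax, u64 depth)
	{
		/* The depth must be a non-zero multiple of 8, no larger than 64. */
		if (!depth || depth > 64 || (depth & 7))
			return false;
		/* Bit "n" set means a depth of 8*(n+1) is supported. */
		return cpuid_1c_eax & BIT(depth / 8 - 1);
	}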

On a guest write to MSR_ARCH_LBR_DEPTH, all LBR entries are reset to 0.
KVM emulates this reset behavior by introducing lbr_desc->arch_lbr_reset
and deferring the reset: KVM writes the guest-requested value to the
native MSR_ARCH_LBR_DEPTH (this is safe because the host and guest
values are identical at that point) when the Arch LBR record MSRs are
passed through to the guest.
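
For example, a guest can clear its entire LBR stack with a single MSR
write (a hypothetical guest-side snippet; any depth value enumerated
in CPUID.1CH:EAX works):

	/* A write to MSR_ARCH_LBR_DEPTH zeroes all LBR_FROM/TO/INFO entries. */
	wrmsrl(MSR_ARCH_LBR_DEPTH, 32);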

Signed-off-by: Like Xu <like.xu@linux.intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 46 +++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/vmx/vmx.h       |  3 +++
 2 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 9efc1a6b8693..da68f0e74702 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -211,7 +211,7 @@ static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	int ret;
+	int ret = 0;
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -220,6 +220,10 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		ret = pmu->version > 1;
 		break;
+	case MSR_ARCH_LBR_DEPTH:
+		if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
+			ret = guest_cpuid_has(vcpu, X86_FEATURE_ARCH_LBR);
+		break;
 	default:
 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
@@ -250,6 +254,7 @@ static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
 	if (lbr_desc->event) {
 		perf_event_release_kernel(lbr_desc->event);
 		lbr_desc->event = NULL;
+		lbr_desc->arch_lbr_reset = false;
 		vcpu_to_pmu(vcpu)->event_count--;
 	}
 }
@@ -348,10 +353,26 @@ static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+/*
+ * Check if the requested depth value is supported
+ * based on bits [7:0] of the guest CPUID.1CH.EAX.
+ */
+static bool arch_lbr_depth_is_valid(struct kvm_vcpu *vcpu, u64 depth)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x1c, 0);
+	if (best && depth && (depth < 65) && !(depth & 7))
+		return best->eax & BIT_ULL(depth / 8 - 1);
+
+	return false;
+}
+
 static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 	u32 msr = msr_info->index;
 
 	switch (msr) {
@@ -367,6 +388,9 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		msr_info->data = pmu->global_ovf_ctrl;
 		return 0;
+	case MSR_ARCH_LBR_DEPTH:
+		msr_info->data = lbr_desc->records.nr;
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -393,6 +417,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 	u32 msr = msr_info->index;
 	u64 data = msr_info->data;
 
@@ -427,6 +452,12 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 0;
 		}
 		break;
+	case MSR_ARCH_LBR_DEPTH:
+		if (!arch_lbr_depth_is_valid(vcpu, data))
+			return 1;
+		lbr_desc->records.nr = data;
+		lbr_desc->arch_lbr_reset = true;
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
@@ -566,6 +597,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 	lbr_desc->records.nr = 0;
 	lbr_desc->event = NULL;
 	lbr_desc->msr_passthrough = false;
+	lbr_desc->arch_lbr_reset = false;
 }
 
 static void intel_pmu_reset(struct kvm_vcpu *vcpu)
@@ -623,6 +655,15 @@ static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
 }
 
+static void intel_pmu_arch_lbr_reset(struct kvm_vcpu *vcpu)
+{
+	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+	/* Software write to IA32_LBR_DEPTH will reset all LBR entries. */
+	wrmsrl(MSR_ARCH_LBR_DEPTH, lbr_desc->records.nr);
+	lbr_desc->arch_lbr_reset = false;
+}
+
 static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 {
 	struct x86_pmu_lbr *lbr = vcpu_to_lbr_records(vcpu);
@@ -654,6 +695,9 @@ static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
 {
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 
+	if (unlikely(lbr_desc->arch_lbr_reset))
+		intel_pmu_arch_lbr_reset(vcpu);
+
 	if (lbr_desc->msr_passthrough)
 		return;
 
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 16e4e457ba23..cc362e2d3eaa 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -116,6 +116,9 @@ struct lbr_desc {
 
 	/* True if LBRs are marked as not intercepted in the MSR bitmap */
 	bool msr_passthrough;
+
+	/* Reset all LBR entries on a guest write to MSR_ARCH_LBR_DEPTH */
+	bool arch_lbr_reset;
 };
 
 /*
-- 
2.21.1


