From: Like Xu <like.xu@linux.intel.com>
To: Peter Zijlstra <peterz@infradead.org>,
	Paolo Bonzini <pbonzini@redhat.com>
Cc: linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	ak@linux.intel.com, wei.w.wang@intel.com,
	Like Xu <like.xu@linux.intel.com>
Subject: [PATCH v11 09/11] KVM: x86/pmu: Release guest LBR event via vPMU lazy release mechanism
Date: Thu, 14 May 2020 16:30:52 +0800	[thread overview]
Message-ID: <20200514083054.62538-10-like.xu@linux.intel.com> (raw)
In-Reply-To: <20200514083054.62538-1-like.xu@linux.intel.com>

The vPMU uses GUEST_LBR_IN_USE_IDX (bit 58) in 'pmu->pmc_in_use' to
indicate whether a guest LBR event is still needed by the vcpu. If the
vcpu no longer accesses the LBR-related MSRs within a scheduling time
slice and the LBR enable bit has been cleared, the vPMU treats the guest
LBR event like any other unused vPMC event and releases it as usual. The
pass-through state of the LBR record MSRs is also cancelled.

Signed-off-by: Like Xu <like.xu@linux.intel.com>
---
 arch/x86/kvm/pmu.c           |  9 +++++++++
 arch/x86/kvm/pmu.h           |  4 ++++
 arch/x86/kvm/vmx/pmu_intel.c | 11 +++++++++++
 3 files changed, 24 insertions(+)
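
Note (not part of the commit): below is a stand-alone, user-space sketch of
the lazy release idea, for reviewers who want to see the rule in isolation.
The names used here (toy_vpmu, lbr_in_use, debugctl_lbr_enabled,
toy_pmu_cleanup) are hypothetical stand-ins for the GUEST_LBR_IN_USE_IDX bit
in pmu->pmc_in_use, the guest's DEBUGCTLMSR_LBR bit and
intel_pmu_free_lbr_event(); treat it only as an illustration of the
"release whatever was not touched in the last time slice" rule, not as KVM
code.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical, user-space model of the state this patch tracks. */
	struct toy_vpmu {
		bool lbr_event_created;     /* models pmu->lbr_event != NULL */
		bool lbr_in_use;            /* models the GUEST_LBR_IN_USE_IDX bit */
		bool debugctl_lbr_enabled;  /* models DEBUGCTLMSR_LBR being set */
	};

	/* Any guest access to an LBR msr marks the event as "in use". */
	static void toy_access_lbr_msr(struct toy_vpmu *pmu)
	{
		pmu->lbr_event_created = true;
		pmu->lbr_in_use = true;
	}

	/* Runs once per scheduling time slice, like kvm_pmu_cleanup(). */
	static void toy_pmu_cleanup(struct toy_vpmu *pmu)
	{
		if (pmu->lbr_event_created && !pmu->lbr_in_use &&
		    !pmu->debugctl_lbr_enabled) {
			/* models intel_pmu_free_lbr_event() */
			pmu->lbr_event_created = false;
			printf("guest LBR event released\n");
		}
		/* Start the next slice with a clean "in use" mark. */
		pmu->lbr_in_use = false;
	}

	int main(void)
	{
		struct toy_vpmu pmu = { 0 };

		toy_access_lbr_msr(&pmu);  /* slice 1: guest touches an LBR msr */
		toy_pmu_cleanup(&pmu);     /* event kept: it was in use */
		toy_pmu_cleanup(&pmu);     /* slice 2: untouched, LBR disabled -> released */
		return 0;
	}

In the patch itself, the counterpart of toy_pmu_cleanup() is the
GUEST_LBR_IN_USE_IDX branch added to kvm_pmu_cleanup(), and the counterpart
of toy_access_lbr_msr() is the __set_bit() calls added to
intel_pmu_access_lbr_msr() and intel_pmu_set_msr().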

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 5053f4238218..d0dece055605 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -451,6 +451,12 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
 }
 
+static inline void kvm_pmu_lbr_cleanup(struct kvm_vcpu *vcpu)
+{
+	if (kvm_x86_ops.pmu_ops->lbr_cleanup)
+		kvm_x86_ops.pmu_ops->lbr_cleanup(vcpu);
+}
+
 /* Release perf_events for vPMCs that have been unused for a full time slice.  */
 void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 {
@@ -469,6 +475,9 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 
 		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
 			pmc_stop_counter(pmc);
+
+		if (i == GUEST_LBR_IN_USE_IDX && pmu->lbr_event)
+			kvm_pmu_lbr_cleanup(vcpu);
 	}
 
 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index ed96cbb40757..78f0cfe1622f 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -15,6 +15,9 @@
 #define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
 #define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002
 
+/* Indicates whether LBR msrs were accessed during the last time slice. */
+#define GUEST_LBR_IN_USE_IDX INTEL_PMC_IDX_FIXED_VLBR
+
 struct kvm_event_hw_type_mapping {
 	u8 eventsel;
 	u8 unit_mask;
@@ -38,6 +41,7 @@ struct kvm_pmu_ops {
 	void (*init)(struct kvm_vcpu *vcpu);
 	void (*reset)(struct kvm_vcpu *vcpu);
 	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
+	void (*lbr_cleanup)(struct kvm_vcpu *vcpu);
 };
 
 static inline bool event_is_oncpu(struct perf_event *event)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 8f9c837c7dca..ea4faae56473 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -353,6 +353,7 @@ static bool intel_pmu_access_lbr_msr(struct kvm_vcpu *vcpu,
 		msr_info->data = 0;
 	local_irq_enable();
 
+	__set_bit(GUEST_LBR_IN_USE_IDX, pmu->pmc_in_use);
 	return true;
 }
 
@@ -460,6 +461,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_write64(GUEST_IA32_DEBUGCTL, data);
 		if (!msr_info->host_initiated && !pmu->lbr_event)
 			intel_pmu_create_lbr_event(vcpu);
+		__set_bit(GUEST_LBR_IN_USE_IDX, pmu->pmc_in_use);
 		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
@@ -555,6 +557,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		0, pmu->nr_arch_gp_counters);
 	bitmap_set(pmu->all_valid_pmc_idx,
 		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
+	if (lbr_is_enabled(vcpu))
+		bitmap_set(pmu->all_valid_pmc_idx, GUEST_LBR_IN_USE_IDX, 1);
 
 	nested_vmx_pmu_entry_exit_ctls_update(vcpu);
 }
@@ -636,6 +640,12 @@ static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 		intel_pmu_legacy_freezing_lbrs_on_pmi(vcpu);
 }
 
+static void intel_pmu_lbr_cleanup(struct kvm_vcpu *vcpu)
+{
+	if (!(vmcs_read64(GUEST_IA32_DEBUGCTL) & DEBUGCTLMSR_LBR))
+		intel_pmu_free_lbr_event(vcpu);
+}
+
 struct kvm_pmu_ops intel_pmu_ops = {
 	.find_arch_event = intel_find_arch_event,
 	.find_fixed_event = intel_find_fixed_event,
@@ -651,4 +661,5 @@ struct kvm_pmu_ops intel_pmu_ops = {
 	.init = intel_pmu_init,
 	.reset = intel_pmu_reset,
 	.deliver_pmi = intel_pmu_deliver_pmi,
+	.lbr_cleanup = intel_pmu_lbr_cleanup,
 };
-- 
2.21.3


Thread overview: 33+ messages
2020-05-14  8:30 [PATCH v11 00/11] Guest Last Branch Recording Enabling Like Xu
2020-05-14  8:30 ` [PATCH v11 01/11] perf/x86: Fix variable types for LBR registers Like Xu
2020-05-14  8:30 ` [PATCH v11 02/11] perf/x86/core: Refactor hw->idx checks and cleanup Like Xu
2020-05-14  8:30 ` [PATCH v11 03/11] perf/x86/lbr: Add interface to get basic information about LBR stack Like Xu
2020-05-14  8:30 ` [PATCH v11 04/11] perf/x86: Add constraint to create guest LBR event without hw counter Like Xu
2020-05-18 11:43   ` Peter Zijlstra
2020-05-14  8:30 ` [PATCH v11 05/11] perf/x86: Keep LBR stack unchanged in host context for guest LBR event Like Xu
2020-05-18 11:59   ` Peter Zijlstra
2020-05-18 12:02   ` Peter Zijlstra
2020-05-19  3:08     ` Like Xu
2020-05-19 10:45       ` Peter Zijlstra
2020-05-19 13:25         ` Xu, Like
2020-05-14  8:30 ` [PATCH v11 06/11] KVM: x86/pmu: Tweak kvm_pmu_get_msr to pass 'struct msr_data' in Like Xu
2020-05-14  8:30 ` [PATCH v11 07/11] KVM: x86: Expose MSR_IA32_PERF_CAPABILITIES for LBR record format Like Xu
2020-05-19 10:53   ` Peter Zijlstra
2020-05-19 12:19     ` Xu, Like
2020-05-19 15:12     ` Sean Christopherson
2020-05-14  8:30 ` [PATCH v11 08/11] KVM: x86/pmu: Emulate LBR feature via guest LBR event Like Xu
2020-05-19 11:00   ` Peter Zijlstra
2020-05-19 12:24     ` Xu, Like
2020-05-19 11:01   ` Peter Zijlstra
2020-05-19 12:28     ` Xu, Like
2020-05-19 11:03   ` Peter Zijlstra
2020-05-19 12:40     ` Xu, Like
2020-05-14  8:30 ` Like Xu [this message]
2020-05-14  8:30 ` [PATCH v11 10/11] KVM: x86/pmu: Check guest LBR availability in case host reclaims them Like Xu
2020-05-19 11:15   ` Peter Zijlstra
2020-05-19 13:10     ` Xu, Like
2020-05-19 14:57       ` Peter Zijlstra
2020-05-20  2:01         ` Xu, Like
2020-05-27  8:17           ` Like Xu
2020-05-14  8:30 ` [PATCH v11 11/11] KVM: x86/pmu: Reduce the overhead of LBR passthrough or cancellation Like Xu
2020-05-27  8:28 ` [PATCH v11 00/11] Guest Last Branch Recording Enabling Xu, Like
