* [PATCH 1/3] KVM: x86/pmu: Add fast-path check for per-vm vPMU disablement
@ 2022-05-10 11:57 Like Xu
  2022-05-10 11:57 ` [PATCH 2/3] KVM: x86/svm/pmu: Direct access pmu->gp_counters[] to implement amd_*_to_pmc() Like Xu
  2022-05-10 11:57 ` [PATCH 3/3] KVM: x86/svm/pmu: Drop 'enum index' for more counters scalability Like Xu
  0 siblings, 2 replies; 5+ messages in thread
From: Like Xu @ 2022-05-10 11:57 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Jim Mattson, sandipan.das, Sean Christopherson, Vitaly Kuznetsov,
	Wanpeng Li, Joerg Roedel, kvm, linux-kernel

From: Like Xu <likexu@tencent.com>

Since vcpu->kvm->arch.enable_pmu was introduced as a generic, per-VM
setting, it makes more sense to check it once in generic code rather
than scattering checks across the vendor implementations, which also
saves the CPU cycles of a static_call() when the vPMU is disabled.
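
For context, the per-VM knob this fast path keys on is exposed to
userspace through the KVM_CAP_PMU_CAPABILITY VM capability. A minimal
sketch of how a VMM might disable the vPMU for a whole VM (illustrative
only, not part of this patch; assumes the 5.18-era uAPI and a
hypothetical 'vm_fd' VM file descriptor):

	/* Needs <sys/ioctl.h> and <linux/kvm.h>. */
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_PMU_CAPABILITY,
		.args[0] = KVM_PMU_CAP_DISABLE,
	};

	/* Must be issued before any vCPU is created. */
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);

With the vPMU disabled this way, the checks added below return early
without reaching any static_call().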

Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/kvm/pmu.c           | 6 ++++++
 arch/x86/kvm/svm/pmu.c       | 3 ---
 arch/x86/kvm/vmx/pmu_intel.c | 2 +-
 3 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 618f529f1c4d..522498945a4a 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -415,6 +415,9 @@ void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
+	if (!vcpu->kvm->arch.enable_pmu)
+		return false;
+
 	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
 		static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
 }
@@ -445,6 +448,9 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  */
 void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 {
+	if (!vcpu->kvm->arch.enable_pmu)
+		return;
+
 	static_call(kvm_x86_pmu_refresh)(vcpu);
 }
 
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 57ab4739eb19..68b9e22c84d2 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -101,9 +101,6 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 {
 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
 
-	if (!vcpu->kvm->arch.enable_pmu)
-		return NULL;
-
 	switch (msr) {
 	case MSR_F15H_PERF_CTL0:
 	case MSR_F15H_PERF_CTL1:
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 9db662399487..3f15ec2dd4b3 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -493,7 +493,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
 
 	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
-	if (!entry || !vcpu->kvm->arch.enable_pmu)
+	if (!entry)
 		return;
 	eax.full = entry->eax;
 	edx.full = entry->edx;
-- 
2.36.1


* [PATCH 2/3] KVM: x86/svm/pmu: Direct access pmu->gp_counters[] to implement amd_*_to_pmc()
  2022-05-10 11:57 [PATCH 1/3] KVM: x86/pmu: Add fast-path check for per-vm vPMU disablement Like Xu
@ 2022-05-10 11:57 ` Like Xu
  2022-05-20 14:45   ` [PATCH v2 " Like Xu
  2022-05-10 11:57 ` [PATCH 3/3] KVM: x86/svm/pmu: Drop 'enum index' for more counters scalability Like Xu
  1 sibling, 1 reply; 5+ messages in thread
From: Like Xu @ 2022-05-10 11:57 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Jim Mattson, sandipan.das, Sean Christopherson, Vitaly Kuznetsov,
	Wanpeng Li, Joerg Roedel, kvm, linux-kernel

From: Like Xu <likexu@tencent.com>

AMD only has gp counters, whose corresponding vPMCs are initialised
and stored in pmu->gp_counters[] in order of idx, so the array can be
indexed directly by any valid pmc->idx without help from any other
interface. amd_rdpmc_ecx_to_pmc() can now reuse this code quite
naturally.

Opportunistically apply array_index_nospec() to reduce the attack
surface for speculative execution.
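
As a side note on the hardening (my reading of the helper in
include/linux/nospec.h, not new API): array_index_nospec(idx, sz)
evaluates to idx when idx < sz and to 0 otherwise, computed without a
conditional branch, so even if the CPU mispredicts the preceding bounds
check it cannot speculatively index past the end of gp_counters[].
Roughly:

	if (pmc_idx >= pmu->nr_arch_gp_counters)
		return NULL;
	/* Clamped under speculation; architecturally a no-op here. */
	pmc_idx = array_index_nospec(pmc_idx, pmu->nr_arch_gp_counters);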

Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/kvm/svm/pmu.c | 36 +++++++++++-------------------------
 1 file changed, 11 insertions(+), 25 deletions(-)

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 68b9e22c84d2..4668baf762d2 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -45,6 +45,16 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
 	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
 };
 
+static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
+{
+	unsigned int num_counters = pmu->nr_arch_gp_counters;
+
+	if (pmc_idx >= num_counters)
+		return NULL;
+
+	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
+}
+
 static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
 {
 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@@ -164,22 +174,6 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
 	return true;
 }
 
-static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
-	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
-	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
-		/*
-		 * The idx is contiguous. The MSRs are not. The counter MSRs
-		 * are interleaved with the event select MSRs.
-		 */
-		pmc_idx *= 2;
-	}
-
-	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
-}
-
 static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -193,15 +187,7 @@ static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	unsigned int idx, u64 *mask)
 {
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *counters;
-
-	idx &= ~(3u << 30);
-	if (idx >= pmu->nr_arch_gp_counters)
-		return NULL;
-	counters = pmu->gp_counters;
-
-	return &counters[idx];
+	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
 }
 
 static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
-- 
2.36.1


* [PATCH 3/3] KVM: x86/svm/pmu: Drop 'enum index' for more counters scalability
  2022-05-10 11:57 [PATCH 1/3] KVM: x86/pmu: Add fast-path check for per-vm vPMU disablement Like Xu
  2022-05-10 11:57 ` [PATCH 2/3] KVM: x86/svm/pmu: Direct access pmu->gp_counters[] to implement amd_*_to_pmc() Like Xu
@ 2022-05-10 11:57 ` Like Xu
  2022-05-20 14:32   ` Like Xu
  1 sibling, 1 reply; 5+ messages in thread
From: Like Xu @ 2022-05-10 11:57 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Jim Mattson, sandipan.das, Sean Christopherson, Vitaly Kuznetsov,
	Wanpeng Li, Joerg Roedel, kvm, linux-kernel

From: Like Xu <likexu@tencent.com>

If the number of AMD gp counters continues to grow, the code becomes
clumsy, and the switch-case design of the inline get_gp_pmc_amd()
also bloats the kernel text size.

Teach the code to manage two groups of MSRs, each representing a
different version of the AMD PMU counter MSRs. The MSR addresses
within each group are contiguous, with no holes, and the two address
ranges do not intersect, but the two groups lay out counters and
event selects differently by design, as follows:

[Group A : All counter MSRs are tightly bound to all event select MSRs ]

  MSR_K7_EVNTSEL0			0xc0010000
  MSR_K7_EVNTSELi			0xc0010000 + i
  ...
  MSR_K7_EVNTSEL3			0xc0010003
  MSR_K7_PERFCTR0			0xc0010004
  MSR_K7_PERFCTRi			0xc0010004 + i
  ...
  MSR_K7_PERFCTR3			0xc0010007

[Group B : The counter MSRs are interleaved with the event select MSRs ]

  MSR_F15H_PERF_CTL0		0xc0010200
  MSR_F15H_PERF_CTR0		(0xc0010200 + 1)
  ...
  MSR_F15H_PERF_CTLi		(0xc0010200 + 2 * i)
  MSR_F15H_PERF_CTRi		(0xc0010200 + 2 * i + 1)
  ...
  MSR_F15H_PERF_CTL5		(0xc0010200 + 2 * 5)
  MSR_F15H_PERF_CTR5		(0xc0010200 + 2 * 5 + 1)

Rewrite get_gp_pmc_amd() as follows: first determine which group of
registers the passed-in 'msr' address falls into, then pick the MSR
'base' that matches the requested 'type', apply that group's address
scaling ratio, and finally derive the pmc_idx.

If the 'base' does not match its 'type', the access remains invalid.
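
To make the scaling concrete, here is a worked instance of the lookup
described above, using only the MSR addresses listed earlier (an
illustration of the intended math, not additional code):

  MSR_K7_PERFCTR2 (0xc0010006), type == PMU_TYPE_COUNTER:
    base = MSR_K7_PERFCTR0 (0xc0010004), ratio = 1
    pmc_idx = (0xc0010006 - 0xc0010004) / 1 = 2

  MSR_F15H_PERF_CTL3 (0xc0010206), type == PMU_TYPE_EVNTSEL:
    base = MSR_F15H_PERF_CTL0 (0xc0010200), ratio = 2
    pmc_idx = (0xc0010206 - 0xc0010200) / 2 = 3

  MSR_K7_EVNTSEL1 (0xc0010001), type == PMU_TYPE_COUNTER:
    base = MSR_K7_PERFCTR0 (0xc0010004), msr < base, so the lookup
    is rejected as invalid.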

Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/kvm/svm/pmu.c | 96 ++++++++----------------------------------
 1 file changed, 18 insertions(+), 78 deletions(-)

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 4668baf762d2..b1ae249b4779 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -23,16 +23,6 @@ enum pmu_type {
 	PMU_TYPE_EVNTSEL,
 };
 
-enum index {
-	INDEX_ZERO = 0,
-	INDEX_ONE,
-	INDEX_TWO,
-	INDEX_THREE,
-	INDEX_FOUR,
-	INDEX_FIVE,
-	INDEX_ERROR,
-};
-
 /* duplicated from amd_perfmon_event_map, K7 and above should work. */
 static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
 	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
@@ -55,11 +45,9 @@ static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
 }
 
-static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
+static u32 get_msr_base(bool core_ctr, enum pmu_type type)
 {
-	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
+	if (core_ctr) {
 		if (type == PMU_TYPE_COUNTER)
 			return MSR_F15H_PERF_CTR;
 		else
@@ -72,77 +60,29 @@ static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
 	}
 }
 
-static enum index msr_to_index(u32 msr)
-{
-	switch (msr) {
-	case MSR_F15H_PERF_CTL0:
-	case MSR_F15H_PERF_CTR0:
-	case MSR_K7_EVNTSEL0:
-	case MSR_K7_PERFCTR0:
-		return INDEX_ZERO;
-	case MSR_F15H_PERF_CTL1:
-	case MSR_F15H_PERF_CTR1:
-	case MSR_K7_EVNTSEL1:
-	case MSR_K7_PERFCTR1:
-		return INDEX_ONE;
-	case MSR_F15H_PERF_CTL2:
-	case MSR_F15H_PERF_CTR2:
-	case MSR_K7_EVNTSEL2:
-	case MSR_K7_PERFCTR2:
-		return INDEX_TWO;
-	case MSR_F15H_PERF_CTL3:
-	case MSR_F15H_PERF_CTR3:
-	case MSR_K7_EVNTSEL3:
-	case MSR_K7_PERFCTR3:
-		return INDEX_THREE;
-	case MSR_F15H_PERF_CTL4:
-	case MSR_F15H_PERF_CTR4:
-		return INDEX_FOUR;
-	case MSR_F15H_PERF_CTL5:
-	case MSR_F15H_PERF_CTR5:
-		return INDEX_FIVE;
-	default:
-		return INDEX_ERROR;
-	}
-}
-
 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 					     enum pmu_type type)
 {
 	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+	unsigned int ratio = 0;
+	unsigned int pmc_idx;
+	u32 base;
 
-	switch (msr) {
-	case MSR_F15H_PERF_CTL0:
-	case MSR_F15H_PERF_CTL1:
-	case MSR_F15H_PERF_CTL2:
-	case MSR_F15H_PERF_CTL3:
-	case MSR_F15H_PERF_CTL4:
-	case MSR_F15H_PERF_CTL5:
-		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
-			return NULL;
-		fallthrough;
-	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
-		if (type != PMU_TYPE_EVNTSEL)
-			return NULL;
-		break;
-	case MSR_F15H_PERF_CTR0:
-	case MSR_F15H_PERF_CTR1:
-	case MSR_F15H_PERF_CTR2:
-	case MSR_F15H_PERF_CTR3:
-	case MSR_F15H_PERF_CTR4:
-	case MSR_F15H_PERF_CTR5:
-		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
-			return NULL;
-		fallthrough;
-	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
-		if (type != PMU_TYPE_COUNTER)
-			return NULL;
-		break;
-	default:
-		return NULL;
+	/* MSR_K7_* MSRs are still visible to PERFCTR_CORE guest. */
+	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE) &&
+	    msr >= MSR_F15H_PERF_CTL0 && msr <= MSR_F15H_PERF_CTR5) {
+		base = get_msr_base(true, type);
+		ratio = 2;
+	} else if (msr >= MSR_K7_EVNTSEL0 && msr <= MSR_K7_PERFCTR3) {
+		base = get_msr_base(false, type);
+		ratio = 1;
 	}
 
-	return &pmu->gp_counters[msr_to_index(msr)];
+	if (!ratio || msr < base)
+		return NULL;
+
+	pmc_idx = (unsigned int)((msr - base) / ratio);
+	return amd_pmc_idx_to_pmc(pmu, pmc_idx);
 }
 
 static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
-- 
2.36.1


* Re: [PATCH 3/3] KVM: x86/svm/pmu: Drop 'enum index' for more counters scalability
  2022-05-10 11:57 ` [PATCH 3/3] KVM: x86/svm/pmu: Drop 'enum index' for more counters scalability Like Xu
@ 2022-05-20 14:32   ` Like Xu
  0 siblings, 0 replies; 5+ messages in thread
From: Like Xu @ 2022-05-20 14:32 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Jim Mattson, sandipan.das, Sean Christopherson, Vitaly Kuznetsov,
	Wanpeng Li, Joerg Roedel, kvm, linux-kernel

The third patch is buggy, please ignore it.
I will post a new version. Sorry for the noise.
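
For readers following along, one concrete way the posted rewrite goes
wrong (my reading of the diff above, not necessarily the only issue):
in the MSR_F15H_* group the counter and event-select addresses are
interleaved, so a 'type' that does not match the MSR is no longer
rejected; the integer division simply truncates onto a neighbouring
index. For example, MSR_F15H_PERF_CTL2 (0xc0010204) looked up with
PMU_TYPE_COUNTER gives base = MSR_F15H_PERF_CTR0 (0xc0010201) and
pmc_idx = (0xc0010204 - 0xc0010201) / 2 = 1, instead of returning
NULL as the old switch-based code did.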

On 10/5/2022 7:57 pm, Like Xu wrote:
> From: Like Xu <likexu@tencent.com>
> 
> If the number of AMD gp counters continues to grow, the code will
> be very clumsy and the switch-case design of inline get_gp_pmc_amd()
> will also bloat the kernel text size.
> 
> The target code is taught to manage two groups of MSRs, each
> representing a different version of the AMD PMU counter MSRs.
> The MSR addresses of each group are contiguous, with no holes,
> and there is no intersection between two sets of addresses,
> but they are discrete in functionality by design like this:
> 
> [Group A : All counter MSRs are tightly bound to all event select MSRs ]
> 
>    MSR_K7_EVNTSEL0			0xc0010000
>    MSR_K7_EVNTSELi			0xc0010000 + i
>    ...
>    MSR_K7_EVNTSEL3			0xc0010003
>    MSR_K7_PERFCTR0			0xc0010004
>    MSR_K7_PERFCTRi			0xc0010004 + i
>    ...
>    MSR_K7_PERFCTR3			0xc0010007
> 
> [Group B : The counter MSRs are interleaved with the event select MSRs ]
> 
>    MSR_F15H_PERF_CTL0		0xc0010200
>    MSR_F15H_PERF_CTR0		(0xc0010200 + 1)
>    ...
>    MSR_F15H_PERF_CTLi		(0xc0010200 + 2 * i)
>    MSR_F15H_PERF_CTRi		(0xc0010200 + 2 * i + 1)
>    ...
>    MSR_F15H_PERF_CTL5		(0xc0010200 + 2 * 5)
>    MSR_F15H_PERF_CTR5		(0xc0010200 + 2 * 5 + 1)
> 
> Rewrite get_gp_pmc_amd() in this way: first determine which group of
> registers is accessed by the pass-in 'msr' address, then determine
> which msr 'base' is referenced by 'type', applying different address
> scaling ratios separately, and finally get the pmc_idx.
> 
> If the 'base' does not match its 'type', it continues to remain invalid.
> 
> Signed-off-by: Like Xu <likexu@tencent.com>

* [PATCH v2 2/3] KVM: x86/svm/pmu: Direct access pmu->gp_counters[] to implement amd_*_to_pmc()
  2022-05-10 11:57 ` [PATCH 2/3] KVM: x86/svm/pmu: Direct access pmu->gp_counters[] to implement amd_*_to_pmc() Like Xu
@ 2022-05-20 14:45   ` Like Xu
  0 siblings, 0 replies; 5+ messages in thread
From: Like Xu @ 2022-05-20 14:45 UTC (permalink / raw)
  To: pbonzini
  Cc: jmattson, joro, kvm, linux-kernel, sandipan.das, seanjc,
	vkuznets, wanpengli

From: Like Xu <likexu@tencent.com>

AMD only has gp counters, whose corresponding vPMCs are initialised
and stored in pmu->gp_counters[] in order of idx, so the array can be
indexed directly by any valid pmc->idx without help from any other
interface. amd_rdpmc_ecx_to_pmc() can now reuse this code quite
naturally.

Opportunistically apply array_index_nospec() to reduce the attack
surface for speculative execution and remove the dead code.

Signed-off-by: Like Xu <likexu@tencent.com>
---
v1 -> v2 Changelog:
- Remove unused helper get_msr_base();

 arch/x86/kvm/svm/pmu.c | 45 +++++++-----------------------------------
 1 file changed, 7 insertions(+), 38 deletions(-)

diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index a3b78342a221..6cd8d3c2000c 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -61,21 +61,14 @@ static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
 static_assert(ARRAY_SIZE(amd_event_mapping) ==
 	     ARRAY_SIZE(amd_f17h_event_mapping));
 
-static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
+static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 {
-	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+	unsigned int num_counters = pmu->nr_arch_gp_counters;
 
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
-		if (type == PMU_TYPE_COUNTER)
-			return MSR_F15H_PERF_CTR;
-		else
-			return MSR_F15H_PERF_CTL;
-	} else {
-		if (type == PMU_TYPE_COUNTER)
-			return MSR_K7_PERFCTR0;
-		else
-			return MSR_K7_EVNTSEL0;
-	}
+	if (pmc_idx >= num_counters)
+		return NULL;
+
+	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
 }
 
 static enum index msr_to_index(u32 msr)
@@ -186,22 +179,6 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
 	return true;
 }
 
-static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
-	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
-	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
-		/*
-		 * The idx is contiguous. The MSRs are not. The counter MSRs
-		 * are interleaved with the event select MSRs.
-		 */
-		pmc_idx *= 2;
-	}
-
-	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
-}
-
 static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -215,15 +192,7 @@ static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
 static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	unsigned int idx, u64 *mask)
 {
-	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *counters;
-
-	idx &= ~(3u << 30);
-	if (idx >= pmu->nr_arch_gp_counters)
-		return NULL;
-	counters = pmu->gp_counters;
-
-	return &counters[idx];
+	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
 }
 
 static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
-- 
2.36.1

