From: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
To: kvm@vger.kernel.org, x86@kernel.org, linux-kernel@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, "H . Peter Anvin" <hpa@zytor.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Radim Krcmar <rkrcmar@redhat.com>,
	Len Brown <len.brown@intel.com>, Kyle Huey <me@kylehuey.com>,
	Borislav Petkov <bp@suse.de>, Kan Liang <Kan.liang@intel.com>,
	Grzegorz Andrejczuk <grzegorz.andrejczuk@intel.com>,
	Tom Lendacky <thomas.lendacky@amd.com>,
	Tony Luck <tony.luck@intel.com>,
	Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
Subject: [PATCH v2 3/4] Add support for AMD Core Perf Extension in guest
Date: Mon,  6 Nov 2017 11:44:25 -0600	[thread overview]
Message-ID: <5113a9d6e76d2c6050c1fba4007068340321521c.1509985085.git.Janakarajan.Natarajan@amd.com> (raw)
In-Reply-To: <cover.1509985085.git.Janakarajan.Natarajan@amd.com>

Add support for the AMD Core Performance counters in the guest. With
the core extension, the base event select and counter MSRs change, and
two additional counters are available for performance measurements,
bringing the total to six.

With the new MSRs, the logic that maps them to the gp_counters[] array
changes. New functions are introduced to pick the correct base MSR and
to validate the MSRs in the get/set paths.
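
For illustration, the counter index works out as follows (a minimal
sketch using the MSR values quoted in the patch below, not the exact
kernel code):

	/*
	 * The core extension interleaves the event select and counter
	 * MSRs:
	 *   0xc0010200 PERF_CTL0, 0xc0010201 PERF_CTR0,
	 *   0xc0010202 PERF_CTL1, 0xc0010203 PERF_CTR1, ...
	 * so gp_counters[] is indexed by half the distance from the base.
	 */
	idx = (msr - base) >> 1;	/* (0xc0010203 - 0xc0010201) >> 1 == 1 */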

If the guest has vCPUs of family 16h, or of a family below 15h, it
falls back to the K7 MSRs, and the number of counters the guest can
access is limited to 4.
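
The base selection therefore reduces to roughly the following (a sketch
of the get_msr_base() logic below, shown for the counter case only):

	if ((family == 0x15 || family == 0x17) &&
	    guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		base = MSR_F15H_PERF_CTR;	/* 6 counters, interleaved */
	else
		base = MSR_K7_PERFCTR0;		/* 4 counters, contiguous */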

Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
---
 arch/x86/kvm/pmu_amd.c | 133 +++++++++++++++++++++++++++++++++++++++++++------
 arch/x86/kvm/x86.c     |   1 +
 2 files changed, 120 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index cd94443..2c694446 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -19,6 +19,11 @@
 #include "lapic.h"
 #include "pmu.h"
 
+enum pmu_type {
+	PMU_TYPE_COUNTER = 0,
+	PMU_TYPE_EVNTSEL,
+};
+
 /* duplicated from amd_perfmon_event_map, K7 and above should work. */
 static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
 	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
@@ -31,6 +36,86 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
 	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
 };
 
+static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
+{
+	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+	int family;
+	bool has_perf_ext;
+
+	family = guest_cpuid_family(vcpu);
+	has_perf_ext = guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE);
+
+	switch (family) {
+	case 0x15:
+	case 0x17:
+		if (has_perf_ext) {
+			if (type == PMU_TYPE_COUNTER)
+				return MSR_F15H_PERF_CTR;
+			else
+				return MSR_F15H_PERF_CTL;
+		}
+
+		/*
+		 * Fall through: the K7 MSRs are
+		 * backwards compatible.
+		 */
+	default:
+		if (type == PMU_TYPE_COUNTER)
+			return MSR_K7_PERFCTR0;
+		else
+			return MSR_K7_EVNTSEL0;
+	}
+}
+
+static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
+					     enum pmu_type type)
+{
+	unsigned int base = get_msr_base(pmu, type);
+
+	if (base == MSR_F15H_PERF_CTL) {
+		switch (msr) {
+		case MSR_F15H_PERF_CTL0:
+		case MSR_F15H_PERF_CTL1:
+		case MSR_F15H_PERF_CTL2:
+		case MSR_F15H_PERF_CTL3:
+		case MSR_F15H_PERF_CTL4:
+		case MSR_F15H_PERF_CTL5:
+			/*
+			 * AMD Perf Extension MSRs are not contiguous.
+			 *
+			 * E.g. MSR_F15H_PERF_CTR0 -> 0xc0010201
+			 *	MSR_F15H_PERF_CTR1 -> 0xc0010203
+			 *
+			 * These are mapped to work with gp_counters[].
+			 * The index into the array is calculated by
+			 * dividing the difference between the requested
+			 * MSR and the MSR base by 2.
+			 *
+			 * E.g. MSR_F15H_PERF_CTR1 uses
+			 *	->gp_counters[(0xc0010203-0xc0010201)/2]
+			 *	->gp_counters[1]
+			 */
+			return &pmu->gp_counters[(msr - base) >> 1];
+		default:
+			return NULL;
+		}
+	} else if (base == MSR_F15H_PERF_CTR) {
+		switch (msr) {
+		case MSR_F15H_PERF_CTR0:
+		case MSR_F15H_PERF_CTR1:
+		case MSR_F15H_PERF_CTR2:
+		case MSR_F15H_PERF_CTR3:
+		case MSR_F15H_PERF_CTR4:
+		case MSR_F15H_PERF_CTR5:
+			return &pmu->gp_counters[(msr - base) >> 1];
+		default:
+			return NULL;
+		}
+	} else {
+		return get_gp_pmc(pmu, msr, base);
+	}
+}
+
 static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
 				    u8 event_select,
 				    u8 unit_mask)
@@ -64,7 +149,20 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
 
 static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
 {
-	return get_gp_pmc(pmu, MSR_K7_EVNTSEL0 + pmc_idx, MSR_K7_EVNTSEL0);
+	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
+	unsigned int family;
+
+	family = guest_cpuid_family(pmu_to_vcpu(pmu));
+
+	if (family == 0x15 || family == 0x17) {
+		/*
+		 * The idx is contiguous. The MSRs are not. The counter MSRs
+		 * are interleaved with the event select MSRs.
+		 */
+		pmc_idx *= 2;
+	}
+
+	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
 }
 
 /* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
@@ -96,8 +194,8 @@ static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	int ret = false;
 
-	ret = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0) ||
-		get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
+	ret = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
+		get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
 
 	return ret;
 }
@@ -107,14 +205,14 @@ static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct kvm_pmc *pmc;
 
-	/* MSR_K7_PERFCTRn */
-	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
+	/* MSR_PERFCTRn */
+	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
 	if (pmc) {
 		*data = pmc_read_counter(pmc);
 		return 0;
 	}
-	/* MSR_K7_EVNTSELn */
-	pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
+	/* MSR_EVNTSELn */
+	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
 	if (pmc) {
 		*data = pmc->eventsel;
 		return 0;
@@ -130,14 +228,14 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	u32 msr = msr_info->index;
 	u64 data = msr_info->data;
 
-	/* MSR_K7_PERFCTRn */
-	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
+	/* MSR_PERFCTRn */
+	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
 	if (pmc) {
 		pmc->counter += data - pmc_read_counter(pmc);
 		return 0;
 	}
-	/* MSR_K7_EVNTSELn */
-	pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
+	/* MSR_EVNTSELn */
+	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
 	if (pmc) {
 		if (data == pmc->eventsel)
 			return 0;
@@ -153,8 +251,15 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	int family, nr_counters;
+
+	family = guest_cpuid_family(vcpu);
+	if (family == 0x15 || family == 0x17)
+		nr_counters = AMD64_NUM_COUNTERS_CORE;
+	else
+		nr_counters = AMD64_NUM_COUNTERS;
 
-	pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
+	pmu->nr_arch_gp_counters = nr_counters;
 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
 	pmu->reserved_bits = 0xffffffff00200000ull;
 	/* not applicable to AMD; but clean them to prevent any fall out */
@@ -169,7 +274,7 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	int i;
 
-	for (i = 0; i < AMD64_NUM_COUNTERS ; i++) {
+	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
 		pmu->gp_counters[i].type = KVM_PMC_GP;
 		pmu->gp_counters[i].vcpu = vcpu;
 		pmu->gp_counters[i].idx = i;
@@ -181,7 +286,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	int i;
 
-	for (i = 0; i < AMD64_NUM_COUNTERS; i++) {
+	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
 		struct kvm_pmc *pmc = &pmu->gp_counters[i];
 
 		pmc_stop_counter(pmc);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 03869eb..5a6ad6f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2433,6 +2433,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_AMD64_DC_CFG:
 		msr_info->data = 0;
 		break;
+	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
 	case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
-- 
2.7.4

