From: Like Xu <like.xu@linux.intel.com>
To: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org,
	Like Xu <like.xu@linux.intel.com>
Subject: [kvm-unit-tests PATCH] x86: pmu: Test full-width counter writes support
Date: Fri,  8 May 2020 16:32:18 +0800
Message-ID: <20200508083218.120559-2-like.xu@linux.intel.com>
In-Reply-To: <20200508083218.120559-1-like.xu@linux.intel.com>

When the full-width writes capability is set, use the alternative
MSR_IA32_PMCx range to write larger signed counter values, up to the
full GP counter width; the legacy MSR_IA32_PERFCTRx range takes only
the low 32 bits of the written value and sign-extends bit 31.

Signed-off-by: Like Xu <like.xu@linux.intel.com>
---
 lib/x86/msr.h |   1 +
 x86/pmu.c     | 125 ++++++++++++++++++++++++++++++++++++++++----------
 2 files changed, 102 insertions(+), 24 deletions(-)
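
Background note, illustrative only and not part of the diff: the whole
patch hinges on one capability check. IA32_PERF_CAPABILITIES (MSR 0x345)
bit 13 advertises full-width writes. When it is set, the MSR_IA32_PMCx
alias range accepts writes up to the GP counter width reported in
CPUID.0AH:EAX[23:16]; the legacy MSR_IA32_PERFCTRx range always takes
only bits 31:0 and sign-extends bit 31. A minimal selection helper could
look like the sketch below (select_gp_counter_base() is a hypothetical
name; the MSR constants match the ones used in the test):

	/* Sketch only: pick the GP counter MSR base for this CPU. */
	static u64 select_gp_counter_base(void)
	{
		/* Legacy writes take bits 31:0 and sign-extend bit 31. */
		if (!(rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES))
			return MSR_IA32_PERFCTR0;

		/* Alias range: writes valid up to the GP counter width. */
		return MSR_IA32_PMC0;
	}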

diff --git a/lib/x86/msr.h b/lib/x86/msr.h
index 8dca964..6ef5502 100644
--- a/lib/x86/msr.h
+++ b/lib/x86/msr.h
@@ -35,6 +35,7 @@
 #define MSR_IA32_SPEC_CTRL              0x00000048
 #define MSR_IA32_PRED_CMD               0x00000049
 
+#define MSR_IA32_PMC0			0x000004c1
 #define MSR_IA32_PERFCTR0		0x000000c1
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
diff --git a/x86/pmu.c b/x86/pmu.c
index f45621a..8644f90 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -91,6 +91,9 @@ struct pmu_event {
 	{"fixed 3", MSR_CORE_PERF_FIXED_CTR0 + 2, 0.1*N, 30*N}
 };
 
+#define PMU_CAP_FW_WRITES	(1ULL << 13)
+static u64 gp_counter_base = MSR_IA32_PERFCTR0;
+
 static int num_counters;
 
 char *buf;
@@ -125,12 +128,13 @@ static bool check_irq(void)
 
 static bool is_gp(pmu_counter_t *evt)
 {
-	return evt->ctr < MSR_CORE_PERF_FIXED_CTR0;
+	return evt->ctr < MSR_CORE_PERF_FIXED_CTR0 ||
+		evt->ctr >= MSR_IA32_PMC0;
 }
 
 static int event_to_global_idx(pmu_counter_t *cnt)
 {
-	return cnt->ctr - (is_gp(cnt) ? MSR_IA32_PERFCTR0 :
+	return cnt->ctr - (is_gp(cnt) ? gp_counter_base :
 		(MSR_CORE_PERF_FIXED_CTR0 - FIXED_CNT_INDEX));
 }
 
@@ -226,7 +230,7 @@ static bool verify_counter(pmu_counter_t *cnt)
 static void check_gp_counter(struct pmu_event *evt)
 {
 	pmu_counter_t cnt = {
-		.ctr = MSR_IA32_PERFCTR0,
+		.ctr = gp_counter_base,
 		.config = EVNTSEL_OS | EVNTSEL_USR | evt->unit_sel,
 	};
 	int i;
@@ -276,7 +280,7 @@ static void check_counters_many(void)
 			continue;
 
 		cnt[n].count = 0;
-		cnt[n].ctr = MSR_IA32_PERFCTR0 + n;
+		cnt[n].ctr = gp_counter_base + n;
 		cnt[n].config = EVNTSEL_OS | EVNTSEL_USR |
 			gp_events[i % ARRAY_SIZE(gp_events)].unit_sel;
 		n++;
@@ -302,7 +306,7 @@ static void check_counter_overflow(void)
 	uint64_t count;
 	int i;
 	pmu_counter_t cnt = {
-		.ctr = MSR_IA32_PERFCTR0,
+		.ctr = gp_counter_base,
 		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
 		.count = 0,
 	};
@@ -319,6 +323,8 @@ static void check_counter_overflow(void)
 		int idx;
 
 		cnt.count = 1 - count;
+		if (gp_counter_base == MSR_IA32_PMC0)
+			cnt.count &= (1ull << eax.split.bit_width) - 1;
 
 		if (i == num_counters) {
 			cnt.ctr = fixed_events[0].unit_sel;
@@ -346,7 +352,7 @@ static void check_counter_overflow(void)
 static void check_gp_counter_cmask(void)
 {
 	pmu_counter_t cnt = {
-		.ctr = MSR_IA32_PERFCTR0,
+		.ctr = gp_counter_base,
 		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel /* instructions */,
 		.count = 0,
 	};
@@ -369,7 +375,7 @@ static void do_rdpmc_fast(void *ptr)
 
 static void check_rdpmc(void)
 {
-	uint64_t val = 0x1f3456789ull;
+	uint64_t val = 0xff0123456789ull;
 	bool exc;
 	int i;
 
@@ -378,20 +384,23 @@ static void check_rdpmc(void)
 	for (i = 0; i < num_counters; i++) {
 		uint64_t x;
 		pmu_counter_t cnt = {
-			.ctr = MSR_IA32_PERFCTR0 + i,
+			.ctr = gp_counter_base + i,
 			.idx = i
 		};
 
-		/*
-		 * Only the low 32 bits are writable, and the value is
-		 * sign-extended.
-		 */
-		x = (uint64_t)(int64_t)(int32_t)val;
+		/*
+		 * Without full-width writes, only the low 32 bits are writable,
+		 * and the value is sign-extended.
+		 */
+		if (gp_counter_base == MSR_IA32_PERFCTR0)
+			x = (uint64_t)(int64_t)(int32_t)val;
+		else
+			x = (uint64_t)(int64_t)val;
 
 		/* Mask according to the number of supported bits */
 		x &= (1ull << eax.split.bit_width) - 1;
 
-		wrmsr(MSR_IA32_PERFCTR0 + i, val);
+		wrmsr(gp_counter_base + i, val);
 		report(rdpmc(i) == x, "cntr-%d", i);
 
 		exc = test_for_exception(GP_VECTOR, do_rdpmc_fast, &cnt);
@@ -423,8 +432,9 @@ static void check_rdpmc(void)
 static void check_running_counter_wrmsr(void)
 {
 	uint64_t status;
+	uint64_t count;
 	pmu_counter_t evt = {
-		.ctr = MSR_IA32_PERFCTR0,
+		.ctr = gp_counter_base,
 		.config = EVNTSEL_OS | EVNTSEL_USR | gp_events[1].unit_sel,
 		.count = 0,
 	};
@@ -433,7 +443,7 @@ static void check_running_counter_wrmsr(void)
 
 	start_event(&evt);
 	loop();
-	wrmsr(MSR_IA32_PERFCTR0, 0);
+	wrmsr(gp_counter_base, 0);
 	stop_event(&evt);
 	report(evt.count < gp_events[1].min, "cntr");
 
@@ -443,7 +453,13 @@ static void check_running_counter_wrmsr(void)
 
 	evt.count = 0;
 	start_event(&evt);
-	wrmsr(MSR_IA32_PERFCTR0, -1);
+
+	count = -1;
+	if (gp_counter_base == MSR_IA32_PMC0)
+		count &= (1ull << eax.split.bit_width) - 1;
+
+	wrmsr(gp_counter_base, count);
+
 	loop();
 	stop_event(&evt);
 	status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
@@ -452,6 +468,66 @@ static void check_running_counter_wrmsr(void)
 	report_prefix_pop();
 }
 
+static void check_counters(void)
+{
+	check_gp_counters();
+	check_fixed_counters();
+	check_rdpmc();
+	check_counters_many();
+	check_counter_overflow();
+	check_gp_counter_cmask();
+	check_running_counter_wrmsr();
+}
+
+static void do_unsupported_width_counter_write(void *index)
+{
+	wrmsr(MSR_IA32_PMC0 + *((int *) index), 0xffffff0123456789ull);
+}
+
+static void check_gp_counters_write_width(void)
+{
+	u64 val_64 = 0xffffff0123456789ull;
+	u64 val_32 = val_64 & ((1ull << 32) - 1);
+	u64 val_max_width = val_64 & ((1ull << eax.split.bit_width) - 1);
+	int i;
+
+	/*
+	 * MSR_IA32_PERFCTRn supports 64-bit writes,
+	 * but only the lowest 32 bits are valid.
+	 */
+	for (i = 0; i < num_counters; i++) {
+		wrmsr(MSR_IA32_PERFCTR0 + i, val_32);
+		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
+		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
+
+		wrmsr(MSR_IA32_PERFCTR0 + i, val_max_width);
+		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
+		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
+
+		wrmsr(MSR_IA32_PERFCTR0 + i, val_64);
+		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
+		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
+	}
+
+	/*
+	 * MSR_IA32_PMCn supports writing values up to the GP counter
+	 * width; only the lowest GP-counter-width bits are valid.
+	 */
+	for (i = 0; i < num_counters; i++) {
+		wrmsr(MSR_IA32_PMC0 + i, val_32);
+		assert(rdmsr(MSR_IA32_PMC0 + i) == val_32);
+		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_32);
+
+		wrmsr(MSR_IA32_PMC0 + i, val_max_width);
+		assert(rdmsr(MSR_IA32_PMC0 + i) == val_max_width);
+		assert(rdmsr(MSR_IA32_PERFCTR0 + i) == val_max_width);
+
+		report(test_for_exception(GP_VECTOR,
+			do_unsupported_width_counter_write, &i),
+			"writing unsupported width to MSR_IA32_PMC%d raises #GP", i);
+	}
+}
+
 int main(int ac, char **av)
 {
 	struct cpuid id = cpuid(10);
@@ -480,13 +556,14 @@ int main(int ac, char **av)
 
 	apic_write(APIC_LVTPC, PC_VECTOR);
 
-	check_gp_counters();
-	check_fixed_counters();
-	check_rdpmc();
-	check_counters_many();
-	check_counter_overflow();
-	check_gp_counter_cmask();
-	check_running_counter_wrmsr();
+	check_counters();
+
+	if (rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES) {
+		gp_counter_base = MSR_IA32_PMC0;
+		report_prefix_push("full-width writes");
+		check_counters();
+		check_gp_counters_write_width();
+	}
 
 	return report_summary();
 }
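
For reference, and assuming a standard kvm-unit-tests tree (the runner
script below is the usual one, not something this patch adds), the test
can be exercised with:

	./configure && make
	./x86/run x86/pmu.flat

The "full-width writes" cases only run when KVM exposes
IA32_PERF_CAPABILITIES bit 13 to the guest.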
-- 
2.21.1

