From: Aaron Lewis <aaronlewis@google.com>
To: kvm@vger.kernel.org
Cc: pbonzini@redhat.com, jmattson@google.com, seanjc@google.com,
	Aaron Lewis <aaronlewis@google.com>
Subject: [PATCH 3/4] selftests: kvm/x86: Add testing for masked events
Date: Mon, 23 May 2022 21:41:09 +0000
Message-ID: <20220523214110.1282480-4-aaronlewis@google.com>
In-Reply-To: <20220523214110.1282480-1-aaronlewis@google.com>

Add testing for the PMU event filter's masked events.  These tests
exercise the different ways an event the guest is attempting to program
can be found in an event list.  Any given eventsel may appear multiple
times in an event list, so the tests vary how a match is looked up in
order to force the matching algorithm to walk all of the relevant
eventsel's entries and to verify that it a) finds a match and b) stays
within its bounds.
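
To illustrate, the second layout the test builds looks roughly like
this (a sketch only; it assumes the masked event encoding introduced
earlier in this series and uses the Intel event numbers from the test):

	uint64_t masked_events[11];
	int i;

	/* One eventsel (branches retired, 0xc4) with match values 1..11. */
	for (i = 0; i < 11; i++)
		masked_events[i] =
			KVM_PMU_EVENT_ENCODE_MASKED_EVENT(0xc4, ~0x00, i + 1, 0);

	/*
	 * Bracket that block with the neighboring eventsels so the lookup
	 * has to stay within the 0xc4 entries when the guest programs
	 * branch instructions retired.
	 */
	masked_events[0] = KVM_PMU_EVENT_ENCODE_MASKED_EVENT(0xc0, ~0x00, 0, 0);
	masked_events[1] = KVM_PMU_EVENT_ENCODE_MASKED_EVENT(0xc5, ~0x00, 0, 0);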

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 .../kvm/x86_64/pmu_event_filter_test.c        | 107 ++++++++++++++++++
 1 file changed, 107 insertions(+)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 4bff4c71ac45..4071043bbe26 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -18,8 +18,12 @@
 /*
  * In lieu of copying perf_event.h into tools...
  */
+#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
 #define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
 #define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
+#define AMD64_EVENTSEL_EVENT	\
+	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
+
 
 union cpuid10_eax {
 	struct {
@@ -445,6 +449,107 @@ static bool use_amd_pmu(void)
 		 is_zen3(entry->eax));
 }
 
+#define ENCODE_MASKED_EVENT(select, mask, match, invert) \
+		KVM_PMU_EVENT_ENCODE_MASKED_EVENT(select, mask, match, invert)
+
+static void expect_success(uint64_t count)
+{
+	if (count != NUM_BRANCHES)
+		pr_info("masked filter: Branch instructions retired = %lu (expected %u)\n",
+			count, NUM_BRANCHES);
+	TEST_ASSERT(count, "Allowed PMU event is not counting");
+}
+
+static void expect_failure(uint64_t count)
+{
+	if (count)
+		pr_info("masked filter: Branch instructions retired = %lu (expected 0)\n",
+			count);
+	TEST_ASSERT(!count, "Disallowed PMU Event is counting");
+}
+
+static void run_masked_filter_test(struct kvm_vm *vm, uint64_t masked_events[],
+				   const int nmasked_events, uint64_t event,
+				   uint32_t action, bool invert,
+				   void (*expected_func)(uint64_t))
+{
+	struct kvm_pmu_event_filter *f;
+	uint64_t old_event;
+	uint64_t count;
+	int i;
+
+	for (i = 0; i < nmasked_events; i++) {
+		if ((masked_events[i] & AMD64_EVENTSEL_EVENT) != EVENT(event, 0))
+			continue;
+
+		old_event = masked_events[i];
+
+		masked_events[i] =
+			ENCODE_MASKED_EVENT(event, ~0x00, 0x00, invert);
+
+		f = create_pmu_event_filter(masked_events, nmasked_events, action,
+				   KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+
+		count = test_with_filter(vm, f);
+		free(f);
+
+		expected_func(count);
+
+		masked_events[i] = old_event;
+	}
+}
+
+static void run_masked_filter_tests(struct kvm_vm *vm, uint64_t masked_events[],
+				    const int nmasked_events, uint64_t event)
+{
+	run_masked_filter_test(vm, masked_events, nmasked_events, event,
+			       KVM_PMU_EVENT_ALLOW, /*invert=*/false,
+			       expect_success);
+	run_masked_filter_test(vm, masked_events, nmasked_events, event,
+			       KVM_PMU_EVENT_ALLOW, /*invert=*/true,
+			       expect_failure);
+	run_masked_filter_test(vm, masked_events, nmasked_events, event,
+			       KVM_PMU_EVENT_DENY, /*invert=*/false,
+			       expect_failure);
+	run_masked_filter_test(vm, masked_events, nmasked_events, event,
+			       KVM_PMU_EVENT_DENY, /*invert=*/true,
+			       expect_success);
+}
+
+static void test_masked_filters(struct kvm_vm *vm)
+{
+	uint64_t masked_events[11];
+	const int nmasked_events = ARRAY_SIZE(masked_events);
+	uint64_t prev_event, event, next_event;
+	int i;
+
+	if (use_intel_pmu()) {
+		/* Instructions retired */
+		prev_event = 0xc0;
+		event = INTEL_BR_RETIRED;
+		/* Branch misses retired */
+		next_event = 0xc5;
+	} else {
+		TEST_ASSERT(use_amd_pmu(), "Unknown platform");
+		/* Retired instructions */
+		prev_event = 0xc0;
+		event = AMD_ZEN_BR_RETIRED;
+		/* Retired branch instructions mispredicted */
+		next_event = 0xc3;
+	}
+
+	for (i = 0; i < nmasked_events; i++)
+		masked_events[i] =
+			ENCODE_MASKED_EVENT(event, ~0x00, i + 1, 0);
+
+	run_masked_filter_tests(vm, masked_events, nmasked_events, event);
+
+	masked_events[0] = ENCODE_MASKED_EVENT(prev_event, ~0x00, 0, 0);
+	masked_events[1] = ENCODE_MASKED_EVENT(next_event, ~0x00, 0, 0);
+
+	run_masked_filter_tests(vm, masked_events, nmasked_events, event);
+}
+
 int main(int argc, char *argv[])
 {
 	void (*guest_code)(void) = NULL;
@@ -489,6 +594,8 @@ int main(int argc, char *argv[])
 	test_not_member_deny_list(vm);
 	test_not_member_allow_list(vm);
 
+	test_masked_filters(vm);
+
 	kvm_vm_free(vm);
 
 	test_pmu_config_disable(guest_code);
-- 
2.36.1.124.g0e6072fb45-goog

