From: Marc Orr <marcorr@google.com>
To: kvm@vger.kernel.org, jmattson@google.com, pshier@google.com
Cc: Marc Orr <marcorr@google.com>
Subject: [kvm-unit-tests PATCH] x86: nvmx: test max atomic switch MSRs
Date: Thu, 12 Sep 2019 11:09:28 -0700 [thread overview]
Message-ID: <20190912180928.123660-1-marcorr@google.com> (raw)
Exercise nested VMX's atomic MSR switch code (e.g., VM-entry MSR-load
list) at the maximum number of MSRs supported, as described in the SDM,
in the appendix chapter titled "MISCELLANEOUS DATA".
Suggested-by: Jim Mattson <jmattson@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Marc Orr <marcorr@google.com>
---
x86/vmx_tests.c | 139 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 139 insertions(+)
diff --git a/x86/vmx_tests.c b/x86/vmx_tests.c
index f035f24a771a..b3b4d5f7cc8f 100644
--- a/x86/vmx_tests.c
+++ b/x86/vmx_tests.c
@@ -2718,6 +2718,11 @@ static void ept_reserved_bit(int bit)
#define PAGE_2M_ORDER 9
#define PAGE_1G_ORDER 18
+/* Allocate a 2 MB block of pages (order 9 = 512 4K pages). */
+static void *alloc_2m_page(void)
+{
+ return alloc_pages(PAGE_2M_ORDER);
+}
+
static void *get_1g_page(void)
{
static void *alloc;
@@ -8570,6 +8575,138 @@ static int invalid_msr_entry_failure(struct vmentry_failure *failure)
return VMX_TEST_VMEXIT;
}
+/*
+ * The three atomic MSR switch lists processed across a VMX transition:
+ * the VM-entry load list and the VM-exit load and store lists.  The
+ * _END sentinel is the loop bound, not a real scenario.
+ */
+enum atomic_switch_msr_scenario {
+ VM_ENTER_LOAD,
+ VM_EXIT_LOAD,
+ VM_EXIT_STORE,
+ ATOMIC_SWITCH_MSR_SCENARIO_END,
+};
+
+/* L2 guest body: a single vmcall so L1 regains control after each entry. */
+static void atomic_switch_msr_limit_test_guest(void)
+{
+ vmcall();
+}
+
+/*
+ * Fill the first @count slots of @msr_list with identical IA32_TSC
+ * entries.  reserved is cleared (non-zero reserved bits fail the MSR
+ * list checks); the value written is an arbitrary recognizable pattern.
+ */
+static void populate_msr_list(struct vmx_msr_entry *msr_list, int count)
+{
+	struct vmx_msr_entry *e;
+	struct vmx_msr_entry *end = msr_list + count;
+
+	for (e = msr_list; e < end; e++) {
+		e->index = MSR_IA32_TSC;
+		e->reserved = 0;
+		e->value = 0x1234567890abcdef;
+	}
+}
+
+/*
+ * Configure all three atomic switch lists for one scenario: the list
+ * matching @test_msr_list gets @count populated entries and count
+ * fields set accordingly; the other two lists get a count of zero.
+ * Every list's 2M backing page is poisoned with 0xff first so stale
+ * entries from a previous scenario can't be consumed by mistake.
+ */
+static void configure_atomic_switch_msr_limit_test(
+		struct vmx_msr_entry *test_msr_list, int count)
+{
+	struct vmx_msr_entry *msr_list;
+	/* Derive the list size from PAGE_2M_ORDER rather than a second magic 1 << 21. */
+	const u32 two_mb = PAGE_SIZE << PAGE_2M_ORDER;
+	enum atomic_switch_msr_scenario s;
+	enum Encoding addr_field;
+	enum Encoding cnt_field;
+
+	for (s = 0; s < ATOMIC_SWITCH_MSR_SCENARIO_END; s++) {
+		switch (s) {
+		case VM_ENTER_LOAD:
+			addr_field = ENTER_MSR_LD_ADDR;
+			cnt_field = ENT_MSR_LD_CNT;
+			break;
+		case VM_EXIT_LOAD:
+			addr_field = EXIT_MSR_LD_ADDR;
+			cnt_field = EXI_MSR_LD_CNT;
+			break;
+		case VM_EXIT_STORE:
+			addr_field = EXIT_MSR_ST_ADDR;
+			cnt_field = EXI_MSR_ST_CNT;
+			break;
+		default:
+			TEST_ASSERT(false);
+			/*
+			 * Unreachable (TEST_ASSERT aborts), but don't fall
+			 * out of the switch with addr_field/cnt_field
+			 * uninitialized.
+			 */
+			continue;
+		}
+
+		msr_list = (struct vmx_msr_entry *)vmcs_read(addr_field);
+		memset(msr_list, 0xff, two_mb);
+		if (msr_list == test_msr_list) {
+			populate_msr_list(msr_list, count);
+			vmcs_write(cnt_field, count);
+		} else {
+			vmcs_write(cnt_field, 0);
+		}
+	}
+}
+
+/*
+ * Maximum number of entries allowed on any VM-entry/VM-exit MSR switch
+ * list, per the SDM's "Miscellaneous Data" appendix:
+ * 512 * (IA32_VMX_MISC[27:25] + 1).
+ */
+static int max_msr_list_size(void)
+{
+	/* rdmsr() returns a u64; don't truncate it into a u32 local. */
+	u64 vmx_misc = rdmsr(MSR_IA32_VMX_MISC);
+	u32 factor = ((vmx_misc >> 25) & 0x7) + 1;
+
+	return factor * 512;
+}
+
+/*
+ * Exercise each of the three atomic MSR switch lists at the maximum
+ * list size the CPU reports, verifying the VM-entry still succeeds
+ * (guest reaches its vmcall).
+ */
+static void atomic_switch_msr_limit_test(void)
+{
+	struct vmx_msr_entry *msr_list;
+	enum atomic_switch_msr_scenario s;
+
+	/*
+	 * Check for the IA32_TSC MSR (CPUID.1:EDX bit 4, the "TSC" flag),
+	 * which is used to populate the MSR lists.
+	 */
+	if (!(cpuid(1).d & (1 << 4))) {
+		report_skip(__func__);
+		return;
+	}
+
+	/* Set L2 guest. */
+	test_set_guest(atomic_switch_msr_limit_test_guest);
+
+	/* Back each atomic MSR switch list with its own 2M page. */
+	msr_list = alloc_2m_page();
+	vmcs_write(ENTER_MSR_LD_ADDR, virt_to_phys(msr_list));
+	msr_list = alloc_2m_page();
+	vmcs_write(EXIT_MSR_LD_ADDR, virt_to_phys(msr_list));
+	msr_list = alloc_2m_page();
+	vmcs_write(EXIT_MSR_ST_ADDR, virt_to_phys(msr_list));
+
+	/* Execute each test case. */
+	for (s = 0; s < ATOMIC_SWITCH_MSR_SCENARIO_END; s++) {
+		/* Renamed from msr_list: don't shadow the outer declaration. */
+		struct vmx_msr_entry *test_list;
+		int count = max_msr_list_size();
+
+		switch (s) {
+		case VM_ENTER_LOAD:
+			test_list = (struct vmx_msr_entry *)vmcs_read(
+					ENTER_MSR_LD_ADDR);
+			break;
+		case VM_EXIT_LOAD:
+			test_list = (struct vmx_msr_entry *)vmcs_read(
+					EXIT_MSR_LD_ADDR);
+			break;
+		case VM_EXIT_STORE:
+			test_list = (struct vmx_msr_entry *)vmcs_read(
+					EXIT_MSR_ST_ADDR);
+			break;
+		default:
+			report("Bad test scenario, %d.", false, s);
+			continue;
+		}
+
+		configure_atomic_switch_msr_limit_test(test_list, count);
+		enter_guest();
+		assert_exit_reason(VMX_VMCALL);
+	}
+
+	/* Reset the atomic MSR switch count to 0 for all three lists. */
+	configure_atomic_switch_msr_limit_test(NULL, 0);
+	/* Proceed past guest's single vmcall instruction. */
+	enter_guest();
+	skip_exit_vmcall();
+	/* Terminate the guest. */
+	enter_guest();
+	skip_exit_vmcall();
+}
+
#define TEST(name) { #name, .v2 = name }
@@ -8660,5 +8797,7 @@ struct vmx_test vmx_tests[] = {
TEST(ept_access_test_paddr_read_execute_ad_enabled),
TEST(ept_access_test_paddr_not_present_page_fault),
TEST(ept_access_test_force_2m_page),
+ /* Atomic MSR switch tests. */
+ TEST(atomic_switch_msr_limit_test),
{ NULL, NULL, NULL, NULL, NULL, {0} },
};
--
2.23.0.237.gc6a4ce50a0-goog
next reply other threads:[~2019-09-12 18:09 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-09-12 18:09 Marc Orr [this message]
2019-09-13 15:24 ` [kvm-unit-tests PATCH] x86: nvmx: test max atomic switch MSRs Sean Christopherson
2019-09-13 16:26 ` Jim Mattson
2019-09-13 17:15 ` Sean Christopherson
2019-09-13 17:21 ` Jim Mattson
2019-09-13 18:02 ` Marc Orr
2019-09-13 18:30 ` Sean Christopherson
2019-09-13 21:55 ` Marc Orr
2019-09-14 0:49 ` Marc Orr
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190912180928.123660-1-marcorr@google.com \
--to=marcorr@google.com \
--cc=jmattson@google.com \
--cc=kvm@vger.kernel.org \
--cc=pshier@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).