From: Vitaly Kuznetsov <vkuznets@redhat.com>
To: kvm@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <seanjc@google.com>
Cc: Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>,
	Michael Kelley <mikelley@microsoft.com>,
	Siddharth Chandrasekaran <sidcha@amazon.de>,
	Yuan Yao <yuan.yao@linux.intel.com>,
	Maxim Levitsky <mlevitsk@redhat.com>,
	linux-hyperv@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH v13 46/48] KVM: selftests: evmcs_test: Introduce L2 TLB flush test
Date: Tue,  1 Nov 2022 15:54:24 +0100
Message-ID: <20221101145426.251680-47-vkuznets@redhat.com>
In-Reply-To: <20221101145426.251680-1-vkuznets@redhat.com>

Enable Hyper-V L2 TLB flush and check that Hyper-V TLB flush hypercalls
from L2 don't exit to L1 unless 'TlbLockCount' is set in the
Partition assist page.
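
In short, the expected L1-visible behavior, as a simplified sketch that uses
the names introduced by the selftest changes below:

	*(u32 *)hv_pages->partition_assist = 0;	/* 'TlbLockCount' clear */
	/* L2's flush hypercall is handled entirely by L0; L1 sees no exit */
	*(u32 *)hv_pages->partition_assist = 1;	/* 'TlbLockCount' set */
	/*
	 * L2's flush hypercall now makes L1 observe the synthetic
	 * HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH exit instead of
	 * being swallowed by L0.
	 */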

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 .../selftests/kvm/include/x86_64/evmcs.h      |  2 +
 .../testing/selftests/kvm/x86_64/evmcs_test.c | 50 ++++++++++++++++++-
 2 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86_64/evmcs.h b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
index 94d6059e9a12..901caf0e0939 100644
--- a/tools/testing/selftests/kvm/include/x86_64/evmcs.h
+++ b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
@@ -237,6 +237,8 @@ struct hv_enlightened_vmcs {
 #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ENLIGHTENMENTSCONTROL    BIT(15)
 #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL                      0xFFFF
 
+#define HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH 0x10000031
+
 extern struct hv_enlightened_vmcs *current_evmcs;
 
 int vcpu_enable_evmcs(struct kvm_vcpu *vcpu);
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index a9f511c192c2..d418954590b1 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -16,6 +16,7 @@
 
 #include "kvm_util.h"
 
+#include "hyperv.h"
 #include "vmx.h"
 
 static int ud_count;
@@ -32,6 +33,8 @@ static void guest_nmi_handler(struct ex_regs *regs)
 
 void l2_guest_code(void)
 {
+	u64 unused;
+
 	GUEST_SYNC(7);
 
 	GUEST_SYNC(8);
@@ -48,15 +51,31 @@ void l2_guest_code(void)
 	vmcall();
 	rdmsr_from_l2(MSR_GS_BASE); /* intercepted */
 
+	/* L2 TLB flush tests */
+	hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
+			 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS);
+	rdmsr_from_l2(MSR_FS_BASE);
+	/*
+	 * Note: hypercall status (RAX) is not preserved correctly by L1 after
+	 * synthetic vmexit, use unchecked version.
+	 */
+	__hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT, 0x0,
+			   HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES | HV_FLUSH_ALL_PROCESSORS,
+			   &unused);
+
 	/* Done, exit to L1 and never come back.  */
 	vmcall();
 }
 
-void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages)
+void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages,
+		vm_vaddr_t hv_hcall_page_gpa)
 {
 #define L2_GUEST_STACK_SIZE 64
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
+	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+	wrmsr(HV_X64_MSR_HYPERCALL, hv_hcall_page_gpa);
+
 	x2apic_enable();
 
 	GUEST_SYNC(1);
@@ -86,7 +105,17 @@ void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages)
 	vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
 		PIN_BASED_NMI_EXITING);
 
+	/* L2 TLB flush setup */
+	current_evmcs->partition_assist_page = hv_pages->partition_assist_gpa;
+	current_evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;
+	current_evmcs->hv_vm_id = 1;
+	current_evmcs->hv_vp_id = 1;
+	current_vp_assist->nested_control.features.directhypercall = 1;
+	*(u32 *)(hv_pages->partition_assist) = 0;
+
 	GUEST_ASSERT(!vmlaunch());
+	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
+	GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), NMI_VECTOR);
 	GUEST_ASSERT(vmptrstz() == hv_pages->enlightened_vmcs_gpa);
 
 	/*
@@ -130,6 +159,18 @@ void guest_code(struct vmx_pages *vmx_pages, struct hyperv_test_pages *hv_pages)
 	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
 	current_evmcs->guest_rip += 2; /* rdmsr */
 
+	/*
+	 * L2 TLB flush test. First VMCALL should be handled directly by L0,
+	 * no VMCALL exit expected.
+	 */
+	GUEST_ASSERT(!vmresume());
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_MSR_READ);
+	current_evmcs->guest_rip += 2; /* rdmsr */
+	/* Enable synthetic vmexit */
+	*(u32 *)(hv_pages->partition_assist) = 1;
+	GUEST_ASSERT(!vmresume());
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == HV_VMX_SYNTHETIC_EXIT_REASON_TRAP_AFTER_FLUSH);
+
 	GUEST_ASSERT(!vmresume());
 	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
 	GUEST_SYNC(11);
@@ -183,6 +224,7 @@ static struct kvm_vcpu *save_restore_vm(struct kvm_vm *vm,
 int main(int argc, char *argv[])
 {
 	vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
+	vm_vaddr_t hcall_page;
 
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
@@ -196,12 +238,16 @@ int main(int argc, char *argv[])
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
 	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
 
+	hcall_page = vm_vaddr_alloc_pages(vm, 1);
+	memset(addr_gva2hva(vm, hcall_page), 0x0,  getpagesize());
+
 	vcpu_set_hv_cpuid(vcpu);
 	vcpu_enable_evmcs(vcpu);
 
 	vcpu_alloc_vmx(vm, &vmx_pages_gva);
 	vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
-	vcpu_args_set(vcpu, 2, vmx_pages_gva, hv_pages_gva);
+	vcpu_args_set(vcpu, 3, vmx_pages_gva, hv_pages_gva, addr_gva2gpa(vm, hcall_page));
+	vcpu_set_msr(vcpu, HV_X64_MSR_VP_INDEX, vcpu->id);
 
 	vm_init_descriptor_tables(vm);
 	vcpu_init_descriptor_tables(vcpu);
-- 
2.37.3

