From: Vishal Annapurve <vannapurve@google.com>
To: x86@kernel.org, kvm@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-kselftest@vger.kernel.org
Cc: pbonzini@redhat.com, shuah@kernel.org, bgardon@google.com,
	seanjc@google.com, oupton@google.com, peterx@redhat.com,
	vkuznets@redhat.com, dmatlack@google.com,
	Vishal Annapurve <vannapurve@google.com>
Subject: [V2 PATCH 6/8] KVM: selftests: x86: Execute cpu specific hypercall from nested guests
Date: Thu, 15 Sep 2022 00:04:46 +0000
Message-ID: <20220915000448.1674802-7-vannapurve@google.com>
In-Reply-To: <20220915000448.1674802-1-vannapurve@google.com>

Execute vmcall/vmmcall from nested guests according to the CPU type.
This avoids an exit to KVM, which would otherwise patch the hypercall
instruction to match the CPU type anyway.
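
For illustration only, the new helper added below amounts to a CPU-type
dispatch between the two hypercall instructions; a minimal sketch, using
the existing is_amd_cpu() selftests helper and a hypothetical function
name, looks like:

	static void nested_guest_hypercall_sketch(void)
	{
		if (is_amd_cpu())
			/* AMD/SVM guests use VMMCALL */
			__asm__ __volatile__("vmmcall");
		else
			/* Intel/VMX guests use VMCALL */
			__asm__ __volatile__("vmcall");
	}

The actual implementation in processor.c additionally preserves RBP and
marks the remaining GPRs as clobbered, since L1 currently destroys L2's
GPRs on vmexit.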

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vishal Annapurve <vannapurve@google.com>
---
 .../selftests/kvm/include/x86_64/processor.h      |  2 ++
 tools/testing/selftests/kvm/include/x86_64/vmx.h  |  9 ---------
 .../selftests/kvm/lib/x86_64/perf_test_util.c     |  2 +-
 .../testing/selftests/kvm/lib/x86_64/processor.c  | 15 +++++++++++++++
 tools/testing/selftests/kvm/x86_64/smm_test.c     |  2 +-
 tools/testing/selftests/kvm/x86_64/state_test.c   |  8 ++++----
 .../selftests/kvm/x86_64/vmx_dirty_log_test.c     |  2 +-
 .../kvm/x86_64/vmx_preemption_timer_test.c        |  2 +-
 8 files changed, 25 insertions(+), 17 deletions(-)

diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 0cbc71b7af50..18a8a6a2b786 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -833,6 +833,8 @@ void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
 uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 		       uint64_t a3);
 
+void nested_guest_vmcall(void);
+
 void __vm_xsave_require_permission(int bit, const char *name);
 
 #define vm_xsave_require_permission(perm)	\
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 99fa1410964c..d8d4fd3353e5 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -480,15 +480,6 @@ static inline int vmresume(void)
 	return ret;
 }
 
-static inline void vmcall(void)
-{
-	/* Currently, L1 destroys our GPRs during vmexits.  */
-	__asm__ __volatile__("push %%rbp; vmcall; pop %%rbp" : : :
-			     "rax", "rbx", "rcx", "rdx",
-			     "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
-			     "r13", "r14", "r15");
-}
-
 static inline int vmread(uint64_t encoding, uint64_t *value)
 {
 	uint64_t tmp;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
index 0f344a7c89c4..b420b35b7f45 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/perf_test_util.c
@@ -18,7 +18,7 @@
 void perf_test_l2_guest_code(uint64_t vcpu_id)
 {
 	perf_test_guest_code(vcpu_id);
-	vmcall();
+	nested_guest_vmcall();
 }
 
 extern char perf_test_l2_guest_entry[];
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index c0ae938772f6..e12c8b543b8f 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1195,6 +1195,21 @@ uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
 	return r;
 }
 
+void nested_guest_vmcall(void)
+{
+	/* Currently, L1 destroys our GPRs during vmexits.  */
+	if (is_amd_cpu())
+		__asm__ __volatile__("push %%rbp; vmmcall; pop %%rbp" : : :
+			     "rax", "rbx", "rcx", "rdx",
+			     "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
+			     "r13", "r14", "r15");
+	else
+		__asm__ __volatile__("push %%rbp; vmcall; pop %%rbp" : : :
+			     "rax", "rbx", "rcx", "rdx",
+			     "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
+			     "r13", "r14", "r15");
+}
+
 const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
 {
 	static struct kvm_cpuid2 *cpuid;
diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c
index 1f136a81858e..bf04c78c9c8e 100644
--- a/tools/testing/selftests/kvm/x86_64/smm_test.c
+++ b/tools/testing/selftests/kvm/x86_64/smm_test.c
@@ -61,7 +61,7 @@ static void l2_guest_code(void)
 
 	sync_with_host(10);
 
-	vmcall();
+	nested_guest_vmcall();
 }
 
 static void guest_code(void *arg)
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index ea578971fb9f..a9634c06dc60 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -26,10 +26,10 @@ void svm_l2_guest_code(void)
 {
 	GUEST_SYNC(4);
 	/* Exit to L1 */
-	vmcall();
+	nested_guest_vmcall();
 	GUEST_SYNC(6);
 	/* Done, exit to L1 and never come back.  */
-	vmcall();
+	nested_guest_vmcall();
 }
 
 static void svm_l1_guest_code(struct svm_test_data *svm)
@@ -57,7 +57,7 @@ void vmx_l2_guest_code(void)
 	GUEST_SYNC(6);
 
 	/* Exit to L1 */
-	vmcall();
+	nested_guest_vmcall();
 
 	/* L1 has now set up a shadow VMCS for us.  */
 	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
@@ -70,7 +70,7 @@ void vmx_l2_guest_code(void)
 	GUEST_SYNC(12);
 
 	/* Done, exit to L1 and never come back.  */
-	vmcall();
+	nested_guest_vmcall();
 }
 
 static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
index 2d8c23d639f7..fa24e69a806c 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
@@ -42,7 +42,7 @@ static void l2_guest_code(void)
 	GUEST_SYNC(false);
 
 	/* Exit to L1 and never come back.  */
-	vmcall();
+	nested_guest_vmcall();
 }
 
 void l1_guest_code(struct vmx_pages *vmx)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
index 0efdc05969a5..04bae6995344 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
@@ -38,7 +38,7 @@ void l2_guest_code(void)
 {
 	u64 vmx_pt_delta;
 
-	vmcall();
+	nested_guest_vmcall();
 	l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;
 
 	/*
-- 
2.37.2.789.g6183377224-goog

