From: "Adalbert Lazăr" <alazar@bitdefender.com>
To: kvm@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Ștefan Șicleru" <ssicleru@bitdefender.com>,
	"Adalbert Lazăr" <alazar@bitdefender.com>
Subject: [RFC PATCH v1 32/34] KVM: introspection: add KVMI_VCPU_SET_VE_INFO/KVMI_VCPU_DISABLE_VE
Date: Wed, 22 Jul 2020 19:01:19 +0300	[thread overview]
Message-ID: <20200722160121.9601-33-alazar@bitdefender.com> (raw)
In-Reply-To: <20200722160121.9601-1-alazar@bitdefender.com>

From: Ștefan Șicleru <ssicleru@bitdefender.com>

The introspection tool can use #VE to reduce the number of VM-exits
caused by EPT violations for some guests.

Signed-off-by: Ștefan Șicleru <ssicleru@bitdefender.com>
Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
 Documentation/virt/kvm/kvmi.rst               | 63 +++++++++++++++++++
 arch/x86/include/uapi/asm/kvmi.h              |  8 +++
 arch/x86/kvm/kvmi.c                           | 19 ++++++
 include/uapi/linux/kvmi.h                     |  2 +
 .../testing/selftests/kvm/x86_64/kvmi_test.c  | 52 +++++++++++++++
 virt/kvm/introspection/kvmi_int.h             |  3 +
 virt/kvm/introspection/kvmi_msg.c             | 30 +++++++++
 7 files changed, 177 insertions(+)
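A minimal usage sketch (illustration only, not part of this patch): how an
introspection tool might drive the two new commands over an already-connected
KVMI socket. kvmi_send_cmd() is a hypothetical transport helper, assumed to
prepend struct kvmi_msg_hdr and to return the error code from the command
reply; only the payload layouts below come from this patch.

#include <stdbool.h>
#include <string.h>
#include <linux/kvmi.h>
#include <asm/kvmi.h>

/*
 * Hypothetical helper, assumed to exist in the introspection tool:
 * wraps 'payload' in a struct kvmi_msg_hdr for 'msg_id', sends it on
 * 'kvmi_fd' and returns the kvmi_error_code value from the reply.
 */
int kvmi_send_cmd(int kvmi_fd, __u16 msg_id, const void *payload, size_t size);

static int set_ve_info(int kvmi_fd, __u16 vcpu, __u64 ve_info_gpa,
		       bool trigger_vmexit)
{
	struct {
		struct kvmi_vcpu_hdr vcpu_hdr;
		struct kvmi_vcpu_set_ve_info cmd;
	} req;

	memset(&req, 0, sizeof(req));
	req.vcpu_hdr.vcpu = vcpu;
	/* GPA of the guest page used as the #VE information area */
	req.cmd.gpa = ve_info_gpa;
	req.cmd.trigger_vmexit = trigger_vmexit ? 1 : 0;

	return kvmi_send_cmd(kvmi_fd, KVMI_VCPU_SET_VE_INFO, &req, sizeof(req));
}

static int disable_ve(int kvmi_fd, __u16 vcpu)
{
	struct kvmi_vcpu_hdr vcpu_hdr;

	memset(&vcpu_hdr, 0, sizeof(vcpu_hdr));
	vcpu_hdr.vcpu = vcpu;

	return kvmi_send_cmd(kvmi_fd, KVMI_VCPU_DISABLE_VE,
			     &vcpu_hdr, sizeof(vcpu_hdr));
}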

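For reference, the guest page passed through KVMI_VCPU_SET_VE_INFO is used by
the CPU as the virtualization-exception information area. A sketch of its
layout, following the Intel SDM description of #VE (the field names are
illustrative; nothing below is defined by this patch):

#include <stdint.h>

struct ve_info_area {
	uint32_t exit_reason;		/* 48, i.e. EPT violation, for #VE */
	uint32_t busy;			/* set to 0xFFFFFFFF on #VE delivery */
	uint64_t exit_qualification;	/* as for an EPT violation VM-exit */
	uint64_t gla;			/* guest linear address */
	uint64_t gpa;			/* guest physical address */
	uint16_t eptp_index;		/* EPT view in use when #VE occurred */
};

The guest's vector-20 handler is expected to clear 'busy' after consuming the
data; while that word is non-zero, further EPT violations cause VM-exits
instead of being converted to #VE.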
diff --git a/Documentation/virt/kvm/kvmi.rst b/Documentation/virt/kvm/kvmi.rst
index caa51fccc463..c50c40638d46 100644
--- a/Documentation/virt/kvm/kvmi.rst
+++ b/Documentation/virt/kvm/kvmi.rst
@@ -1230,6 +1230,69 @@ is terminated.
 * -KVM_EINVAL - padding is not zero
 * -KVM_EINVAL - the selected EPT view is not valid
 
+29. KVMI_VCPU_SET_VE_INFO
+-------------------------
+
+:Architecture: x86
+:Versions: >= 1
+:Parameters:
+
+::
+
+	struct kvmi_vcpu_hdr;
+	struct kvmi_vcpu_set_ve_info {
+		__u64 gpa;
+		__u8 trigger_vmexit;
+		__u8 padding1;
+		__u16 padding2;
+		__u32 padding3;
+	};
+
+:Returns:
+
+::
+
+	struct kvmi_error_code;
+
+Configures the guest physical address for the #VE info page and enables
+the #VE mechanism. If ``trigger_vmexit`` is true, any virtualization
+exception will trigger a vm-exit. Otherwise, the exception is delivered
+using gate descriptor 20 from the Interrupt Descriptor Table (IDT).
+
+:Errors:
+
+* -KVM_EINVAL - the selected vCPU is invalid
+* -KVM_EINVAL - the specified GPA is invalid
+* -KVM_EOPNOTSUPP - the hardware does not support #VE
+* -KVM_EINVAL - padding is not zero
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+
+30. KVMI_VCPU_DISABLE_VE
+------------------------
+
+:Architecture: x86
+:Versions: >= 1
+:Parameters:
+
+::
+
+	struct kvmi_vcpu_hdr;
+
+:Returns:
+
+::
+
+	struct kvmi_error_code;
+
+Disables the #VE mechanism. All EPT violations will trigger a vm-exit,
+regardless of the suppress #VE bit (SVE, bit 63) in the SPTE that maps
+the faulting GPA in the EPT view where the violation occurred.
+
+:Errors:
+
+* -KVM_EINVAL - the selected vCPU is invalid
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+
 Events
 ======
 
diff --git a/arch/x86/include/uapi/asm/kvmi.h b/arch/x86/include/uapi/asm/kvmi.h
index 56992dacfb69..d925e6d49f50 100644
--- a/arch/x86/include/uapi/asm/kvmi.h
+++ b/arch/x86/include/uapi/asm/kvmi.h
@@ -174,4 +174,12 @@ struct kvmi_vcpu_control_ept_view {
 	__u32 padding2;
 };
 
+struct kvmi_vcpu_set_ve_info {
+	__u64 gpa;
+	__u8 trigger_vmexit;
+	__u8 padding1;
+	__u16 padding2;
+	__u32 padding3;
+};
+
 #endif /* _UAPI_ASM_X86_KVMI_H */
diff --git a/arch/x86/kvm/kvmi.c b/arch/x86/kvm/kvmi.c
index 3e8c83623703..e101ac390809 100644
--- a/arch/x86/kvm/kvmi.c
+++ b/arch/x86/kvm/kvmi.c
@@ -1464,3 +1464,22 @@ int kvmi_arch_cmd_control_ept_view(struct kvm_vcpu *vcpu, u16 view,
 
 	return kvm_x86_ops.control_ept_view(vcpu, view, visible);
 }
+
+int kvmi_arch_cmd_set_ve_info(struct kvm_vcpu *vcpu, u64 gpa,
+			      bool trigger_vmexit)
+{
+	unsigned long ve_info = (unsigned long) gpa;
+
+	if (!kvm_x86_ops.set_ve_info)
+		return -KVM_EINVAL;
+
+	return kvm_x86_ops.set_ve_info(vcpu, ve_info, trigger_vmexit);
+}
+
+int kvmi_arch_cmd_disable_ve(struct kvm_vcpu *vcpu)
+{
+	if (!kvm_x86_ops.disable_ve)
+		return 0;
+
+	return kvm_x86_ops.disable_ve(vcpu);
+}
diff --git a/include/uapi/linux/kvmi.h b/include/uapi/linux/kvmi.h
index 505a865cd115..a17cd1fa16d0 100644
--- a/include/uapi/linux/kvmi.h
+++ b/include/uapi/linux/kvmi.h
@@ -52,6 +52,8 @@ enum {
 	KVMI_VCPU_GET_EPT_VIEW       = 26,
 	KVMI_VCPU_SET_EPT_VIEW       = 27,
 	KVMI_VCPU_CONTROL_EPT_VIEW   = 28,
+	KVMI_VCPU_SET_VE_INFO        = 29,
+	KVMI_VCPU_DISABLE_VE         = 30,
 
 	KVMI_NUM_MESSAGES
 };
diff --git a/tools/testing/selftests/kvm/x86_64/kvmi_test.c b/tools/testing/selftests/kvm/x86_64/kvmi_test.c
index 4e099cbfcf4e..a3ea22f546ec 100644
--- a/tools/testing/selftests/kvm/x86_64/kvmi_test.c
+++ b/tools/testing/selftests/kvm/x86_64/kvmi_test.c
@@ -35,6 +35,10 @@ static vm_vaddr_t test_gva;
 static void *test_hva;
 static vm_paddr_t test_gpa;
 
+static vm_vaddr_t test_ve_info_gva;
+static void *test_ve_info_hva;
+static vm_paddr_t test_ve_info_gpa;
+
 static uint8_t test_write_pattern;
 static int page_size;
 
@@ -2258,6 +2262,43 @@ static void test_cmd_vcpu_vmfunc(struct kvm_vm *vm)
 	test_guest_switch_to_visible_view(vm);
 }
 
+static void enable_ve(struct kvm_vm *vm)
+{
+	struct {
+		struct kvmi_msg_hdr hdr;
+		struct kvmi_vcpu_hdr vcpu_hdr;
+		struct kvmi_vcpu_set_ve_info cmd;
+	} req = {};
+
+	req.cmd.gpa = test_ve_info_gpa;
+	req.cmd.trigger_vmexit = 1;
+
+	test_vcpu0_command(vm, KVMI_VCPU_SET_VE_INFO, &req.hdr,
+				sizeof(req), NULL, 0);
+}
+
+static void disable_ve(struct kvm_vm *vm)
+{
+	struct {
+		struct kvmi_msg_hdr hdr;
+		struct kvmi_vcpu_hdr vcpu_hdr;
+	} req = {};
+
+	test_vcpu0_command(vm, KVMI_VCPU_DISABLE_VE, &req.hdr,
+				sizeof(req), NULL, 0);
+}
+
+static void test_virtualization_exceptions(struct kvm_vm *vm)
+{
+	if (!features.ve) {
+		print_skip("#VE not supported");
+		return;
+	}
+
+	enable_ve(vm);
+	disable_ve(vm);
+}
+
 static void test_introspection(struct kvm_vm *vm)
 {
 	srandom(time(0));
@@ -2297,6 +2338,7 @@ static void test_introspection(struct kvm_vm *vm)
 	test_cmd_vcpu_get_ept_view(vm);
 	test_cmd_vcpu_set_ept_view(vm);
 	test_cmd_vcpu_vmfunc(vm);
+	test_virtualization_exceptions(vm);
 
 	unhook_introspection(vm);
 }
@@ -2311,6 +2353,16 @@ static void setup_test_pages(struct kvm_vm *vm)
 	memset(test_hva, 0, page_size);
 
 	test_gpa = addr_gva2gpa(vm, test_gva);
+
+	/* Allocate #VE info page */
+	test_ve_info_gva = vm_vaddr_alloc(vm, page_size, KVM_UTIL_MIN_VADDR,
+					  0, 0);
+	sync_global_to_guest(vm, test_ve_info_gva);
+
+	test_ve_info_hva = addr_gva2hva(vm, test_ve_info_gva);
+	memset(test_ve_info_hva, 0, page_size);
+
+	test_ve_info_gpa = addr_gva2gpa(vm, test_ve_info_gva);
 }
 
 int main(int argc, char *argv[])
diff --git a/virt/kvm/introspection/kvmi_int.h b/virt/kvm/introspection/kvmi_int.h
index fc6dbd3a6472..a0062fbde49e 100644
--- a/virt/kvm/introspection/kvmi_int.h
+++ b/virt/kvm/introspection/kvmi_int.h
@@ -151,5 +151,8 @@ u16 kvmi_arch_cmd_get_ept_view(struct kvm_vcpu *vcpu);
 int kvmi_arch_cmd_set_ept_view(struct kvm_vcpu *vcpu, u16 view);
 int kvmi_arch_cmd_control_ept_view(struct kvm_vcpu *vcpu, u16 view,
 				   bool visible);
+int kvmi_arch_cmd_set_ve_info(struct kvm_vcpu *vcpu, u64 gpa,
+			      bool trigger_vmexit);
+int kvmi_arch_cmd_disable_ve(struct kvm_vcpu *vcpu);
 
 #endif
diff --git a/virt/kvm/introspection/kvmi_msg.c b/virt/kvm/introspection/kvmi_msg.c
index 696857f6d008..664b78d545c3 100644
--- a/virt/kvm/introspection/kvmi_msg.c
+++ b/virt/kvm/introspection/kvmi_msg.c
@@ -711,6 +711,34 @@ static int handle_vcpu_control_ept_view(const struct kvmi_vcpu_msg_job *job,
 	return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
 }
 
+static int handle_vcpu_set_ve_info(const struct kvmi_vcpu_msg_job *job,
+				   const struct kvmi_msg_hdr *msg,
+				   const void *_req)
+{
+	const struct kvmi_vcpu_set_ve_info *req = _req;
+	bool trigger_vmexit = !!req->trigger_vmexit;
+	int ec;
+
+	if (req->padding1 || req->padding2 || req->padding3)
+		ec = -KVM_EINVAL;
+	else
+		ec = kvmi_arch_cmd_set_ve_info(job->vcpu, req->gpa,
+						trigger_vmexit);
+
+	return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
+}
+
+static int handle_vcpu_disable_ve(const struct kvmi_vcpu_msg_job *job,
+				  const struct kvmi_msg_hdr *msg,
+				  const void *req)
+{
+	int ec;
+
+	ec = kvmi_arch_cmd_disable_ve(job->vcpu);
+
+	return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
+}
+
 /*
  * These functions are executed from the vCPU thread. The receiving thread
  * passes the messages using a newly allocated 'struct kvmi_vcpu_msg_job'
@@ -725,6 +753,7 @@ static int(*const msg_vcpu[])(const struct kvmi_vcpu_msg_job *,
 	[KVMI_VCPU_CONTROL_EVENTS]     = handle_vcpu_control_events,
 	[KVMI_VCPU_CONTROL_MSR]        = handle_vcpu_control_msr,
 	[KVMI_VCPU_CONTROL_SINGLESTEP] = handle_vcpu_control_singlestep,
+	[KVMI_VCPU_DISABLE_VE]         = handle_vcpu_disable_ve,
 	[KVMI_VCPU_GET_CPUID]          = handle_vcpu_get_cpuid,
 	[KVMI_VCPU_GET_EPT_VIEW]       = handle_vcpu_get_ept_view,
 	[KVMI_VCPU_GET_INFO]           = handle_vcpu_get_info,
@@ -736,6 +765,7 @@ static int(*const msg_vcpu[])(const struct kvmi_vcpu_msg_job *,
 	[KVMI_VCPU_SET_EPT_VIEW]       = handle_vcpu_set_ept_view,
 	[KVMI_VCPU_SET_REGISTERS]      = handle_vcpu_set_registers,
 	[KVMI_VCPU_SET_XSAVE]          = handle_vcpu_set_xsave,
+	[KVMI_VCPU_SET_VE_INFO]        = handle_vcpu_set_ve_info,
 	[KVMI_VCPU_TRANSLATE_GVA]      = handle_vcpu_translate_gva,
 };
 
