From: "Adalbert Lazăr" <alazar@bitdefender.com>
To: kvm@vger.kernel.org
Cc: "Adalbert Lazăr" <alazar@bitdefender.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Marian Rotariu" <marian.c.rotariu@gmail.com>,
	"Ștefan Șicleru" <ssicleru@bitdefender.com>,
	virtualization@lists.linux-foundation.org
Subject: [RFC PATCH v1 06/34] KVM: x86: mmu: add support for EPT switching
Date: Wed, 22 Jul 2020 19:00:53 +0300	[thread overview]
Message-ID: <20200722160121.9601-7-alazar@bitdefender.com> (raw)
In-Reply-To: <20200722160121.9601-1-alazar@bitdefender.com>

From: Marian Rotariu <marian.c.rotariu@gmail.com>

The introspection tool uses this function to check the hardware support
for EPT switching, which can be used either to single-step vCPUs on an
unprotected EPT view or, together with #VE, to avoid VM-exits caused by
EPT violations.
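
For reference, the architectural capability check behind this boils down
to two VMX capability MSRs: the secondary processor-based controls must
allow "enable VM functions", and bit 0 of IA32_VMX_VMFUNC must report
EPTP switching. A minimal sketch (illustration only, not the code added
by this series; the constants are the usual ones from asm/vmx.h and
asm/msr-index.h):

#include <asm/msr.h>
#include <asm/vmx.h>

static bool eptp_switching_supported(void)
{
	u64 ctls2, vmfunc;

	/* Allowed-1 settings live in bits 63:32 of the capability MSR. */
	if (rdmsrl_safe(MSR_IA32_VMX_PROCBASED_CTLS2, &ctls2) ||
	    !((ctls2 >> 32) & SECONDARY_EXEC_ENABLE_VMFUNC))
		return false;

	/* Bit 0 of IA32_VMX_VMFUNC advertises EPTP switching (VMFUNC leaf 0). */
	if (rdmsrl_safe(MSR_IA32_VMX_VMFUNC, &vmfunc))
		return false;

	return vmfunc & VMX_VMFUNC_EPTP_SWITCHING;
}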

Signed-off-by: Marian Rotariu <marian.c.rotariu@gmail.com>
Co-developed-by: Ștefan Șicleru <ssicleru@bitdefender.com>
Signed-off-by: Ștefan Șicleru <ssicleru@bitdefender.com>
Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/mmu/mmu.c          | 12 ++--
 arch/x86/kvm/vmx/vmx.c          | 98 +++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.h          |  1 +
 4 files changed, 108 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index bd45778e0904..1035308940fe 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -421,6 +421,7 @@ struct kvm_mmu {
 	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
+	hpa_t root_hpa_altviews[KVM_MAX_EPT_VIEWS];
 	gpa_t root_pgd;
 	union kvm_mmu_role mmu_role;
 	u8 root_level;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0b6527a1ebe6..553425ab3518 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3760,8 +3760,11 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	if (free_active_root) {
 		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
 		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
-			mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
-					   &invalid_list);
+			for (i = 0; i < KVM_MAX_EPT_VIEWS; i++)
+				mmu_free_root_page(vcpu->kvm,
+						   mmu->root_hpa_altviews + i,
+						   &invalid_list);
+			mmu->root_hpa = INVALID_PAGE;
 		} else {
 			for (i = 0; i < 4; ++i)
 				if (mmu->pae_root[i] != 0)
@@ -3821,9 +3824,10 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 					      shadow_root_level, true, i);
 			if (!VALID_PAGE(root))
 				return -ENOSPC;
-			if (i == 0)
-				vcpu->arch.mmu->root_hpa = root;
+			vcpu->arch.mmu->root_hpa_altviews[i] = root;
 		}
+		vcpu->arch.mmu->root_hpa =
+		  vcpu->arch.mmu->root_hpa_altviews[kvm_get_ept_view(vcpu)];
 	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
 		for (i = 0; i < 4; ++i) {
 			MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0256c3a93c87..2024ef4d9a74 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -3124,6 +3124,32 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
 	return eptp;
 }
 
+static void vmx_construct_eptp_with_index(struct kvm_vcpu *vcpu,
+					  unsigned short view)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u64 *eptp_list = NULL;
+
+	if (!vmx->eptp_list_pg)
+		return;
+
+	eptp_list = phys_to_virt(page_to_phys(vmx->eptp_list_pg));
+
+	if (!eptp_list)
+		return;
+
+	eptp_list[view] = construct_eptp(vcpu,
+				vcpu->arch.mmu->root_hpa_altviews[view]);
+}
+
+static void vmx_construct_eptp_list(struct kvm_vcpu *vcpu)
+{
+	unsigned short view;
+
+	for (view = 0; view < KVM_MAX_EPT_VIEWS; view++)
+		vmx_construct_eptp_with_index(vcpu, view);
+}
+
 void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -3135,6 +3161,8 @@ void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd)
 		eptp = construct_eptp(vcpu, pgd);
 		vmcs_write64(EPT_POINTER, eptp);
 
+		vmx_construct_eptp_list(vcpu);
+
 		if (kvm_x86_ops.tlb_remote_flush) {
 			spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
 			to_vmx(vcpu)->ept_pointer = eptp;
@@ -4336,6 +4364,15 @@ static void ept_set_mmio_spte_mask(void)
 	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
 }
 
+static int vmx_alloc_eptp_list_page(struct vcpu_vmx *vmx)
+{
+	vmx->eptp_list_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!vmx->eptp_list_pg)
+		return -ENOMEM;
+
+	return 0;
+}
+
 #define VMX_XSS_EXIT_BITMAP 0
 
 /*
@@ -4426,6 +4463,10 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 	if (cpu_has_vmx_encls_vmexit())
 		vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
 
+	if (vmx->eptp_list_pg)
+		vmcs_write64(EPTP_LIST_ADDRESS,
+				page_to_phys(vmx->eptp_list_pg));
+
 	if (vmx_pt_mode_is_host_guest()) {
 		memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
 		/* Bit[6~0] are forced to 1, writes are ignored. */
@@ -5913,6 +5954,24 @@ static void vmx_dump_dtsel(char *name, uint32_t limit)
 	       vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
 }
 
+static void dump_eptp_list(void)
+{
+	phys_addr_t eptp_list_phys, *eptp_list = NULL;
+	int i;
+
+	eptp_list_phys = (phys_addr_t)vmcs_read64(EPTP_LIST_ADDRESS);
+	if (!eptp_list_phys)
+		return;
+
+	eptp_list = phys_to_virt(eptp_list_phys);
+
+	pr_err("*** EPTP Switching ***\n");
+	pr_err("EPTP List Address: %p (phys %p)\n",
+		eptp_list, (void *)eptp_list_phys);
+	for (i = 0; i < KVM_MAX_EPT_VIEWS; i++)
+		pr_err("%d: %016llx\n", i, *(eptp_list + i));
+}
+
 void dump_vmcs(void)
 {
 	u32 vmentry_ctl, vmexit_ctl;
@@ -6061,6 +6120,23 @@ void dump_vmcs(void)
 	if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
 		pr_err("Virtual processor ID = 0x%04x\n",
 		       vmcs_read16(VIRTUAL_PROCESSOR_ID));
+
+	dump_eptp_list();
+}
+
+static unsigned int update_ept_view(struct vcpu_vmx *vmx)
+{
+	u64 *eptp_list = phys_to_virt(page_to_phys(vmx->eptp_list_pg));
+	u64 eptp = vmcs_read64(EPT_POINTER);
+	unsigned int view;
+
+	for (view = 0; view < KVM_MAX_EPT_VIEWS; view++)
+		if (eptp_list[view] == eptp) {
+			vmx->view = view;
+			break;
+		}
+
+	return vmx->view;
 }
 
 /*
@@ -6073,6 +6149,13 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
+	if (vmx->eptp_list_pg) {
+		unsigned int view = update_ept_view(vmx);
+		struct kvm_mmu *mmu = vcpu->arch.mmu;
+
+		mmu->root_hpa = mmu->root_hpa_altviews[view];
+	}
+
 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
 	 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -6951,12 +7034,21 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	return exit_fastpath;
 }
 
+static void vmx_destroy_eptp_list_page(struct vcpu_vmx *vmx)
+{
+	if (vmx->eptp_list_pg) {
+		__free_page(vmx->eptp_list_pg);
+		vmx->eptp_list_pg = NULL;
+	}
+}
+
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	if (enable_pml)
 		vmx_destroy_pml_buffer(vmx);
+	vmx_destroy_eptp_list_page(vmx);
 	free_vpid(vmx->vpid);
 	nested_vmx_free_vcpu(vcpu);
 	free_loaded_vmcs(vmx->loaded_vmcs);
@@ -7021,6 +7113,12 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 	if (err < 0)
 		goto free_pml;
 
+	if (kvm_eptp_switching_supported) {
+		err = vmx_alloc_eptp_list_page(vmx);
+		if (err)
+			goto free_pml;
+	}
+
 	msr_bitmap = vmx->vmcs01.msr_bitmap;
 	vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R);
 	vmx_disable_intercept_for_msr(NULL, msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 14f0b9102d58..4e2f86458ca2 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -297,6 +297,7 @@ struct vcpu_vmx {
 
 	struct pt_desc pt_desc;
 
+	struct page *eptp_list_pg;
 	/* The view this vcpu operates on. */
 	u16 view;
 };
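
Not part of the diff above, but for context on how the EPTP list page
gets used: once the host has populated it, a guest-resident agent can
ask the CPU to change views directly with VM function 0 (EPTP switching)
without causing a VM-exit. A hedged sketch of the guest-side call, with
the function name chosen here for illustration:

static inline void vmfunc_switch_ept_view(unsigned int view)
{
	/* VMFUNC (0F 01 D4): EAX selects the function (0 = EPTP switching),
	 * ECX selects the index into the EPTP list page.
	 */
	asm volatile(".byte 0x0f, 0x01, 0xd4"
		     : : "a" (0), "c" (view)
		     : "memory");
}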

