From: "Adalbert Lazăr" <alazar@bitdefender.com>
To: kvm@vger.kernel.org
Cc: linux-mm@kvack.org, virtualization@lists.linux-foundation.org,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"Tamas K Lengyel" <tamas@tklengyel.com>,
	"Mathieu Tarral" <mathieu.tarral@protonmail.com>,
	"Samuel Laurén" <samuel.lauren@iki.fi>,
	"Patrick Colp" <patrick.colp@oracle.com>,
	"Jan Kiszka" <jan.kiszka@siemens.com>,
	"Stefan Hajnoczi" <stefanha@redhat.com>,
	"Weijiang Yang" <weijiang.yang@intel.com>,
	"Yu C Zhang" <yu.c.zhang@intel.com>,
	"Mihai Donțu" <mdontu@bitdefender.com>,
	"Adalbert Lazăr" <alazar@bitdefender.com>,
	"He Chen" <he.chen@linux.intel.com>,
	"Zhang Yi" <yi.z.zhang@linux.intel.com>
Subject: [RFC PATCH v6 36/92] KVM: VMX: Implement functions for SPPT paging setup
Date: Fri,  9 Aug 2019 18:59:51 +0300
Message-ID: <20190809160047.8319-37-alazar@bitdefender.com>
In-Reply-To: <20190809160047.8319-1-alazar@bitdefender.com>

From: Yang Weijiang <weijiang.yang@intel.com>

SPPT is a 4-level paging structure similar to EPT. When SPP is
enabled for a target physical page, bit 61 of the corresponding
EPT leaf entry is set, and the SPPT is then walked with the gfn
to build up its entries. The SPPT leaf entry holds the access
bitmap for the sub-pages inside the target 4KB physical page,
one bit per 128-byte sub-page.
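
For illustration only (not part of this patch): the SPPT is indexed
like a regular x86 4-level page table, with 9 bits of the gfn
selecting the entry at each level, just as the shadow-walk iterator
does. A minimal sketch, where sppt_index() is a hypothetical helper
and not a function added by this series:

    /* Entry index within an SPPT table at 'level' (1 = leaf, 4 = root). */
    static inline unsigned int sppt_index(unsigned long gfn, int level)
    {
            return (gfn >> ((level - 1) * 9)) & 0x1ff;
    }

For example, gfn 0x12345 selects index 0x145 at the leaf level and
index 0x91 at level 2.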

SPPT entries are set up in the following cases:
1. The EPT-faulted page is SPP protected.
2. An SPP misconfiguration-induced VM exit is handled.
3. User space configures SPP-protected pages via the SPP IOCTLs.
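
For illustration only (not part of this patch), a minimal sketch of
how the u32 access bitmap relates to byte offsets in the guest page
and to the u64 SPPT leaf-entry layout produced by format_spp_spte()
below; subpage_index() and spp_bitmap_to_leaf() are hypothetical
helpers used only for this example:

    /* One 4KB page is split into 32 sub-pages of 128 bytes each. */
    #define SPP_SUBPAGE_SIZE 128
    #define SPP_SUBPAGES     32

    /* Sub-page covering a given byte offset within the 4KB page. */
    static inline unsigned int subpage_index(unsigned int page_offset)
    {
            return page_offset / SPP_SUBPAGE_SIZE; /* 0x340 -> sub-page 6 */
    }

    /*
     * Expand the u32 write-permission bitmap (one bit per sub-page)
     * into the u64 SPPT leaf-entry layout: the bit for sub-page i is
     * placed at bit position 2*i; odd positions stay clear.
     */
    static inline unsigned long long spp_bitmap_to_leaf(unsigned int access_map)
    {
            unsigned long long leaf = 0;
            unsigned int i;

            for (i = 0; i < SPP_SUBPAGES; i++)
                    if (access_map & (1u << i))
                            leaf |= 1ull << (i * 2);
            return leaf;
    }

So an access_map of 0xf (writes allowed only to the first 512 bytes,
i.e. sub-pages 0-3) becomes an SPPT leaf value of 0x55.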

Co-developed-by: He Chen <he.chen@linux.intel.com>
Signed-off-by: He Chen <he.chen@linux.intel.com>
Co-developed-by: Zhang Yi <yi.z.zhang@linux.intel.com>
Signed-off-by: Zhang Yi <yi.z.zhang@linux.intel.com>
Co-developed-by: Yang Weijiang <weijiang.yang@intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
Message-Id: <20190717133751.12910-4-weijiang.yang@intel.com>
Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
 arch/x86/include/asm/kvm_host.h |   7 +-
 arch/x86/kvm/mmu.c              | 207 ++++++++++++++++++++++++++++++++
 arch/x86/kvm/mmu.h              |   1 +
 include/linux/kvm_host.h        |   3 +
 4 files changed, 217 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f1b3d89a0430..c05984f39923 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -271,7 +271,8 @@ union kvm_mmu_page_role {
 		unsigned smap_andnot_wp:1;
 		unsigned ad_disabled:1;
 		unsigned guest_mode:1;
-		unsigned :6;
+		unsigned spp:1;
+		unsigned reserved:5;
 
 		/*
 		 * This is left at the top of the word so that
@@ -1410,6 +1411,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
 		       void *insn, int insn_len);
+
+int kvm_mmu_setup_spp_structure(struct kvm_vcpu *vcpu,
+				u32 access_map, gfn_t gfn);
+
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 810e3e5bd575..8a6287cd2be4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -206,6 +206,11 @@ static const union kvm_mmu_page_role mmu_base_role_mask = {
 		({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });	\
 	     __shadow_walk_next(&(_walker), spte))
 
+#define for_each_shadow_spp_entry(_vcpu, _addr, _walker)    \
+	for (shadow_spp_walk_init(&(_walker), _vcpu, _addr);	\
+	     shadow_walk_okay(&(_walker));			\
+	     shadow_walk_next(&(_walker)))
+
 static struct kmem_cache *pte_list_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
 static struct percpu_counter kvm_total_used_mmu_pages;
@@ -505,6 +510,11 @@ static int is_shadow_present_pte(u64 pte)
 	return (pte != 0) && !is_mmio_spte(pte);
 }
 
+static int is_spp_shadow_present(u64 pte)
+{
+	return pte & PT_PRESENT_MASK;
+}
+
 static int is_large_pte(u64 pte)
 {
 	return pte & PT_PAGE_SIZE_MASK;
@@ -524,6 +534,11 @@ static bool is_executable_pte(u64 spte)
 	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
 }
 
+static bool is_spp_spte(struct kvm_mmu_page *sp)
+{
+	return sp->role.spp;
+}
+
 static kvm_pfn_t spte_to_pfn(u64 pte)
 {
 	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -1751,6 +1766,87 @@ int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static bool __rmap_open_subpage_bit(struct kvm *kvm,
+				    struct kvm_rmap_head *rmap_head)
+{
+	struct rmap_iterator iter;
+	bool flush = false;
+	u64 *sptep;
+	u64 spte;
+
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
+		/*
+		 * SPP works only when the page is write-protected
+		 * and the SPP bit is set in the EPT leaf entry.
+		 */
+		flush |= spte_write_protect(sptep, false);
+		spte = *sptep | PT_SPP_MASK;
+		flush |= mmu_spte_update(sptep, spte);
+	}
+
+	return flush;
+}
+
+static int kvm_mmu_open_subpage_write_protect(struct kvm *kvm,
+					      struct kvm_memory_slot *slot,
+					      gfn_t gfn)
+{
+	struct kvm_rmap_head *rmap_head;
+	bool flush = false;
+
+	/*
+	 * SPP is supported only for 4KB pages; check that the gfn is
+	 * mapped by a level-1 (leaf) EPT entry.
+	 */
+	rmap_head = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
+
+	if (!rmap_head->val)
+		return -EFAULT;
+
+	flush |= __rmap_open_subpage_bit(kvm, rmap_head);
+
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+
+	return 0;
+}
+
+static bool __rmap_clear_subpage_bit(struct kvm *kvm,
+				     struct kvm_rmap_head *rmap_head)
+{
+	struct rmap_iterator iter;
+	bool flush = false;
+	u64 *sptep;
+	u64 spte;
+
+	for_each_rmap_spte(rmap_head, &iter, sptep) {
+		spte = (*sptep & ~PT_SPP_MASK) | PT_WRITABLE_MASK;
+		flush |= mmu_spte_update(sptep, spte);
+	}
+
+	return flush;
+}
+
+static int kvm_mmu_clear_subpage_write_protect(struct kvm *kvm,
+					       struct kvm_memory_slot *slot,
+					       gfn_t gfn)
+{
+	struct kvm_rmap_head *rmap_head;
+	bool flush = false;
+
+	rmap_head = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
+
+	if (!rmap_head->val)
+		return -EFAULT;
+
+	flush |= __rmap_clear_subpage_bit(kvm, rmap_head);
+
+	if (flush)
+		kvm_flush_remote_tlbs(kvm);
+
+	return 0;
+}
+
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 				    struct kvm_memory_slot *slot, u64 gfn)
 {
@@ -2505,6 +2601,30 @@ static unsigned int kvm_mmu_page_track_acc(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return acc;
 }
 
+struct kvm_mmu_page *kvm_mmu_get_spp_page(struct kvm_vcpu *vcpu,
+						 gfn_t gfn,
+						 unsigned int level)
+
+{
+	struct kvm_mmu_page *sp;
+	union kvm_mmu_page_role role;
+
+	role = vcpu->arch.mmu->mmu_role.base;
+	role.level = level;
+	role.direct = true;
+	role.spp = true;
+
+	sp = kvm_mmu_alloc_page(vcpu, true);
+	sp->gfn = gfn;
+	sp->role = role;
+	hlist_add_head(&sp->hash_link,
+		       &vcpu->kvm->arch.mmu_page_hash
+		       [kvm_page_table_hashfn(gfn)]);
+	clear_page(sp->spt);
+	return sp;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_get_spp_page);
+
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gfn_t gfn,
 					     gva_t gaddr,
@@ -2632,6 +2752,16 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
 				    addr);
 }
 
+static void shadow_spp_walk_init(struct kvm_shadow_walk_iterator *iterator,
+				 struct kvm_vcpu *vcpu, u64 addr)
+{
+	iterator->addr = addr;
+	iterator->shadow_addr = vcpu->arch.mmu->sppt_root;
+
+	/* SPP Table is a 4-level paging structure */
+	iterator->level = 4;
+}
+
 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
 {
 	if (iterator->level < PT_PAGE_TABLE_LEVEL)
@@ -2682,6 +2812,18 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
 		mark_unsync(sptep);
 }
 
+static void link_spp_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
+				 struct kvm_mmu_page *sp)
+{
+	u64 spte;
+
+	spte = __pa(sp->spt) | PT_PRESENT_MASK;
+
+	mmu_spte_set(sptep, spte);
+
+	mmu_page_add_parent_pte(vcpu, sp, sptep);
+}
+
 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 				   unsigned direct_access)
 {
@@ -4253,6 +4395,71 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	return RET_PF_RETRY;
 }
 
+static u64 format_spp_spte(u32 spp_wp_bitmap)
+{
+	u64 new_spte = 0;
+	int i = 0;
+
+	/*
+	 * One 4KB page contains 32 sub-pages. In the SPPT leaf entry the
+	 * odd bit positions are reserved, so convert the u32 sub-page
+	 * write-protect bitmap to the u64 SPPT leaf-entry format.
+	 */
+	while (i < 32) {
+		if (spp_wp_bitmap & (1ULL << i))
+			new_spte |= 1ULL << (i * 2);
+
+		i++;
+	}
+
+	return new_spte;
+}
+
+static void mmu_spp_spte_set(u64 *sptep, u64 new_spte)
+{
+	__set_spte(sptep, new_spte);
+}
+
+int kvm_mmu_setup_spp_structure(struct kvm_vcpu *vcpu,
+				u32 access_map, gfn_t gfn)
+{
+	struct kvm_shadow_walk_iterator iter;
+	struct kvm_mmu_page *sp;
+	gfn_t pseudo_gfn;
+	u64 old_spte, spp_spte;
+	int ret = -EFAULT;
+
+	/* The SPPT root must be valid before its entries can be built. */
+	if (!VALID_PAGE(vcpu->arch.mmu->sppt_root))
+		return -EFAULT;
+
+	for_each_shadow_spp_entry(vcpu, (u64)gfn << PAGE_SHIFT, iter) {
+		if (iter.level == PT_PAGE_TABLE_LEVEL) {
+			spp_spte = format_spp_spte(access_map);
+			old_spte = mmu_spte_get_lockless(iter.sptep);
+			if (old_spte != spp_spte) {
+				mmu_spp_spte_set(iter.sptep, spp_spte);
+				kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+			}
+
+			ret = 0;
+			break;
+		}
+
+		if (!is_spp_shadow_present(*iter.sptep)) {
+			u64 base_addr = iter.addr;
+
+			base_addr &= PT64_LVL_ADDR_MASK(iter.level);
+			pseudo_gfn = base_addr >> PAGE_SHIFT;
+			sp = kvm_mmu_get_spp_page(vcpu, pseudo_gfn,
+						  iter.level - 1);
+			link_spp_shadow_page(vcpu, iter.sptep, sp);
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_setup_spp_structure);
 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
 				   struct kvm_mmu *context)
 {
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 45948dabe0b6..8c34decd6422 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -26,6 +26,7 @@
 #define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
 #define PT_PAT_MASK (1ULL << 7)
 #define PT_GLOBAL_MASK (1ULL << 8)
+#define PT_SPP_MASK (1ULL << 61)
 #define PT64_NX_SHIFT 63
 #define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e876921938b6..ca7597e429df 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -832,6 +832,9 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
+struct kvm_mmu_page *kvm_mmu_get_spp_page(struct kvm_vcpu *vcpu,
+			gfn_t gfn, unsigned int level);
+
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*
  * All architectures that want to use vzalloc currently also


