From: "Adalbert Lazăr" <alazar@bitdefender.com>
To: kvm@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Mihai Donțu" <mdontu@bitdefender.com>,
	"Marian Rotariu" <marian.c.rotariu@gmail.com>,
	"Stefan Sicleru" <ssicleru@bitdefender.com>,
	"Adalbert Lazăr" <alazar@bitdefender.com>
Subject: [PATCH v10 30/81] KVM: x86: wire in the preread/prewrite/preexec page trackers
Date: Wed, 25 Nov 2020 11:35:09 +0200
Message-ID: <20201125093600.2766-31-alazar@bitdefender.com>
In-Reply-To: <20201125093600.2766-1-alazar@bitdefender.com>

From: Mihai Donțu <mdontu@bitdefender.com>

These hooks notify the introspection tool when a read, write or
execute access is made to one of the tracked memory pages.
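
For illustration only (this sketch is not part of the patch), an
in-kernel consumer such as the introspection subsystem could hook the
new trackers roughly as below. The notifier callback name
(track_prewrite) is assumed from the page_track patch earlier in this
series, and kvmi_handle_prewrite() is a hypothetical handler:

static bool kvmi_track_prewrite(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
				const u8 *new, int bytes,
				struct kvm_page_track_notifier_node *node)
{
	/*
	 * Returning false denies the write and makes the vCPU re-enter
	 * the guest instead of completing the emulation (see below).
	 */
	return kvmi_handle_prewrite(vcpu, gpa, gva, new, bytes);
}

static struct kvm_page_track_notifier_node kvmi_node = {
	.track_prewrite = kvmi_track_prewrite,	/* assumed callback name */
};

static void kvmi_watch_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
			   gfn_t gfn)
{
	kvm_page_track_register_notifier(kvm, &kvmi_node);
	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_PREWRITE);
}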

This patch also handles the case in which the introspection tool
requests that the vCPU re-enter the guest, aborting the emulation of
the current instruction.
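
As a simplified sketch of the retry path wired up here (the real
changes are in the diff that follows): a "deny" answer from a
pre-access hook becomes X86EMUL_RETRY_INSTR, the emulator maps it to
the new EMULATION_RETRY_INSTR, and x86_emulate_instruction() then
returns 1 so the vCPU re-enters the guest. The write path, for
example, ends up looking roughly like this:

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
			const void *val, int bytes)
{
	/* Ask the introspection tool first; false means "deny". */
	if (!kvm_page_track_prewrite(vcpu, gpa, gva, val, bytes))
		return X86EMUL_RETRY_INSTR;

	if (kvm_vcpu_write_guest(vcpu, gpa, val, bytes) < 0)
		return X86EMUL_UNHANDLEABLE;

	/* Post-write notification, as before. */
	kvm_page_track_write(vcpu, gpa, gva, val, bytes);
	return X86EMUL_CONTINUE;
}

The read and fetch paths follow the same pattern via
kvm_page_track_preread() and kvm_page_track_preexec().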

Signed-off-by: Mihai Donțu <mdontu@bitdefender.com>
Co-developed-by: Marian Rotariu <marian.c.rotariu@gmail.com>
Signed-off-by: Marian Rotariu <marian.c.rotariu@gmail.com>
Co-developed-by: Stefan Sicleru <ssicleru@bitdefender.com>
Signed-off-by: Stefan Sicleru <ssicleru@bitdefender.com>
Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
 arch/x86/kvm/emulate.c     |  4 ++++
 arch/x86/kvm/kvm_emulate.h |  1 +
 arch/x86/kvm/mmu/mmu.c     | 38 +++++++++++++++++++++-----------
 arch/x86/kvm/mmu/spte.c    | 17 ++++++++++++++
 arch/x86/kvm/x86.c         | 45 ++++++++++++++++++++++++++++++--------
 5 files changed, 83 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 56cae1ff9e3f..ad32632d4dcb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -5474,6 +5474,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 					ctxt->memopp->addr.mem.ea + ctxt->_eip);
 
 done:
+	if (rc == X86EMUL_RETRY_INSTR)
+		return EMULATION_RETRY_INSTR;
 	if (rc == X86EMUL_PROPAGATE_FAULT)
 		ctxt->have_exception = true;
 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
@@ -5845,6 +5847,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	if (rc == X86EMUL_INTERCEPTED)
 		return EMULATION_INTERCEPTED;
 
+	if (rc == X86EMUL_RETRY_INSTR)
+		return EMULATION_RETRY_INSTR;
 	if (rc == X86EMUL_CONTINUE)
 		writeback_registers(ctxt);
 
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 43c93ffa76ed..5bfab8d65cd1 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -496,6 +496,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
 #define EMULATION_INTERCEPTED 2
+#define EMULATION_RETRY_INSTR 3
 void init_decode_cache(struct x86_emulate_ctxt *ctxt);
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 36add6fb712f..23b72532cd18 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -769,9 +769,13 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	slot = __gfn_to_memslot(slots, gfn);
 
 	/* the non-leaf shadow pages are keeping readonly. */
-	if (sp->role.level > PG_LEVEL_4K)
-		return kvm_slot_page_track_add_page(kvm, slot, gfn,
-						    KVM_PAGE_TRACK_WRITE);
+	if (sp->role.level > PG_LEVEL_4K) {
+		kvm_slot_page_track_add_page(kvm, slot, gfn,
+					     KVM_PAGE_TRACK_PREWRITE);
+		kvm_slot_page_track_add_page(kvm, slot, gfn,
+					     KVM_PAGE_TRACK_WRITE);
+		return;
+	}
 
 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 }
@@ -797,9 +801,13 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
 	gfn = sp->gfn;
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 	slot = __gfn_to_memslot(slots, gfn);
-	if (sp->role.level > PG_LEVEL_4K)
-		return kvm_slot_page_track_remove_page(kvm, slot, gfn,
-						       KVM_PAGE_TRACK_WRITE);
+	if (sp->role.level > PG_LEVEL_4K) {
+		kvm_slot_page_track_remove_page(kvm, slot, gfn,
+						KVM_PAGE_TRACK_PREWRITE);
+		kvm_slot_page_track_remove_page(kvm, slot, gfn,
+						KVM_PAGE_TRACK_WRITE);
+		return;
+	}
 
 	kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
@@ -2601,7 +2609,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 {
 	struct kvm_mmu_page *sp;
 
-	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREWRITE) ||
+	    kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
 		return true;
 
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
@@ -3689,15 +3698,18 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
 	if (unlikely(error_code & PFERR_RSVD_MASK))
 		return false;
 
-	if (!(error_code & PFERR_PRESENT_MASK) ||
-	      !(error_code & PFERR_WRITE_MASK))
-		return false;
-
 	/*
-	 * guest is writing the page which is write tracked which can
+	 * The guest is reading/writing/fetching a page that is
+	 * read/write/execute tracked, so the fault can
 	 * not be fixed by page fault handler.
 	 */
-	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+	if (((error_code & PFERR_USER_MASK) &&
+	     kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREREAD)) ||
+	    ((error_code & PFERR_WRITE_MASK) &&
+	     (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREWRITE) ||
+	      kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))) ||
+	    ((error_code & PFERR_FETCH_MASK) &&
+	     kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREEXEC)))
 		return true;
 
 	return false;
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index fcac2cac78fe..9225b80ab209 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -81,6 +81,21 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
 				     E820_TYPE_RAM);
 }
 
+static unsigned int kvm_mmu_apply_introspection_access(struct kvm_vcpu *vcpu,
+							gfn_t gfn,
+							unsigned int acc)
+{
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREREAD))
+		acc &= ~ACC_USER_MASK;
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREWRITE) ||
+	    kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+		acc &= ~ACC_WRITE_MASK;
+	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_PREEXEC))
+		acc &= ~ACC_EXEC_MASK;
+
+	return acc;
+}
+
 int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 		     gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,
 		     bool can_unsync, bool host_writable, bool ad_disabled,
@@ -89,6 +104,8 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
 	u64 spte = 0;
 	int ret = 0;
 
+	pte_access = kvm_mmu_apply_introspection_access(vcpu, gfn, pte_access);
+
 	if (ad_disabled)
 		spte |= SPTE_AD_DISABLED_MASK;
 	else if (kvm_vcpu_ad_need_write_protect(vcpu))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4d19da016c12..a82db6b30aee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5911,6 +5911,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
+		if (!kvm_page_track_preread(vcpu, gpa, addr, toread))
+			return X86EMUL_RETRY_INSTR;
 		ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
 					       offset, toread);
 		if (ret < 0) {
@@ -5942,6 +5944,9 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 	if (unlikely(gpa == UNMAPPED_GVA))
 		return X86EMUL_PROPAGATE_FAULT;
 
+	if (!kvm_page_track_preexec(vcpu, gpa, addr))
+		return X86EMUL_RETRY_INSTR;
+
 	offset = addr & (PAGE_SIZE-1);
 	if (WARN_ON(offset + bytes > PAGE_SIZE))
 		bytes = (unsigned)PAGE_SIZE - offset;
@@ -6010,11 +6015,14 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes
 
 		if (gpa == UNMAPPED_GVA)
 			return X86EMUL_PROPAGATE_FAULT;
+		if (!kvm_page_track_prewrite(vcpu, gpa, addr, data, towrite))
+			return X86EMUL_RETRY_INSTR;
 		ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
 			goto out;
 		}
+		kvm_page_track_write(vcpu, gpa, addr, data, towrite);
 
 		bytes -= towrite;
 		data += towrite;
@@ -6118,13 +6126,22 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
 			const void *val, int bytes)
 {
-	int ret;
-
-	ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
-	if (ret < 0)
-		return 0;
+	if (!kvm_page_track_prewrite(vcpu, gpa, gva, val, bytes))
+		return X86EMUL_RETRY_INSTR;
+	if (kvm_vcpu_write_guest(vcpu, gpa, val, bytes) < 0)
+		return X86EMUL_UNHANDLEABLE;
 	kvm_page_track_write(vcpu, gpa, gva, val, bytes);
-	return 1;
+	return X86EMUL_CONTINUE;
+}
+
+static int emulator_read_phys(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+			      void *val, int bytes)
+{
+	if (!kvm_page_track_preread(vcpu, gpa, gva, bytes))
+		return X86EMUL_RETRY_INSTR;
+	if (kvm_vcpu_read_guest(vcpu, gpa, val, bytes) < 0)
+		return X86EMUL_UNHANDLEABLE;
+	return X86EMUL_CONTINUE;
 }
 
 struct read_write_emulator_ops {
@@ -6154,7 +6171,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
 			void *val, int bytes)
 {
-	return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
+	return emulator_read_phys(vcpu, gpa, gva, val, bytes);
 }
 
 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
@@ -6228,8 +6245,11 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
 			return X86EMUL_PROPAGATE_FAULT;
 	}
 
-	if (!ret && ops->read_write_emulate(vcpu, gpa, addr, val, bytes))
-		return X86EMUL_CONTINUE;
+	if (!ret) {
+		ret = ops->read_write_emulate(vcpu, gpa, addr, val, bytes);
+		if (ret == X86EMUL_CONTINUE || ret == X86EMUL_RETRY_INSTR)
+			return ret;
+	}
 
 	/*
 	 * Is this MMIO handled locally?
@@ -6373,6 +6393,9 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
+	if (!kvm_page_track_prewrite(vcpu, gpa, addr, new, bytes))
+		return X86EMUL_RETRY_INSTR;
+
 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
 		goto emul_write;
 
 	kaddr = map.hva + offset_in_page(gpa);
 
 	switch (bytes) {
@@ -7323,6 +7346,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 		trace_kvm_emulate_insn_start(vcpu);
 		++vcpu->stat.insn_emulation;
+		if (r == EMULATION_RETRY_INSTR)
+			return 1;
 		if (r != EMULATION_OK)  {
 			if ((emulation_type & EMULTYPE_TRAP_UD) ||
 			    (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
@@ -7392,6 +7417,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
 	r = x86_emulate_insn(ctxt);
 
+	if (r == EMULATION_RETRY_INSTR)
+		return 1;
 	if (r == EMULATION_INTERCEPTED)
 		return 1;
 

Thread overview: 192+ messages
2020-11-25  9:34 [PATCH v10 00/81] VM introspection Adalbert Lazăr
2020-11-25  9:34 ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 01/81] KVM: UAPI: add error codes used by the VM introspection code Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 02/81] KVM: add kvm_vcpu_kick_and_wait() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 03/81] KVM: add kvm_get_max_gfn() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 04/81] KVM: doc: fix the hypercalls numbering Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 05/81] KVM: x86: add kvm_arch_vcpu_get_regs() and kvm_arch_vcpu_get_sregs() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 06/81] KVM: x86: add kvm_arch_vcpu_set_regs() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 07/81] KVM: x86: avoid injecting #PF when emulate the VMCALL instruction Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 08/81] KVM: x86: add kvm_x86_ops.bp_intercepted() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 09/81] KVM: x86: add kvm_x86_ops.control_cr3_intercept() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 10/81] KVM: x86: add kvm_x86_ops.cr3_write_intercepted() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 11/81] KVM: x86: add kvm_x86_ops.desc_ctrl_supported() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 12/81] KVM: svm: add support for descriptor-table VM-exits Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 13/81] KVM: x86: add kvm_x86_ops.control_desc_intercept() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 14/81] KVM: x86: add kvm_x86_ops.desc_intercepted() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 15/81] KVM: x86: add kvm_x86_ops.msr_write_intercepted() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 16/81] KVM: x86: svm: use the vmx convention to control the MSR interception Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 17/81] KVM: x86: add kvm_x86_ops.control_msr_intercept() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 18/81] KVM: x86: vmx: use a symbolic constant when checking the exit qualifications Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 19/81] KVM: x86: save the error code during EPT/NPF exits handling Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:34 ` [PATCH v10 20/81] KVM: x86: add kvm_x86_ops.fault_gla() Adalbert Lazăr
2020-11-25  9:34   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 21/81] KVM: x86: add kvm_x86_ops.control_singlestep() Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 22/81] KVM: x86: export kvm_arch_vcpu_set_guest_debug() Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 23/81] KVM: x86: extend kvm_mmu_gva_to_gpa_system() with the 'access' parameter Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 24/81] KVM: x86: export kvm_inject_pending_exception() Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 25/81] KVM: x86: export kvm_vcpu_ioctl_x86_get_xsave() Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25 12:19   ` kernel test robot
2020-11-25 12:19     ` kernel test robot
2020-11-25 12:19     ` kernel test robot
2020-11-25 12:24   ` kernel test robot
2020-11-25 12:24     ` kernel test robot
2020-11-25 12:24     ` kernel test robot
2020-11-25  9:35 ` [PATCH v10 26/81] KVM: x86: export kvm_vcpu_ioctl_x86_set_xsave() Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 27/81] KVM: x86: page track: provide all callbacks with the guest virtual address Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 28/81] KVM: x86: page track: add track_create_slot() callback Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 29/81] KVM: x86: page_track: add support for preread, prewrite and preexec Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` Adalbert Lazăr [this message]
2020-11-25  9:35   ` [PATCH v10 30/81] KVM: x86: wire in the preread/prewrite/preexec page trackers Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 31/81] KVM: x86: disable gpa_available optimization for fetch and page-walk SPT violations Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 32/81] KVM: introduce VM introspection Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25 12:45   ` kernel test robot
2020-11-25 12:45     ` kernel test robot
2020-11-25 12:45     ` kernel test robot
2020-11-25 13:41   ` kernel test robot
2020-11-25 13:41     ` kernel test robot
2020-11-25 13:41     ` kernel test robot
2020-11-25 16:24   ` kernel test robot
2020-11-25 16:24     ` kernel test robot
2020-11-25 16:24     ` kernel test robot
2020-11-25  9:35 ` [PATCH v10 33/81] KVM: introspection: add hook/unhook ioctls Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25 11:49   ` kernel test robot
2020-11-25 11:49     ` kernel test robot
2020-11-25  9:35 ` [PATCH v10 34/81] KVM: introspection: add permission access ioctls Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 35/81] KVM: introspection: add the read/dispatch message function Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 36/81] KVM: introspection: add KVMI_GET_VERSION Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 37/81] KVM: introspection: add KVMI_VM_CHECK_COMMAND and KVMI_VM_CHECK_EVENT Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 38/81] KVM: introspection: add KVMI_VM_GET_INFO Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 39/81] KVM: introspection: add KVM_INTROSPECTION_PREUNHOOK Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 40/81] KVM: introspection: add KVMI_VM_EVENT_UNHOOK Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 41/81] KVM: introspection: add KVMI_VM_CONTROL_EVENTS Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 42/81] KVM: introspection: add KVMI_VM_READ_PHYSICAL/KVMI_VM_WRITE_PHYSICAL Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 43/81] KVM: introspection: add vCPU related data Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 44/81] KVM: introspection: add a jobs list to every introspected vCPU Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 45/81] KVM: introspection: handle vCPU introspection requests Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 46/81] KVM: introspection: handle vCPU commands Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 47/81] KVM: introspection: add KVMI_VCPU_GET_INFO Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 48/81] KVM: introspection: add KVMI_VM_PAUSE_VCPU Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 49/81] KVM: introspection: add support for vCPU events Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 50/81] KVM: introspection: add KVMI_VCPU_EVENT_PAUSE Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 51/81] KVM: introspection: add the crash action handling on the event reply Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 52/81] KVM: introspection: add KVMI_VCPU_CONTROL_EVENTS Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 53/81] KVM: introspection: add KVMI_VCPU_GET_REGISTERS Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 54/81] KVM: introspection: add KVMI_VCPU_SET_REGISTERS Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 55/81] KVM: introspection: add KVMI_VCPU_GET_CPUID Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 56/81] KVM: introspection: add KVMI_VCPU_EVENT_HYPERCALL Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 57/81] KVM: introspection: add KVMI_VCPU_EVENT_BREAKPOINT Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 58/81] KVM: introspection: add cleanup support for vCPUs Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 59/81] KVM: introspection: restore the state of #BP interception on unhook Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 60/81] KVM: introspection: add KVMI_VM_CONTROL_CLEANUP Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 61/81] KVM: introspection: add KVMI_VCPU_CONTROL_CR and KVMI_VCPU_EVENT_CR Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 62/81] KVM: introspection: restore the state of CR3 interception on unhook Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 63/81] KVM: introspection: add KVMI_VCPU_INJECT_EXCEPTION + KVMI_VCPU_EVENT_TRAP Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25 12:57   ` kernel test robot
2020-11-25 12:57     ` kernel test robot
2020-11-25  9:35 ` [PATCH v10 64/81] KVM: introspection: add KVMI_VM_GET_MAX_GFN Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 65/81] KVM: introspection: add KVMI_VCPU_EVENT_XSETBV Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 66/81] KVM: introspection: add KVMI_VCPU_GET_XCR Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25 15:22   ` kernel test robot
2020-11-25 15:22     ` kernel test robot
2020-11-25 15:22     ` kernel test robot
2020-11-25  9:35 ` [PATCH v10 67/81] KVM: introspection: add KVMI_VCPU_GET_XSAVE Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25 13:50   ` kernel test robot
2020-11-25 13:50     ` kernel test robot
2020-11-25  9:35 ` [PATCH v10 68/81] KVM: introspection: add KVMI_VCPU_SET_XSAVE Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 69/81] KVM: introspection: add KVMI_VCPU_GET_MTRR_TYPE Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 70/81] KVM: introspection: add KVMI_VCPU_EVENT_DESCRIPTOR Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 71/81] KVM: introspection: restore the state of descriptor-table register interception on unhook Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 72/81] KVM: introspection: add KVMI_VCPU_CONTROL_MSR and KVMI_VCPU_EVENT_MSR Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 73/81] KVM: introspection: restore the state of MSR interception on unhook Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 74/81] KVM: introspection: add KVMI_VM_SET_PAGE_ACCESS Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 75/81] KVM: introspection: add KVMI_VCPU_EVENT_PF Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25 14:44   ` kernel test robot
2020-11-25 14:44     ` kernel test robot
2020-11-25  9:35 ` [PATCH v10 76/81] KVM: introspection: extend KVMI_GET_VERSION with struct kvmi_features Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 77/81] KVM: introspection: add KVMI_VCPU_CONTROL_SINGLESTEP Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 78/81] KVM: introspection: add KVMI_VCPU_EVENT_SINGLESTEP Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:35 ` [PATCH v10 79/81] KVM: introspection: add KVMI_VCPU_TRANSLATE_GVA Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25 15:46   ` kernel test robot
2020-11-25 15:46     ` kernel test robot
2020-11-25  9:35 ` [PATCH v10 80/81] KVM: introspection: emulate a guest page table walk on SPT violations due to A/D bit updates Adalbert Lazăr
2020-11-25  9:35   ` Adalbert Lazăr
2020-11-25  9:36 ` [PATCH v10 81/81] KVM: x86: call the page tracking code on emulation failure Adalbert Lazăr
2020-11-25  9:36   ` Adalbert Lazăr
