From: "Adalbert Lazăr" <alazar@bitdefender.com>
To: kvm@vger.kernel.org
Cc: linux-mm@kvack.org, virtualization@lists.linux-foundation.org,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
	"Tamas K Lengyel" <tamas@tklengyel.com>,
	"Mathieu Tarral" <mathieu.tarral@protonmail.com>,
	"Samuel Laurén" <samuel.lauren@iki.fi>,
	"Patrick Colp" <patrick.colp@oracle.com>,
	"Jan Kiszka" <jan.kiszka@siemens.com>,
	"Stefan Hajnoczi" <stefanha@redhat.com>,
	"Weijiang Yang" <weijiang.yang@intel.com>,
	"Yu C Zhang" <yu.c.zhang@intel.com>,
	"Mihai Donțu" <mdontu@bitdefender.com>,
	"Adalbert Lazăr" <alazar@bitdefender.com>,
	"Nicușor Cîțu" <ncitu@bitdefender.com>,
	"Marian Rotariu" <marian.c.rotariu@gmail.com>
Subject: [RFC PATCH v6 27/92] kvm: introspection: use page track
Date: Fri,  9 Aug 2019 18:59:42 +0300
Message-ID: <20190809160047.8319-28-alazar@bitdefender.com>
In-Reply-To: <20190809160047.8319-1-alazar@bitdefender.com>

From: Mihai Donțu <mdontu@bitdefender.com>

From the preread, prewrite and preexec page-track callbacks we will
send KVMI_EVENT_PF events for the faults caused by the access
restrictions enforced by the introspection tool.
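
To make the new check concrete, the following stand-alone user-space
model (illustrative only, not kernel code) mirrors the predicate that
kvmi_restricted_access() introduces below: the introspection tool
clears bits from a page's allowed-access mask and an event is raised
only when the attempted access uses a bit that was cleared. The
KVMI_PAGE_ACCESS_* values are the ones this patch adds to
include/uapi/linux/kvmi.h; the restricted() helper, file name and
output are made up for the example.

	/* Stand-alone model of the kvmi_restricted_access() check.
	 * Build with: gcc -o access_demo access_demo.c
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define KVMI_PAGE_ACCESS_R (1 << 0)
	#define KVMI_PAGE_ACCESS_W (1 << 1)
	#define KVMI_PAGE_ACCESS_X (1 << 2)

	/* Notify only when the attempted access uses a bit that the
	 * introspection tool has cleared from the allowed mask.
	 */
	static bool restricted(unsigned int allowed, unsigned int access)
	{
		return (~allowed & access) != 0;
	}

	int main(void)
	{
		/* The tool keeps read/execute but revokes write. */
		unsigned int allowed = KVMI_PAGE_ACCESS_R | KVMI_PAGE_ACCESS_X;

		printf("read : %s\n", restricted(allowed, KVMI_PAGE_ACCESS_R) ?
				      "event" : "continue");
		printf("write: %s\n", restricted(allowed, KVMI_PAGE_ACCESS_W) ?
				      "event" : "continue");
		printf("exec : %s\n", restricted(allowed, KVMI_PAGE_ACCESS_X) ?
				      "event" : "continue");
		return 0;
	}

In the real code a page with no entry in the access tree keeps full
access, so no event is sent for it at all.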

Signed-off-by: Mihai Donțu <mdontu@bitdefender.com>
Co-developed-by: Nicușor Cîțu <ncitu@bitdefender.com>
Signed-off-by: Nicușor Cîțu <ncitu@bitdefender.com>
Co-developed-by: Marian Rotariu <marian.c.rotariu@gmail.com>
Signed-off-by: Marian Rotariu <marian.c.rotariu@gmail.com>
Co-developed-by: Adalbert Lazăr <alazar@bitdefender.com>
Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
 arch/x86/include/asm/kvmi_host.h |  12 ++
 arch/x86/kvm/kvmi.c              |  45 +++++
 include/uapi/linux/kvmi.h        |   4 +
 virt/kvm/kvmi.c                  | 293 ++++++++++++++++++++++++++++++-
 virt/kvm/kvmi_int.h              |  21 +++
 5 files changed, 374 insertions(+), 1 deletion(-)
 create mode 100644 arch/x86/include/asm/kvmi_host.h

diff --git a/arch/x86/include/asm/kvmi_host.h b/arch/x86/include/asm/kvmi_host.h
new file mode 100644
index 000000000000..7ab6dd71a0c2
--- /dev/null
+++ b/arch/x86/include/asm/kvmi_host.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_KVMI_HOST_H
+#define _ASM_X86_KVMI_HOST_H
+
+#include <asm/kvm_host.h>
+#include <asm/kvm_page_track.h>
+
+struct kvmi_arch_mem_access {
+	unsigned long active[KVM_PAGE_TRACK_MAX][BITS_TO_LONGS(KVM_MEM_SLOTS_NUM)];
+};
+
+#endif /* _ASM_X86_KVMI_HOST_H */
diff --git a/arch/x86/kvm/kvmi.c b/arch/x86/kvm/kvmi.c
index 97c72cdc6fb0..d7b9201582b4 100644
--- a/arch/x86/kvm/kvmi.c
+++ b/arch/x86/kvm/kvmi.c
@@ -91,6 +91,12 @@ void kvmi_arch_setup_event(struct kvm_vcpu *vcpu, struct kvmi_event *ev)
 	kvmi_get_msrs(vcpu, event);
 }
 
+bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+			u8 access)
+{
+	return KVMI_EVENT_ACTION_CONTINUE; /* TODO */
+}
+
 int kvmi_arch_cmd_get_vcpu_info(struct kvm_vcpu *vcpu,
 				struct kvmi_get_vcpu_info_reply *rpl)
 {
@@ -102,3 +108,42 @@ int kvmi_arch_cmd_get_vcpu_info(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static const struct {
+	unsigned int allow_bit;
+	enum kvm_page_track_mode track_mode;
+} track_modes[] = {
+	{ KVMI_PAGE_ACCESS_R, KVM_PAGE_TRACK_PREREAD },
+	{ KVMI_PAGE_ACCESS_W, KVM_PAGE_TRACK_PREWRITE },
+	{ KVMI_PAGE_ACCESS_X, KVM_PAGE_TRACK_PREEXEC },
+};
+
+void kvmi_arch_update_page_tracking(struct kvm *kvm,
+				    struct kvm_memory_slot *slot,
+				    struct kvmi_mem_access *m)
+{
+	struct kvmi_arch_mem_access *arch = &m->arch;
+	int i;
+
+	if (!slot) {
+		slot = gfn_to_memslot(kvm, m->gfn);
+		if (!slot)
+			return;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(track_modes); i++) {
+		unsigned int allow_bit = track_modes[i].allow_bit;
+		enum kvm_page_track_mode mode = track_modes[i].track_mode;
+		bool slot_tracked = test_bit(slot->id, arch->active[mode]);
+
+		if (m->access & allow_bit) {
+			if (slot_tracked) {
+				kvm_slot_page_track_remove_page(kvm, slot,
+								m->gfn, mode);
+				clear_bit(slot->id, arch->active[mode]);
+			}
+		} else if (!slot_tracked) {
+			kvm_slot_page_track_add_page(kvm, slot, m->gfn, mode);
+			set_bit(slot->id, arch->active[mode]);
+		}
+	}
+}
diff --git a/include/uapi/linux/kvmi.h b/include/uapi/linux/kvmi.h
index aa5bc909e278..c56e676ddb2b 100644
--- a/include/uapi/linux/kvmi.h
+++ b/include/uapi/linux/kvmi.h
@@ -70,6 +70,10 @@ enum {
 #define KVMI_EVENT_ACTION_RETRY         1
 #define KVMI_EVENT_ACTION_CRASH         2
 
+#define KVMI_PAGE_ACCESS_R (1 << 0)
+#define KVMI_PAGE_ACCESS_W (1 << 1)
+#define KVMI_PAGE_ACCESS_X (1 << 2)
+
 #define KVMI_MSG_SIZE (4096 - sizeof(struct kvmi_msg_hdr))
 
 struct kvmi_msg_hdr {
diff --git a/virt/kvm/kvmi.c b/virt/kvm/kvmi.c
index d0d9adf5b6ed..5cbc82b284f4 100644
--- a/virt/kvm/kvmi.c
+++ b/virt/kvm/kvmi.c
@@ -11,10 +11,27 @@
 #include <linux/bitmap.h>
 
 static struct kmem_cache *msg_cache;
+static struct kmem_cache *radix_cache;
 static struct kmem_cache *job_cache;
 
 static bool kvmi_create_vcpu_event(struct kvm_vcpu *vcpu);
 static void kvmi_abort_events(struct kvm *kvm);
+static bool kvmi_track_preread(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+	u8 *new, int bytes, struct kvm_page_track_notifier_node *node,
+	bool *data_ready);
+static bool kvmi_track_prewrite(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+	const u8 *new, int bytes, struct kvm_page_track_notifier_node *node);
+static bool kvmi_track_preexec(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+	struct kvm_page_track_notifier_node *node);
+static void kvmi_track_create_slot(struct kvm *kvm,
+	struct kvm_memory_slot *slot, unsigned long npages,
+	struct kvm_page_track_notifier_node *node);
+static void kvmi_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
+	struct kvm_page_track_notifier_node *node);
+
+static const u8 full_access  =	KVMI_PAGE_ACCESS_R |
+				KVMI_PAGE_ACCESS_W |
+				KVMI_PAGE_ACCESS_X;
 
 void *kvmi_msg_alloc(void)
 {
@@ -34,23 +51,96 @@ void kvmi_msg_free(void *addr)
 		kmem_cache_free(msg_cache, addr);
 }
 
+static struct kvmi_mem_access *__kvmi_get_gfn_access(struct kvmi *ikvm,
+						     const gfn_t gfn)
+{
+	return radix_tree_lookup(&ikvm->access_tree, gfn);
+}
+
+static int kvmi_get_gfn_access(struct kvmi *ikvm, const gfn_t gfn,
+			       u8 *access)
+{
+	struct kvmi_mem_access *m;
+
+	*access = full_access;
+
+	read_lock(&ikvm->access_tree_lock);
+	m = __kvmi_get_gfn_access(ikvm, gfn);
+	if (m)
+		*access = m->access;
+	read_unlock(&ikvm->access_tree_lock);
+
+	return m ? 0 : -1;
+}
+
+static bool kvmi_restricted_access(struct kvmi *ikvm, gpa_t gpa, u8 access)
+{
+	u8 allowed_access;
+	int err;
+
+	err = kvmi_get_gfn_access(ikvm, gpa_to_gfn(gpa), &allowed_access);
+
+	if (err)
+		return false;
+
+	/*
+	 * We want to be notified only for violations involving access
+	 * bits that we've specifically cleared
+	 */
+	if ((~allowed_access) & access)
+		return true;
+
+	return false;
+}
+
+static void kvmi_clear_mem_access(struct kvm *kvm)
+{
+	void **slot;
+	struct radix_tree_iter iter;
+	struct kvmi *ikvm = IKVM(kvm);
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+	write_lock(&ikvm->access_tree_lock);
+
+	radix_tree_for_each_slot(slot, &ikvm->access_tree, &iter, 0) {
+		struct kvmi_mem_access *m = *slot;
+
+		m->access = full_access;
+		kvmi_arch_update_page_tracking(kvm, NULL, m);
+
+		radix_tree_iter_delete(&ikvm->access_tree, &iter, slot);
+		kmem_cache_free(radix_cache, m);
+	}
+
+	write_unlock(&ikvm->access_tree_lock);
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+}
+
 static void kvmi_cache_destroy(void)
 {
 	kmem_cache_destroy(msg_cache);
 	msg_cache = NULL;
+	kmem_cache_destroy(radix_cache);
+	radix_cache = NULL;
 	kmem_cache_destroy(job_cache);
 	job_cache = NULL;
 }
 
 static int kvmi_cache_create(void)
 {
+	radix_cache = kmem_cache_create("kvmi_radix_tree",
+					sizeof(struct kvmi_mem_access),
+					0, SLAB_ACCOUNT, NULL);
 	job_cache = kmem_cache_create("kvmi_job",
 				      sizeof(struct kvmi_job),
 				      0, SLAB_ACCOUNT, NULL);
 	msg_cache = kmem_cache_create("kvmi_msg", KVMI_MSG_SIZE_ALLOC,
 				      4096, SLAB_ACCOUNT, NULL);
 
-	if (!msg_cache || !job_cache) {
+	if (!msg_cache || !radix_cache || !job_cache) {
 		kvmi_cache_destroy();
 
 		return -1;
@@ -77,6 +167,10 @@ static bool alloc_kvmi(struct kvm *kvm, const struct kvm_introspection *qemu)
 	if (!ikvm)
 		return false;
 
+	/* see comments of radix_tree_preload() - no direct reclaim */
+	INIT_RADIX_TREE(&ikvm->access_tree, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM);
+	rwlock_init(&ikvm->access_tree_lock);
+
 	atomic_set(&ikvm->ev_seq, 0);
 
 	set_bit(KVMI_GET_VERSION, ikvm->cmd_allow_mask);
@@ -85,6 +179,12 @@ static bool alloc_kvmi(struct kvm *kvm, const struct kvm_introspection *qemu)
 
 	memcpy(&ikvm->uuid, &qemu->uuid, sizeof(ikvm->uuid));
 
+	ikvm->kptn_node.track_preread = kvmi_track_preread;
+	ikvm->kptn_node.track_prewrite = kvmi_track_prewrite;
+	ikvm->kptn_node.track_preexec = kvmi_track_preexec;
+	ikvm->kptn_node.track_create_slot = kvmi_track_create_slot;
+	ikvm->kptn_node.track_flush_slot = kvmi_track_flush_slot;
+
 	ikvm->kvm = kvm;
 	kvm->kvmi = ikvm;
 
@@ -276,6 +376,179 @@ void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu)
 	vcpu->kvmi = NULL;
 }
 
+static bool is_pf_of_interest(struct kvm_vcpu *vcpu, gpa_t gpa, u8 access)
+{
+	struct kvm *kvm = vcpu->kvm;
+
+	if (kvm_mmu_nested_pagefault(vcpu))
+		return false;
+
+	/* Have we shown interest in this page? */
+	return kvmi_restricted_access(IKVM(kvm), gpa, access);
+}
+
+static bool __kvmi_track_preread(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+	u8 *new, int bytes, struct kvm_page_track_notifier_node *node,
+	bool *data_ready)
+{
+	bool ret;
+
+	if (!is_pf_of_interest(vcpu, gpa, KVMI_PAGE_ACCESS_R))
+		return true;
+
+	ret = kvmi_arch_pf_event(vcpu, gpa, gva, KVMI_PAGE_ACCESS_R);
+
+	return ret;
+}
+
+static bool kvmi_track_preread(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+	u8 *new, int bytes, struct kvm_page_track_notifier_node *node,
+	bool *data_ready)
+{
+	struct kvmi *ikvm;
+	bool ret = true;
+
+	ikvm = kvmi_get(vcpu->kvm);
+	if (!ikvm)
+		return true;
+
+	if (is_event_enabled(vcpu, KVMI_EVENT_PF))
+		ret = __kvmi_track_preread(vcpu, gpa, gva, new, bytes, node,
+					   data_ready);
+
+	kvmi_put(vcpu->kvm);
+
+	return ret;
+}
+
+static bool __kvmi_track_prewrite(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+	const u8 *new, int bytes,
+	struct kvm_page_track_notifier_node *node)
+{
+	if (!is_pf_of_interest(vcpu, gpa, KVMI_PAGE_ACCESS_W))
+		return true;
+
+	return kvmi_arch_pf_event(vcpu, gpa, gva, KVMI_PAGE_ACCESS_W);
+}
+
+static bool kvmi_track_prewrite(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+	const u8 *new, int bytes,
+	struct kvm_page_track_notifier_node *node)
+{
+	struct kvmi *ikvm;
+	bool ret = true;
+
+	ikvm = kvmi_get(vcpu->kvm);
+	if (!ikvm)
+		return true;
+
+	if (is_event_enabled(vcpu, KVMI_EVENT_PF))
+		ret = __kvmi_track_prewrite(vcpu, gpa, gva, new, bytes, node);
+
+	kvmi_put(vcpu->kvm);
+
+	return ret;
+}
+
+static bool __kvmi_track_preexec(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+	struct kvm_page_track_notifier_node *node)
+{
+	if (!is_pf_of_interest(vcpu, gpa, KVMI_PAGE_ACCESS_X))
+		return true;
+
+	return kvmi_arch_pf_event(vcpu, gpa, gva, KVMI_PAGE_ACCESS_X);
+}
+
+static bool kvmi_track_preexec(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+	struct kvm_page_track_notifier_node *node)
+{
+	struct kvmi *ikvm;
+	bool ret = true;
+
+	ikvm = kvmi_get(vcpu->kvm);
+	if (!ikvm)
+		return true;
+
+	if (is_event_enabled(vcpu, KVMI_EVENT_PF))
+		ret = __kvmi_track_preexec(vcpu, gpa, gva, node);
+
+	kvmi_put(vcpu->kvm);
+
+	return ret;
+}
+
+static void kvmi_track_create_slot(struct kvm *kvm,
+	struct kvm_memory_slot *slot,
+	unsigned long npages,
+	struct kvm_page_track_notifier_node *node)
+{
+	struct kvmi *ikvm;
+	gfn_t start = slot->base_gfn;
+	const gfn_t end = start + npages;
+	int idx;
+
+	ikvm = kvmi_get(kvm);
+	if (!ikvm)
+		return;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+	read_lock(&ikvm->access_tree_lock);
+
+	while (start < end) {
+		struct kvmi_mem_access *m;
+
+		m = __kvmi_get_gfn_access(ikvm, start);
+		if (m)
+			kvmi_arch_update_page_tracking(kvm, slot, m);
+		start++;
+	}
+
+	read_unlock(&ikvm->access_tree_lock);
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	kvmi_put(kvm);
+}
+
+static void kvmi_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
+	struct kvm_page_track_notifier_node *node)
+{
+	struct kvmi *ikvm;
+	gfn_t start = slot->base_gfn;
+	const gfn_t end = start + slot->npages;
+	int idx;
+
+	ikvm = kvmi_get(kvm);
+	if (!ikvm)
+		return;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+	write_lock(&ikvm->access_tree_lock);
+
+	while (start < end) {
+		struct kvmi_mem_access *m;
+
+		m = __kvmi_get_gfn_access(ikvm, start);
+		if (m) {
+			u8 prev_access = m->access;
+
+			m->access = full_access;
+			kvmi_arch_update_page_tracking(kvm, slot, m);
+			m->access = prev_access;
+		}
+
+		start++;
+	}
+
+	write_unlock(&ikvm->access_tree_lock);
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	kvmi_put(kvm);
+}
+
 static void kvmi_end_introspection(struct kvmi *ikvm)
 {
 	struct kvm *kvm = ikvm->kvm;
@@ -290,6 +563,22 @@ static void kvmi_end_introspection(struct kvmi *ikvm)
 	 */
 	kvmi_abort_events(kvm);
 
+	/*
+	 * This may sleep on synchronize_srcu() so it's not allowed to be
+	 * called under kvmi_put().
+	 * Also synchronize_srcu() may deadlock on (page tracking) read-side
+	 * regions that are waiting for reply to events, so must be called
+	 * after kvmi_abort_events().
+	 */
+	kvm_page_track_unregister_notifier(kvm, &ikvm->kptn_node);
+
+	/*
+	 * This function uses kvm->mmu_lock so it's not allowed to be
+	 * called under kvmi_put(). It can reach a deadlock if called
+	 * from kvm_mmu_load -> kvmi_tracked_gfn -> kvmi_put.
+	 */
+	kvmi_clear_mem_access(kvm);
+
 	/*
 	 * At this moment the socket is shut down, no more commands will come
 	 * from the introspector, and the only way into the introspection is
@@ -351,6 +640,8 @@ int kvmi_hook(struct kvm *kvm, const struct kvm_introspection *qemu)
 		goto err_alloc;
 	}
 
+	kvm_page_track_register_notifier(kvm, &ikvm->kptn_node);
+
 	/*
 	 * Make sure all the KVM/KVMI structures are linked and no pointer
 	 * is read as NULL after the reference count has been set.
diff --git a/virt/kvm/kvmi_int.h b/virt/kvm/kvmi_int.h
index 7cff91bc1acc..d798908d0f70 100644
--- a/virt/kvm/kvmi_int.h
+++ b/virt/kvm/kvmi_int.h
@@ -6,6 +6,7 @@
 #include <linux/kvm_host.h>
 
 #include <uapi/linux/kvmi.h>
+#include <asm/kvmi_host.h>
 
 #define kvmi_debug(ikvm, fmt, ...) \
 	kvm_debug("%pU " fmt, &ikvm->uuid, ## __VA_ARGS__)
@@ -104,6 +105,10 @@ struct kvmi_vcpu {
 
 struct kvmi {
 	struct kvm *kvm;
+	struct kvm_page_track_notifier_node kptn_node;
+
+	struct radix_tree_root access_tree;
+	rwlock_t access_tree_lock;
 
 	struct socket *sock;
 	struct task_struct *recv;
@@ -118,6 +123,17 @@ struct kvmi {
 	bool cmd_reply_disabled;
 };
 
+struct kvmi_mem_access {
+	gfn_t gfn;
+	u8 access;
+	struct kvmi_arch_mem_access arch;
+};
+
+static inline bool is_event_enabled(struct kvm_vcpu *vcpu, int event)
+{
+	return false; /* TODO */
+}
+
 /* kvmi_msg.c */
 bool kvmi_sock_get(struct kvmi *ikvm, int fd);
 void kvmi_sock_shutdown(struct kvmi *ikvm);
@@ -138,7 +154,12 @@ int kvmi_add_job(struct kvm_vcpu *vcpu,
 		 void *ctx, void (*free_fct)(void *ctx));
 
 /* arch */
+void kvmi_arch_update_page_tracking(struct kvm *kvm,
+				    struct kvm_memory_slot *slot,
+				    struct kvmi_mem_access *m);
 void kvmi_arch_setup_event(struct kvm_vcpu *vcpu, struct kvmi_event *ev);
+bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
+			u8 access);
 int kvmi_arch_cmd_get_vcpu_info(struct kvm_vcpu *vcpu,
 				struct kvmi_get_vcpu_info_reply *rpl);
 


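For readers not familiar with the page-track bookkeeping above, the
following stand-alone sketch models what kvmi_arch_update_page_tracking()
does with struct kvmi_arch_mem_access: one "tracked" flag per
(tracking mode, memslot) pair, with the allow bit deciding whether
tracking has to be added or removed for the gfn. It is a simplified
user-space model (small fixed slot count, plain bool arrays, made-up
ACCESS_*/TRACK_* names and update_tracking() helper) standing in for
the kernel bitmaps and the kvm_slot_page_track_*() calls, not the
kernel code itself.

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-ins for the definitions used by the patch. */
	enum track_mode { TRACK_PREREAD, TRACK_PREWRITE, TRACK_PREEXEC, TRACK_MAX };

	#define ACCESS_R  (1 << 0)
	#define ACCESS_W  (1 << 1)
	#define ACCESS_X  (1 << 2)
	#define MAX_SLOTS 8

	/* Mirrors kvmi_arch_mem_access::active[][]: is this slot tracked
	 * in this mode for the page?
	 */
	static bool active[TRACK_MAX][MAX_SLOTS];

	static const struct {
		unsigned int allow_bit;
		enum track_mode mode;
	} track_modes[] = {
		{ ACCESS_R, TRACK_PREREAD  },
		{ ACCESS_W, TRACK_PREWRITE },
		{ ACCESS_X, TRACK_PREEXEC  },
	};

	/* Model of kvmi_arch_update_page_tracking(): track the page in a
	 * mode while the matching access bit is cleared, untrack it once
	 * the bit is allowed again.
	 */
	static void update_tracking(int slot, unsigned int access)
	{
		unsigned int i;

		for (i = 0; i < sizeof(track_modes) / sizeof(track_modes[0]); i++) {
			enum track_mode mode = track_modes[i].mode;
			bool tracked = active[mode][slot];

			if (access & track_modes[i].allow_bit) {
				if (tracked) {
					/* kvm_slot_page_track_remove_page() in the patch */
					active[mode][slot] = false;
				}
			} else if (!tracked) {
				/* kvm_slot_page_track_add_page() in the patch */
				active[mode][slot] = true;
			}
		}
	}

	int main(void)
	{
		update_tracking(0, ACCESS_R | ACCESS_X);	    /* write revoked */
		printf("prewrite tracked: %d\n", active[TRACK_PREWRITE][0]);
		update_tracking(0, ACCESS_R | ACCESS_W | ACCESS_X); /* restored */
		printf("prewrite tracked: %d\n", active[TRACK_PREWRITE][0]);
		return 0;
	}

The same pattern is what lets kvmi_track_flush_slot() temporarily set
full access before calling kvmi_arch_update_page_tracking(): the page
is untracked in the slot that goes away while the restriction itself
is kept for later.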