From: "Adalbert Lazăr" <alazar@bitdefender.com>
To: kvm@vger.kernel.org
Cc: linux-mm@kvack.org, virtualization@lists.linux-foundation.org,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Radim Krčmář" <rkrcmar@redhat.com>,
"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
"Tamas K Lengyel" <tamas@tklengyel.com>,
"Mathieu Tarral" <mathieu.tarral@protonmail.com>,
"Samuel Laurén" <samuel.lauren@iki.fi>,
"Patrick Colp" <patrick.colp@oracle.com>,
"Jan Kiszka" <jan.kiszka@siemens.com>,
"Stefan Hajnoczi" <stefanha@redhat.com>,
"Weijiang Yang" <weijiang.yang@intel.com>,
"Yu C Zhang" <yu.c.zhang@intel.com>,
"Mihai Donțu" <mdontu@bitdefender.com>,
"Adalbert Lazăr" <alazar@bitdefender.com>,
"He Chen" <he.chen@linux.intel.com>,
"Zhang Yi" <yi.z.zhang@linux.intel.com>
Subject: [RFC PATCH v6 38/92] KVM: VMX: Add init/set/get functions for SPP
Date: Fri, 9 Aug 2019 18:59:53 +0300
Message-ID: <20190809160047.8319-39-alazar@bitdefender.com>
In-Reply-To: <20190809160047.8319-1-alazar@bitdefender.com>
From: Yang Weijiang <weijiang.yang@intel.com>
init_spp() must be called before the {get,set}_subpages() functions.
It creates the sub-page access bitmaps for memory pages and issues a
KVM request to set up the SPPT root pages.

kvm_mmu_set_subpages() enables the SPP bit in the EPT leaf entries and
sets up the corresponding SPPT entries. The mmu_lock must be held
around these operations: when called from the EPT fault handler or the
SPPT mis-config handler, the lock is acquired outside the function;
otherwise it is acquired inside it.

kvm_mmu_get_subpages() queries the access bitmap of a protected page.
It is also used in the EPT fault handler to check whether the faulting
page is SPP-protected.
Co-developed-by: He Chen <he.chen@linux.intel.com>
Signed-off-by: He Chen <he.chen@linux.intel.com>
Co-developed-by: Zhang Yi <yi.z.zhang@linux.intel.com>
Signed-off-by: Zhang Yi <yi.z.zhang@linux.intel.com>
Co-developed-by: Yang Weijiang <weijiang.yang@intel.com>
Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
Message-Id: <20190717133751.12910-6-weijiang.yang@intel.com>
Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
arch/x86/include/asm/kvm_host.h | 18 ++++
arch/x86/include/asm/vmx.h | 2 +
arch/x86/kvm/mmu.c | 160 ++++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/vmx.c | 48 ++++++++++
arch/x86/kvm/x86.c | 57 ++++++++++++
include/linux/kvm_host.h | 3 +
include/uapi/linux/kvm.h | 9 ++
7 files changed, 297 insertions(+)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f0878631b12a..7ee6e1ff5ee9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -399,8 +399,13 @@ struct kvm_mmu {
void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
+ int (*get_subpages)(struct kvm *kvm, struct kvm_subpage *spp_info);
+ int (*set_subpages)(struct kvm *kvm, struct kvm_subpage *spp_info);
+ int (*init_spp)(struct kvm *kvm);
+
hpa_t root_hpa;
gpa_t root_cr3;
+ hpa_t sppt_root;
union kvm_mmu_role mmu_role;
u8 root_level;
u8 shadow_root_level;
@@ -929,6 +934,8 @@ struct kvm_arch {
bool guest_can_read_msr_platform_info;
bool exception_payload_enabled;
+
+ bool spp_active;
};
struct kvm_vm_stat {
@@ -1202,6 +1209,11 @@ struct kvm_x86_ops {
int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version);
uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
+
+ bool (*get_spp_status)(void);
+ int (*get_subpages)(struct kvm *kvm, struct kvm_subpage *spp_info);
+ int (*set_subpages)(struct kvm *kvm, struct kvm_subpage *spp_info);
+ int (*init_spp)(struct kvm *kvm);
};
struct kvm_arch_async_pf {
@@ -1420,6 +1432,12 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
+int kvm_mmu_get_subpages(struct kvm *kvm, struct kvm_subpage *spp_info,
+ bool mmu_locked);
+int kvm_mmu_set_subpages(struct kvm *kvm, struct kvm_subpage *spp_info,
+ bool mmu_locked);
+int kvm_mmu_init_spp(struct kvm *kvm);
+
void kvm_enable_tdp(void);
void kvm_disable_tdp(void);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index a2c9e18e0ad7..6cb05ac07453 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -224,6 +224,8 @@ enum vmcs_field {
XSS_EXIT_BITMAP_HIGH = 0x0000202D,
ENCLS_EXITING_BITMAP = 0x0000202E,
ENCLS_EXITING_BITMAP_HIGH = 0x0000202F,
+ SPPT_POINTER = 0x00002030,
+ SPPT_POINTER_HIGH = 0x00002031,
TSC_MULTIPLIER = 0x00002032,
TSC_MULTIPLIER_HIGH = 0x00002033,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f2774bbcfeed..38e79210d010 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3846,6 +3846,9 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
(mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
mmu_free_root_page(vcpu->kvm, &mmu->root_hpa,
&invalid_list);
+ if (vcpu->kvm->arch.spp_active)
+ mmu_free_root_page(vcpu->kvm, &mmu->sppt_root,
+ &invalid_list);
} else {
for (i = 0; i < 4; ++i)
if (mmu->pae_root[i] != 0)
@@ -4510,6 +4513,158 @@ int kvm_mmu_setup_spp_structure(struct kvm_vcpu *vcpu,
return ret;
}
EXPORT_SYMBOL_GPL(kvm_mmu_setup_spp_structure);
+
+int kvm_mmu_init_spp(struct kvm *kvm)
+{
+ int i, ret;
+ struct kvm_vcpu *vcpu;
+ int root_level;
+ struct kvm_mmu_page *ssp_sp;
+
+ if (!kvm_x86_ops->get_spp_status())
+ return -ENODEV;
+
+ if (kvm->arch.spp_active)
+ return 0;
+
+ ret = kvm_subpage_create_bitmaps(kvm);
+
+ if (ret)
+ return ret;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ /* prepare caches for SPP setup. */
+ mmu_topup_memory_caches(vcpu);
+ root_level = vcpu->arch.mmu->shadow_root_level;
+ ssp_sp = kvm_mmu_get_spp_page(vcpu, 0, root_level);
+ ++ssp_sp->root_count;
+ vcpu->arch.mmu->sppt_root = __pa(ssp_sp->spt);
+ kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
+ }
+
+ kvm->arch.spp_active = true;
+ return 0;
+}
+
+int kvm_mmu_get_subpages(struct kvm *kvm, struct kvm_subpage *spp_info,
+ bool mmu_locked)
+{
+ u32 *access = spp_info->access_map;
+ gfn_t gfn = spp_info->base_gfn;
+ int npages = spp_info->npages;
+ struct kvm_memory_slot *slot;
+ int i;
+ int ret;
+
+ if (!kvm->arch.spp_active)
+ return -ENODEV;
+
+ if (!mmu_locked)
+ spin_lock(&kvm->mmu_lock);
+
+ for (i = 0; i < npages; i++, gfn++) {
+ slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+ access[i] = *gfn_to_subpage_wp_info(slot, gfn);
+ }
+
+ ret = i;
+
+out_unlock:
+ if (!mmu_locked)
+ spin_unlock(&kvm->mmu_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_get_subpages);
+
+int kvm_mmu_set_subpages(struct kvm *kvm, struct kvm_subpage *spp_info,
+ bool mmu_locked)
+{
+ u32 *access = spp_info->access_map;
+ gfn_t gfn = spp_info->base_gfn;
+ int npages = spp_info->npages;
+ struct kvm_memory_slot *slot;
+ struct kvm_vcpu *vcpu;
+ struct kvm_rmap_head *rmap_head;
+ int i, k;
+ u32 *wp_map;
+ int ret = -EFAULT;
+
+ if (!kvm->arch.spp_active)
+ return -ENODEV;
+
+ if (!mmu_locked)
+ spin_lock(&kvm->mmu_lock);
+
+ for (i = 0; i < npages; i++, gfn++) {
+ slot = gfn_to_memslot(kvm, gfn);
+ if (!slot)
+ goto out_unlock;
+
+ /*
+ * Check whether the target 4KB page exists in the EPT leaf
+ * entries. If it's there, we can set up SPP protection now;
+ * otherwise it must be deferred to the EPT page fault handler.
+ */
+ rmap_head = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot);
+
+ if (rmap_head->val) {
+ /*
+ * If not all sub-pages are writable, set the SPP bit in the
+ * EPT leaf entry to enable SPP protection for the
+ * corresponding page.
+ */
+ if (access[i] != FULL_SPP_ACCESS) {
+ ret = kvm_mmu_open_subpage_write_protect(kvm,
+ slot, gfn);
+
+ if (ret)
+ goto out_err;
+
+ kvm_for_each_vcpu(k, vcpu, kvm)
+ kvm_mmu_setup_spp_structure(vcpu,
+ access[i], gfn);
+ } else {
+ ret = kvm_mmu_clear_subpage_write_protect(kvm,
+ slot, gfn);
+ if (ret)
+ goto out_err;
+ }
+
+ } else
+ pr_info("%s - No ETP entry, gfn = 0x%llx, access = 0x%x.\n", __func__, gfn, access[i]);
+
+ /* If this function is called from tdp_page_fault() or
+ * spp_handler(), mmu_locked is true and the SPP access
+ * bitmap is consumed directly; otherwise it is stored here.
+ */
+ if (!mmu_locked) {
+ wp_map = gfn_to_subpage_wp_info(slot, gfn);
+ *wp_map = access[i];
+ }
+ }
+
+ ret = i;
+out_err:
+ if (ret < 0)
+ pr_info("SPP-Error, didn't get the gfn:" \
+ "%llx from EPT leaf.\n"
+ "Current we don't support SPP on" \
+ "huge page.\n"
+ "Please disable huge page and have" \
+ "another try.\n", gfn);
+out_unlock:
+ if (!mmu_locked)
+ spin_unlock(&kvm->mmu_lock);
+
+ return ret;
+}
+
static void nonpaging_init_context(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
@@ -5207,6 +5362,9 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->get_cr3 = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
+ context->get_subpages = kvm_x86_ops->get_subpages;
+ context->set_subpages = kvm_x86_ops->set_subpages;
+ context->init_spp = kvm_x86_ops->init_spp;
if (!is_paging(vcpu)) {
context->nx = false;
@@ -5403,6 +5561,8 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
uint i;
vcpu->arch.mmu->root_hpa = INVALID_PAGE;
+ if (!vcpu->kvm->arch.spp_active)
+ vcpu->arch.mmu->sppt_root = INVALID_PAGE;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f94e3defd9cf..a50dd2b9d438 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2853,11 +2853,17 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
return eptp;
}
+static inline u64 construct_spptp(unsigned long root_hpa)
+{
+ return root_hpa & PAGE_MASK;
+}
+
void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
struct kvm *kvm = vcpu->kvm;
unsigned long guest_cr3;
u64 eptp;
+ u64 spptp;
guest_cr3 = cr3;
if (enable_ept) {
@@ -2880,6 +2886,12 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
ept_load_pdptrs(vcpu);
}
+ if (kvm->arch.spp_active && VALID_PAGE(vcpu->arch.mmu->sppt_root)) {
+ spptp = construct_spptp(vcpu->arch.mmu->sppt_root);
+ vmcs_write64(SPPT_POINTER, spptp);
+ vmx_flush_tlb(vcpu, true);
+ }
+
vmcs_writel(GUEST_CR3, guest_cr3);
}
@@ -5743,6 +5755,9 @@ static void dump_vmcs(void)
pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
+ if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_SPP))
+ pr_err("SPPT pointer = 0x%016llx\n", vmcs_read64(SPPT_POINTER));
+
n = vmcs_read32(CR3_TARGET_COUNT);
for (i = 0; i + 1 < n; i += 4)
pr_err("CR3 target%u=%016lx target%u=%016lx\n",
@@ -7646,6 +7661,12 @@ static __init int hardware_setup(void)
kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
}
+ if (!spp_supported) {
+ kvm_x86_ops->get_subpages = NULL;
+ kvm_x86_ops->set_subpages = NULL;
+ kvm_x86_ops->init_spp = NULL;
+ }
+
if (!cpu_has_vmx_preemption_timer())
kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
@@ -7706,6 +7727,28 @@ static bool vmx_spt_fault(struct kvm_vcpu *vcpu)
return (vmx->exit_reason == EXIT_REASON_EPT_VIOLATION);
}
+static bool vmx_get_spp_status(void)
+{
+ return spp_supported;
+}
+
+static int vmx_get_subpages(struct kvm *kvm,
+ struct kvm_subpage *spp_info)
+{
+ return kvm_get_subpages(kvm, spp_info);
+}
+
+static int vmx_set_subpages(struct kvm *kvm,
+ struct kvm_subpage *spp_info)
+{
+ return kvm_set_subpages(kvm, spp_info);
+}
+
+static int vmx_init_spp(struct kvm *kvm)
+{
+ return kvm_init_spp(kvm);
+}
+
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -7856,6 +7899,11 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.set_nested_state = NULL,
.get_vmcs12_pages = NULL,
.nested_enable_evmcs = NULL,
+
+ .get_spp_status = vmx_get_spp_status,
+ .get_subpages = vmx_get_subpages,
+ .set_subpages = vmx_set_subpages,
+ .init_spp = vmx_init_spp,
};
static void vmx_cleanup_l1d_flush(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2ac1e0aba1fc..b8ae25cb227b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4576,6 +4576,61 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
return r;
}
+static int kvm_vm_ioctl_get_subpages(struct kvm *kvm,
+ struct kvm_subpage *spp_info)
+{
+ return kvm_arch_get_subpages(kvm, spp_info);
+}
+
+static int kvm_vm_ioctl_set_subpages(struct kvm *kvm,
+ struct kvm_subpage *spp_info)
+{
+ return kvm_arch_set_subpages(kvm, spp_info);
+}
+
+static int kvm_vm_ioctl_init_spp(struct kvm *kvm)
+{
+ return kvm_arch_init_spp(kvm);
+}
+
+int kvm_get_subpages(struct kvm *kvm,
+ struct kvm_subpage *spp_info)
+{
+ int ret;
+
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_mmu_get_subpages(kvm, spp_info, false);
+ mutex_unlock(&kvm->slots_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_get_subpages);
+
+int kvm_set_subpages(struct kvm *kvm,
+ struct kvm_subpage *spp_info)
+{
+ int ret;
+
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_mmu_set_subpages(kvm, spp_info, false);
+ mutex_unlock(&kvm->slots_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_set_subpages);
+
+int kvm_init_spp(struct kvm *kvm)
+{
+ int ret;
+
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_mmu_init_spp(kvm);
+ mutex_unlock(&kvm->slots_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_init_spp);
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -9352,6 +9407,8 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
}
kvm_page_track_free_memslot(free, dont);
+ if (kvm->arch.spp_active)
+ kvm_subpage_free_memslot(free, dont);
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ca7597e429df..0b9a0f546397 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -834,6 +834,9 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
struct kvm_mmu_page *kvm_mmu_get_spp_page(struct kvm_vcpu *vcpu,
gfn_t gfn, unsigned int level);
+int kvm_get_subpages(struct kvm *kvm, struct kvm_subpage *spp_info);
+int kvm_set_subpages(struct kvm *kvm, struct kvm_subpage *spp_info);
+int kvm_init_spp(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 2ff05fd123e3..ad8f2a3ca72d 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -102,6 +102,15 @@ struct kvm_userspace_memory_region {
__u64 userspace_addr; /* start of the userspace allocated memory */
};
+/* for KVM_SUBPAGES_GET_ACCESS and KVM_SUBPAGES_SET_ACCESS */
+#define SUBPAGE_MAX_BITMAP 64
+struct kvm_subpage {
+ __u64 base_gfn;
+ __u64 npages;
+ /* sub-page write-access bitmap array */
+ __u32 access_map[SUBPAGE_MAX_BITMAP];
+};
+
/*
* The bit 0 ~ bit 15 of kvm_memory_region::flags are visible for userspace,
* other bits are reserved for kvm internal use which are defined in