From: Razvan Cojocaru <rcojocaru@bitdefender.com>
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, tamas@tklengyel.com, wei.liu2@citrix.com,
	jun.nakajima@intel.com,
	Razvan Cojocaru <rcojocaru@bitdefender.com>,
	andrew.cooper3@citrix.com, ian.jackson@eu.citrix.com,
	jbeulich@suse.com, keir@xen.org
Subject: [PATCH V2] vm_event: Allow subscribing to write events for specific MSR-s
Date: Wed, 13 Apr 2016 08:11:20 +0300
Message-ID: <1460524280-6902-1-git-send-email-rcojocaru@bitdefender.com>

Previously, subscribing to MSR write events was an all-or-none
approach, with special cases for introspection MSR-s. This patch
allows the vm_event consumer to specify exactly which MSR-s it is
interested in, and as a side-effect gets rid of the
vmx_introspection_force_enabled_msrs[] special case.
This replaces the previously posted "xen: Filter out MSR write
events" patch.

Signed-off-by: Razvan Cojocaru <rcojocaru@bitdefender.com>

---
Changes since V1:
 - Removed ARM stubs.
 - Corrected domain_unpause(d) omission.
 - Moved enable / disable and query functions from vm_event.c to
   monitor.c.
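
For reference (not part of the patch): a minimal consumer-side sketch of
the new interface, assuming an open xc_interface handle (xch), a target
domain_id, and a monitor vm_event ring already set up for the domain. The
MSR indices below are the usual architectural values for a subset of the
MSR-s previously force-enabled via vmx_introspection_force_enabled_msrs[]:

    static const uint32_t msrs[] = {
        0x174,      /* IA32_SYSENTER_CS  */
        0x175,      /* IA32_SYSENTER_ESP */
        0x176,      /* IA32_SYSENTER_EIP */
        0xc0000081, /* STAR              */
        0xc0000082, /* LSTAR             */
    };
    unsigned int i;
    int rc = 0;

    /* Subscribe only to write events for the MSR-s of interest. */
    for ( i = 0; i < sizeof(msrs) / sizeof(msrs[0]) && rc == 0; i++ )
        rc = xc_monitor_mov_to_msr(xch, domain_id, msrs[i], true);

    /* A non-zero rc means the domctl failed (errno is set), e.g. the
     * domain's MSR bitmap is not yet allocated because vm_event has not
     * been initialized, or the event is already enabled for that MSR. */
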
---
 tools/libxc/include/xenctrl.h      |  4 +-
 tools/libxc/xc_monitor.c           |  6 +--
 xen/arch/x86/hvm/event.c           |  3 +-
 xen/arch/x86/hvm/hvm.c             |  3 +-
 xen/arch/x86/hvm/vmx/vmcs.c        | 26 ++-----------
 xen/arch/x86/hvm/vmx/vmx.c         | 10 ++---
 xen/arch/x86/monitor.c             | 79 ++++++++++++++++++++++++++++++++------
 xen/arch/x86/vm_event.c            | 10 +++++
 xen/include/asm-x86/domain.h       |  4 +-
 xen/include/asm-x86/hvm/hvm.h      |  8 ++--
 xen/include/asm-x86/hvm/vmx/vmcs.h |  7 ----
 xen/include/asm-x86/monitor.h      |  2 +
 xen/include/public/domctl.h        |  3 +-
 13 files changed, 99 insertions(+), 66 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index f5a034a..9698d46 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2183,8 +2183,8 @@ int xc_monitor_get_capabilities(xc_interface *xch, domid_t domain_id,
 int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
                              uint16_t index, bool enable, bool sync,
                              bool onchangeonly);
-int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, bool enable,
-                          bool extended_capture);
+int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, uint32_t msr,
+                          bool enable);
 int xc_monitor_singlestep(xc_interface *xch, domid_t domain_id, bool enable);
 int xc_monitor_software_breakpoint(xc_interface *xch, domid_t domain_id,
                                    bool enable);
diff --git a/tools/libxc/xc_monitor.c b/tools/libxc/xc_monitor.c
index b1705dd..78131b2 100644
--- a/tools/libxc/xc_monitor.c
+++ b/tools/libxc/xc_monitor.c
@@ -86,8 +86,8 @@ int xc_monitor_write_ctrlreg(xc_interface *xch, domid_t domain_id,
     return do_domctl(xch, &domctl);
 }
 
-int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, bool enable,
-                          bool extended_capture)
+int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, uint32_t msr,
+                          bool enable)
 {
     DECLARE_DOMCTL;
 
@@ -96,7 +96,7 @@ int xc_monitor_mov_to_msr(xc_interface *xch, domid_t domain_id, bool enable,
     domctl.u.monitor_op.op = enable ? XEN_DOMCTL_MONITOR_OP_ENABLE
                                     : XEN_DOMCTL_MONITOR_OP_DISABLE;
     domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR;
-    domctl.u.monitor_op.u.mov_to_msr.extended_capture = extended_capture;
+    domctl.u.monitor_op.u.mov_to_msr.msr = msr;
 
     return do_domctl(xch, &domctl);
 }
diff --git a/xen/arch/x86/hvm/event.c b/xen/arch/x86/hvm/event.c
index 56c5514..015910b 100644
--- a/xen/arch/x86/hvm/event.c
+++ b/xen/arch/x86/hvm/event.c
@@ -57,9 +57,8 @@ bool_t hvm_event_cr(unsigned int index, unsigned long value, unsigned long old)
 void hvm_event_msr(unsigned int msr, uint64_t value)
 {
     struct vcpu *curr = current;
-    struct arch_domain *ad = &curr->domain->arch;
 
-    if ( ad->monitor.mov_to_msr_enabled )
+    if ( arch_monitor_is_msr_enabled(curr->domain, msr) )
     {
         vm_event_request_t req = {
             .reason = VM_EVENT_REASON_MOV_TO_MSR,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f24126d..7c2a98c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3695,7 +3695,6 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
     bool_t mtrr;
     unsigned int edx, index;
     int ret = X86EMUL_OKAY;
-    struct arch_domain *currad = &current->domain->arch;
 
     HVMTRACE_3D(MSR_WRITE, msr,
                (uint32_t)msr_content, (uint32_t)(msr_content >> 32));
@@ -3703,7 +3702,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
     hvm_cpuid(1, NULL, NULL, NULL, &edx);
     mtrr = !!(edx & cpufeat_mask(X86_FEATURE_MTRR));
 
-    if ( may_defer && unlikely(currad->monitor.mov_to_msr_enabled) )
+    if ( may_defer && unlikely(arch_monitor_is_msr_enabled(v->domain, msr)) )
     {
         ASSERT(v->arch.vm_event);
 
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 8284281..f92f4b8 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -37,6 +37,7 @@
 #include <asm/hvm/vmx/vvmx.h>
 #include <asm/hvm/vmx/vmcs.h>
 #include <asm/flushtlb.h>
+#include <asm/monitor.h>
 #include <asm/shadow.h>
 #include <asm/tboot.h>
 #include <asm/apic.h>
@@ -108,18 +109,6 @@ u64 vmx_ept_vpid_cap __read_mostly;
 u64 vmx_vmfunc __read_mostly;
 bool_t vmx_virt_exception __read_mostly;
 
-const u32 vmx_introspection_force_enabled_msrs[] = {
-    MSR_IA32_SYSENTER_EIP,
-    MSR_IA32_SYSENTER_ESP,
-    MSR_IA32_SYSENTER_CS,
-    MSR_IA32_MC0_CTL,
-    MSR_STAR,
-    MSR_LSTAR
-};
-
-const unsigned int vmx_introspection_force_enabled_msrs_size =
-    ARRAY_SIZE(vmx_introspection_force_enabled_msrs);
-
 static DEFINE_PER_CPU_READ_MOSTLY(paddr_t, vmxon_region);
 static DEFINE_PER_CPU(paddr_t, current_vmcs);
 static DEFINE_PER_CPU(struct list_head, active_vmcs_list);
@@ -810,17 +799,8 @@ void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type)
     if ( msr_bitmap == NULL )
         return;
 
-    if ( unlikely(d->arch.monitor.mov_to_msr_enabled &&
-                  d->arch.monitor.mov_to_msr_extended) &&
-         vm_event_check_ring(&d->vm_event->monitor) )
-    {
-        unsigned int i;
-
-        /* Filter out MSR-s needed for memory introspection */
-        for ( i = 0; i < vmx_introspection_force_enabled_msrs_size; i++ )
-            if ( msr == vmx_introspection_force_enabled_msrs[i] )
-                return;
-    }
+    if ( unlikely(arch_monitor_is_msr_enabled(d, msr)) )
+        return;
 
     /*
      * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bc4410f..9135441 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1958,16 +1958,12 @@ void vmx_hypervisor_cpuid_leaf(uint32_t sub_idx,
         *eax |= XEN_HVM_CPUID_X2APIC_VIRT;
 }
 
-static void vmx_enable_msr_exit_interception(struct domain *d)
+static void vmx_enable_msr_interception(struct domain *d, uint32_t msr)
 {
     struct vcpu *v;
-    unsigned int i;
 
-    /* Enable interception for MSRs needed for memory introspection. */
     for_each_vcpu ( d, v )
-        for ( i = 0; i < vmx_introspection_force_enabled_msrs_size; i++ )
-            vmx_enable_intercept_for_msr(v, vmx_introspection_force_enabled_msrs[i],
-                                         MSR_TYPE_W);
+        vmx_enable_intercept_for_msr(v, msr, MSR_TYPE_W);
 }
 
 static bool_t vmx_is_singlestep_supported(void)
@@ -2166,7 +2162,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .handle_eoi           = vmx_handle_eoi,
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
     .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
-    .enable_msr_exit_interception = vmx_enable_msr_exit_interception,
+    .enable_msr_interception = vmx_enable_msr_interception,
     .is_singlestep_supported = vmx_is_singlestep_supported,
     .set_mode = vmx_set_mode,
     .altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
index 1fec412..4c96968 100644
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -22,6 +22,58 @@
 #include <asm/monitor.h>
 #include <public/vm_event.h>
 
+static int arch_monitor_enable_msr(struct domain *d, u32 msr)
+{
+    if ( !d->arch.monitor_msr_bitmap )
+        return -EINVAL;
+
+    if ( msr <= 0x1fff )
+        set_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        set_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
+    }
+
+    hvm_enable_msr_interception(d, msr);
+
+    return 0;
+}
+
+static int arch_monitor_disable_msr(struct domain *d, u32 msr)
+{
+    if ( !d->arch.monitor_msr_bitmap )
+        return -EINVAL;
+
+    if ( msr <= 0x1fff )
+        clear_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        clear_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
+    }
+
+    return 0;
+}
+
+bool_t arch_monitor_is_msr_enabled(const struct domain *d, u32 msr)
+{
+    bool_t rc = 0;
+
+    if ( !d->arch.monitor_msr_bitmap )
+        return 0;
+
+    if ( msr <= 0x1fff )
+        rc = test_bit(msr, d->arch.monitor_msr_bitmap + 0x000/BYTES_PER_LONG);
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        rc = test_bit(msr, d->arch.monitor_msr_bitmap + 0x400/BYTES_PER_LONG);
+    }
+
+    return rc;
+}
+
 int arch_monitor_domctl_event(struct domain *d,
                               struct xen_domctl_monitor_op *mop)
 {
@@ -77,25 +129,28 @@ int arch_monitor_domctl_event(struct domain *d,
 
     case XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR:
     {
-        bool_t old_status = ad->monitor.mov_to_msr_enabled;
+        bool_t old_status;
+        int rc;
+        u32 msr = mop->u.mov_to_msr.msr;
 
-        if ( unlikely(old_status == requested_status) )
-            return -EEXIST;
+        domain_pause(d);
 
-        if ( requested_status && mop->u.mov_to_msr.extended_capture &&
-             !hvm_enable_msr_exit_interception(d) )
-            return -EOPNOTSUPP;
+        old_status = arch_monitor_is_msr_enabled(d, msr);
 
-        domain_pause(d);
+        if ( unlikely(old_status == requested_status) )
+        {
+            domain_unpause(d);
+            return -EEXIST;
+        }
 
-        if ( requested_status && mop->u.mov_to_msr.extended_capture )
-            ad->monitor.mov_to_msr_extended = 1;
+        if ( requested_status )
+            rc = arch_monitor_enable_msr(d, msr);
         else
-            ad->monitor.mov_to_msr_extended = 0;
+            rc = arch_monitor_disable_msr(d, msr);
 
-        ad->monitor.mov_to_msr_enabled = requested_status;
         domain_unpause(d);
-        break;
+
+        return rc;
     }
 
     case XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP:
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index 5635603..9b4267e 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -27,6 +27,13 @@ int vm_event_init_domain(struct domain *d)
 {
     struct vcpu *v;
 
+    d->arch.monitor_msr_bitmap = alloc_xenheap_page();
+
+    if ( !d->arch.monitor_msr_bitmap )
+        return -ENOMEM;
+
+    memset(d->arch.monitor_msr_bitmap, 0, PAGE_SIZE);
+
     for_each_vcpu ( d, v )
     {
         if ( v->arch.vm_event )
@@ -55,6 +62,9 @@ void vm_event_cleanup_domain(struct domain *d)
         v->arch.vm_event = NULL;
     }
 
+    free_xenheap_page(d->arch.monitor_msr_bitmap);
+    d->arch.monitor_msr_bitmap = NULL;
+
     d->arch.mem_access_emulate_each_rep = 0;
     memset(&d->arch.monitor, 0, sizeof(d->arch.monitor));
     memset(&d->monitor, 0, sizeof(d->monitor));
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index d393ed2..d8d91c2 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -398,12 +398,12 @@ struct arch_domain
         unsigned int write_ctrlreg_enabled       : 4;
         unsigned int write_ctrlreg_sync          : 4;
         unsigned int write_ctrlreg_onchangeonly  : 4;
-        unsigned int mov_to_msr_enabled          : 1;
-        unsigned int mov_to_msr_extended         : 1;
         unsigned int singlestep_enabled          : 1;
         unsigned int software_breakpoint_enabled : 1;
     } monitor;
 
+    unsigned long *monitor_msr_bitmap;
+
     /* Mem_access emulation control */
     bool_t mem_access_emulate_each_rep;
 
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 7b7ff3f..9d1c0ef 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -211,7 +211,7 @@ struct hvm_function_table {
                                   uint32_t *eax, uint32_t *ebx,
                                   uint32_t *ecx, uint32_t *edx);
 
-    void (*enable_msr_exit_interception)(struct domain *d);
+    void (*enable_msr_interception)(struct domain *d, uint32_t msr);
     bool_t (*is_singlestep_supported)(void);
     int (*set_mode)(struct vcpu *v, int mode);
 
@@ -565,11 +565,11 @@ static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
     return hvm_funcs.nhvm_intr_blocked(v);
 }
 
-static inline bool_t hvm_enable_msr_exit_interception(struct domain *d)
+static inline bool_t hvm_enable_msr_interception(struct domain *d, uint32_t msr)
 {
-    if ( hvm_funcs.enable_msr_exit_interception )
+    if ( hvm_funcs.enable_msr_interception )
     {
-        hvm_funcs.enable_msr_exit_interception(d);
+        hvm_funcs.enable_msr_interception(d, msr);
         return 1;
     }
 
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index b54f52f..7bf5326 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -562,13 +562,6 @@ enum vmcs_field {
     HOST_RIP                        = 0x00006c16,
 };
 
-/*
- * A set of MSR-s that need to be enabled for memory introspection
- * to work.
- */
-extern const u32 vmx_introspection_force_enabled_msrs[];
-extern const unsigned int vmx_introspection_force_enabled_msrs_size;
-
 #define VMCS_VPID_WIDTH 16
 
 #define MSR_TYPE_R 1
diff --git a/xen/include/asm-x86/monitor.h b/xen/include/asm-x86/monitor.h
index 0954b59..74e5b1b 100644
--- a/xen/include/asm-x86/monitor.h
+++ b/xen/include/asm-x86/monitor.h
@@ -50,4 +50,6 @@ int arch_monitor_domctl_op(struct domain *d, struct xen_domctl_monitor_op *mop)
 int arch_monitor_domctl_event(struct domain *d,
                               struct xen_domctl_monitor_op *mop);
 
+bool_t arch_monitor_is_msr_enabled(const struct domain *d, u32 msr);
+
 #endif /* __ASM_X86_MONITOR_H__ */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 2457698..875c09a 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1107,8 +1107,7 @@ struct xen_domctl_monitor_op {
         } mov_to_cr;
 
         struct {
-            /* Enable the capture of an extended set of MSRs */
-            uint8_t extended_capture;
+            uint32_t msr;
         } mov_to_msr;
 
         struct {
-- 
1.9.1


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
