From: "Michał Leszczyński" <michal.leszczynski@cert.pl>
To: xen-devel@lists.xenproject.org
Cc: "Kevin Tian" <kevin.tian@intel.com>,
	tamas.lengyel@intel.com, "Jun Nakajima" <jun.nakajima@intel.com>,
	"Wei Liu" <wl@xen.org>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Michal Leszczynski" <michal.leszczynski@cert.pl>,
	"Jan Beulich" <jbeulich@suse.com>,
	luwei.kang@intel.com, "Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH v4 04/10] x86/vmx: implement processor tracing for VMX
Date: Tue, 30 Jun 2020 14:33:47 +0200
Message-ID: <70df90dad7e759f4bb3dba405dc45e372a57fab7.1593519420.git.michal.leszczynski@cert.pl>
In-Reply-To: <cover.1593519420.git.michal.leszczynski@cert.pl>

From: Michal Leszczynski <michal.leszczynski@cert.pl>

Use the Intel Processor Trace feature to implement the
vmtrace_pt_* operations.

Signed-off-by: Michal Leszczynski <michal.leszczynski@cert.pl>
---
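
The four hooks added to hvm_function_table below are intended to be driven
from common code; in this series the callers are presumably the vmtrace
buffer allocation (patch 05) and the XEN_DOMCTL_vmtrace_op handler
(patch 08).  What follows is a minimal sketch, assuming a vCPU whose trace
buffer has already been set up, of how the wrappers from
asm-x86/hvm/hvm.h might be used; the helper name and error handling are
illustrative only and are not part of this patch:

    /*
     * Illustrative sketch only.  Assumes v->arch.vmtrace.pt_buf and
     * v->domain->vmtrace_pt_size were populated by the buffer-allocation
     * patch earlier in the series.
     */
    static int example_start_vmtrace(struct vcpu *v)
    {
        /* Allocate pt_state and arrange RTIT_CTL loading via the MSR lists. */
        int rc = vmtrace_init_pt(v);

        if ( rc )
            return rc;

        /* Mark tracing active; the RTIT MSRs are restored on vCPU load. */
        return vmtrace_control_pt(v, true);
    }

The current write position can later be read back with
vmtrace_get_pt_offset(), which returns output_mask.offset as refreshed on
each VM exit.
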
 xen/arch/x86/hvm/vmx/vmx.c         | 89 ++++++++++++++++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h      | 38 +++++++++++++
 xen/include/asm-x86/hvm/vmx/vmcs.h |  3 +
 xen/include/asm-x86/hvm/vmx/vmx.h  | 14 +++++
 4 files changed, 144 insertions(+)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index ab19d9424e..db3f051b40 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -508,11 +508,24 @@ static void vmx_restore_host_msrs(void)
 
 static void vmx_save_guest_msrs(struct vcpu *v)
 {
+    uint64_t rtit_ctl;
+
     /*
      * We cannot cache SHADOW_GS_BASE while the VCPU runs, as it can
      * be updated at any time via SWAPGS, which we cannot trap.
      */
     v->arch.hvm.vmx.shadow_gs = rdgsshadow();
+
+    if ( unlikely(v->arch.hvm.vmx.pt_state &&
+                  v->arch.hvm.vmx.pt_state->active) )
+    {
+        rdmsrl(MSR_RTIT_CTL, rtit_ctl);
+        BUG_ON(rtit_ctl & RTIT_CTL_TRACEEN);
+
+        rdmsrl(MSR_RTIT_STATUS, v->arch.hvm.vmx.pt_state->status);
+        rdmsrl(MSR_RTIT_OUTPUT_MASK,
+               v->arch.hvm.vmx.pt_state->output_mask.raw);
+    }
 }
 
 static void vmx_restore_guest_msrs(struct vcpu *v)
@@ -524,6 +537,17 @@ static void vmx_restore_guest_msrs(struct vcpu *v)
 
     if ( cpu_has_msr_tsc_aux )
         wrmsr_tsc_aux(v->arch.msrs->tsc_aux);
+
+    if ( unlikely(v->arch.hvm.vmx.pt_state &&
+                  v->arch.hvm.vmx.pt_state->active) )
+    {
+        wrmsrl(MSR_RTIT_OUTPUT_BASE,
+               v->arch.hvm.vmx.pt_state->output_base);
+        wrmsrl(MSR_RTIT_OUTPUT_MASK,
+               v->arch.hvm.vmx.pt_state->output_mask.raw);
+        wrmsrl(MSR_RTIT_STATUS,
+               v->arch.hvm.vmx.pt_state->status);
+    }
 }
 
 void vmx_update_cpu_exec_control(struct vcpu *v)
@@ -2240,6 +2264,60 @@ static bool vmx_get_pending_event(struct vcpu *v, struct x86_event *info)
     return true;
 }
 
+static int vmx_init_pt(struct vcpu *v)
+{
+    v->arch.hvm.vmx.pt_state = xzalloc(struct pt_state);
+
+    if ( !v->arch.hvm.vmx.pt_state )
+        return -ENOMEM;
+
+    if ( !v->arch.vmtrace.pt_buf )
+        return -EINVAL;
+
+    if ( !v->domain->vmtrace_pt_size )
+        return -EINVAL;
+
+    v->arch.hvm.vmx.pt_state->output_base = page_to_maddr(v->arch.vmtrace.pt_buf);
+    v->arch.hvm.vmx.pt_state->output_mask.raw = v->domain->vmtrace_pt_size - 1;
+
+    if ( vmx_add_host_load_msr(v, MSR_RTIT_CTL, 0) )
+        return -EFAULT;
+
+    if ( vmx_add_guest_msr(v, MSR_RTIT_CTL,
+                           RTIT_CTL_TRACEEN | RTIT_CTL_OS |
+                           RTIT_CTL_USR | RTIT_CTL_BRANCH_EN) )
+        return -EFAULT;
+
+    return 0;
+}
+
+static int vmx_destroy_pt(struct vcpu *v)
+{
+    if ( v->arch.hvm.vmx.pt_state )
+        xfree(v->arch.hvm.vmx.pt_state);
+
+    v->arch.hvm.vmx.pt_state = NULL;
+    return 0;
+}
+
+static int vmx_control_pt(struct vcpu *v, bool_t enable)
+{
+    if ( !v->arch.hvm.vmx.pt_state )
+        return -EINVAL;
+
+    v->arch.hvm.vmx.pt_state->active = enable;
+    return 0;
+}
+
+static int vmx_get_pt_offset(struct vcpu *v, uint64_t *offset)
+{
+    if ( !v->arch.hvm.vmx.pt_state )
+        return -EINVAL;
+
+    *offset = v->arch.hvm.vmx.pt_state->output_mask.offset;
+    return 0;
+}
+
 static struct hvm_function_table __initdata vmx_function_table = {
     .name                 = "VMX",
     .cpu_up_prepare       = vmx_cpu_up_prepare,
@@ -2295,6 +2373,10 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
     .altp2m_vcpu_emulate_ve = vmx_vcpu_emulate_ve,
     .altp2m_vcpu_emulate_vmfunc = vmx_vcpu_emulate_vmfunc,
+    .vmtrace_init_pt = vmx_init_pt,
+    .vmtrace_destroy_pt = vmx_destroy_pt,
+    .vmtrace_control_pt = vmx_control_pt,
+    .vmtrace_get_pt_offset = vmx_get_pt_offset,
     .tsc_scaling = {
         .max_ratio = VMX_TSC_MULTIPLIER_MAX,
     },
@@ -3674,6 +3756,13 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
 
     hvm_invalidate_regs_fields(regs);
 
+    if ( unlikely(v->arch.hvm.vmx.pt_state &&
+                  v->arch.hvm.vmx.pt_state->active) )
+    {
+        rdmsrl(MSR_RTIT_OUTPUT_MASK,
+               v->arch.hvm.vmx.pt_state->output_mask.raw);
+    }
+
     if ( paging_mode_hap(v->domain) )
     {
         /*
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 1eb377dd82..8f194889e5 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -214,6 +214,12 @@ struct hvm_function_table {
     bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
     int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);
 
+    /* vmtrace */
+    int (*vmtrace_init_pt)(struct vcpu *v);
+    int (*vmtrace_destroy_pt)(struct vcpu *v);
+    int (*vmtrace_control_pt)(struct vcpu *v, bool_t enable);
+    int (*vmtrace_get_pt_offset)(struct vcpu *v, uint64_t *offset);
+
     /*
      * Parameters and callbacks for hardware-assisted TSC scaling,
      * which are valid only when the hardware feature is available.
@@ -655,6 +661,38 @@ static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
     return false;
 }
 
+static inline int vmtrace_init_pt(struct vcpu *v)
+{
+    if ( hvm_funcs.vmtrace_init_pt )
+        return hvm_funcs.vmtrace_init_pt(v);
+
+    return -EOPNOTSUPP;
+}
+
+static inline int vmtrace_destroy_pt(struct vcpu *v)
+{
+    if ( hvm_funcs.vmtrace_destroy_pt )
+        return hvm_funcs.vmtrace_destroy_pt(v);
+
+    return -EOPNOTSUPP;
+}
+
+static inline int vmtrace_control_pt(struct vcpu *v, bool_t enable)
+{
+    if ( hvm_funcs.vmtrace_control_pt )
+        return hvm_funcs.vmtrace_control_pt(v, enable);
+
+    return -EOPNOTSUPP;
+}
+
+static inline int vmtrace_get_pt_offset(struct vcpu *v, uint64_t *offset)
+{
+    if ( hvm_funcs.vmtrace_get_pt_offset )
+        return hvm_funcs.vmtrace_get_pt_offset(v, offset);
+
+    return -EOPNOTSUPP;
+}
+
 /*
  * This must be defined as a macro instead of an inline function,
  * because it uses 'struct vcpu' and 'struct domain' which have
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 0e9a0b8de6..64c0d82614 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -186,6 +186,9 @@ struct vmx_vcpu {
      * pCPU and wakeup the related vCPU.
      */
     struct pi_blocking_vcpu pi_blocking;
+
+    /* State of processor trace feature */
+    struct pt_state      *pt_state;
 };
 
 int vmx_create_vmcs(struct vcpu *v);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 111ccd7e61..be7213d3c0 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -689,4 +689,18 @@ typedef union ldt_or_tr_instr_info {
     };
 } ldt_or_tr_instr_info_t;
 
+/* Processor Trace state per vCPU */
+struct pt_state {
+    bool_t active;
+    uint64_t status;
+    uint64_t output_base;
+    union {
+        uint64_t raw;
+        struct {
+            uint32_t size;
+            uint32_t offset;
+        };
+    } output_mask;
+};
+
 #endif /* __ASM_X86_HVM_VMX_VMX_H__ */
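
A note on the output_mask union above, as I read the single-range output
encoding used by this patch: the low 32 bits mirror the region mask written
to MSR_RTIT_OUTPUT_MASK, i.e. vmtrace_pt_size - 1 (which only forms a valid
mask when the size is a power of two, so the field named "size" actually
holds size - 1), while the high 32 bits hold the CPU's current write offset
into the buffer.  A worked example, purely for illustration:

    /*
     * Worked example, assuming vmtrace_pt_size == 0x10000 (64 KiB,
     * a power of two).  After vmx_init_pt():
     *
     *     output_mask.raw    == 0x000000000000ffff   (size - 1)
     *     output_mask.size   == 0x0000ffff           (low 32 bits: the mask)
     *     output_mask.offset == 0                    (high 32 bits)
     *
     * Each VM exit re-reads MSR_RTIT_OUTPUT_MASK in vmx_vmexit_handler(),
     * so output_mask.offset tracks the CPU's current write position in the
     * buffer, and vmx_get_pt_offset() reports exactly that value.
     */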
-- 
2.20.1



Thread overview: 75+ messages
2020-06-30 12:33 [PATCH v4 00/10] Implement support for external IPT monitoring Michał Leszczyński
2020-06-30 12:33 ` [PATCH v4 01/10] x86/vmx: add Intel PT MSR definitions Michał Leszczyński
2020-06-30 16:23   ` Jan Beulich
2020-06-30 17:37   ` Andrew Cooper
2020-06-30 18:03     ` Tamas K Lengyel
2020-06-30 18:27       ` Michał Leszczyński
2020-07-01 17:52   ` Andrew Cooper
2020-06-30 12:33 ` [PATCH v4 02/10] x86/vmx: add IPT cpu feature Michał Leszczyński
2020-07-01  9:49   ` Roger Pau Monné
2020-07-01 15:12   ` Julien Grall
2020-07-01 16:06     ` Andrew Cooper
2020-07-01 16:17       ` Julien Grall
2020-07-01 16:18         ` Julien Grall
2020-07-01 17:26           ` Andrew Cooper
2020-07-01 18:02             ` Julien Grall
2020-07-01 18:06               ` Andrew Cooper
2020-07-01 18:09                 ` Julien Grall
2020-07-02  8:29                   ` Jan Beulich
2020-07-02  8:42                     ` Julien Grall
2020-07-02  8:50                       ` Jan Beulich
2020-07-02  8:54                         ` Julien Grall
2020-07-02  9:18                           ` Jan Beulich
2020-07-02  9:57                             ` Julien Grall
2020-07-02 13:30                               ` Jan Beulich
2020-07-02 14:14                                 ` Julien Grall
2020-07-02 14:17                                   ` Jan Beulich
2020-07-02 14:31                                     ` Julien Grall
2020-07-02 20:28                                       ` Michał Leszczyński
2020-07-03  7:58                                         ` Julien Grall
2020-07-04 19:16                                           ` Michał Leszczyński
2020-07-01 21:42   ` Andrew Cooper
2020-07-02  8:10     ` Roger Pau Monné
2020-07-02  8:34       ` Jan Beulich
2020-07-02 20:29         ` Michał Leszczyński
2020-06-30 12:33 ` [PATCH v4 03/10] tools/libxl: add vmtrace_pt_size parameter Michał Leszczyński
2020-07-01 10:05   ` Roger Pau Monné
2020-07-02  9:00   ` Roger Pau Monné
2020-07-02 16:23     ` Michał Leszczyński
2020-07-03  9:44       ` Roger Pau Monné
2020-07-03  9:56         ` Jan Beulich
2020-07-03 10:11           ` Roger Pau Monné
2020-07-04 17:23             ` Julien Grall
2020-07-06  8:46               ` Jan Beulich
2020-07-07  8:44                 ` Julien Grall
2020-07-07  9:10                   ` Jan Beulich
2020-07-07  9:16                     ` Julien Grall
2020-07-07 11:17                       ` Michał Leszczyński
2020-07-07 11:21                         ` Jan Beulich
2020-07-07 11:35                           ` Michał Leszczyński
2020-07-02 10:24   ` Anthony PERARD
2020-07-04 17:48   ` Julien Grall
2020-06-30 12:33 ` Michał Leszczyński [this message]
2020-07-01 10:30   ` [PATCH v4 04/10] x86/vmx: implement processor tracing for VMX Roger Pau Monné
2020-06-30 12:33 ` [PATCH v4 05/10] common/domain: allocate vmtrace_pt_buffer Michał Leszczyński
2020-07-01 10:38   ` Roger Pau Monné
2020-07-01 15:35   ` Julien Grall
2020-06-30 12:33 ` [PATCH v4 06/10] memory: batch processing in acquire_resource() Michał Leszczyński
2020-07-01 10:46   ` Roger Pau Monné
2020-07-03 10:35   ` Julien Grall
2020-07-03 10:52     ` Paul Durrant
2020-07-03 11:17       ` Julien Grall
2020-07-03 11:22         ` Jan Beulich
2020-07-03 11:36           ` Julien Grall
2020-07-03 12:50             ` Jan Beulich
2020-07-03 11:40         ` Paul Durrant
2020-06-30 12:33 ` [PATCH v4 07/10] x86/mm: add vmtrace_buf resource type Michał Leszczyński
2020-07-01 10:52   ` Roger Pau Monné
2020-06-30 12:33 ` [PATCH v4 08/10] x86/domctl: add XEN_DOMCTL_vmtrace_op Michał Leszczyński
2020-07-01 11:00   ` Roger Pau Monné
2020-06-30 12:33 ` [PATCH v4 09/10] tools/libxc: add xc_vmtrace_* functions Michał Leszczyński
2020-07-21 10:52   ` Wei Liu
2020-06-30 12:33 ` [PATCH v4 10/10] tools/proctrace: add proctrace tool Michał Leszczyński
2020-07-02 15:10   ` Andrew Cooper
2020-07-21 10:52     ` Wei Liu
2020-06-30 12:48 ` [PATCH v4 00/10] Implement support for external IPT monitoring Hubert Jasudowicz
