From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
To: jbeulich@suse.com, kevin.tian@intel.com, suravee.suthikulpanit@amd.com
Cc: keir@xen.org, andrew.cooper3@citrix.com, tim@xen.org,
	xen-devel@lists.xen.org, jun.nakajima@intel.com,
	boris.ostrovsky@oracle.com
Subject: [PATCH v9 17/20] x86/VPMU: Add privileged PMU mode
Date: Fri,  8 Aug 2014 12:55:43 -0400
Message-ID: <1407516946-17833-18-git-send-email-boris.ostrovsky@oracle.com>
In-Reply-To: <1407516946-17833-1-git-send-email-boris.ostrovsky@oracle.com>

Add support for a privileged PMU mode, which allows the privileged domain (dom0)
to profile both itself (and the hypervisor) and the guests. While this mode is
on, profiling in the guests is disabled.
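
For illustration only (not part of this patch): a minimal dom0-side sketch of
switching into the new mode, assuming the XENPMU_mode_set op and the
xen_pmu_params layout from the earlier "x86/VPMU: Add public xenpmu.h" patch in
this series, and a HYPERVISOR_xenpmu_op hypercall wrapper whose exact name
depends on the dom0 environment:

    xen_pmu_params_t p = { .val = XENPMU_MODE_ALL };
    int rc;

    /* 0 on success; -EINVAL if, e.g., SELF and ALL are requested together */
    rc = HYPERVISOR_xenpmu_op(XENPMU_mode_set, &p);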

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
Tested-by: Dietmar Hahn <dietmar.hahn@ts.fujitsu.com>
---
 xen/arch/x86/hvm/vpmu.c  | 92 +++++++++++++++++++++++++++++++++++-------------
 xen/arch/x86/traps.c     | 11 ++++++
 xen/include/public/pmu.h |  3 ++
 3 files changed, 81 insertions(+), 25 deletions(-)

diff --git a/xen/arch/x86/hvm/vpmu.c b/xen/arch/x86/hvm/vpmu.c
index c9cf6c0..5b1584a 100644
--- a/xen/arch/x86/hvm/vpmu.c
+++ b/xen/arch/x86/hvm/vpmu.c
@@ -98,7 +98,9 @@ int vpmu_do_msr(unsigned int msr, uint64_t *msr_content, int rw)
     struct arch_vpmu_ops *ops = vpmu->arch_vpmu_ops;
     int ret = 0;
 
-    if ( !(vpmu_mode & XENPMU_MODE_SELF) )
+    if ( (vpmu_mode == XENPMU_MODE_OFF) ||
+         ((vpmu_mode & XENPMU_MODE_ALL) &&
+          !is_hardware_domain(current->domain)) )
         return 0;
 
     switch ( rw )
@@ -161,8 +163,12 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
     struct vcpu *sampled = current, *sampling;
     struct vpmu_struct *vpmu;
 
-    /* dom0 will handle interrupt for special domains (e.g. idle domain) */
-    if ( sampled->domain->domain_id >= DOMID_FIRST_RESERVED )
+    /*
+     * dom0 will handle interrupt for special domains (e.g. idle domain) or,
+     * in XENPMU_MODE_ALL, for everyone.
+     */
+    if ( (vpmu_mode & XENPMU_MODE_ALL) ||
+         (sampled->domain->domain_id >= DOMID_FIRST_RESERVED) )
     {
         sampling = choose_hwdom_vcpu();
         if ( !sampling )
@@ -172,7 +178,7 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
         sampling = sampled;
 
     vpmu = vcpu_vpmu(sampling);
-    if ( !is_hvm_domain(sampling->domain) )
+    if ( !is_hvm_domain(sampling->domain) || (vpmu_mode & XENPMU_MODE_ALL) )
     {
         /* PV(H) guest or dom0 is doing system profiling */
         const struct cpu_user_regs *gregs;
@@ -184,6 +190,7 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
             return 1;
 
         if ( is_pvh_domain(sampled->domain) &&
+             !(vpmu_mode & XENPMU_MODE_ALL) &&
              !vpmu->arch_vpmu_ops->do_interrupt(regs) )
             return 0;
 
@@ -192,32 +199,65 @@ int vpmu_do_interrupt(struct cpu_user_regs *regs)
         vpmu->arch_vpmu_ops->arch_vpmu_save(sampling);
         vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
 
-        /* Store appropriate registers in xenpmu_data */
-        if ( is_pv_32bit_domain(sampled->domain) )
+        if ( !is_hvm_domain(sampled->domain) )
         {
-            /*
-             * 32-bit dom0 cannot process Xen's addresses (which are 64 bit)
-             * and therefore we treat it the same way as a non-priviledged
-             * PV 32-bit domain.
-             */
-            struct compat_cpu_user_regs *cmp;
-
-            gregs = guest_cpu_user_regs();
-
-            cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs;
-            XLAT_cpu_user_regs(cmp, gregs);
+            /* Store appropriate registers in xenpmu_data */
+            if ( is_pv_32bit_domain(sampled->domain) )
+            {
+                /*
+                 * 32-bit dom0 cannot process Xen's addresses (which are 64 bit)
+                 * and therefore we treat it the same way as a non-privileged
+                 * PV 32-bit domain.
+                 */
+                struct compat_cpu_user_regs *cmp;
+
+                gregs = guest_cpu_user_regs();
+
+                cmp = (void *)&vpmu->xenpmu_data->pmu.r.regs;
+                XLAT_cpu_user_regs(cmp, gregs);
+
+                /* Adjust RPL for kernel mode */
+                if ( (cmp->cs & 3) == 1 )
+                    cmp->cs &= ~3;
+            }
+            else if ( !is_hardware_domain(sampled->domain) &&
+                      !is_idle_vcpu(sampled) )
+            {
+                /* 64-bit PV(H) unprivileged guest */
+                gregs = guest_cpu_user_regs();
+                vpmu->xenpmu_data->pmu.r.regs = *gregs;
+            }
+            else
+                vpmu->xenpmu_data->pmu.r.regs = *regs;
+
+            if ( !is_pvh_domain(sampled->domain) )
+            {
+                if ( sampled->arch.flags & TF_kernel_mode )
+                    sampling->arch.vpmu.xenpmu_data->pmu.r.regs.cs &= ~3;
+            }
+            else
+            {
+                struct segment_register seg_cs;
+
+                hvm_get_segment_register(sampled, x86_seg_cs, &seg_cs);
+                sampling->arch.vpmu.xenpmu_data->pmu.r.regs.cs = seg_cs.sel;
+            }
         }
-        else if ( !is_hardware_domain(sampled->domain) &&
-                  !is_idle_vcpu(sampled) )
+        else
         {
-            /* PV(H) guest */
+            /* HVM guest */
+            struct segment_register seg_cs;
+
             gregs = guest_cpu_user_regs();
             vpmu->xenpmu_data->pmu.r.regs = *gregs;
+
+            hvm_get_segment_register(sampled, x86_seg_cs, &seg_cs);
+            sampling->arch.vpmu.xenpmu_data->pmu.r.regs.cs = seg_cs.sel;
         }
-        else
-            vpmu->xenpmu_data->pmu.r.regs = *regs;
 
-        vpmu->xenpmu_data->domain_id = sampled->domain->domain_id;
+        vpmu->xenpmu_data->domain_id = (sampled == sampling) ?
+                                       DOMID_SELF :
+                                       sampled->domain->domain_id;
         vpmu->xenpmu_data->vcpu_id = sampled->vcpu_id;
         vpmu->xenpmu_data->pcpu_id = smp_processor_id();
 
@@ -569,7 +609,9 @@ long do_xenpmu_op(int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
         if ( copy_from_guest(&pmu_params, arg, 1) )
             return -EFAULT;
 
-        if ( pmu_params.val & ~XENPMU_MODE_SELF )
+        if ( (pmu_params.val & ~(XENPMU_MODE_SELF | XENPMU_MODE_ALL)) ||
+             ((pmu_params.val & XENPMU_MODE_SELF) &&
+              (pmu_params.val & XENPMU_MODE_ALL)) )
             return -EINVAL;
 
         /*
@@ -583,7 +625,7 @@ long do_xenpmu_op(int op, XEN_GUEST_HANDLE_PARAM(xen_pmu_params_t) arg)
         current_mode = vpmu_mode;
         vpmu_mode = pmu_params.val;
 
-        if ( vpmu_mode == XENPMU_MODE_OFF )
+        if ( (vpmu_mode == XENPMU_MODE_OFF) || (vpmu_mode == XENPMU_MODE_ALL) )
         {
             /*
              * Make sure all (non-dom0) VCPUs have unloaded their VPMUs. This
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 4c4292b..c9b6ab8 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2575,6 +2575,10 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             if ( vpmu_msr ||
                  ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !vpmu_msr) )
             {
+                if ( (vpmu_mode & XENPMU_MODE_ALL) &&
+                     !is_hardware_domain(v->domain) )
+                    break;
+
                 if ( vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_WRITE) )
                     goto fail;
                 break;
@@ -2698,6 +2702,13 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             if ( vpmu_msr ||
                  ((boot_cpu_data.x86_vendor != X86_VENDOR_AMD) && !vpmu_msr) )
             {
+                if ( (vpmu_mode & XENPMU_MODE_ALL) &&
+                     !is_hardware_domain(v->domain) )
+                {
+                    /* Don't leak PMU MSRs to unprivileged domains */
+                    regs->eax = regs->edx = 0;
+                    break;
+                }
                 if ( vpmu_do_msr(regs->ecx, &msr_content, VPMU_MSR_READ) )
                     goto fail;
 
diff --git a/xen/include/public/pmu.h b/xen/include/public/pmu.h
index 44bf43f..3023e52 100644
--- a/xen/include/public/pmu.h
+++ b/xen/include/public/pmu.h
@@ -50,9 +50,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t);
  * - XENPMU_MODE_OFF:   No PMU virtualization
  * - XENPMU_MODE_SELF:  Guests can profile themselves, dom0 profiles
  *                      itself and Xen
+ * - XENPMU_MODE_ALL:   Only dom0 has access to VPMU and it profiles
+ *                      everyone: itself, the hypervisor and the guests.
  */
 #define XENPMU_MODE_OFF           0
 #define XENPMU_MODE_SELF          (1<<0)
+#define XENPMU_MODE_ALL           (1<<1)
 
 /*
  * PMU features:
-- 
1.8.1.4
