All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Tian, Kevin" <kevin.tian@intel.com>
To: Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	"jbeulich@suse.com" <jbeulich@suse.com>,
	"suravee.suthikulpanit@amd.com" <suravee.suthikulpanit@amd.com>,
	"Dong, Eddie" <eddie.dong@intel.com>,
	"Aravind.Gopalakrishnan@amd.com" <Aravind.Gopalakrishnan@amd.com>
Cc: "andrew.cooper3@citrix.com" <andrew.cooper3@citrix.com>,
	"xen-devel@lists.xen.org" <xen-devel@lists.xen.org>,
	"keir@xen.org" <keir@xen.org>,
	"Nakajima, Jun" <jun.nakajima@intel.com>,
	"tim@xen.org" <tim@xen.org>
Subject: Re: [PATCH v10 13/20] x86/VPMU: When handling MSR accesses, leave fault injection to callers
Date: Thu, 18 Sep 2014 05:01:18 +0000	[thread overview]
Message-ID: <AADFC41AFE54684AB9EE6CBC0274A5D126086683@SHSMSX101.ccr.corp.intel.com> (raw)
In-Reply-To: <1409802080-6160-14-git-send-email-boris.ostrovsky@oracle.com>

> From: Boris Ostrovsky [mailto:boris.ostrovsky@oracle.com]
> Sent: Wednesday, September 03, 2014 8:41 PM
> 
> With this patch, a return value of 1 from vpmu_do_msr() will now indicate
> that an error was encountered during MSR processing (instead of stating
> that the access was to a VPMU register).
> 
> As part of this patch we also check the validity of certain MSR accesses
> right when we determine which register is being written, as opposed to
> postponing this until later.
> 
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>

Acked-by: Kevin Tian <kevin.tian@intel.com>

> ---
>  xen/arch/x86/hvm/svm/svm.c        |  6 ++-
>  xen/arch/x86/hvm/svm/vpmu.c       |  6 +--
>  xen/arch/x86/hvm/vmx/vmx.c        | 24 +++++++++---
>  xen/arch/x86/hvm/vmx/vpmu_core2.c | 78
> ++++++++++++++-------------------------
>  4 files changed, 53 insertions(+), 61 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 8b9a34e..326cad9 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1654,7 +1654,8 @@ static int svm_msr_read_intercept(unsigned int msr,
> uint64_t *msr_content)
>      case MSR_AMD_FAM15H_EVNTSEL3:
>      case MSR_AMD_FAM15H_EVNTSEL4:
>      case MSR_AMD_FAM15H_EVNTSEL5:
> -        vpmu_do_rdmsr(msr, msr_content);
> +        if ( vpmu_do_rdmsr(msr, msr_content) )
> +            goto gpf;
>          break;
> 
>      case MSR_AMD64_DR0_ADDRESS_MASK:
> @@ -1805,7 +1806,8 @@ static int svm_msr_write_intercept(unsigned int msr,
> uint64_t msr_content)
>      case MSR_AMD_FAM15H_EVNTSEL3:
>      case MSR_AMD_FAM15H_EVNTSEL4:
>      case MSR_AMD_FAM15H_EVNTSEL5:
> -        vpmu_do_wrmsr(msr, msr_content, 0);
> +        if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> +            goto gpf;
>          break;
> 
>      case MSR_IA32_MCx_MISC(4): /* Threshold register */
> diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
> index be3ab27..63c099c 100644
> --- a/xen/arch/x86/hvm/svm/vpmu.c
> +++ b/xen/arch/x86/hvm/svm/vpmu.c
> @@ -306,7 +306,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr,
> uint64_t msr_content,
>          is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu,
> VPMU_RUNNING) )
>      {
>          if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
> -            return 1;
> +            return 0;
>          vpmu_set(vpmu, VPMU_RUNNING);
> 
>          if ( has_hvm_container_domain(v->domain) &&
> is_msr_bitmap_on(vpmu) )
> @@ -336,7 +336,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr,
> uint64_t msr_content,
> 
>      /* Write to hw counters */
>      wrmsrl(msr, msr_content);
> -    return 1;
> +    return 0;
>  }
> 
>  static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> @@ -354,7 +354,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr,
> uint64_t *msr_content)
> 
>      rdmsrl(msr, *msr_content);
> 
> -    return 1;
> +    return 0;
>  }
> 
>  static int amd_vpmu_initialise(struct vcpu *v)
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 6ca7c32..3c63bb0 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2076,12 +2076,17 @@ static int vmx_msr_read_intercept(unsigned int
> msr, uint64_t *msr_content)
>          *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
>                         MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
>          /* Perhaps vpmu will change some bits. */
> +        /* FALLTHROUGH */
> +    case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
> +    case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
> +    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
> +    case
> MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +    case MSR_IA32_PEBS_ENABLE:
> +    case MSR_IA32_DS_AREA:
>          if ( vpmu_do_rdmsr(msr, msr_content) )
> -            goto done;
> +            goto gp_fault;
>          break;
>      default:
> -        if ( vpmu_do_rdmsr(msr, msr_content) )
> -            break;
>          if ( passive_domain_do_rdmsr(msr, msr_content) )
>              goto done;
>          switch ( long_mode_do_msr_read(msr, msr_content) )
> @@ -2254,7 +2259,7 @@ static int vmx_msr_write_intercept(unsigned int
> msr, uint64_t msr_content)
>          if ( msr_content & ~supported )
>          {
>              /* Perhaps some other bits are supported in vpmu. */
> -            if ( !vpmu_do_wrmsr(msr, msr_content, supported) )
> +            if ( vpmu_do_wrmsr(msr, msr_content, supported) )
>                  break;
>          }
>          if ( msr_content & IA32_DEBUGCTLMSR_LBR )
> @@ -2282,9 +2287,16 @@ static int vmx_msr_write_intercept(unsigned int
> msr, uint64_t msr_content)
>          if ( !nvmx_msr_write_intercept(msr, msr_content) )
>              goto gp_fault;
>          break;
> +    case MSR_P6_PERFCTR0...MSR_P6_PERFCTR1:
> +    case MSR_P6_EVNTSEL0...MSR_P6_EVNTSEL1:
> +    case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
> +    case
> MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +    case MSR_IA32_PEBS_ENABLE:
> +    case MSR_IA32_DS_AREA:
> +         if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> +            goto gp_fault;
> +        break;
>      default:
> -        if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> -            return X86EMUL_OKAY;
>          if ( passive_domain_do_wrmsr(msr, msr_content) )
>              return X86EMUL_OKAY;
> 
> diff --git a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> index 5c9fa19..8d8ce97 100644
> --- a/xen/arch/x86/hvm/vmx/vpmu_core2.c
> +++ b/xen/arch/x86/hvm/vmx/vpmu_core2.c
> @@ -468,36 +468,41 @@ static int core2_vpmu_do_wrmsr(unsigned int msr,
> uint64_t msr_content,
>                               IA32_DEBUGCTLMSR_BTS_OFF_USR;
>              if ( !(msr_content & ~supported) &&
>                   vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> -                return 1;
> +                return 0;
>              if ( (msr_content & supported) &&
>                   !vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
>                  printk(XENLOG_G_WARNING
>                         "%pv: Debug Store unsupported on this CPU\n",
>                         current);
>          }
> -        return 0;
> +        return 1;
>      }
> 
>      ASSERT(!supported);
> 
> +    if ( type == MSR_TYPE_COUNTER &&
> +         (msr_content &
> +          ~((1ull << core2_get_bitwidth_fix_count()) - 1)) )
> +        /* Writing unsupported bits to a fixed counter */
> +        return 1;
> +
>      core2_vpmu_cxt = vpmu->context;
>      enabled_cntrs = vpmu->priv_context;
>      switch ( msr )
>      {
>      case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
>          core2_vpmu_cxt->global_status &= ~msr_content;
> -        return 1;
> +        return 0;
>      case MSR_CORE_PERF_GLOBAL_STATUS:
>          gdprintk(XENLOG_INFO, "Can not write readonly MSR: "
>                   "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
> -        hvm_inject_hw_exception(TRAP_gp_fault, 0);
>          return 1;
>      case MSR_IA32_PEBS_ENABLE:
>          if ( msr_content & 1 )
>              gdprintk(XENLOG_WARNING, "Guest is trying to enable PEBS, "
>                       "which is not supported.\n");
>          core2_vpmu_cxt->pebs_enable = msr_content;
> -        return 1;
> +        return 0;
>      case MSR_IA32_DS_AREA:
>          if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
>          {
> @@ -506,18 +511,21 @@ static int core2_vpmu_do_wrmsr(unsigned int msr,
> uint64_t msr_content,
>                  gdprintk(XENLOG_WARNING,
>                           "Illegal address for IA32_DS_AREA: %#" PRIx64
> "x\n",
>                           msr_content);
> -                hvm_inject_hw_exception(TRAP_gp_fault, 0);
>                  return 1;
>              }
>              core2_vpmu_cxt->ds_area = msr_content;
>              break;
>          }
>          gdprintk(XENLOG_WARNING, "Guest setting of DTS is ignored.\n");
> -        return 1;
> +        return 0;
>      case MSR_CORE_PERF_GLOBAL_CTRL:
>          global_ctrl = msr_content;
>          break;
>      case MSR_CORE_PERF_FIXED_CTR_CTRL:
> +        if ( msr_content &
> +             ( ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) - 1)) )
> +            return 1;
> +
>          vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
> &global_ctrl);
>          *enabled_cntrs &= ~(((1ULL << fixed_pmc_cnt) - 1) << 32);
>          if ( msr_content != 0 )
> @@ -540,6 +548,9 @@ static int core2_vpmu_do_wrmsr(unsigned int msr,
> uint64_t msr_content,
>              struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
>                  vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
> 
> +            if ( msr_content & (~((1ull << 32) - 1)) )
> +                return 1;
> +
>              vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
> &global_ctrl);
> 
>              if ( msr_content & (1ULL << 22) )
> @@ -551,45 +562,17 @@ static int core2_vpmu_do_wrmsr(unsigned int msr,
> uint64_t msr_content,
>          }
>      }
> 
> +    if ( type != MSR_TYPE_GLOBAL )
> +        wrmsrl(msr, msr_content);
> +    else
> +        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
> msr_content);
> +
>      if ( (global_ctrl & *enabled_cntrs) || (core2_vpmu_cxt->ds_area != 0) )
>          vpmu_set(vpmu, VPMU_RUNNING);
>      else
>          vpmu_reset(vpmu, VPMU_RUNNING);
> 
> -    if ( type != MSR_TYPE_GLOBAL )
> -    {
> -        u64 mask;
> -        int inject_gp = 0;
> -        switch ( type )
> -        {
> -        case MSR_TYPE_ARCH_CTRL:      /* MSR_P6_EVNTSEL[0,...] */
> -            mask = ~((1ull << 32) - 1);
> -            if (msr_content & mask)
> -                inject_gp = 1;
> -            break;
> -        case MSR_TYPE_CTRL:           /* IA32_FIXED_CTR_CTRL */
> -            if  ( msr == MSR_IA32_DS_AREA )
> -                break;
> -            /* 4 bits per counter, currently 3 fixed counters implemented.
> */
> -            mask = ~((1ull << (fixed_pmc_cnt * FIXED_CTR_CTRL_BITS)) -
> 1);
> -            if (msr_content & mask)
> -                inject_gp = 1;
> -            break;
> -        case MSR_TYPE_COUNTER:        /* IA32_FIXED_CTR[0-2] */
> -            mask = ~((1ull << core2_get_bitwidth_fix_count()) - 1);
> -            if (msr_content & mask)
> -                inject_gp = 1;
> -            break;
> -        }
> -        if (inject_gp)
> -            hvm_inject_hw_exception(TRAP_gp_fault, 0);
> -        else
> -            wrmsrl(msr, msr_content);
> -    }
> -    else
> -        vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
> msr_content);
> -
> -    return 1;
> +    return 0;
>  }
> 
>  static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> @@ -617,19 +600,14 @@ static int core2_vpmu_do_rdmsr(unsigned int msr,
> uint64_t *msr_content)
>              rdmsrl(msr, *msr_content);
>          }
>      }
> -    else
> +    else if ( msr == MSR_IA32_MISC_ENABLE )
>      {
>          /* Extension for BTS */
> -        if ( msr == MSR_IA32_MISC_ENABLE )
> -        {
> -            if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> -                *msr_content &=
> ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
> -        }
> -        else
> -            return 0;
> +        if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_BTS) )
> +            *msr_content &= ~MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
>      }
> 
> -    return 1;
> +    return 0;
>  }
> 
>  static void core2_vpmu_do_cpuid(unsigned int input,
> --
> 1.8.1.4

  reply	other threads:[~2014-09-18  5:01 UTC|newest]

Thread overview: 72+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-09-04  3:41 [PATCH v10 00/20] x86/PMU: Xen PMU PV(H) support Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 01/20] common/symbols: Export hypervisor symbols to privileged guest Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 02/20] x86/VPMU: Manage VPMU_CONTEXT_SAVE flag in vpmu_save_force() Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 03/20] x86/VPMU: Set MSR bitmaps only for HVM/PVH guests Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 04/20] x86/VPMU: Make vpmu macros a bit more efficient Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 05/20] intel/VPMU: Clean up Intel VPMU code Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 06/20] vmx: Merge MSR management routines Boris Ostrovsky
2014-09-08 16:07   ` Jan Beulich
2014-09-08 17:28     ` Boris Ostrovsky
2014-09-09  9:11       ` Jan Beulich
2014-09-04  3:41 ` [PATCH v10 07/20] x86/VPMU: Handle APIC_LVTPC accesses Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 08/20] intel/VPMU: MSR_CORE_PERF_GLOBAL_CTRL should be initialized to zero Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 09/20] x86/VPMU: Add public xenpmu.h Boris Ostrovsky
2014-09-10 14:45   ` Jan Beulich
2014-09-10 17:23     ` Boris Ostrovsky
2014-09-11  6:39       ` Jan Beulich
2014-09-11 13:54         ` Boris Ostrovsky
2014-09-11 14:55           ` Jan Beulich
2014-09-11 15:26             ` Boris Ostrovsky
2014-09-11 15:59               ` Jan Beulich
2014-09-11 16:51                 ` Boris Ostrovsky
2014-09-12  6:50                   ` Jan Beulich
2014-09-12 14:21                     ` Boris Ostrovsky
2014-09-12 14:38                       ` Jan Beulich
2014-09-12 15:18                         ` Boris Ostrovsky
2014-09-15 11:56                           ` Konrad Rzeszutek Wilk
2014-09-15 13:06                             ` Jan Beulich
2014-09-16  1:00                               ` Boris Ostrovsky
2014-09-16  0:49                             ` Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 10/20] x86/VPMU: Make vpmu not HVM-specific Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 11/20] x86/VPMU: Interface for setting PMU mode and flags Boris Ostrovsky
2014-09-10 15:05   ` Jan Beulich
2014-09-10 17:37     ` Boris Ostrovsky
2014-09-11  6:44       ` Jan Beulich
2014-09-11 14:12         ` Boris Ostrovsky
2014-09-11 14:59           ` Jan Beulich
2014-09-11 16:10             ` Boris Ostrovsky
2014-09-12  6:49               ` Jan Beulich
2014-09-12 14:12                 ` Boris Ostrovsky
2014-09-12 14:39                   ` Jan Beulich
2014-09-12 15:03                     ` Boris Ostrovsky
2014-09-12 15:30                       ` Jan Beulich
2014-09-12 15:54                         ` Boris Ostrovsky
2014-09-12 16:05                           ` Jan Beulich
2014-09-12 11:41   ` Dietmar Hahn
2014-09-12 14:25     ` Boris Ostrovsky
2014-09-15 13:35       ` Dietmar Hahn
2014-09-18  4:11   ` Tian, Kevin
2014-09-18 21:50     ` Boris Ostrovsky
2014-09-19  6:51       ` Jan Beulich
2014-09-19 12:42         ` Boris Ostrovsky
2014-09-19 13:28           ` Jan Beulich
2014-09-22 22:29             ` Tian, Kevin
2014-09-22 22:32       ` Tian, Kevin
2014-09-22 22:48         ` Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 12/20] x86/VPMU: Initialize PMU for PV(H) guests Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 13/20] x86/VPMU: When handling MSR accesses, leave fault injection to callers Boris Ostrovsky
2014-09-18  5:01   ` Tian, Kevin [this message]
2014-09-04  3:41 ` [PATCH v10 14/20] x86/VPMU: Add support for PMU register handling on PV guests Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 15/20] x86/VPMU: Handle PMU interrupts for " Boris Ostrovsky
2014-09-10 15:30   ` Jan Beulich
2014-09-04  3:41 ` [PATCH v10 16/20] x86/VPMU: Merge vpmu_rdmsr and vpmu_wrmsr Boris Ostrovsky
2014-09-10 15:33   ` Jan Beulich
2014-09-18  4:16   ` Tian, Kevin
2014-09-04  3:41 ` [PATCH v10 17/20] x86/VPMU: Add privileged PMU mode Boris Ostrovsky
2014-09-10 15:39   ` Jan Beulich
2014-09-04  3:41 ` [PATCH v10 18/20] x86/VPMU: Save VPMU state for PV guests during context switch Boris Ostrovsky
2014-09-10 15:44   ` Jan Beulich
2014-09-04  3:41 ` [PATCH v10 19/20] x86/VPMU: NMI-based VPMU support Boris Ostrovsky
2014-09-04  3:41 ` [PATCH v10 20/20] x86/VPMU: Move VPMU files up from hvm/ directory Boris Ostrovsky
2014-09-10 15:48   ` Jan Beulich
2014-09-10 15:54 ` [PATCH v10 00/20] x86/PMU: Xen PMU PV(H) support Jan Beulich

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=AADFC41AFE54684AB9EE6CBC0274A5D126086683@SHSMSX101.ccr.corp.intel.com \
    --to=kevin.tian@intel.com \
    --cc=Aravind.Gopalakrishnan@amd.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=boris.ostrovsky@oracle.com \
    --cc=eddie.dong@intel.com \
    --cc=jbeulich@suse.com \
    --cc=jun.nakajima@intel.com \
    --cc=keir@xen.org \
    --cc=suravee.suthikulpanit@amd.com \
    --cc=tim@xen.org \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.