All of lore.kernel.org
 help / color / mirror / Atom feed
From: Sergey Dyasli <sergey.dyasli@citrix.com>
To: xen-devel@lists.xen.org
Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>,
	Kevin Tian <kevin.tian@intel.com>,
	Jan Beulich <jbeulich@suse.com>,
	Jun Nakajima <jun.nakajima@intel.com>,
	Sergey Dyasli <sergey.dyasli@citrix.com>
Subject: [PATCH 2/2] x86/VMX: fix vmentry failure with TSX bits in LBR
Date: Wed, 25 Jan 2017 17:26:31 +0000	[thread overview]
Message-ID: <1485365191-26692-3-git-send-email-sergey.dyasli@citrix.com> (raw)
In-Reply-To: <1485365191-26692-1-git-send-email-sergey.dyasli@citrix.com>

During VM entry, H/W will automatically load guest's MSRs from MSR-load
area in the same way as they would be written by WRMSR.

However, under the following conditions:

    1. LBR (Last Branch Record) MSRs were placed in the MSR-load area
    2. Address format of LBR includes TSX bits 61:62
    3. CPU has TSX support disabled

VM entry will fail with a message in the log similar to:

    (XEN) [   97.239514] d1v0 vmentry failure (reason 0x80000022): MSR loading (entry 3)
    (XEN) [   97.239516]   msr 00000680 val 1fff800000102e60 (mbz 0)

This happens because of the following behaviour:

    - When capturing branches, LBR H/W will always clear bits 61:62
      regardless of the sign extension
    - For WRMSR, bits 61:62 are considered the part of the sign extension

This bug affects only certain pCPUs (e.g. Haswell) with vCPUs that
use LBR.  Fix it by sign-extending TSX bits in all LBR entries during
VM entry in affected cases.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/hvm/vmx/vmx.c         | 68 ++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/cpufeature.h   |  3 ++
 xen/include/asm-x86/hvm/vmx/vmcs.h |  2 ++
 3 files changed, 73 insertions(+)

diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index a5e5ffd..586aaca 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2241,6 +2241,25 @@ static void pi_notification_interrupt(struct cpu_user_regs *regs)
     raise_softirq(VCPU_KICK_SOFTIRQ);
 }
 
+static bool __read_mostly vmx_lbr_tsx_fixup_needed; /* set once at boot by vmx_lbr_tsx_fixup_check() */
+
+static void __init vmx_lbr_tsx_fixup_check(void)
+{
+    bool tsx_support = cpu_has_hle || cpu_has_rtm;
+    u64 caps;
+    u32 lbr_format;
+
+    if ( !cpu_has_pdcm ) /* no IA32_PERF_CAPABILITIES => LBR format unknown */
+        return;
+
+    rdmsrl(MSR_IA32_PERF_CAPABILITIES, caps);
+    /* Lower 6 bits define the format of the address in the LBR stack */
+    lbr_format = caps & 0x3f;
+
+    /* 000100B -- TSX info is reported in the upper bits of 'FROM' registers */
+    vmx_lbr_tsx_fixup_needed = !tsx_support && lbr_format == 0x4;
+}
+
 const struct hvm_function_table * __init start_vmx(void)
 {
     set_in_cr4(X86_CR4_VMXE);
@@ -2310,6 +2329,8 @@ const struct hvm_function_table * __init start_vmx(void)
 
     setup_vmcs_dump();
 
+    vmx_lbr_tsx_fixup_check();
+
     return &vmx_function_table;
 }
 
@@ -2833,7 +2854,11 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
             for ( ; (rc == 0) && lbr->count; lbr++ )
                 for ( i = 0; (rc == 0) && (i < lbr->count); i++ )
                     if ( (rc = vmx_add_guest_msr(lbr->base + i)) == 0 )
+                    {
                         vmx_disable_intercept_for_msr(v, lbr->base + i, MSR_TYPE_R | MSR_TYPE_W);
+                        if ( vmx_lbr_tsx_fixup_needed )
+                            v->arch.hvm_vmx.lbr_tsx_fixup_enabled = true;
+                    }
         }
 
         if ( (rc < 0) ||
@@ -3854,6 +3879,46 @@ out:
     }
 }
 
+static void vmx_lbr_tsx_fixup(void)
+{
+    static const u64 LBR_FROM_SIGNEXT_2MSB = (1ULL << 59) | (1ULL << 60); /* sign bits to replicate into 61:62 */
+    static u32 lbr_from_start = 0, lbr_from_end = 0, last_in_from_ip = 0;
+
+    const struct lbr_info *lbr;
+    const struct vcpu *curr = current;
+    const unsigned int msr_count = curr->arch.hvm_vmx.msr_count;
+    const struct vmx_msr_entry *msr_area = curr->arch.hvm_vmx.msr_area;
+    struct vmx_msr_entry *msr;
+
+    if ( lbr_from_start == ~0U ) /* ~0U => no LBR info on this CPU; nothing to fix up */
+        return;
+
+    if ( unlikely(lbr_from_start == 0) ) /* lazy one-time discovery of the LBR MSR index range */
+    {
+        lbr = last_branch_msr_get();
+        if ( lbr == NULL )
+        {
+            lbr_from_start = ~0U;
+            return;
+        }
+
+        /* Warning: this depends on struct lbr_info[] layout! */
+        last_in_from_ip = lbr[0].base;
+        lbr_from_start = lbr[3].base;
+        lbr_from_end = lbr_from_start + lbr[3].count;
+    }
+
+    if ( (msr = vmx_find_guest_msr(lbr_from_start)) != NULL )
+    {
+        /* Sign extend into bits 61:62 while preserving bit 63 */
+        for ( ; msr < msr_area + msr_count && msr->index < lbr_from_end; msr++ )
+            msr->data |= ((LBR_FROM_SIGNEXT_2MSB & msr->data) << 2);
+    }
+
+    if ( (msr = vmx_find_guest_msr(last_in_from_ip)) != NULL )
+        msr->data |= ((LBR_FROM_SIGNEXT_2MSB & msr->data) << 2);
+}
+
 void vmx_vmenter_helper(const struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
@@ -3910,6 +3975,9 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
     }
 
  out:
+    if ( unlikely(curr->arch.hvm_vmx.lbr_tsx_fixup_enabled) )
+        vmx_lbr_tsx_fixup();
+
     HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
 
     __vmwrite(GUEST_RIP,    regs->rip);
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 39ad582..8e14728 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -78,6 +78,9 @@
 #define cpu_has_cmp_legacy	boot_cpu_has(X86_FEATURE_CMP_LEGACY)
 #define cpu_has_tbm		boot_cpu_has(X86_FEATURE_TBM)
 #define cpu_has_itsc		boot_cpu_has(X86_FEATURE_ITSC)
+#define cpu_has_hle             boot_cpu_has(X86_FEATURE_HLE)
+#define cpu_has_rtm             boot_cpu_has(X86_FEATURE_RTM)
+#define cpu_has_pdcm            boot_cpu_has(X86_FEATURE_PDCM)
 
 enum _cache_type {
     CACHE_TYPE_NULL = 0,
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index d01099e..8d9db36 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -231,6 +231,8 @@ struct arch_vmx_struct {
      * pCPU and wakeup the related vCPU.
      */
     struct pi_blocking_vcpu pi_blocking;
+
+    bool lbr_tsx_fixup_enabled;
 };
 
 int vmx_create_vmcs(struct vcpu *v);
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

  parent reply	other threads:[~2017-01-25 17:26 UTC|newest]

Thread overview: 13+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-01-25 17:26 [PATCH 0/2] x86/VMX: fix for vmentry failure with TSX bits in LBR Sergey Dyasli
2017-01-25 17:26 ` [PATCH 1/2] x86/VMX: introduce vmx_find_guest_msr() Sergey Dyasli
2017-01-31 11:29   ` Jan Beulich
2017-01-31 11:49     ` Andrew Cooper
2017-01-31 11:54       ` Jan Beulich
2017-01-31 12:06         ` Andrew Cooper
2017-01-31 12:43           ` Jan Beulich
2017-02-01  9:38             ` Sergey Dyasli
2017-02-01 10:19               ` Jan Beulich
2017-02-01 10:43                 ` Sergey Dyasli
2017-02-01 10:58                   ` Jan Beulich
2017-01-25 17:26 ` Sergey Dyasli [this message]
2017-01-31 11:52   ` [PATCH 2/2] x86/VMX: fix vmentry failure with TSX bits in LBR Jan Beulich

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1485365191-26692-3-git-send-email-sergey.dyasli@citrix.com \
    --to=sergey.dyasli@citrix.com \
    --cc=Andrew.Cooper3@citrix.com \
    --cc=jbeulich@suse.com \
    --cc=jun.nakajima@intel.com \
    --cc=kevin.tian@intel.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.