* [PATCH 0/3] x86/HVM: misc improvements
@ 2016-12-06 11:35 Jan Beulich
  2016-12-06 11:43 ` [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook Jan Beulich
                   ` (2 more replies)
  0 siblings, 3 replies; 29+ messages in thread
From: Jan Beulich @ 2016-12-06 11:35 UTC (permalink / raw)
  To: xen-devel
  Cc: Kevin Tian, Suravee Suthikulpanit, George Dunlap, Andrew Cooper,
	Jun Nakajima, Boris Ostrovsky

1: introduce hvm_get_cpl() and respective hook
2: support (emulate) UMIP
3: prefer structure assignment for seg reg copying

Signed-off-by: Jan Beulich <jbeulich@suse.com>



* [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook
  2016-12-06 11:35 [PATCH 0/3] x86/HVM: misc improvements Jan Beulich
@ 2016-12-06 11:43 ` Jan Beulich
  2016-12-06 12:16   ` Razvan Cojocaru
                     ` (4 more replies)
  2016-12-06 11:44 ` [PATCH 2/3] x86/HVM: support (emulate) UMIP Jan Beulich
  2016-12-06 11:45 ` [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying Jan Beulich
  2 siblings, 5 replies; 29+ messages in thread
From: Jan Beulich @ 2016-12-06 11:43 UTC (permalink / raw)
  To: xen-devel
  Cc: Kevin Tian, tamas, Jun Nakajima, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Tim Deegan, Suravee Suthikulpanit,
	Boris Ostrovsky


... instead of repeating the same code in various places (and getting
it wrong in some of them).

In vmx_inst_check_privilege() also stop open coding vmx_guest_x86_mode().
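
As an illustration of the pattern being replaced (a condensed sketch of
the hunks below, not an additional change):

    /* Before: callers open-coded the CPL read via a segment register. */
    struct segment_register sreg;

    hvm_get_segment_register(v, x86_seg_ss, &sreg);
    if ( sreg.attr.fields.dpl == 3 )
        /* user mode */;

    /* After: a single accessor backed by a per-vendor get_cpl() hook. */
    if ( hvm_get_cpl(v) == 3 )
        /* user mode */;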

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2744,7 +2744,7 @@ static void hvm_unmap_entry(void *p)
 static int hvm_load_segment_selector(
     enum x86_segment seg, uint16_t sel, unsigned int eflags)
 {
-    struct segment_register desctab, cs, segr;
+    struct segment_register desctab, segr;
     struct desc_struct *pdesc, desc;
     u8 dpl, rpl, cpl;
     bool_t writable;
@@ -2776,7 +2776,6 @@ static int hvm_load_segment_selector(
     if ( (seg == x86_seg_ldtr) && (sel & 4) )
         goto fail;
 
-    hvm_get_segment_register(v, x86_seg_cs, &cs);
     hvm_get_segment_register(
         v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
 
@@ -2801,7 +2800,7 @@ static int hvm_load_segment_selector(
 
         dpl = (desc.b >> 13) & 3;
         rpl = sel & 3;
-        cpl = cs.sel & 3;
+        cpl = hvm_get_cpl(v);
 
         switch ( seg )
         {
@@ -3708,16 +3707,10 @@ void hvm_cpuid(unsigned int input, unsig
 
 bool hvm_check_cpuid_faulting(struct vcpu *v)
 {
-    struct segment_register sreg;
-
     if ( !v->arch.cpuid_faulting )
         return false;
 
-    hvm_get_segment_register(v, x86_seg_ss, &sreg);
-    if ( sreg.attr.fields.dpl == 0 )
-        return false;
-
-    return true;
+    return hvm_get_cpl(v) > 0;
 }
 
 static uint64_t _hvm_rdtsc_intercept(void)
@@ -3729,13 +3722,10 @@ static uint64_t _hvm_rdtsc_intercept(voi
     if ( currd->arch.vtsc )
         switch ( hvm_guest_x86_mode(curr) )
         {
-            struct segment_register sreg;
-
         case 8:
         case 4:
         case 2:
-            hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-            if ( unlikely(sreg.attr.fields.dpl) )
+            if ( unlikely(hvm_get_cpl(curr)) )
             {
         case 1:
                 currd->arch.vtsc_usercount++;
@@ -4303,7 +4293,6 @@ int hvm_do_hypercall(struct cpu_user_reg
 {
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
-    struct segment_register sreg;
     int mode = hvm_guest_x86_mode(curr);
     unsigned long eax = regs->_eax;
 
@@ -4314,8 +4303,7 @@ int hvm_do_hypercall(struct cpu_user_reg
         /* Fallthrough to permission check. */
     case 4:
     case 2:
-        hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-        if ( unlikely(sreg.attr.fields.dpl) )
+        if ( unlikely(hvm_get_cpl(curr)) )
         {
     default:
             regs->eax = -EPERM;
--- a/xen/arch/x86/hvm/monitor.c
+++ b/xen/arch/x86/hvm/monitor.c
@@ -78,8 +78,7 @@ static inline unsigned long gfn_of_rip(u
     struct segment_register sreg;
     uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;
 
-    hvm_get_segment_register(curr, x86_seg_ss, &sreg);
-    if ( sreg.attr.fields.dpl == 3 )
+    if ( hvm_get_cpl(curr) == 3 )
         pfec |= PFEC_user_mode;
 
     hvm_get_segment_register(curr, x86_seg_cs, &sreg);
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -616,6 +616,11 @@ static void svm_sync_vmcb(struct vcpu *v
     svm_vmsave(arch_svm->vmcb);
 }
 
+static unsigned int svm_get_cpl(struct vcpu *v)
+{
+    return vmcb_get_cpl(v->arch.hvm_svm.vmcb);
+}
+
 static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
 {
@@ -2231,6 +2236,7 @@ static struct hvm_function_table __initd
     .get_interrupt_shadow = svm_get_interrupt_shadow,
     .set_interrupt_shadow = svm_set_interrupt_shadow,
     .guest_x86_mode       = svm_guest_x86_mode,
+    .get_cpl              = svm_get_cpl,
     .get_segment_register = svm_get_segment_register,
     .set_segment_register = svm_set_segment_register,
     .get_shadow_gs_base   = svm_get_shadow_gs_base,
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -557,7 +557,7 @@ static void vmx_update_guest_vendor(stru
     vmx_vmcs_exit(v);
 }
 
-static int vmx_guest_x86_mode(struct vcpu *v)
+int vmx_guest_x86_mode(struct vcpu *v)
 {
     unsigned long cs_ar_bytes;
 
@@ -921,6 +921,26 @@ static void vmx_ctxt_switch_to(struct vc
 }
 
 
+unsigned int vmx_get_cpl(void)
+{
+    unsigned long attr;
+
+    __vmread(GUEST_SS_AR_BYTES, &attr);
+
+    return (attr >> 5) & 3;
+}
+
+static unsigned int _vmx_get_cpl(struct vcpu *v)
+{
+    unsigned int cpl;
+
+    vmx_vmcs_enter(v);
+    cpl = vmx_get_cpl();
+    vmx_vmcs_exit(v);
+
+    return cpl;
+}
+
 /* SDM volume 3b section 22.3.1.2: we can only enter virtual 8086 mode
  * if all of CS, SS, DS, ES, FS and GS are 16bit ring-3 data segments.
  * The guest thinks it's got ring-0 segments, so we need to fudge
@@ -2089,6 +2109,7 @@ static struct hvm_function_table __initd
     .get_interrupt_shadow = vmx_get_interrupt_shadow,
     .set_interrupt_shadow = vmx_set_interrupt_shadow,
     .guest_x86_mode       = vmx_guest_x86_mode,
+    .get_cpl              = _vmx_get_cpl,
     .get_segment_register = vmx_get_segment_register,
     .set_segment_register = vmx_set_segment_register,
     .get_shadow_gs_base   = vmx_get_shadow_gs_base,
@@ -3837,19 +3858,13 @@ void vmx_vmexit_handler(struct cpu_user_
     /* fall through */
     default:
     exit_and_crash:
-        {
-            struct segment_register ss;
-
-            gdprintk(XENLOG_WARNING, "Bad vmexit (reason %#lx)\n",
-                     exit_reason);
+        gdprintk(XENLOG_WARNING, "Bad vmexit (reason %#lx)\n", exit_reason);
 
-            hvm_get_segment_register(v, x86_seg_ss, &ss);
-            if ( ss.attr.fields.dpl )
-                hvm_inject_hw_exception(TRAP_invalid_op,
-                                        X86_EVENT_NO_EC);
-            else
-                domain_crash(v->domain);
-        }
+        if ( vmx_get_cpl() )
+            hvm_inject_hw_exception(TRAP_invalid_op,
+                                    X86_EVENT_NO_EC);
+        else
+            domain_crash(v->domain);
         break;
     }
 
@@ -3871,12 +3886,9 @@ out:
     if ( mode == 8 ? !is_canonical_address(regs->rip)
                    : regs->rip != regs->_eip )
     {
-        struct segment_register ss;
-
         gprintk(XENLOG_WARNING, "Bad rIP %lx for mode %u\n", regs->rip, mode);
 
-        hvm_get_segment_register(v, x86_seg_ss, &ss);
-        if ( ss.attr.fields.dpl )
+        if ( vmx_get_cpl() )
         {
             __vmread(VM_ENTRY_INTR_INFO, &intr_info);
             if ( !(intr_info & INTR_INFO_VALID_MASK) )
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -349,7 +349,6 @@ static inline u32 __n2_secondary_exec_co
 static int vmx_inst_check_privilege(struct cpu_user_regs *regs, int vmxop_check)
 {
     struct vcpu *v = current;
-    struct segment_register cs;
 
     if ( vmxop_check )
     {
@@ -360,15 +359,12 @@ static int vmx_inst_check_privilege(stru
     else if ( !vcpu_2_nvmx(v).vmxon_region_pa )
         goto invalid_op;
 
-    hvm_get_segment_register(v, x86_seg_cs, &cs);
-
-    if ( (regs->eflags & X86_EFLAGS_VM) ||
-         (hvm_long_mode_enabled(v) && cs.attr.fields.l == 0) )
+    if ( vmx_guest_x86_mode(v) < (hvm_long_mode_enabled(v) ? 8 : 2) )
         goto invalid_op;
     else if ( nestedhvm_vcpu_in_guestmode(v) )
         goto vmexit;
 
-    if ( (cs.sel & 3) > 0 )
+    if ( vmx_get_cpl() > 0 )
         goto gp_fault;
 
     return X86EMUL_OKAY;
@@ -413,16 +409,10 @@ static int decode_vmx_inst(struct cpu_us
     }
     else
     {
-        bool_t mode_64bit = 0;
+        bool mode_64bit = (vmx_guest_x86_mode(v) == 8);
 
         decode->type = VMX_INST_MEMREG_TYPE_MEMORY;
 
-        if ( hvm_long_mode_enabled(v) )
-        {
-            hvm_get_segment_register(v, x86_seg_cs, &seg);
-            mode_64bit = seg.attr.fields.l;
-        }
-
         if ( info.fields.segment > VMX_SREG_GS )
             goto gp_fault;
         hvm_get_segment_register(v, sreg_to_index[info.fields.segment], &seg);
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -174,7 +174,6 @@ guest_walk_tables(struct vcpu *v, struct
 
     if ( is_hvm_domain(d) && !(pfec & PFEC_user_mode) )
     {
-        struct segment_register seg;
         const struct cpu_user_regs *regs = guest_cpu_user_regs();
 
         /* SMEP: kernel-mode instruction fetches from user-mode mappings
@@ -186,8 +185,6 @@ guest_walk_tables(struct vcpu *v, struct
         switch ( v->arch.smap_check_policy )
         {
         case SMAP_CHECK_HONOR_CPL_AC:
-            hvm_get_segment_register(v, x86_seg_ss, &seg);
-
             /*
              * SMAP: kernel-mode data accesses from user-mode mappings
              * should fault.
@@ -199,8 +196,7 @@ guest_walk_tables(struct vcpu *v, struct
              *   - Page fault in kernel mode
              */
             smap = hvm_smap_enabled(v) &&
-                   ((seg.attr.fields.dpl == 3) ||
-                    !(regs->eflags & X86_EFLAGS_AC));
+                   ((hvm_get_cpl(v) == 3) || !(regs->eflags & X86_EFLAGS_AC));
             break;
         case SMAP_CHECK_ENABLED:
             smap = hvm_smap_enabled(v);
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1779,7 +1779,7 @@ void *sh_emulate_map_dest(struct vcpu *v
 #ifndef NDEBUG
     /* We don't emulate user-mode writes to page tables. */
     if ( has_hvm_container_domain(d)
-         ? hvm_get_seg_reg(x86_seg_ss, sh_ctxt)->attr.fields.dpl == 3
+         ? hvm_get_cpl(v) == 3
          : !guest_kernel_mode(v, guest_cpu_user_regs()) )
     {
         gdprintk(XENLOG_DEBUG, "User-mode write to pagetable reached "
--- a/xen/arch/x86/oprofile/xenoprof.c
+++ b/xen/arch/x86/oprofile/xenoprof.c
@@ -84,15 +84,12 @@ int xenoprofile_get_mode(struct vcpu *cu
 
     switch ( hvm_guest_x86_mode(curr) )
     {
-        struct segment_register ss;
-
     case 0: /* real mode */
         return 1;
     case 1: /* vm86 mode */
         return 0;
     default:
-        hvm_get_segment_register(curr, x86_seg_ss, &ss);
-        return (ss.sel & 3) != 3;
+        return hvm_get_cpl(curr) != 3;
     }
 }
 
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -117,6 +117,7 @@ struct hvm_function_table {
     unsigned int (*get_interrupt_shadow)(struct vcpu *v);
     void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
     int (*guest_x86_mode)(struct vcpu *v);
+    unsigned int (*get_cpl)(struct vcpu *v);
     void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
     void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
@@ -358,6 +359,12 @@ static inline void hvm_flush_guest_tlbs(
 void hvm_hypercall_page_initialise(struct domain *d,
                                    void *hypercall_page);
 
+static inline unsigned int
+hvm_get_cpl(struct vcpu *v)
+{
+    return hvm_funcs.get_cpl(v);
+}
+
 void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                               struct segment_register *reg);
 void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -550,6 +550,9 @@ static inline int __vmxon(u64 addr)
     return rc;
 }
 
+int vmx_guest_x86_mode(struct vcpu *v);
+unsigned int vmx_get_cpl(void);
+
 void vmx_inject_extint(int trap, uint8_t source);
 void vmx_inject_nmi(void);
 
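For reference, vmx_get_cpl()'s "(attr >> 5) & 3" relies on the VMCS
access-rights layout (a summary from the SDM, not part of the patch):

    /*
     * GUEST_SS_AR_BYTES, low byte (segment access-rights format):
     *   bits 3:0  segment type
     *   bit  4    S (descriptor type)
     *   bits 6:5  DPL      <- extracted by (attr >> 5) & 3
     *   bit  7    P (present)
     * SS.DPL architecturally equals the CPL, hence reading it suffices.
     */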




* [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-06 11:35 [PATCH 0/3] x86/HVM: misc improvements Jan Beulich
  2016-12-06 11:43 ` [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook Jan Beulich
@ 2016-12-06 11:44 ` Jan Beulich
  2016-12-06 14:47   ` Andrew Cooper
  2016-12-06 16:23   ` Boris Ostrovsky
  2016-12-06 11:45 ` [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying Jan Beulich
  2 siblings, 2 replies; 29+ messages in thread
From: Jan Beulich @ 2016-12-06 11:44 UTC (permalink / raw)
  To: xen-devel
  Cc: Kevin Tian, Wei Liu, Jun Nakajima, George Dunlap, Andrew Cooper,
	Ian Jackson, Suravee Suthikulpanit, Boris Ostrovsky


There are three noteworthy drawbacks:
1) The intercepts we need to enable here are CPL-independent, i.e. we
   now have to emulate certain instructions for ring 0.
2) On VMX there's no intercept for SMSW, so the emulation isn't really
   complete there.
3) The CR4 write intercept on SVM is lower priority than all exception
   checks, so we need to intercept #GP.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
The tool stack change could be left out - it updates a table which is
rather out of date anyway.
---
This once again points out that handle_mmio() is rather badly named, as
it's about more than just MMIO. However, since hvm_emulate_one() is
already taken, I'm lacking an idea for a good alternative name.
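
Condensing the mechanism (a sketch derived from the hunks below, with
the exit handling paraphrased rather than quoted): when the host lacks
UMIP but the guest sets CR4.UMIP, the bit is hidden from the real CR4
and the affected instructions are intercepted instead - descriptor
table exiting on VMX, the IDTR/GDTR/LDTR/TR read intercepts (plus the
CR0 read intercept for SMSW) on SVM. Each such exit is then handled as:

    /* 'cpl' stands for vmx_get_cpl() resp. vmcb_get_cpl(vmcb).
     * UMIP semantics: user mode gets #GP(0); ring 0, where the
     * intercepts fire as well, falls back to instruction emulation. */
    if ( cpl > 0 || !handle_mmio() )
        hvm_inject_hw_exception(TRAP_gp_fault, 0);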

--- a/tools/libxl/libxl_cpuid.c
+++ b/tools/libxl/libxl_cpuid.c
@@ -158,6 +158,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
         {"de",           0x00000001, NA, CPUID_REG_EDX,  2,  1},
         {"vme",          0x00000001, NA, CPUID_REG_EDX,  1,  1},
         {"fpu",          0x00000001, NA, CPUID_REG_EDX,  0,  1},
+        {"umip",         0x00000007,  0, CPUID_REG_ECX,  2,  1},
         {"topoext",      0x80000001, NA, CPUID_REG_ECX, 22,  1},
         {"tbm",          0x80000001, NA, CPUID_REG_ECX, 21,  1},
         {"nodeid",       0x80000001, NA, CPUID_REG_ECX, 19,  1},
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
     __set_bit(X86_FEATURE_APIC, hvm_featureset);
 
     /*
+     * Xen can often provide UMIP emulation to HVM guests even if the host
+     * doesn't have such functionality.
+     */
+    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
+        __set_bit(X86_FEATURE_UMIP, hvm_featureset);
+
+    /*
      * On AMD, PV guests are entirely unable to use SYSENTER as Xen runs in
      * long mode (and init_amd() has cleared it out of host capabilities), but
      * HVM guests are able if running in protected mode.
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1008,6 +1008,8 @@ unsigned long hvm_cr4_guest_reserved_bit
               X86_CR4_OSFXSR : 0) |
              (leaf1_edx & cpufeat_mask(X86_FEATURE_SSE) ?
               X86_CR4_OSXMMEXCPT : 0) |
+             (leaf7_0_ecx & cpufeat_mask(X86_FEATURE_UMIP) ?
+              X86_CR4_UMIP : 0) |
              ((restore || nestedhvm_enabled(v->domain)) &&
               (leaf1_ecx & cpufeat_mask(X86_FEATURE_VMX)) ?
               X86_CR4_VMXE : 0) |
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -570,6 +570,29 @@ void svm_update_guest_cr(struct vcpu *v,
         if ( paging_mode_hap(v->domain) )
             value &= ~X86_CR4_PAE;
         value |= v->arch.hvm_vcpu.guest_cr[4];
+
+        if ( !cpu_has_umip )
+        {
+            u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
+
+            if ( v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_UMIP )
+            {
+                value &= ~X86_CR4_UMIP;
+                ASSERT(vmcb_get_cr_intercepts(vmcb) & CR_INTERCEPT_CR0_READ);
+                general1_intercepts |= GENERAL1_INTERCEPT_IDTR_READ |
+                                       GENERAL1_INTERCEPT_GDTR_READ |
+                                       GENERAL1_INTERCEPT_LDTR_READ |
+                                       GENERAL1_INTERCEPT_TR_READ;
+            }
+            else
+                general1_intercepts &= ~(GENERAL1_INTERCEPT_IDTR_READ |
+                                         GENERAL1_INTERCEPT_GDTR_READ |
+                                         GENERAL1_INTERCEPT_LDTR_READ |
+                                         GENERAL1_INTERCEPT_TR_READ);
+
+            vmcb_set_general1_intercepts(vmcb, general1_intercepts);
+        }
+
         vmcb_set_cr4(vmcb, value);
         break;
     default:
@@ -2444,6 +2467,13 @@ void svm_vmexit_handler(struct cpu_user_
         svm_fpu_dirty_intercept();
         break;  
 
+    case VMEXIT_EXCEPTION_GP:
+        HVMTRACE_1D(TRAP, TRAP_gp_fault);
+        /* We only care about ring 0 faults with error code zero. */
+        if ( vmcb->exitinfo1 || vmcb_get_cpl(vmcb) || !handle_mmio() )
+            hvm_inject_hw_exception(TRAP_gp_fault, vmcb->exitinfo1);
+        break;
+
     case VMEXIT_EXCEPTION_PF: {
         unsigned long va;
         va = vmcb->exitinfo2;
@@ -2551,7 +2581,25 @@ void svm_vmexit_handler(struct cpu_user_
             hvm_inject_hw_exception(TRAP_gp_fault, 0);
         break;
 
-    case VMEXIT_CR0_READ ... VMEXIT_CR15_READ:
+    case VMEXIT_IDTR_READ:
+    case VMEXIT_GDTR_READ:
+    case VMEXIT_LDTR_READ:
+    case VMEXIT_TR_READ:
+        ASSERT((v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_UMIP) && !cpu_has_umip);
+        if ( vmcb_get_cpl(vmcb) || !handle_mmio() )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        break;
+
+    case VMEXIT_CR0_READ:
+        if ( (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_UMIP) &&
+             vmcb_get_cpl(vmcb) )
+        {
+            ASSERT(!cpu_has_umip);
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            break;
+        }
+        /* fall through */
+    case VMEXIT_CR1_READ ... VMEXIT_CR15_READ:
     case VMEXIT_CR0_WRITE ... VMEXIT_CR15_WRITE:
         if ( cpu_has_svm_decode && (vmcb->exitinfo1 & (1ULL << 63)) )
             svm_vmexit_do_cr_access(vmcb, regs);
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -194,6 +194,10 @@ static int construct_vmcb(struct vcpu *v
         HVM_TRAP_MASK
         | (1U << TRAP_no_device);
 
+    /* For UMIP emulation intercept #GP to catch faulting CR4 writes. */
+    if ( !cpu_has_umip )
+        vmcb->_exception_intercepts |= 1U << TRAP_gp_fault;
+
     if ( paging_mode_hap(v->domain) )
     {
         vmcb->_np_enable = 1; /* enable nested paging */
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -227,6 +227,7 @@ static int vmx_init_vmcs_config(void)
         opt = (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                SECONDARY_EXEC_WBINVD_EXITING |
                SECONDARY_EXEC_ENABLE_EPT |
+               SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING |
                SECONDARY_EXEC_ENABLE_RDTSCP |
                SECONDARY_EXEC_PAUSE_LOOP_EXITING |
                SECONDARY_EXEC_ENABLE_INVPCID |
@@ -987,6 +988,10 @@ static int construct_vmcs(struct vcpu *v
 
     v->arch.hvm_vmx.secondary_exec_control = vmx_secondary_exec_control;
 
+    /* Disable descriptor table exiting: It's controlled by CR4.UMIP writes. */
+    v->arch.hvm_vmx.secondary_exec_control &=
+        ~SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
+
     /* Disable VPID for now: we decide when to enable it on VMENTER. */
     v->arch.hvm_vmx.secondary_exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
 
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1531,6 +1531,22 @@ static void vmx_update_guest_cr(struct v
             v->arch.hvm_vcpu.hw_cr[4] &=
                 ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
         }
+
+        if ( !cpu_has_umip )
+        {
+            if ( (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_UMIP) )
+            {
+                ASSERT(cpu_has_vmx_dt_exiting);
+                v->arch.hvm_vcpu.hw_cr[4] &= ~X86_CR4_UMIP;
+                v->arch.hvm_vmx.secondary_exec_control |=
+                    SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
+            }
+            else
+                v->arch.hvm_vmx.secondary_exec_control &=
+                    ~SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING;
+            vmx_update_secondary_exec_control(v);
+        }
+
         __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
         break;
     default:
@@ -3853,6 +3869,11 @@ void vmx_vmexit_handler(struct cpu_user_
 
     case EXIT_REASON_ACCESS_GDTR_OR_IDTR:
     case EXIT_REASON_ACCESS_LDTR_OR_TR:
+        ASSERT((v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_UMIP) && !cpu_has_umip);
+        if ( vmx_get_cpl() || !handle_mmio() )
+            hvm_inject_hw_exception(TRAP_gp_fault, 0);
+        break;
+
     case EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED:
     case EXIT_REASON_INVPCID:
     /* fall through */
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1360,7 +1360,7 @@ protmode_load_seg(
     const struct x86_emulate_ops *ops)
 {
     enum x86_segment sel_seg = (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr;
-    struct { uint32_t a, b; } desc;
+    struct { uint32_t a, b; } desc, desc_hi = {};
     uint8_t dpl, rpl;
     int cpl = get_cpl(ctxt, ops);
     uint32_t a_flag = 0x100;
@@ -1411,9 +1411,6 @@ protmode_load_seg(
         /* System segments must have S flag == 0. */
         if ( desc.b & (1u << 12) )
             goto raise_exn;
-        /* We do not support 64-bit descriptor types. */
-        if ( in_longmode(ctxt, ops) )
-            return X86EMUL_UNHANDLEABLE;
     }
     /* User segments must have S flag == 1. */
     else if ( !(desc.b & (1u << 12)) )
@@ -1487,6 +1484,33 @@ protmode_load_seg(
         goto raise_exn;
     }
 
+    if ( !is_x86_user_segment(seg) )
+    {
+        int lm = in_longmode(ctxt, ops);
+
+        if ( lm < 0 )
+            return X86EMUL_UNHANDLEABLE;
+        if ( lm )
+        {
+            switch ( rc = ops->read(sel_seg, (sel & 0xfff8) + 8,
+                                    &desc_hi, sizeof(desc_hi), ctxt) )
+            {
+            case X86EMUL_OKAY:
+                break;
+
+            case X86EMUL_EXCEPTION:
+                if ( !ctxt->event_pending )
+                    goto raise_exn;
+                /* fall through */
+            default:
+                return rc;
+            }
+            if ( (desc_hi.b & 0x00001f00) ||
+                 !is_canonical_address((uint64_t)desc_hi.a << 32) )
+                goto raise_exn;
+        }
+    }
+
     /* Ensure Accessed flag is set. */
     if ( a_flag && !(desc.b & a_flag) )
     {
@@ -1513,7 +1537,8 @@ protmode_load_seg(
         desc.b = new_desc_b;
     }
 
-    sreg->base = (((desc.b <<  0) & 0xff000000u) |
+    sreg->base = (((uint64_t)desc_hi.a << 32) |
+                  ((desc.b <<  0) & 0xff000000u) |
                   ((desc.b << 16) & 0x00ff0000u) |
                   ((desc.a >> 16) & 0x0000ffffu));
     sreg->attr.bytes = (((desc.b >>  8) & 0x00ffu) |
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -72,6 +72,7 @@ XEN_CPUFEATURE(XEN_SMAP,        (FSCAPIN
 #define cpu_has_smep            boot_cpu_has(X86_FEATURE_SMEP)
 #define cpu_has_smap            boot_cpu_has(X86_FEATURE_SMAP)
 #define cpu_has_fpu_sel         (!boot_cpu_has(X86_FEATURE_NO_FPU_SEL))
+#define cpu_has_umip            boot_cpu_has(X86_FEATURE_UMIP)
 #define cpu_has_ffxsr           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) \
                                  && boot_cpu_has(X86_FEATURE_FFXSR))
 #define cpu_has_x2apic          boot_cpu_has(X86_FEATURE_X2APIC)
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -346,6 +346,8 @@ extern u64 vmx_ept_vpid_cap;
     (vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
 #define cpu_has_vmx_ept \
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT)
+#define cpu_has_vmx_dt_exiting \
+    (vmx_secondary_exec_control & SECONDARY_EXEC_DESCRIPTOR_TABLE_EXITING)
 #define cpu_has_vmx_vpid \
     (vmx_secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
 #define cpu_has_monitor_trap_flag \
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -82,6 +82,7 @@
 #define X86_CR4_PCE        0x00000100 /* enable performance counters at ipl 3 */
 #define X86_CR4_OSFXSR     0x00000200 /* enable fast FPU save and restore */
 #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
+#define X86_CR4_UMIP       0x00000800 /* disable user mode S[GIL]DT/STR/SMSW */
 #define X86_CR4_VMXE       0x00002000 /* enable VMX */
 #define X86_CR4_SMXE       0x00004000 /* enable SMX */
 #define X86_CR4_FSGSBASE   0x00010000 /* enable {rd,wr}{fs,gs}base */
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -224,6 +224,7 @@ XEN_CPUFEATURE(AVX512VL,      5*32+31) /
 /* Intel-defined CPU features, CPUID level 0x00000007:0.ecx, word 6 */
 XEN_CPUFEATURE(PREFETCHWT1,   6*32+ 0) /*A  PREFETCHWT1 instruction */
 XEN_CPUFEATURE(AVX512VBMI,    6*32+ 1) /*A  AVX-512 Vector Byte Manipulation Instrs */
+XEN_CPUFEATURE(UMIP,          6*32+ 2) /*S  User Mode Instruction Prevention */
 XEN_CPUFEATURE(PKU,           6*32+ 3) /*H  Protection Keys for Userspace */
 XEN_CPUFEATURE(OSPKE,         6*32+ 4) /*!  OS Protection Keys Enable */
 

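To make the guest-visible semantics concrete: with CR4.UMIP set,
SGDT/SIDT/SLDT/STR/SMSW raise #GP(0) at CPL > 0, while CPL 0 keeps
working through Xen's emulation (except that on VMX, SMSW at CPL > 0
goes unintercepted - drawback 2 above). A hypothetical in-guest probe,
with names that are illustrative rather than taken from this series:

    /* Hypothetical ring-0 probe: must keep returning CR0's low bits
     * even while CR4.UMIP is set, courtesy of the emulation path. */
    static inline unsigned long read_msw(void)
    {
        unsigned long msw;

        asm volatile ( "smsw %0" : "=r" (msw) );
        return msw;
    }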



* [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying
  2016-12-06 11:35 [PATCH 0/3] x86/HVM: misc improvements Jan Beulich
  2016-12-06 11:43 ` [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook Jan Beulich
  2016-12-06 11:44 ` [PATCH 2/3] x86/HVM: support (emulate) UMIP Jan Beulich
@ 2016-12-06 11:45 ` Jan Beulich
  2016-12-06 11:47   ` Paul Durrant
                     ` (2 more replies)
  2 siblings, 3 replies; 29+ messages in thread
From: Jan Beulich @ 2016-12-06 11:45 UTC (permalink / raw)
  To: xen-devel
  Cc: George Dunlap, Andrew Cooper, Boris Ostrovsky, Paul Durrant,
	Suravee Suthikulpanit


This makes things type-safe.
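
A minimal illustration of what the compiler now checks (the memcpy slip
is hypothetical, not taken from the code being changed):

    struct segment_register dst;

    dst = vmcb->cs;                          /* type-checked, size implied */

    memcpy(&dst, &vmcb->cs, sizeof(*vmcb));  /* compiles fine, yet copies
                                                sizeof(struct vmcb_struct)
                                                bytes and overruns dst */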

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1434,7 +1434,8 @@ static int hvmemul_read_segment(
     if ( IS_ERR(sreg) )
          return -PTR_ERR(sreg);
 
-    memcpy(reg, sreg, sizeof(struct segment_register));
+    *reg = *sreg;
+
     return X86EMUL_OKAY;
 }
 
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -654,39 +654,39 @@ static void svm_get_segment_register(str
     switch ( seg )
     {
     case x86_seg_cs:
-        memcpy(reg, &vmcb->cs, sizeof(*reg));
+        *reg = vmcb->cs;
         break;
     case x86_seg_ds:
-        memcpy(reg, &vmcb->ds, sizeof(*reg));
+        *reg = vmcb->ds;
         break;
     case x86_seg_es:
-        memcpy(reg, &vmcb->es, sizeof(*reg));
+        *reg = vmcb->es;
         break;
     case x86_seg_fs:
         svm_sync_vmcb(v);
-        memcpy(reg, &vmcb->fs, sizeof(*reg));
+        *reg = vmcb->fs;
         break;
     case x86_seg_gs:
         svm_sync_vmcb(v);
-        memcpy(reg, &vmcb->gs, sizeof(*reg));
+        *reg = vmcb->gs;
         break;
     case x86_seg_ss:
-        memcpy(reg, &vmcb->ss, sizeof(*reg));
+        *reg = vmcb->ss;
         reg->attr.fields.dpl = vmcb->_cpl;
         break;
     case x86_seg_tr:
         svm_sync_vmcb(v);
-        memcpy(reg, &vmcb->tr, sizeof(*reg));
+        *reg = vmcb->tr;
         break;
     case x86_seg_gdtr:
-        memcpy(reg, &vmcb->gdtr, sizeof(*reg));
+        *reg = vmcb->gdtr;
         break;
     case x86_seg_idtr:
-        memcpy(reg, &vmcb->idtr, sizeof(*reg));
+        *reg = vmcb->idtr;
         break;
     case x86_seg_ldtr:
         svm_sync_vmcb(v);
-        memcpy(reg, &vmcb->ldtr, sizeof(*reg));
+        *reg = vmcb->ldtr;
         break;
     default:
         BUG();
@@ -729,26 +729,26 @@ static void svm_set_segment_register(str
     switch ( seg )
     {
     case x86_seg_cs:
-        memcpy(&vmcb->cs, reg, sizeof(*reg));
+        vmcb->cs = *reg;
         break;
     case x86_seg_ds:
-        memcpy(&vmcb->ds, reg, sizeof(*reg));
+        vmcb->ds = *reg;
         break;
     case x86_seg_es:
-        memcpy(&vmcb->es, reg, sizeof(*reg));
+        vmcb->es = *reg;
         break;
     case x86_seg_fs:
-        memcpy(&vmcb->fs, reg, sizeof(*reg));
+        vmcb->fs = *reg;
         break;
     case x86_seg_gs:
-        memcpy(&vmcb->gs, reg, sizeof(*reg));
+        vmcb->gs = *reg;
         break;
     case x86_seg_ss:
-        memcpy(&vmcb->ss, reg, sizeof(*reg));
+        vmcb->ss = *reg;
         vmcb->_cpl = vmcb->ss.attr.fields.dpl;
         break;
     case x86_seg_tr:
-        memcpy(&vmcb->tr, reg, sizeof(*reg));
+        vmcb->tr = *reg;
         break;
     case x86_seg_gdtr:
         vmcb->gdtr.base = reg->base;
@@ -759,7 +759,7 @@ static void svm_set_segment_register(str
         vmcb->idtr.limit = reg->limit;
         break;
     case x86_seg_ldtr:
-        memcpy(&vmcb->ldtr, reg, sizeof(*reg));
+        vmcb->ldtr = *reg;
         break;
     default:
         BUG();





* Re: [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying
  2016-12-06 11:45 ` [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying Jan Beulich
@ 2016-12-06 11:47   ` Paul Durrant
  2016-12-06 13:55   ` Andrew Cooper
  2016-12-06 16:25   ` Boris Ostrovsky
  2 siblings, 0 replies; 29+ messages in thread
From: Paul Durrant @ 2016-12-06 11:47 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Andrew Cooper, Boris Ostrovsky, George Dunlap, Suravee Suthikulpanit

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 06 December 2016 11:45
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>; Andrew
> Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant
> <Paul.Durrant@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
> Boris Ostrovsky <boris.ostrovsky@oracle.com>
> Subject: [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg
> copying
> 
> This makes things type safe.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1434,7 +1434,8 @@ static int hvmemul_read_segment(
>      if ( IS_ERR(sreg) )
>           return -PTR_ERR(sreg);
> 
> -    memcpy(reg, sreg, sizeof(struct segment_register));
> +    *reg = *sreg;
> +
>      return X86EMUL_OKAY;
>  }
> 
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -654,39 +654,39 @@ static void svm_get_segment_register(str
>      switch ( seg )
>      {
>      case x86_seg_cs:
> -        memcpy(reg, &vmcb->cs, sizeof(*reg));
> +        *reg = vmcb->cs;
>          break;
>      case x86_seg_ds:
> -        memcpy(reg, &vmcb->ds, sizeof(*reg));
> +        *reg = vmcb->ds;
>          break;
>      case x86_seg_es:
> -        memcpy(reg, &vmcb->es, sizeof(*reg));
> +        *reg = vmcb->es;
>          break;
>      case x86_seg_fs:
>          svm_sync_vmcb(v);
> -        memcpy(reg, &vmcb->fs, sizeof(*reg));
> +        *reg = vmcb->fs;
>          break;
>      case x86_seg_gs:
>          svm_sync_vmcb(v);
> -        memcpy(reg, &vmcb->gs, sizeof(*reg));
> +        *reg = vmcb->gs;
>          break;
>      case x86_seg_ss:
> -        memcpy(reg, &vmcb->ss, sizeof(*reg));
> +        *reg = vmcb->ss;
>          reg->attr.fields.dpl = vmcb->_cpl;
>          break;
>      case x86_seg_tr:
>          svm_sync_vmcb(v);
> -        memcpy(reg, &vmcb->tr, sizeof(*reg));
> +        *reg = vmcb->tr;
>          break;
>      case x86_seg_gdtr:
> -        memcpy(reg, &vmcb->gdtr, sizeof(*reg));
> +        *reg = vmcb->gdtr;
>          break;
>      case x86_seg_idtr:
> -        memcpy(reg, &vmcb->idtr, sizeof(*reg));
> +        *reg = vmcb->idtr;
>          break;
>      case x86_seg_ldtr:
>          svm_sync_vmcb(v);
> -        memcpy(reg, &vmcb->ldtr, sizeof(*reg));
> +        *reg = vmcb->ldtr;
>          break;
>      default:
>          BUG();
> @@ -729,26 +729,26 @@ static void svm_set_segment_register(str
>      switch ( seg )
>      {
>      case x86_seg_cs:
> -        memcpy(&vmcb->cs, reg, sizeof(*reg));
> +        vmcb->cs = *reg;
>          break;
>      case x86_seg_ds:
> -        memcpy(&vmcb->ds, reg, sizeof(*reg));
> +        vmcb->ds = *reg;
>          break;
>      case x86_seg_es:
> -        memcpy(&vmcb->es, reg, sizeof(*reg));
> +        vmcb->es = *reg;
>          break;
>      case x86_seg_fs:
> -        memcpy(&vmcb->fs, reg, sizeof(*reg));
> +        vmcb->fs = *reg;
>          break;
>      case x86_seg_gs:
> -        memcpy(&vmcb->gs, reg, sizeof(*reg));
> +        vmcb->gs = *reg;
>          break;
>      case x86_seg_ss:
> -        memcpy(&vmcb->ss, reg, sizeof(*reg));
> +        vmcb->ss = *reg;
>          vmcb->_cpl = vmcb->ss.attr.fields.dpl;
>          break;
>      case x86_seg_tr:
> -        memcpy(&vmcb->tr, reg, sizeof(*reg));
> +        vmcb->tr = *reg;
>          break;
>      case x86_seg_gdtr:
>          vmcb->gdtr.base = reg->base;
> @@ -759,7 +759,7 @@ static void svm_set_segment_register(str
>          vmcb->idtr.limit = reg->limit;
>          break;
>      case x86_seg_ldtr:
> -        memcpy(&vmcb->ldtr, reg, sizeof(*reg));
> +        vmcb->ldtr = *reg;
>          break;
>      default:
>          BUG();
> 
> 


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook
  2016-12-06 11:43 ` [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook Jan Beulich
@ 2016-12-06 12:16   ` Razvan Cojocaru
  2016-12-06 13:49   ` Andrew Cooper
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 29+ messages in thread
From: Razvan Cojocaru @ 2016-12-06 12:16 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, tamas, Suravee Suthikulpanit, George Dunlap,
	Andrew Cooper, Tim Deegan, Jun Nakajima, Boris Ostrovsky

On 12/06/2016 01:43 PM, Jan Beulich wrote:
> ... instead of repeating the same code in various places (and getting
> it  wrong in some of them).
> 
> In vmx_inst_check_privilege() also stop open coding vmx_get_x86_mode().
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>

May I just say I find this very useful.


Thanks,
Razvan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook
  2016-12-06 11:43 ` [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook Jan Beulich
  2016-12-06 12:16   ` Razvan Cojocaru
@ 2016-12-06 13:49   ` Andrew Cooper
  2016-12-06 14:07     ` Jan Beulich
  2016-12-06 15:49   ` Boris Ostrovsky
                     ` (2 subsequent siblings)
  4 siblings, 1 reply; 29+ messages in thread
From: Andrew Cooper @ 2016-12-06 13:49 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, tamas, Jun Nakajima, Razvan Cojocaru, George Dunlap,
	Tim Deegan, Suravee Suthikulpanit, Boris Ostrovsky

On 06/12/16 11:43, Jan Beulich wrote:
> @@ -921,6 +921,26 @@ static void vmx_ctxt_switch_to(struct vc
>  }
>  
>  
> +unsigned int vmx_get_cpl(void)
> +{
> +    unsigned long attr;
> +
> +    __vmread(GUEST_SS_AR_BYTES, &attr);
> +
> +    return (attr >> 5) & 3;
> +}
> +
> +static unsigned int _vmx_get_cpl(struct vcpu *v)
> +{
> +    unsigned int cpl;
> +
> +    vmx_vmcs_enter(v);
> +    cpl = vmx_get_cpl();
> +    vmx_vmcs_exit(v);
> +
> +    return cpl;
> +}

Our usual convention for functions with leading underscores would have
these the other way around.

As an alternative, how about vmx_hvmfunc_get_cpl() ? It is never called
directly.

Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
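
For reference, the (attr >> 5) & 3 in the quoted vmx_get_cpl() relies on
the architectural segment access-rights layout; a sketch of the relevant
low bits (standard encoding, not Xen-specific code):

/* VMX access-rights (AR) bytes for SS, low byte:
 *   bits 0-3  segment type
 *   bit  4    S (code/data vs. system descriptor)
 *   bits 5-6  DPL; for SS the DPL architecturally equals the CPL
 *   bit  7    P (present)
 * Hence the extraction used above: */
#define ar_bytes_to_cpl(attr) (((attr) >> 5) & 3)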

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying
  2016-12-06 11:45 ` [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying Jan Beulich
  2016-12-06 11:47   ` Paul Durrant
@ 2016-12-06 13:55   ` Andrew Cooper
  2016-12-06 16:25   ` Boris Ostrovsky
  2 siblings, 0 replies; 29+ messages in thread
From: Andrew Cooper @ 2016-12-06 13:55 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: George Dunlap, Boris Ostrovsky, Paul Durrant, Suravee Suthikulpanit

On 06/12/16 11:45, Jan Beulich wrote:
> This makes things type safe.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook
  2016-12-06 13:49   ` Andrew Cooper
@ 2016-12-06 14:07     ` Jan Beulich
  2016-12-06 14:10       ` Andrew Cooper
  0 siblings, 1 reply; 29+ messages in thread
From: Jan Beulich @ 2016-12-06 14:07 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Kevin Tian, tamas, Jun Nakajima, Razvan Cojocaru, George Dunlap,
	Tim Deegan, Suravee Suthikulpanit, xen-devel, Boris Ostrovsky

>>> On 06.12.16 at 14:49, <andrew.cooper3@citrix.com> wrote:
> On 06/12/16 11:43, Jan Beulich wrote:
>> @@ -921,6 +921,26 @@ static void vmx_ctxt_switch_to(struct vc
>>  }
>>  
>>  
>> +unsigned int vmx_get_cpl(void)
>> +{
>> +    unsigned long attr;
>> +
>> +    __vmread(GUEST_SS_AR_BYTES, &attr);
>> +
>> +    return (attr >> 5) & 3;
>> +}
>> +
>> +static unsigned int _vmx_get_cpl(struct vcpu *v)
>> +{
>> +    unsigned int cpl;
>> +
>> +    vmx_vmcs_enter(v);
>> +    cpl = vmx_get_cpl();
>> +    vmx_vmcs_exit(v);
>> +
>> +    return cpl;
>> +}
> 
> Our usual convention for functions with leading underscores would have
> these the other way around.

Our usual convention doesn't fit here: The static function is the
only one allowed to have an underscore prefix.

> As an alternative, how about vmx_hvmfunc_get_cpl() ? It is never called
> directly.

I don't like this. If anything, vmx_do_get_cpl(), albeit it's only
slightly better imo.

> Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Let me know if you can live with the existing naming.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook
  2016-12-06 14:07     ` Jan Beulich
@ 2016-12-06 14:10       ` Andrew Cooper
  0 siblings, 0 replies; 29+ messages in thread
From: Andrew Cooper @ 2016-12-06 14:10 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, tamas, Suravee Suthikulpanit, Razvan Cojocaru,
	George Dunlap, Tim Deegan, Jun Nakajima, xen-devel,
	Boris Ostrovsky

On 06/12/16 14:07, Jan Beulich wrote:
>>>> On 06.12.16 at 14:49, <andrew.cooper3@citrix.com> wrote:
>> On 06/12/16 11:43, Jan Beulich wrote:
>>> @@ -921,6 +921,26 @@ static void vmx_ctxt_switch_to(struct vc
>>>  }
>>>  
>>>  
>>> +unsigned int vmx_get_cpl(void)
>>> +{
>>> +    unsigned long attr;
>>> +
>>> +    __vmread(GUEST_SS_AR_BYTES, &attr);
>>> +
>>> +    return (attr >> 5) & 3;
>>> +}
>>> +
>>> +static unsigned int _vmx_get_cpl(struct vcpu *v)
>>> +{
>>> +    unsigned int cpl;
>>> +
>>> +    vmx_vmcs_enter(v);
>>> +    cpl = vmx_get_cpl();
>>> +    vmx_vmcs_exit(v);
>>> +
>>> +    return cpl;
>>> +}
>> Our usual convention for functions with leading underscores would have
>> these the other way around.
> Our usual convention doesn't fit here: The static function is the
> only one allowed to have an underscore prefix.
>
>> As an alternative, how about vmx_hvmfunc_get_cpl() ? It is never called
>> directly.
> I don't like this. If anything, vmx_do_get_cpl(), albeit it's only
> slightly better imo.

That would also be slightly better, if you don't mind switching to it.

>
>> Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Let me know if you can live with the existing naming.

If not, then fine.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-06 11:44 ` [PATCH 2/3] x86/HVM: support (emulate) UMIP Jan Beulich
@ 2016-12-06 14:47   ` Andrew Cooper
  2016-12-06 14:55     ` Jan Beulich
  2016-12-08 12:20     ` Jan Beulich
  2016-12-06 16:23   ` Boris Ostrovsky
  1 sibling, 2 replies; 29+ messages in thread
From: Andrew Cooper @ 2016-12-06 14:47 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, Wei Liu, Suravee Suthikulpanit, George Dunlap,
	Ian Jackson, Jun Nakajima, Boris Ostrovsky

On 06/12/16 11:44, Jan Beulich wrote:
> There are three noteworthy drawbacks:
> 1) The intercepts we need to enable here are CPL-independent, i.e. we
>    now have to emulate certain instructions for ring 0.
> 2) On VMX there's no intercept for SMSW, so the emulation isn't really
>    complete there.
> 3) The CR4 write intercept on SVM is lower priority than all exception
>    checks, so we need to intercept #GP.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

I have had a brief look over the patch, but not reviewed it in detail yet.

First of all, would you mind pulling the 64bit segment implementation
into a separate patch?  They are somewhat orthogonal to UMIP, and the
code looks fine.  They will also let me finish my comprehensive user and
system segment emulation handling XTF tests which I developed during the
XSA-191 work.

As for UMIP itself, there are a number of issues which we should
consider here.

First, this adds quite a lot of emulation and extra handling in security
sensitive areas.  That isn't a problem per se, but given concerns with
emulation in general (and indeed the efforts to remove all emulation
from some usecases), making it unilaterally enabled is a problem.

As such, I think emulated-UMIP is an option which the user must
explicitly opt in to.  The easiest option might be to defer adding
emulated-UMIP until I have split the default and max featureset options
in the CPUID policy ABI (which is the task I am currently working on).

However, it would also require only enabling the SVM GP intercept in the
hvm_update_guest_vendor() path (which should be renamed to something
slightly more generic like hvm_cpuid_policy_updated()).


Given the complexity of the interactions, I think we should have a
dedicated XTF test for UMIP behaviour, similar to the CPUID faulting
one.  I can see about implementing that if you don't want to.

> ---
> The tool stack change could be left out - it updates a table which is
> rather out of date anyway.
> ---
> This once again points out that handle_mmio() is rather badly named, as
> it's about more than just MMIO. Since we have hvm_emulate_one()
> already, I am, however, lacking an idea for a good alternative name.

As am I.  A number of its current uses are not for MMIO at all, and
would ideally go with some further restriction of emulated instructions.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-06 14:47   ` Andrew Cooper
@ 2016-12-06 14:55     ` Jan Beulich
  2016-12-06 14:57       ` Andrew Cooper
  2016-12-08 12:20     ` Jan Beulich
  1 sibling, 1 reply; 29+ messages in thread
From: Jan Beulich @ 2016-12-06 14:55 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Kevin Tian, Wei Liu, Suravee Suthikulpanit, George Dunlap,
	Ian Jackson, Jun Nakajima, xen-devel, Boris Ostrovsky

>>> On 06.12.16 at 15:47, <andrew.cooper3@citrix.com> wrote:
> On 06/12/16 11:44, Jan Beulich wrote:
>> This once again points out that handle_mmio() is rather badly named, as
>> it's about more than just MMIO. Since we have hvm_emulate_one()
>> already, I am, however, lacking an idea for a good alternative name.
> 
> As am I.  A number of its current uses are not for MMIO at all, and
> would ideally go with some further restriction of emulated instructions.

Actually meanwhile I think I have a plan here, which could go
together with renaming the function: To efficiently make use of
the new .validate() hook the emulator obtains with the PV priv-op
patch, I'd like to have callers of handle_mmio() pass in their hook
function (and I think we should not make this optional).
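
A sketch of that shape (hypothetical names, not the eventual Xen
interface):

struct emul_ctxt;                               /* stand-in for Xen's context */
typedef int (*validate_fn)(struct emul_ctxt *ctxt);

/* Hypothetical successor to handle_mmio(): the caller-supplied validate()
 * hook is mandatory, so every caller states up front which instructions
 * it is prepared to have emulated. */
static int emulate_one_insn(struct emul_ctxt *ctxt, validate_fn validate)
{
    int rc = validate(ctxt);

    if ( rc != 0 )
        return rc;                              /* caller rejected the insn */

    /* ... fetch, decode and emulate the current instruction ... */
    return 0;
}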

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-06 14:55     ` Jan Beulich
@ 2016-12-06 14:57       ` Andrew Cooper
  0 siblings, 0 replies; 29+ messages in thread
From: Andrew Cooper @ 2016-12-06 14:57 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, Wei Liu, Suravee Suthikulpanit, George Dunlap,
	Ian Jackson, Jun Nakajima, xen-devel, Boris Ostrovsky

On 06/12/16 14:55, Jan Beulich wrote:
>>>> On 06.12.16 at 15:47, <andrew.cooper3@citrix.com> wrote:
>> On 06/12/16 11:44, Jan Beulich wrote:
>>> This once again points out that handle_mmio() is rather badly named, as
>>> it's about more than just MMIO. Since we have hvm_emulate_one()
>>> already, I am, however, lacking an idea for a good alternative name.
>> As am I.  A number of its current uses are not for MMIO at all, and
>> would ideally go with some further restriction of emulated instructions.
> Actually meanwhile I think I have a plan here, which could go
> together with renaming the function: To efficiently make use of
> the new .validate() hook the emulator obtains with the PV priv-op
> patch, I'd like to have callers of handle_mmio() pass in their hook
> function (and I think we should not make this optional).

I haven't got that far in the PV series yet, but this plan sounds good
to me.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook
  2016-12-06 11:43 ` [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook Jan Beulich
  2016-12-06 12:16   ` Razvan Cojocaru
  2016-12-06 13:49   ` Andrew Cooper
@ 2016-12-06 15:49   ` Boris Ostrovsky
  2016-12-07  6:21   ` Tian, Kevin
  2016-12-13  8:45   ` Ping: " Jan Beulich
  4 siblings, 0 replies; 29+ messages in thread
From: Boris Ostrovsky @ 2016-12-06 15:49 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, tamas, Jun Nakajima, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Tim Deegan, Suravee Suthikulpanit

On 12/06/2016 06:43 AM, Jan Beulich wrote:
> ... instead of repeating the same code in various places (and getting
> it  wrong in some of them).
>
> In vmx_inst_check_privilege() also stop open coding vmx_get_x86_mode().

vmx_guest_x86_mode()

>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-06 11:44 ` [PATCH 2/3] x86/HVM: support (emulate) UMIP Jan Beulich
  2016-12-06 14:47   ` Andrew Cooper
@ 2016-12-06 16:23   ` Boris Ostrovsky
  2016-12-07 11:29     ` Jan Beulich
  1 sibling, 1 reply; 29+ messages in thread
From: Boris Ostrovsky @ 2016-12-06 16:23 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Kevin Tian, Wei Liu, Jun Nakajima, George Dunlap, Andrew Cooper,
	Ian Jackson, Suravee Suthikulpanit

On 12/06/2016 06:44 AM, Jan Beulich wrote:
> There are three noteworthy drawbacks:
> 1) The intercepts we need to enable here are CPL-independent, i.e. we
>    now have to emulate certain instructions for ring 0.
> 2) On VMX there's no intercept for SMSW, so the emulation isn't really
>    complete there.
> 3) The CR4 write intercept on SVM is lower priority than all exception
>    checks, so we need to intercept #GP.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> The tool stack change could be left out - it updates a table which is
> rather out of date anyway.
> ---
> This once again points out that handle_mmio() is rather badly named, as
> it's about more than just MMIO. Since we have hvm_emulate_one()
> already, I am, however, lacking an idea for a good alternative name.
>
> --- a/tools/libxl/libxl_cpuid.c
> +++ b/tools/libxl/libxl_cpuid.c
> @@ -158,6 +158,7 @@ int libxl_cpuid_parse_config(libxl_cpuid
>          {"de",           0x00000001, NA, CPUID_REG_EDX,  2,  1},
>          {"vme",          0x00000001, NA, CPUID_REG_EDX,  1,  1},
>          {"fpu",          0x00000001, NA, CPUID_REG_EDX,  0,  1},
> +        {"umip",         0x00000007,  0, CPUID_REG_ECX,  2,  1},
>          {"topoext",      0x80000001, NA, CPUID_REG_ECX, 22,  1},
>          {"tbm",          0x80000001, NA, CPUID_REG_ECX, 21,  1},
>          {"nodeid",       0x80000001, NA, CPUID_REG_ECX, 19,  1},
> --- a/xen/arch/x86/cpuid.c
> +++ b/xen/arch/x86/cpuid.c
> @@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
>      __set_bit(X86_FEATURE_APIC, hvm_featureset);
>  
>      /*
> +     * Xen can often provide UMIP emulation to HVM guests even if the host
> +     * doesn't have such functionality.
> +     */
> +    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
> +        __set_bit(X86_FEATURE_UMIP, hvm_featureset);

I don't think I understand how this is going to work for processors that
don't support UMIP.

How, for example, can guest_cr[4] have X86_CR4_UMIP set on these
processors when CPUID will not show the feature being there?

-boris


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying
  2016-12-06 11:45 ` [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying Jan Beulich
  2016-12-06 11:47   ` Paul Durrant
  2016-12-06 13:55   ` Andrew Cooper
@ 2016-12-06 16:25   ` Boris Ostrovsky
  2 siblings, 0 replies; 29+ messages in thread
From: Boris Ostrovsky @ 2016-12-06 16:25 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: George Dunlap, Andrew Cooper, Paul Durrant, Suravee Suthikulpanit

On 12/06/2016 06:45 AM, Jan Beulich wrote:
> This makes things type safe.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook
  2016-12-06 11:43 ` [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook Jan Beulich
                     ` (2 preceding siblings ...)
  2016-12-06 15:49   ` Boris Ostrovsky
@ 2016-12-07  6:21   ` Tian, Kevin
  2016-12-13  8:45   ` Ping: " Jan Beulich
  4 siblings, 0 replies; 29+ messages in thread
From: Tian, Kevin @ 2016-12-07  6:21 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: tamas, Nakajima, Jun, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Tim Deegan, Suravee Suthikulpanit,
	Boris Ostrovsky

> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: Tuesday, December 06, 2016 7:43 PM
> 
> ... instead of repeating the same code in various places (and getting
> it  wrong in some of them).
> 
> In vmx_inst_check_privilege() also stop open coding vmx_get_x86_mode().
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Acked-by: Kevin Tian <kevin.tian@intel.com>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-06 16:23   ` Boris Ostrovsky
@ 2016-12-07 11:29     ` Jan Beulich
  2016-12-07 15:10       ` Boris Ostrovsky
  0 siblings, 1 reply; 29+ messages in thread
From: Jan Beulich @ 2016-12-07 11:29 UTC (permalink / raw)
  To: Boris Ostrovsky
  Cc: Kevin Tian, Wei Liu, Jun Nakajima, George Dunlap, Andrew Cooper,
	Ian Jackson, Suravee Suthikulpanit, xen-devel

>>> On 06.12.16 at 17:23, <boris.ostrovsky@oracle.com> wrote:
> On 12/06/2016 06:44 AM, Jan Beulich wrote:
>> --- a/xen/arch/x86/cpuid.c
>> +++ b/xen/arch/x86/cpuid.c
>> @@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
>>      __set_bit(X86_FEATURE_APIC, hvm_featureset);
>>  
>>      /*
>> +     * Xen can often provide UMIP emulation to HVM guests even if the host
>> +     * doesn't have such functionality.
>> +     */
>> +    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
>> +        __set_bit(X86_FEATURE_UMIP, hvm_featureset);
> 
> I don't think I understand how this is going to work for processors that
> don't support UMIP.
> 
> How, for example, can guest_cr[4] have X86_CR4_UMIP set on these
> processors when CPUID will not show the feature being there?

What we allow the guest to see and what we store into hardware
registers are two different things: Note how svm_update_guest_cr()
masks off X86_CR4_UMIP from the value to be put into the VMCB.
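
A sketch of that masking (hypothetical helper, not the actual
svm_update_guest_cr() code):

#include <stdbool.h>

#define X86_CR4_UMIP (1UL << 11)        /* CR4.UMIP is bit 11 */

/* The guest-visible value stays in guest_cr[4]; the value written to the
 * VMCB has UMIP stripped when the hardware doesn't know the bit. */
static unsigned long vmcb_cr4(unsigned long guest_cr4, bool host_has_umip)
{
    return host_has_umip ? guest_cr4 : (guest_cr4 & ~X86_CR4_UMIP);
}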

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-07 11:29     ` Jan Beulich
@ 2016-12-07 15:10       ` Boris Ostrovsky
  2016-12-07 15:14         ` Jan Beulich
  0 siblings, 1 reply; 29+ messages in thread
From: Boris Ostrovsky @ 2016-12-07 15:10 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, Wei Liu, Jun Nakajima, George Dunlap, Andrew Cooper,
	Ian Jackson, Suravee Suthikulpanit, xen-devel

On 12/07/2016 06:29 AM, Jan Beulich wrote:
>>>> On 06.12.16 at 17:23, <boris.ostrovsky@oracle.com> wrote:
>> On 12/06/2016 06:44 AM, Jan Beulich wrote:
>>> --- a/xen/arch/x86/cpuid.c
>>> +++ b/xen/arch/x86/cpuid.c
>>> @@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
>>>      __set_bit(X86_FEATURE_APIC, hvm_featureset);
>>>  
>>>      /*
>>> +     * Xen can often provide UMIP emulation to HVM guests even if the host
>>> +     * doesn't have such functionality.
>>> +     */
>>> +    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
>>> +        __set_bit(X86_FEATURE_UMIP, hvm_featureset);
>> I don't think I understand how this is going to work for processors that
>> don't support UMIP.
>>
>> How, for example, can guest_cr[4] have X86_CR4_UMIP set on these
>> processors when CPUID will not show the feature being there?
> What we allow the guest to see and what we store into hardware
> registers are two different things: Note how svm_update_guest_cr()
> masks off X86_CR4_UMIP from the vale to be put into the VMCB.

So that was kind of my question --- why would a guest ever try to set
this bit? As far as it is concerned, UMIP is not available and the guest
is then trying to set an unsupported bit in cr4. And that should result
in a #GP.

-boris

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-07 15:10       ` Boris Ostrovsky
@ 2016-12-07 15:14         ` Jan Beulich
  2016-12-07 15:31           ` Boris Ostrovsky
  0 siblings, 1 reply; 29+ messages in thread
From: Jan Beulich @ 2016-12-07 15:14 UTC (permalink / raw)
  To: Boris Ostrovsky
  Cc: Kevin Tian, Wei Liu, Jun Nakajima, George Dunlap, Andrew Cooper,
	Ian Jackson, Suravee Suthikulpanit, xen-devel

>>> On 07.12.16 at 16:10, <boris.ostrovsky@oracle.com> wrote:
> On 12/07/2016 06:29 AM, Jan Beulich wrote:
>>>>> On 06.12.16 at 17:23, <boris.ostrovsky@oracle.com> wrote:
>>> On 12/06/2016 06:44 AM, Jan Beulich wrote:
>>>> --- a/xen/arch/x86/cpuid.c
>>>> +++ b/xen/arch/x86/cpuid.c
>>>> @@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
>>>>      __set_bit(X86_FEATURE_APIC, hvm_featureset);
>>>>  
>>>>      /*
>>>> +     * Xen can often provide UMIP emulation to HVM guests even if the host
>>>> +     * doesn't have such functionality.
>>>> +     */
>>>> +    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
>>>> +        __set_bit(X86_FEATURE_UMIP, hvm_featureset);
>>> I don't think I understand how this is going to work for processors that
>>> don't support UMIP.
>>>
>>> How, for example, can guest_cr[4] have X86_CR4_UMIP set on these
>>> processors when CPUID will not show the feature being there?
>> What we allow the guest to see and what we store into hardware
>> registers are two different things: Note how svm_update_guest_cr()
>> masks off X86_CR4_UMIP from the value to be put into the VMCB.
> 
> So that was kind of my question --- why would a guest ever try to set
> this bit? As far as it is concerned, UMIP is not available and the guest
> is then trying to set an unsupported bit in cr4. And that should result
> in a #GP.

But the code fragment above adds the respective CPUID bit to the
permitted features. Believe me, I've tried this with a UMIP-enabled
Linux (including proper CPUID based detection).
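
Put differently, the only discovery mechanism the guest has is
CPUID.(EAX=7,ECX=0):ECX[2]; a minimal sketch of such a check
(illustrative, using GCC's cpuid.h rather than guest kernel
infrastructure):

#include <cpuid.h>
#include <stdbool.h>

static bool cpu_has_umip(void)
{
    unsigned int eax, ebx, ecx, edx;

    if ( !__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) )
        return false;

    return ecx & (1u << 2);     /* CPUID.(EAX=7,ECX=0):ECX bit 2 = UMIP */
}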

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-07 15:14         ` Jan Beulich
@ 2016-12-07 15:31           ` Boris Ostrovsky
  2016-12-07 15:39             ` Jan Beulich
  0 siblings, 1 reply; 29+ messages in thread
From: Boris Ostrovsky @ 2016-12-07 15:31 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, Wei Liu, Jun Nakajima, George Dunlap, Andrew Cooper,
	Ian Jackson, Suravee Suthikulpanit, xen-devel

On 12/07/2016 10:14 AM, Jan Beulich wrote:
>>>> On 07.12.16 at 16:10, <boris.ostrovsky@oracle.com> wrote:
>> On 12/07/2016 06:29 AM, Jan Beulich wrote:
>>>>>> On 06.12.16 at 17:23, <boris.ostrovsky@oracle.com> wrote:
>>>> On 12/06/2016 06:44 AM, Jan Beulich wrote:
>>>>> --- a/xen/arch/x86/cpuid.c
>>>>> +++ b/xen/arch/x86/cpuid.c
>>>>> @@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
>>>>>      __set_bit(X86_FEATURE_APIC, hvm_featureset);
>>>>>  
>>>>>      /*
>>>>> +     * Xen can often provide UMIP emulation to HVM guests even if the host
>>>>> +     * doesn't have such functionality.
>>>>> +     */
>>>>> +    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
>>>>> +        __set_bit(X86_FEATURE_UMIP, hvm_featureset);
>>>> I don't think I understand how this is going to work for processors that
>>>> don't support UMIP.
>>>>
>>>> How, for example, can guest_cr[4] have X86_CR4_UMIP set on these
>>>> processors when CPUID will not show the feature being there?
>>> What we allow the guest to see and what we store into hardware
>>> registers are two different things: Note how svm_update_guest_cr()
>>> masks off X86_CR4_UMIP from the value to be put into the VMCB.
>> So that was kind of my question --- why would a guest ever try to set
>> this bit? As far as it is concerned, UMIP is not available and the guest
>> is then trying to set an unsupported bit in cr4. And that should result
>> in a #GP.
> But the code fragment above adds the respective CPUID bit to the
> permitted features. Believe me, I've tried this with a UMIP-enabled
> Linux (including proper CPUID based detection).

Are you referring to these patches: https://lkml.org/lkml/2016/11/8/68 ?
If yes then they look to be Intel-specific.

If AMD decides to use CPUID0x7.ecx[2] for something else --- won't this
be a problem for this patch?

-boris



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-07 15:31           ` Boris Ostrovsky
@ 2016-12-07 15:39             ` Jan Beulich
  2016-12-07 16:06               ` Andrew Cooper
  0 siblings, 1 reply; 29+ messages in thread
From: Jan Beulich @ 2016-12-07 15:39 UTC (permalink / raw)
  To: Boris Ostrovsky
  Cc: Kevin Tian, Wei Liu, Jun Nakajima, George Dunlap, Andrew Cooper,
	Ian Jackson, Suravee Suthikulpanit, xen-devel

>>> On 07.12.16 at 16:31, <boris.ostrovsky@oracle.com> wrote:
> On 12/07/2016 10:14 AM, Jan Beulich wrote:
>>>>> On 07.12.16 at 16:10, <boris.ostrovsky@oracle.com> wrote:
>>> On 12/07/2016 06:29 AM, Jan Beulich wrote:
>>>>>>> On 06.12.16 at 17:23, <boris.ostrovsky@oracle.com> wrote:
>>>>> On 12/06/2016 06:44 AM, Jan Beulich wrote:
>>>>>> --- a/xen/arch/x86/cpuid.c
>>>>>> +++ b/xen/arch/x86/cpuid.c
>>>>>> @@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
>>>>>>      __set_bit(X86_FEATURE_APIC, hvm_featureset);
>>>>>>  
>>>>>>      /*
>>>>>> +     * Xen can often provide UMIP emulation to HVM guests even if the host
>>>>>> +     * doesn't have such functionality.
>>>>>> +     */
>>>>>> +    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
>>>>>> +        __set_bit(X86_FEATURE_UMIP, hvm_featureset);
>>>>> I don't think I understand how this is going to work for processors that
>>>>> don't support UMIP.
>>>>>
>>>>> How, for example, can guest_cr[4] have X86_CR4_UMIP set on these
>>>>> processors when CPUID will not show the feature being there?
>>>> What we allow the guest to see and what we store into hardware
>>>> registers are two different things: Note how svm_update_guest_cr()
>>>> masks off X86_CR4_UMIP from the value to be put into the VMCB.
>>> So that was kind of my question --- why would a guest ever try to set
>>> this bit? As far as it is concerned, UMIP is not available and the guest
>>> is then trying to set an unsupported bit in cr4. And that should result
>>> in a #GP.
>> But the code fragment above adds the respective CPUID bit to the
>> permitted features. Believe me, I've tried this with a UMIP-enabled
>> Linux (including proper CPUID based detection).
> 
> Are you referring to these patches: https://lkml.org/lkml/2016/11/8/68 ?
> If yes then they look to be Intel-specific.

No, I had written my own before these had been posted. I did
actually post mine too, but that was a week or so after theirs,
and theirs appears to be more complete.

> If AMD decides to use CPUID0x7.ecx[2] for something else --- won't this
> be a problem for this patch?

I think the two vendors meanwhile do a good job not interfering with
one another's CPUID bits. We'd have ugly problems elsewhere if any
new dual purpose CPUID bit appeared.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-07 15:39             ` Jan Beulich
@ 2016-12-07 16:06               ` Andrew Cooper
  2016-12-08  8:20                 ` Tian, Kevin
  2016-12-26  3:56                 ` Suravee Suthikulpanit
  0 siblings, 2 replies; 29+ messages in thread
From: Andrew Cooper @ 2016-12-07 16:06 UTC (permalink / raw)
  To: Jan Beulich, Boris Ostrovsky
  Cc: Kevin Tian, Wei Liu, Suravee Suthikulpanit, George Dunlap,
	Ian Jackson, Jun Nakajima, xen-devel

On 07/12/16 15:39, Jan Beulich wrote:
>>>> On 07.12.16 at 16:31, <boris.ostrovsky@oracle.com> wrote:
>> On 12/07/2016 10:14 AM, Jan Beulich wrote:
>>>>>> On 07.12.16 at 16:10, <boris.ostrovsky@oracle.com> wrote:
>>>> On 12/07/2016 06:29 AM, Jan Beulich wrote:
>>>>>>>> On 06.12.16 at 17:23, <boris.ostrovsky@oracle.com> wrote:
>>>>>> On 12/06/2016 06:44 AM, Jan Beulich wrote:
>>>>>>> --- a/xen/arch/x86/cpuid.c
>>>>>>> +++ b/xen/arch/x86/cpuid.c
>>>>>>> @@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
>>>>>>>      __set_bit(X86_FEATURE_APIC, hvm_featureset);
>>>>>>>  
>>>>>>>      /*
>>>>>>> +     * Xen can often provide UMIP emulation to HVM guests even if the host
>>>>>>> +     * doesn't have such functionality.
>>>>>>> +     */
>>>>>>> +    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
>>>>>>> +        __set_bit(X86_FEATURE_UMIP, hvm_featureset);
>>>>>> I don't think I understand how this is going to work for processors that
>>>>>> don't support UMIP.
>>>>>>
>>>>>> How, for example, can guest_cr[4] have X86_CR4_UMIP set on these
>>>>>> processors when CPUID will not show the feature being there?
>>>>> What we allow the guest to see and what we store into hardware
>>>>> registers are two different things: Note how svm_update_guest_cr()
>>>>> masks off X86_CR4_UMIP from the value to be put into the VMCB.
>>>> So that was kind of my question --- why would a guest ever try to set
>>>> this bit? As far as it is concerned, UMIP is not available and the guest
>>>> is then trying to set an unsupported bit in cr4. And that should result
>>>> in a #GP.
>>> But the code fragment above adds the respective CPUID bit to the
>>> permitted features. Believe me, I've tried this with a UMIP-enabled
>>> Linux (including proper CPUID based detection).
>> Are you referring to these patches: https://lkml.org/lkml/2016/11/8/68 ?
>> If yes then they look to be Intel-specific.
> No, I had written my own before these had been posted. I did
> actually post mine too, but that was a week or so after theirs,
> and theirs appears to be more complete.
>
>> If AMD decides to use CPUID0x7.ecx[2] for something else --- won't this
>> be a problem for this patch?
> I think the two vendors meanwhile do a good job not interfering with
> one another's CPUID bits. We'd have ugly problems elsewhere if any
> new dual purpose CPUID bit appeared.

[root@minuet-1 ~]# head /proc/cpuinfo
processor    : 0
vendor_id    : AuthenticAMD
cpu family    : 21
model        : 96

[root@minuet-1 ~]# xen-cpuid
nr_features: 10
                      KEY 1d       1c       e1d      e1c      Da1      7b0      7c0      e7d      e8b      7d0

Static sets:
Known                    
b7ebfbff:fffef3ff:efd3fbff:2469bfff:0000000f:fdbfffff:0000001b:00000500:00000001:0000000c
Special                  
10000200:88200000:00000000:00000002:00000000:00002040:00000010:00000000:00000000:00000000
PV Mask                  
17c9cbf5:f6f83203:e2500800:042109e3:00000007:fdaf0b39:00000003:00000000:00000001:0000000c
HVM Shadow Mask          
17cbfbff:f7f83223:ea500800:04218df7:0000000f:fdbf4bbb:00000003:00000000:00000001:0000000c
HVM Hap Mask             
17cbfbff:f7fa3223:ee500800:04218df7:0000000f:fdbf4fbb:0000000b:00000000:00000001:0000000c

Dynamic sets:
Raw                      
178bfbff:fed8320b:2fd3fbff:2febbfff:00000001:000001a9:00000000:000037d9:00000000:00000000
Host                     
178bf3ff:f6d8320b:2fd3fbff:2469bfff:00000001:000001a9:00000000:00000500:00000000:00000000
PV                       
1789c3f5:f6f83203:23d1cbf5:042109e3:00000001:00000129:00000000:00000000:00000000:00000000
HVM                      
178bfbff:f6f83203:2fd3fbff:04218df7:00000001:000001a9:00000000:00000000:00000000:00000000


This processor already implements some of Intel's features from
0x7[0].ebx, including SMEP.  According to marketing, AMD Zen processors
will add the ADX, RDSEED, SHA, CLFLUSHOPT and SMAP features

I can't reasonably see them using a different feature word.

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-07 16:06               ` Andrew Cooper
@ 2016-12-08  8:20                 ` Tian, Kevin
  2016-12-26  3:56                 ` Suravee Suthikulpanit
  1 sibling, 0 replies; 29+ messages in thread
From: Tian, Kevin @ 2016-12-08  8:20 UTC (permalink / raw)
  To: Andrew Cooper, Jan Beulich, Boris Ostrovsky
  Cc: Wei Liu, Suravee Suthikulpanit, George Dunlap, Ian Jackson,
	Nakajima, Jun, xen-devel

> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: Thursday, December 08, 2016 12:07 AM
> 
> On 07/12/16 15:39, Jan Beulich wrote:
> >>>> On 07.12.16 at 16:31, <boris.ostrovsky@oracle.com> wrote:
> >> On 12/07/2016 10:14 AM, Jan Beulich wrote:
> >>>>>> On 07.12.16 at 16:10, <boris.ostrovsky@oracle.com> wrote:
> >>>> On 12/07/2016 06:29 AM, Jan Beulich wrote:
> >>>>>>>> On 06.12.16 at 17:23, <boris.ostrovsky@oracle.com> wrote:
> >>>>>> On 12/06/2016 06:44 AM, Jan Beulich wrote:
> >>>>>>> --- a/xen/arch/x86/cpuid.c
> >>>>>>> +++ b/xen/arch/x86/cpuid.c
> >>>>>>> @@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
> >>>>>>>      __set_bit(X86_FEATURE_APIC, hvm_featureset);
> >>>>>>>
> >>>>>>>      /*
> >>>>>>> +     * Xen can often provide UMIP emulation to HVM guests even if the host
> >>>>>>> +     * doesn't have such functionality.
> >>>>>>> +     */
> >>>>>>> +    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
> >>>>>>> +        __set_bit(X86_FEATURE_UMIP, hvm_featureset);
> >>>>>> I don't think I understand how this is going to work for processors that
> >>>>>> don't support UMIP.
> >>>>>>
> >>>>>> How, for example, can guest_cr[4] have X86_CR4_UMIP set on these
> >>>>>> processors when CPUID will not show the feature being there?
> >>>>> What we allow the guest to see and what we store into hardware
> >>>>> registers are two different things: Note how svm_update_guest_cr()
> >>>>> masks off X86_CR4_UMIP from the value to be put into the VMCB.
> >>>> So that was kind of my question --- why would a guest ever try to set
> >>>> this bit? As far as it is concerned, UMIP is not available and the guest
> >>>> is then trying to set an unsupported bit in cr4. And that should result
> >>>> in a #GP.
> >>> But the code fragment above adds the respective CPUID bit to the
> >>> permitted features. Believe me, I've tried this with a UMIP-enabled
> >>> Linux (including proper CPUID based detection).
> >> Are you referring to these patches: https://lkml.org/lkml/2016/11/8/68 ?
> >> If yes then they look to be Intel-specific.
> > No, I had written my own before these had been posted. I did
> > actually post mine too, but that was a week or so after theirs,
> > and theirs appears to be more complete.
> >
> >> If AMD decides to use CPUID0x7.ecx[2] for something else --- won't this
> >> be a problem for this patch?
> > I think the two vendors meanwhile do a good job not interfering with
> > one another's CPUID bits. We'd have ugly problems elsewhere if any
> > new dual purpose CPUID bit appeared.
> 
> [root@minuet-1 ~]# head /proc/cpuinfo
> processor    : 0
> vendor_id    : AuthenticAMD
> cpu family    : 21
> model        : 96
> 
> [root@minuet-1 ~]# xen-cpuid
> nr_features: 10
>                       KEY 1d       1c       e1d      e1c      Da1      7b0      7c0      e7d      e8b      7d0
> 
> Static sets:
> Known
> b7ebfbff:fffef3ff:efd3fbff:2469bfff:0000000f:fdbfffff:0000001b:00000500:00000001:0000000c
> Special
> 10000200:88200000:00000000:00000002:00000000:00002040:00000010:00000000:00000000:00000000
> PV Mask
> 17c9cbf5:f6f83203:e2500800:042109e3:00000007:fdaf0b39:00000003:00000000:00000001:0000000c
> HVM Shadow Mask
> 17cbfbff:f7f83223:ea500800:04218df7:0000000f:fdbf4bbb:00000003:00000000:00000001:0000000c
> HVM Hap Mask
> 17cbfbff:f7fa3223:ee500800:04218df7:0000000f:fdbf4fbb:0000000b:00000000:00000001:0000000c
> 
> Dynamic sets:
> Raw
> 178bfbff:fed8320b:2fd3fbff:2febbfff:00000001:000001a9:00000000:000037d9:00000000:00000000
> Host
> 178bf3ff:f6d8320b:2fd3fbff:2469bfff:00000001:000001a9:00000000:00000500:00000000:00000000
> PV
> 1789c3f5:f6f83203:23d1cbf5:042109e3:00000001:00000129:00000000:00000000:00000000:00000000
> HVM
> 178bfbff:f6f83203:2fd3fbff:04218df7:00000001:000001a9:00000000:00000000:00000000:00000000
> 
> 
> This processor already implements some of Intel's features from
> 0x7[0].ebx, including SMEP.  According to marketing, AMD Zen processors
> will add the ADX, RDSEED, SHA, CLFLUSHOPT and SMAP features
> 
> I can't reasonably see them using a different feature word.
> 
> ~Andrew

There is agreement that common features will use the same fields.

Thanks
Kevin

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-06 14:47   ` Andrew Cooper
  2016-12-06 14:55     ` Jan Beulich
@ 2016-12-08 12:20     ` Jan Beulich
  2016-12-09 18:42       ` Andrew Cooper
  1 sibling, 1 reply; 29+ messages in thread
From: Jan Beulich @ 2016-12-08 12:20 UTC (permalink / raw)
  To: Andrew Cooper
  Cc: Kevin Tian, Wei Liu, Suravee Suthikulpanit, George Dunlap,
	Ian Jackson, Jun Nakajima, xen-devel, Boris Ostrovsky

>>> On 06.12.16 at 15:47, <andrew.cooper3@citrix.com> wrote:
> As for UMIP itself, there are a number of issues which we should
> consider here.
> 
> First, this adds quite a lot of emulation and extra handling in security
> sensitive areas.  That isn't a problem per se, but given concerns with
> emulation in general (and indeed the efforts to remove all emulation
> from some usecases), making it unilaterally enabled is a problem.

As mentioned in the commit description.

> As such, I think emulated-UMIP is an option which the user must
> explicitly opt in to.  The easiest option might be to defer adding
> emulated-UMIP until I have split the default and max featureset options
> in the CPUID policy ABI (which is the task I am currently working on).

Makes sense.

> However, it would also require only enabling the SVM GP intercept in the
> hvm_update_guest_vendor() path (which should be renamed to something
> slightly more generic like hvm_cpuid_policy_updated()).

Why that? We always need it intercepted as long as the guest
wants UMIP, but the hardware doesn't offer it. The feature isn't
tied to the vendor being Intel or some such.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-08 12:20     ` Jan Beulich
@ 2016-12-09 18:42       ` Andrew Cooper
  0 siblings, 0 replies; 29+ messages in thread
From: Andrew Cooper @ 2016-12-09 18:42 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, Wei Liu, Suravee Suthikulpanit, George Dunlap,
	Ian Jackson, Jun Nakajima, xen-devel, Boris Ostrovsky

On 08/12/16 12:20, Jan Beulich wrote:
>
>> However, it would also require only enabling the SVM GP intercept in the
>> hvm_update_guest_vendor() path (which should be renamed to something
>> slightly more generic like hvm_cpuid_policy_updated()).
> Why that? We always need it intercepted as long as the guest
> wants UMIP, but the hardware doesn't offer it. The feature isn't
> tied to the vendor being Intel or some such.

The hvm_update_guest_vendor() path is the post-domain_create() way of
signalling "cpuid has changed - you might want to reconfigure intercepts".

It is currently used only to alter the #UD intercept based on the set
CPUID vendor (hence its name), but the name now looks rather short-sighted.

With the proposal of having emulated-UMIP as explicitly opt-in, the
required intercepts shouldn't be enabled at domain_create() time, and
should be enabled later after the toolstack has set a policy.
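
A sketch of that flow (all helper names hypothetical):

#include <stdbool.h>

struct vcpu;                                    /* opaque for this sketch */

bool guest_policy_has_umip(const struct vcpu *v);
bool host_has_umip(void);
void update_ud_intercept(struct vcpu *v);       /* vendor-based #UD handling */
void svm_set_gp_intercept(struct vcpu *v, bool enable);

/* Hypothetical shape of the renamed hook: re-evaluate intercepts whenever
 * the toolstack updates the domain's CPUID policy, rather than at
 * domain_create() time. */
void hvm_cpuid_policy_updated(struct vcpu *v)
{
    update_ud_intercept(v);

    /* Emulated UMIP needs the SVM #GP intercept only when the policy
     * offers UMIP but the hardware lacks it. */
    svm_set_gp_intercept(v, guest_policy_has_umip(v) && !host_has_umip());
}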

~Andrew

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Ping: [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook
  2016-12-06 11:43 ` [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook Jan Beulich
                     ` (3 preceding siblings ...)
  2016-12-07  6:21   ` Tian, Kevin
@ 2016-12-13  8:45   ` Jan Beulich
  2016-12-13  9:33     ` Tim Deegan
  4 siblings, 1 reply; 29+ messages in thread
From: Jan Beulich @ 2016-12-13  8:45 UTC (permalink / raw)
  To: George Dunlap, Tim Deegan
  Cc: Kevin Tian, tamas, Suravee Suthikulpanit, Razvan Cojocaru,
	Andrew Cooper, Jun Nakajima, xen-devel, Boris Ostrovsky

>>> On 06.12.16 at 12:43, <JBeulich@suse.com> wrote:
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -174,7 +174,6 @@ guest_walk_tables(struct vcpu *v, struct
>  
>      if ( is_hvm_domain(d) && !(pfec & PFEC_user_mode) )
>      {
> -        struct segment_register seg;
>          const struct cpu_user_regs *regs = guest_cpu_user_regs();
>  
>          /* SMEP: kernel-mode instruction fetches from user-mode mappings
> @@ -186,8 +185,6 @@ guest_walk_tables(struct vcpu *v, struct
>          switch ( v->arch.smap_check_policy )
>          {
>          case SMAP_CHECK_HONOR_CPL_AC:
> -            hvm_get_segment_register(v, x86_seg_ss, &seg);
> -
>              /*
>               * SMAP: kernel-mode data accesses from user-mode mappings
>               * should fault.
> @@ -199,8 +196,7 @@ guest_walk_tables(struct vcpu *v, struct
>               *   - Page fault in kernel mode
>               */
>              smap = hvm_smap_enabled(v) &&
> -                   ((seg.attr.fields.dpl == 3) ||
> -                    !(regs->eflags & X86_EFLAGS_AC));
> +                   ((hvm_get_cpl(v) == 3) || !(regs->eflags & X86_EFLAGS_AC));
>              break;
>          case SMAP_CHECK_ENABLED:
>              smap = hvm_smap_enabled(v);
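
Written out as a standalone sketch (helper and parameter names
hypothetical), the changed condition is:

#include <stdbool.h>

/* SMAP_CHECK_HONOR_CPL_AC after the patch: a supervisor access through a
 * user mapping faults iff SMAP is enabled and either the CPL is 3 or
 * EFLAGS.AC is clear. */
static bool smap_fault(bool smap_enabled, unsigned int cpl, bool eflags_ac)
{
    return smap_enabled && (cpl == 3 || !eflags_ac);
}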

George, any word on these?

> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -1779,7 +1779,7 @@ void *sh_emulate_map_dest(struct vcpu *v
>  #ifndef NDEBUG
>      /* We don't emulate user-mode writes to page tables. */
>      if ( has_hvm_container_domain(d)
> -         ? hvm_get_seg_reg(x86_seg_ss, sh_ctxt)->attr.fields.dpl == 3
> +         ? hvm_get_cpl(v) == 3
>           : !guest_kernel_mode(v, guest_cpu_user_regs()) )
>      {
>          gdprintk(XENLOG_DEBUG, "User-mode write to pagetable reached "

Tim, how about this one?

Thanks, Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: Ping: [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook
  2016-12-13  8:45   ` Ping: " Jan Beulich
@ 2016-12-13  9:33     ` Tim Deegan
  0 siblings, 0 replies; 29+ messages in thread
From: Tim Deegan @ 2016-12-13  9:33 UTC (permalink / raw)
  To: Jan Beulich
  Cc: Kevin Tian, tamas, Jun Nakajima, Razvan Cojocaru, George Dunlap,
	Andrew Cooper, Suravee Suthikulpanit, xen-devel, Boris Ostrovsky

At 01:45 -0700 on 13 Dec (1481593536), Jan Beulich wrote:
> > --- a/xen/arch/x86/mm/shadow/common.c
> > +++ b/xen/arch/x86/mm/shadow/common.c
> > @@ -1779,7 +1779,7 @@ void *sh_emulate_map_dest(struct vcpu *v
> >  #ifndef NDEBUG
> >      /* We don't emulate user-mode writes to page tables. */
> >      if ( has_hvm_container_domain(d)
> > -         ? hvm_get_seg_reg(x86_seg_ss, sh_ctxt)->attr.fields.dpl == 3
> > +         ? hvm_get_cpl(v) == 3
> >           : !guest_kernel_mode(v, guest_cpu_user_regs()) )
> >      {
> >          gdprintk(XENLOG_DEBUG, "User-mode write to pagetable reached "
> 
> Tim, how about this one?

Sorry, didn't see that there.  Acked-by: Tim Deegan <tim@xen.org>

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

* Re: [PATCH 2/3] x86/HVM: support (emulate) UMIP
  2016-12-07 16:06               ` Andrew Cooper
  2016-12-08  8:20                 ` Tian, Kevin
@ 2016-12-26  3:56                 ` Suravee Suthikulpanit
  1 sibling, 0 replies; 29+ messages in thread
From: Suravee Suthikulpanit @ 2016-12-26  3:56 UTC (permalink / raw)
  To: Andrew Cooper, Jan Beulich, Boris Ostrovsky
  Cc: Kevin Tian, Wei Liu, George Dunlap, Ian Jackson, Jun Nakajima, xen-devel

On 12/7/16 23:06, Andrew Cooper wrote:
> On 07/12/16 15:39, Jan Beulich wrote:
>>>>> On 07.12.16 at 16:31, <boris.ostrovsky@oracle.com> wrote:
>>> On 12/07/2016 10:14 AM, Jan Beulich wrote:
>>>>>>> On 07.12.16 at 16:10, <boris.ostrovsky@oracle.com> wrote:
>>>>> On 12/07/2016 06:29 AM, Jan Beulich wrote:
>>>>>>>>> On 06.12.16 at 17:23, <boris.ostrovsky@oracle.com> wrote:
>>>>>>> On 12/06/2016 06:44 AM, Jan Beulich wrote:
>>>>>>>> --- a/xen/arch/x86/cpuid.c
>>>>>>>> +++ b/xen/arch/x86/cpuid.c
>>>>>>>> @@ -154,6 +154,13 @@ static void __init calculate_hvm_feature
>>>>>>>>      __set_bit(X86_FEATURE_APIC, hvm_featureset);
>>>>>>>>
>>>>>>>>      /*
>>>>>>>> +     * Xen can often provide UMIP emulation to HVM guests even if the host
>>>>>>>> +     * doesn't have such functionality.
>>>>>>>> +     */
>>>>>>>> +    if ( cpu_has_vmx_dt_exiting || cpu_has_svm )
>>>>>>>> +        __set_bit(X86_FEATURE_UMIP, hvm_featureset);
>>>>>>> I don't think I understand how this is going to work for processors that
>>>>>>> don't support UMIP.
>>>>>>>
>>>>>>> How, for example, can guest_cr[4] have X86_CR4_UMIP set on these
>>>>>>> processors when CPUID will not show the feature being there?
>>>>>> What we allow the guest to see and what we store into hardware
>>>>>> registers are two different things: Note how svm_update_guest_cr()
>>>>>> masks off X86_CR4_UMIP from the value to be put into the VMCB.
>>>>> So that was kind of my question --- why would a guest ever try to set
>>>>> this bit? As far as it is concerned, UMIP is not available and the guest
>>>>> is then trying to set an unsupported bit in cr4. And that should result
>>>>> in a #GP.
>>>> But the code fragment above adds the respective CPUID bit to the
>>>> permitted features. Believe me, I've tried this with a UMIP-enabled
>>>> Linux (including proper CPUID based detection).
>>> Are you referring to these patches: https://lkml.org/lkml/2016/11/8/68 ?
>>> If yes then they look to be Intel-specific.
>> No, I had written my own before these had been posted. I did
>> actually post mine too, but that was a week or so after theirs,
>> and theirs appears to be more complete.
>>
>>> If AMD decides to use CPUID0x7.ecx[2] for something else --- won't this
>>> be a problem for this patch?
>> I think the two vendors meanwhile do a good job not interfering with
>> one another's CPUID bits. We'd have ugly problems elsewhere if any
>> new dual purpose CPUID bit appeared.
>
> [root@minuet-1 ~]# head /proc/cpuinfo
> processor    : 0
> vendor_id    : AuthenticAMD
> cpu family    : 21
> model        : 96
>
> [root@minuet-1 ~]# xen-cpuid
> nr_features: 10
>                       KEY 1d       1c       e1d      e1c      Da1      7b0      7c0      e7d      e8b      7d0
>
> Static sets:
> Known
> b7ebfbff:fffef3ff:efd3fbff:2469bfff:0000000f:fdbfffff:0000001b:00000500:00000001:0000000c
> Special
> 10000200:88200000:00000000:00000002:00000000:00002040:00000010:00000000:00000000:00000000
> PV Mask
> 17c9cbf5:f6f83203:e2500800:042109e3:00000007:fdaf0b39:00000003:00000000:00000001:0000000c
> HVM Shadow Mask
> 17cbfbff:f7f83223:ea500800:04218df7:0000000f:fdbf4bbb:00000003:00000000:00000001:0000000c
> HVM Hap Mask
> 17cbfbff:f7fa3223:ee500800:04218df7:0000000f:fdbf4fbb:0000000b:00000000:00000001:0000000c
>
> Dynamic sets:
> Raw
> 178bfbff:fed8320b:2fd3fbff:2febbfff:00000001:000001a9:00000000:000037d9:00000000:00000000
> Host
> 178bf3ff:f6d8320b:2fd3fbff:2469bfff:00000001:000001a9:00000000:00000500:00000000:00000000
> PV
> 1789c3f5:f6f83203:23d1cbf5:042109e3:00000001:00000129:00000000:00000000:00000000:00000000
> HVM
> 178bfbff:f6f83203:2fd3fbff:04218df7:00000001:000001a9:00000000:00000000:00000000:00000000
>
>
> This processor already implements some of Intel's features from
> 0x7[0].ebx, including SMEP.  According to marketing, AMD Zen processors
> will add the ADX, RDSEED, SHA, CLFLUSHOPT and SMAP features.
>
> I can't reasonably see them using a different feature word.
>
> ~Andrew
>

I have checked on family 17h: CPUID Fn0000_0007_ECX is reserved there,
the same as on other older models.

Suravee


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 29+ messages in thread

end of thread, other threads:[~2016-12-26  3:56 UTC | newest]

Thread overview: 29+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-12-06 11:35 [PATCH 0/3] x86/HVM: misc improvements Jan Beulich
2016-12-06 11:43 ` [PATCH 1/3] x86/HVM: introduce hvm_get_cpl() and respective hook Jan Beulich
2016-12-06 12:16   ` Razvan Cojocaru
2016-12-06 13:49   ` Andrew Cooper
2016-12-06 14:07     ` Jan Beulich
2016-12-06 14:10       ` Andrew Cooper
2016-12-06 15:49   ` Boris Ostrovsky
2016-12-07  6:21   ` Tian, Kevin
2016-12-13  8:45   ` Ping: " Jan Beulich
2016-12-13  9:33     ` Tim Deegan
2016-12-06 11:44 ` [PATCH 2/3] x86/HVM: support (emulate) UMIP Jan Beulich
2016-12-06 14:47   ` Andrew Cooper
2016-12-06 14:55     ` Jan Beulich
2016-12-06 14:57       ` Andrew Cooper
2016-12-08 12:20     ` Jan Beulich
2016-12-09 18:42       ` Andrew Cooper
2016-12-06 16:23   ` Boris Ostrovsky
2016-12-07 11:29     ` Jan Beulich
2016-12-07 15:10       ` Boris Ostrovsky
2016-12-07 15:14         ` Jan Beulich
2016-12-07 15:31           ` Boris Ostrovsky
2016-12-07 15:39             ` Jan Beulich
2016-12-07 16:06               ` Andrew Cooper
2016-12-08  8:20                 ` Tian, Kevin
2016-12-26  3:56                 ` Suravee Suthikulpanit
2016-12-06 11:45 ` [PATCH 3/3] x86/HVM: prefer structure assignment for seg reg copying Jan Beulich
2016-12-06 11:47   ` Paul Durrant
2016-12-06 13:55   ` Andrew Cooper
2016-12-06 16:25   ` Boris Ostrovsky
