* [PATCH] x86emul: don't unconditionally clear segment bases upon null selector loads
From: Jan Beulich @ 2016-12-20  8:18 UTC
  To: xen-devel; +Cc: Andrew Cooper

AMD explicitly documents that FS and GS in particular don't have their
bases cleared when a null selector is loaded, and I see no reason why
guests shouldn't rely on that behavior. To facilitate this, a new input
field (the CPU vendor) is being added.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
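
For illustration (not part of the patch), a minimal sketch of the
guest-visible difference being preserved, assuming a 64-bit environment
with FSGSBASE enabled and the _writefsbase_u64()/_readfsbase_u64()
intrinsics available (build with -mfsgsbase):

    #include <immintrin.h>

    static unsigned long long fs_base_after_null_load(void)
    {
        unsigned short nul = 0;

        _writefsbase_u64(0x1000);                      /* non-zero FS base */
        asm volatile ( "mov %0, %%fs" :: "r" (nul) );  /* null selector load */

        /*
         * Intel clears the FS base along with the rest of the hidden
         * register state; AMD leaves it at 0x1000.
         */
        return _readfsbase_u64();
    }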

--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -119,6 +119,7 @@ int main(int argc, char **argv)
 
     ctxt.regs = &regs;
     ctxt.force_writeback = 0;
+    ctxt.vendor    = X86_VENDOR_UNKNOWN;
     ctxt.addr_size = 8 * sizeof(void *);
     ctxt.sp_size   = 8 * sizeof(void *);
 
--- a/tools/tests/x86_emulator/x86_emulate.h
+++ b/tools/tests/x86_emulator/x86_emulate.h
@@ -38,6 +38,11 @@
 
 #define is_canonical_address(x) (((int64_t)(x) >> 47) == ((int64_t)(x) >> 63))
 
+/* There's no strict need for these to be in sync with processor.h. */
+#define X86_VENDOR_INTEL   0
+#define X86_VENDOR_AMD     2
+#define X86_VENDOR_UNKNOWN 0xff
+
 #define MMAP_SZ 16384
 bool emul_test_make_stack_executable(void);
 
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1897,6 +1897,7 @@ void hvm_emulate_init_once(
     hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
 
     hvmemul_ctxt->ctxt.regs = regs;
+    hvmemul_ctxt->ctxt.vendor = current->domain->arch.x86_vendor;
     hvmemul_ctxt->ctxt.force_writeback = true;
 
     if ( cpu_has_vmx )
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5358,6 +5358,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
     struct ptwr_emulate_ctxt ptwr_ctxt = {
         .ctxt = {
             .regs = regs,
+            .vendor = d->arch.x86_vendor,
             .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .sp_size   = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .swint_emulate = x86_swint_emulate_none,
@@ -5513,6 +5514,7 @@ int mmio_ro_do_page_fault(struct vcpu *v
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = addr };
     struct x86_emulate_ctxt ctxt = {
         .regs = regs,
+        .vendor = v->domain->arch.x86_vendor,
         .addr_size = addr_size,
         .sp_size = addr_size,
         .swint_emulate = x86_swint_emulate_none,
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -330,6 +330,7 @@ const struct x86_emulate_ops *shadow_ini
     memset(sh_ctxt, 0, sizeof(*sh_ctxt));
 
     sh_ctxt->ctxt.regs = regs;
+    sh_ctxt->ctxt.vendor = v->domain->arch.x86_vendor;
     sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
 
     /* Segment cache initialisation. Primed with CS. */
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3352,7 +3352,10 @@ static int emulate_privileged_op(struct
 {
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
-    struct priv_op_ctxt ctxt = { .ctxt.regs = regs };
+    struct priv_op_ctxt ctxt = {
+        .ctxt.regs = regs,
+        .ctxt.vendor = currd->arch.x86_vendor,
+    };
     int rc;
     unsigned int eflags, ar;
 
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1395,7 +1395,11 @@ protmode_load_seg(
         case x86_seg_tr:
             goto raise_exn;
         }
-        memset(sreg, 0, sizeof(*sreg));
+        if ( ctxt->vendor != X86_VENDOR_AMD || !ops->read_segment ||
+             ops->read_segment(seg, sreg, ctxt) != X86EMUL_OKAY )
+            memset(sreg, 0, sizeof(*sreg));
+        else
+            sreg->attr.bytes = 0;
         sreg->sel = sel;
         return X86EMUL_OKAY;
     }
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -462,6 +462,9 @@ struct x86_emulate_ctxt
     /* Software event injection support. */
     enum x86_swint_emulation swint_emulate;
 
+    /* CPU vendor (X86_VENDOR_UNKNOWN for "don't care") */
+    unsigned char vendor;
+
     /* Set this if writes may have side effects. */
     bool force_writeback;
 

* Re: [PATCH] x86emul: don't unconditionally clear segment bases upon null selector loads
From: Andrew Cooper @ 2016-12-20 11:57 UTC
  To: Jan Beulich, xen-devel

On 20/12/2016 08:18, Jan Beulich wrote:
> AMD explicitly documents that FS and GS in particular don't have their
> bases cleared when a null selector is loaded, and I see no reason why
> guests shouldn't rely on that behavior. To facilitate this, a new input
> field (the CPU vendor) is being added.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

This looks better overall.

Long term, I think it would be better to pass the full CPUID policy into
the emulator. That would remove the need to use the cpuid() hook for both
emulation and instruction-related purposes, which we have seen gets
complicated with CPUID Faulting handling. Looking further ahead, passing
the full MSR banks would simplify that side of things as well.
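
As a rough sketch of the shape such an interface could take (all names
here are illustrative only, not actual Xen code):

    /* Hypothetical: the domain's complete, pre-audited CPUID data. */
    struct cpuid_policy;

    struct x86_emulate_ctxt {
        /* ... */
        /* Would subsume both the vendor byte and the cpuid() hook. */
        const struct cpuid_policy *cpuid;
        /* ... */
    };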

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>, with one minor
correction

> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1897,6 +1897,7 @@ void hvm_emulate_init_once(
>      hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
>  
>      hvmemul_ctxt->ctxt.regs = regs;
> +    hvmemul_ctxt->ctxt.vendor = current->domain->arch.x86_vendor;

curr is available here.
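
That is, presumably:

    hvmemul_ctxt->ctxt.vendor = curr->domain->arch.x86_vendor;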

~Andrew

>      hvmemul_ctxt->ctxt.force_writeback = true;
>  
>      if ( cpu_has_vmx )
>

