From: "Jan Beulich" <JBeulich@suse.com>
To: xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: [PATCH] x86emul: don't unconditionally clear segment bases upon null selector loads
Date: Tue, 20 Dec 2016 01:18:23 -0700
Message-ID: <5858F75F020000780012ACDB@prv-mh.provo.novell.com>


AMD explicitly documents that FS and GS, in particular, do not have
their bases cleared when a null selector is loaded into them, and I see
no reason why guests may not rely on that behavior. To facilitate this,
a new input field (the CPU vendor) is added to the emulator context.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
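
For illustration (not part of the patch): a 64-bit guest could observe
the behavioral difference along the lines of the hypothetical test
below, assuming CR4.FSGSBASE is enabled and the code is built with
-mfsgsbase; the base value used is arbitrary.

/* Hypothetical guest-side demonstration of null selector semantics. */
#include <stdio.h>
#include <immintrin.h>      /* _readfsbase_u64() / _writefsbase_u64() */

int main(void)
{
    unsigned int null_sel = 0;

    _writefsbase_u64(0x123456);                    /* set a known FS base */
    asm volatile ( "mov %0, %%fs" :: "r" (null_sel) ); /* null selector load */

    /*
     * AMD documents the FS/GS bases as preserved across a null selector
     * load; the emulator so far cleared them unconditionally, so an
     * emulated load would print 0 here instead of 0x123456.
     */
    printf("FS base after null load: %#llx\n",
           (unsigned long long)_readfsbase_u64());
    return 0;
}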

--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -119,6 +119,7 @@ int main(int argc, char **argv)
 
     ctxt.regs = &regs;
     ctxt.force_writeback = 0;
+    ctxt.vendor    = X86_VENDOR_UNKNOWN;
     ctxt.addr_size = 8 * sizeof(void *);
     ctxt.sp_size   = 8 * sizeof(void *);
 
--- a/tools/tests/x86_emulator/x86_emulate.h
+++ b/tools/tests/x86_emulator/x86_emulate.h
@@ -38,6 +38,11 @@
 
 #define is_canonical_address(x) (((int64_t)(x) >> 47) == ((int64_t)(x) >> 63))
 
+/* There's no strict need for these to be in sync with processor.h. */
+#define X86_VENDOR_INTEL   0
+#define X86_VENDOR_AMD     2
+#define X86_VENDOR_UNKNOWN 0xff
+
 #define MMAP_SZ 16384
 bool emul_test_make_stack_executable(void);
 
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1897,6 +1897,7 @@ void hvm_emulate_init_once(
     hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
 
     hvmemul_ctxt->ctxt.regs = regs;
+    hvmemul_ctxt->ctxt.vendor = current->domain->arch.x86_vendor;
     hvmemul_ctxt->ctxt.force_writeback = true;
 
     if ( cpu_has_vmx )
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5358,6 +5358,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
     struct ptwr_emulate_ctxt ptwr_ctxt = {
         .ctxt = {
             .regs = regs,
+            .vendor = d->arch.x86_vendor,
             .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .sp_size   = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .swint_emulate = x86_swint_emulate_none,
@@ -5513,6 +5514,7 @@ int mmio_ro_do_page_fault(struct vcpu *v
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = addr };
     struct x86_emulate_ctxt ctxt = {
         .regs = regs,
+        .vendor = v->domain->arch.x86_vendor,
         .addr_size = addr_size,
         .sp_size = addr_size,
         .swint_emulate = x86_swint_emulate_none,
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -330,6 +330,7 @@ const struct x86_emulate_ops *shadow_ini
     memset(sh_ctxt, 0, sizeof(*sh_ctxt));
 
     sh_ctxt->ctxt.regs = regs;
+    sh_ctxt->ctxt.vendor = v->domain->arch.x86_vendor;
     sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
 
     /* Segment cache initialisation. Primed with CS. */
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3352,7 +3352,10 @@ static int emulate_privileged_op(struct
 {
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
-    struct priv_op_ctxt ctxt = { .ctxt.regs = regs };
+    struct priv_op_ctxt ctxt = {
+        .ctxt.regs = regs,
+        .ctxt.vendor = currd->arch.x86_vendor,
+    };
     int rc;
     unsigned int eflags, ar;
 
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1395,7 +1395,11 @@ protmode_load_seg(
         case x86_seg_tr:
             goto raise_exn;
         }
-        memset(sreg, 0, sizeof(*sreg));
+        if ( ctxt->vendor != X86_VENDOR_AMD || !ops->read_segment ||
+             ops->read_segment(seg, sreg, ctxt) != X86EMUL_OKAY )
+            memset(sreg, 0, sizeof(*sreg));
+        else
+            sreg->attr.bytes = 0;
         sreg->sel = sel;
         return X86EMUL_OKAY;
     }
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -462,6 +462,9 @@ struct x86_emulate_ctxt
     /* Software event injection support. */
     enum x86_swint_emulation swint_emulate;
 
+    /* CPU vendor (X86_VENDOR_UNKNOWN for "don't care") */
+    unsigned char vendor;
+
     /* Set this if writes may have side effects. */
     bool force_writeback;
 
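In other words, the net effect of the protmode_load_seg() change above,
restated as a standalone sketch (load_null_selector() is a hypothetical
helper name; error handling around the call site is elided):

static void load_null_selector(struct x86_emulate_ctxt *ctxt,
                               const struct x86_emulate_ops *ops,
                               enum x86_segment seg,
                               struct segment_register *sreg,
                               uint16_t sel)
{
    if ( ctxt->vendor == X86_VENDOR_AMD && ops->read_segment &&
         ops->read_segment(seg, sreg, ctxt) == X86EMUL_OKAY )
        sreg->attr.bytes = 0;            /* keep base/limit, zap attributes */
    else
        memset(sreg, 0, sizeof(*sreg));  /* clear the whole register */
    sreg->sel = sel;                     /* store the (null) selector */
}

I.e. on AMD, when the current register state can be read back, only the
attributes get cleared (marking the segment unusable) while base and
limit are retained; in all other cases the previous unconditional
clearing is retained.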



