* [PATCH 0/3] x86/HVM: XSA-192 follow-ups
@ 2016-11-22 13:51 Jan Beulich
  2016-11-22 13:55 ` [PATCH 1/3] x86/HVM: limit writes to incoming TSS during task switch Jan Beulich
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Jan Beulich @ 2016-11-22 13:51 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Wei Liu

1: limit writes to incoming TSS during task switch
2: limit writes to outgoing TSS during task switch
3: correct error code writing during task switch

Signed-off-by: Jan Beulich <jbeulich@suse.com>



* [PATCH 1/3] x86/HVM: limit writes to incoming TSS during task switch
  2016-11-22 13:51 [PATCH 0/3] x86/HVM: XSA-192 follow-ups Jan Beulich
@ 2016-11-22 13:55 ` Jan Beulich
  2016-11-22 16:32   ` Andrew Cooper
  2016-11-22 13:55 ` [PATCH 2/3] x86/HVM: limit writes to outgoing " Jan Beulich
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2016-11-22 13:55 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Wei Liu

The only field modified (and even that conditionally) is the back link.
Write only that field, and only when it actually has been written to.

Take the opportunity and also ditch the pointless initializer from the
"tss" local variable.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2890,7 +2890,7 @@ void hvm_task_switch(
         u32 cr3, eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi;
         u16 es, _3, cs, _4, ss, _5, ds, _6, fs, _7, gs, _8, ldt, _9;
         u16 trace, iomap;
-    } tss = { 0 };
+    } tss;
 
     hvm_get_segment_register(v, x86_seg_gdtr, &gdt);
     hvm_get_segment_register(v, x86_seg_tr, &prev_tr);
@@ -3010,12 +3010,6 @@ void hvm_task_switch(
     regs->esi    = tss.esi;
     regs->edi    = tss.edi;
 
-    if ( (taskswitch_reason == TSW_call_or_int) )
-    {
-        regs->eflags |= X86_EFLAGS_NT;
-        tss.back_link = prev_tr.sel;
-    }
-
     exn_raised = 0;
     if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
          hvm_load_segment_selector(x86_seg_cs, tss.cs, tss.eflags) ||
@@ -3025,12 +3019,18 @@ void hvm_task_switch(
          hvm_load_segment_selector(x86_seg_gs, tss.gs, tss.eflags) )
         exn_raised = 1;
 
-    rc = hvm_copy_to_guest_virt(
-        tr.base, &tss, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        exn_raised = 1;
-    else if ( rc != HVMCOPY_okay )
-        goto out;
+    if ( taskswitch_reason == TSW_call_or_int )
+    {
+        regs->eflags |= X86_EFLAGS_NT;
+        tss.back_link = prev_tr.sel;
+
+        rc = hvm_copy_to_guest_virt(tr.base + offsetof(typeof(tss), back_link),
+                                    &tss.back_link, sizeof(tss.back_link), 0);
+        if ( rc == HVMCOPY_bad_gva_to_gfn )
+            exn_raised = 1;
+        else if ( rc != HVMCOPY_okay )
+            goto out;
+    }
 
     if ( (tss.trace & 1) && !exn_raised )
         hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
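
For reference, a minimal standalone sketch (plain C, not Xen code) of the
32-bit TSS layout that the local "tss" structure above mirrors. The fields
ahead of cr3 follow the architectural layout and are assumed here rather
than copied from hvm.c; the point is that back_link sits at offset 0, so
the selective incoming-TSS write is just those two bytes at tr.base:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative mirror of the local "tss" structure in hvm_task_switch(). */
struct tss32 {
    uint16_t back_link, _blh;                         /* offset 0 */
    uint32_t esp0; uint16_t ss0, _0;
    uint32_t esp1; uint16_t ss1, _1;
    uint32_t esp2; uint16_t ss2, _2;
    uint32_t cr3, eip, eflags, eax, ecx, edx, ebx, esp, ebp, esi, edi;
    uint16_t es, _3, cs, _4, ss, _5, ds, _6, fs, _7, gs, _8, ldt, _9;
    uint16_t trace, iomap;
};

int main(void)
{
    /* The back link is the only incoming-TSS field the hypervisor modifies,
     * so the write covers sizeof(uint16_t) == 2 bytes at offset 0. */
    printf("back_link: offset %zu, size %zu\n",
           offsetof(struct tss32, back_link), sizeof(uint16_t));
    printf("whole TSS image: %zu bytes\n", sizeof(struct tss32)); /* 104 */
    return 0;
}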





* [PATCH 2/3] x86/HVM: limit writes to outgoing TSS during task switch
  2016-11-22 13:51 [PATCH 0/3] x86/HVM: XSA-192 follow-ups Jan Beulich
  2016-11-22 13:55 ` [PATCH 1/3] x86/HVM: limit writes to incoming TSS during task switch Jan Beulich
@ 2016-11-22 13:55 ` Jan Beulich
  2016-11-22 16:46   ` Andrew Cooper
  2016-11-22 13:56 ` [PATCH 3/3] x86/HVM: correct error code writing " Jan Beulich
  2016-11-23 14:14 ` [PATCH 0/3] x86/HVM: XSA-192 follow-ups Wei Liu
  3 siblings, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2016-11-22 13:55 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Wei Liu

The only fields modified are EIP, EFLAGS, GPRs, and segment selectors.
CR3 in particular is not supposed to be updated.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2952,7 +2952,6 @@ void hvm_task_switch(
     if ( taskswitch_reason == TSW_iret )
         eflags &= ~X86_EFLAGS_NT;
 
-    tss.cr3    = v->arch.hvm_vcpu.guest_cr[3];
     tss.eip    = regs->eip;
     tss.eflags = eflags;
     tss.eax    = regs->eax;
@@ -2979,8 +2978,10 @@ void hvm_task_switch(
     hvm_get_segment_register(v, x86_seg_ldtr, &segr);
     tss.ldt = segr.sel;
 
-    rc = hvm_copy_to_guest_virt(
-        prev_tr.base, &tss, sizeof(tss), PFEC_page_present);
+    rc = hvm_copy_to_guest_virt(prev_tr.base + offsetof(typeof(tss), eip),
+                                &tss.eip,
+                                (void *)&tss.trace - (void *)&tss.eip,
+                                PFEC_page_present);
     if ( rc != HVMCOPY_okay )
         goto out;
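
As an illustration (not part of the patch), the architectural 32-bit TSS
offsets involved, with values taken from the x86 manuals rather than read
out of hvm.c, to show the byte range the single copy above now covers:

#include <stdio.h>

/* Architectural 32-bit TSS offsets (illustrative constants). */
enum {
    TSS32_CR3   = 28,   /* deliberately no longer written back */
    TSS32_EIP   = 32,   /* first field written to the outgoing TSS */
    TSS32_TRACE = 100,  /* first field past the written range */
};

int main(void)
{
    /* Everything from EIP through the LDT selector: 100 - 32 = 68 bytes,
     * starting at prev_tr.base + 32. */
    printf("write %d bytes at prev_tr.base + %d\n",
           TSS32_TRACE - TSS32_EIP, TSS32_EIP);
    return 0;
}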
 





* [PATCH 3/3] x86/HVM: correct error code writing during task switch
  2016-11-22 13:51 [PATCH 0/3] x86/HVM: XSA-192 follow-ups Jan Beulich
  2016-11-22 13:55 ` [PATCH 1/3] x86/HVM: limit writes to incoming TSS during task switch Jan Beulich
  2016-11-22 13:55 ` [PATCH 2/3] x86/HVM: limit writes to outgoing " Jan Beulich
@ 2016-11-22 13:56 ` Jan Beulich
  2016-11-22 16:58   ` Andrew Cooper
  2016-11-23 14:14 ` [PATCH 0/3] x86/HVM: XSA-192 follow-ups Wei Liu
  3 siblings, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2016-11-22 13:56 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Wei Liu

Whether to write 32 or just 16 bits depends on the D bit of the target
CS. The width of the stack pointer to use depends on the B bit of the
target SS.

Also avoid using the no-fault copying routine.

Finally avoid using yet another struct segment_register variable here.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3033,9 +3033,6 @@ void hvm_task_switch(
             goto out;
     }
 
-    if ( (tss.trace & 1) && !exn_raised )
-        hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
-
     tr.attr.fields.type = 0xb; /* busy 32-bit tss */
     hvm_set_segment_register(v, x86_seg_tr, &tr);
 
@@ -3051,17 +3048,32 @@ void hvm_task_switch(
 
     if ( errcode >= 0 )
     {
-        struct segment_register reg;
         unsigned long linear_addr;
-        regs->esp -= 4;
-        hvm_get_segment_register(current, x86_seg_ss, &reg);
-        /* Todo: do not ignore access faults here. */
-        if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp,
-                                        4, hvm_access_write, 32,
+        unsigned int opsz, sp;
+
+        hvm_get_segment_register(current, x86_seg_cs, &segr);
+        opsz = segr.attr.fields.db ? 4 : 2;
+        hvm_get_segment_register(current, x86_seg_ss, &segr);
+        if ( segr.attr.fields.db )
+            sp = regs->_esp -= opsz;
+        else
+            sp = *(uint16_t *)&regs->esp -= opsz;
+        if ( hvm_virtual_to_linear_addr(x86_seg_ss, &segr, sp, opsz,
+                                        hvm_access_write,
+                                        16 << segr.attr.fields.db,
                                         &linear_addr) )
-            hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4, 0);
+        {
+            rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0);
+            if ( rc == HVMCOPY_bad_gva_to_gfn )
+                exn_raised = 1;
+            else if ( rc != HVMCOPY_okay )
+                goto out;
+        }
     }
 
+    if ( (tss.trace & 1) && !exn_raised )
+        hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
+
  out:
     hvm_unmap_entry(optss_desc);
     hvm_unmap_entry(nptss_desc);
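
To make the width handling above easier to follow, here is a minimal
standalone model (plain C, not Xen code; the struct and function names are
made up) of how CS.D selects the error code size and SS.B selects how much
of ESP takes part in the push:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the error code push logic added above. */
struct push_info {
    uint32_t new_esp;   /* full ESP image after the push */
    uint32_t sp;        /* value fed into the linear address computation */
    unsigned int opsz;  /* bytes of error code written: 4 or 2 */
    unsigned int width; /* address width handed to the translation helper */
};

static struct push_info errcode_push(uint32_t esp, unsigned int cs_d, unsigned int ss_b)
{
    struct push_info p;

    p.opsz = cs_d ? 4 : 2;          /* CS.D: 32- vs 16-bit error code */
    if ( ss_b )                     /* SS.B set: full 32-bit stack pointer */
    {
        p.new_esp = esp - p.opsz;
        p.sp = p.new_esp;
    }
    else                            /* SS.B clear: only the low 16 bits move */
    {
        uint16_t sp16 = (uint16_t)(esp - p.opsz);
        p.new_esp = (esp & 0xffff0000u) | sp16;
        p.sp = sp16;
    }
    p.width = 16 << ss_b;           /* 16 or 32 */
    return p;
}

int main(void)
{
    struct push_info p = errcode_push(0x00010004, 0 /* CS.D */, 0 /* SS.B */);
    printf("opsz=%u sp=%#x new_esp=%#x width=%u\n",
           p.opsz, (unsigned int)p.sp, (unsigned int)p.new_esp, p.width);
    return 0;
}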





* Re: [PATCH 1/3] x86/HVM: limit writes to incoming TSS during task switch
  2016-11-22 13:55 ` [PATCH 1/3] x86/HVM: limit writes to incoming TSS during task switch Jan Beulich
@ 2016-11-22 16:32   ` Andrew Cooper
  2016-11-23  8:27     ` Jan Beulich
  0 siblings, 1 reply; 12+ messages in thread
From: Andrew Cooper @ 2016-11-22 16:32 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Wei Liu

On 22/11/16 13:55, Jan Beulich wrote:
> The only field modified (and even that conditionally) is the back link.
> Write only that field, and only when it actually has been written to.
>
> Take the opportunity and also ditch the pointless initializer from the
> "tss" local variable.

It would help to point out that tss is unconditionally filled completely
from guest memory.

> Signed-off-by: Jan Beulich <jbeulich@suse.com>

As for the mechanical adjustments here, Reviewed-by: Andrew Cooper
<andrew.cooper3@citrix.com>

However, is the position of the backlink write actually correct?  I'd
have thought that all accesses to the old tss happen before switching cr3.

I can't find a useful description of the order of events in a task
switch in either manual.

~Andrew


* Re: [PATCH 2/3] x86/HVM: limit writes to outgoing TSS during task switch
  2016-11-22 13:55 ` [PATCH 2/3] x86/HVM: limit writes to outgoing " Jan Beulich
@ 2016-11-22 16:46   ` Andrew Cooper
  2016-11-23  8:30     ` Jan Beulich
  0 siblings, 1 reply; 12+ messages in thread
From: Andrew Cooper @ 2016-11-22 16:46 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Wei Liu

On 22/11/16 13:55, Jan Beulich wrote:
> The only fields modified are EIP, EFLAGS, GPRs, and segment selectors.
> CR3 in particular is not supposed to be updated.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2952,7 +2952,6 @@ void hvm_task_switch(
>      if ( taskswitch_reason == TSW_iret )
>          eflags &= ~X86_EFLAGS_NT;
>  
> -    tss.cr3    = v->arch.hvm_vcpu.guest_cr[3];
>      tss.eip    = regs->eip;
>      tss.eflags = eflags;
>      tss.eax    = regs->eax;
> @@ -2979,8 +2978,10 @@ void hvm_task_switch(
>      hvm_get_segment_register(v, x86_seg_ldtr, &segr);
>      tss.ldt = segr.sel;
>  
> -    rc = hvm_copy_to_guest_virt(
> -        prev_tr.base, &tss, sizeof(tss), PFEC_page_present);
> +    rc = hvm_copy_to_guest_virt(prev_tr.base + offsetof(typeof(tss), eip),
> +                                &tss.eip,
> +                                (void *)&tss.trace - (void *)&tss.eip,

How about offsetof(typeof(tss), trace) - offsetof(typeof(tss), eip)?

It avoids some casts and void pointer arithmetic.
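
(A quick standalone illustration of that equivalence, using a made-up
stand-in struct rather than the real one; char * is used below where the
patch does void * arithmetic, which is a gcc extension:)

#include <stddef.h>
#include <stdio.h>

struct t {   /* stand-in only, not the hvm.c structure */
    unsigned int cr3, eip, eflags;
    unsigned short es, cs, ss, ds, fs, gs, ldt;
    unsigned short trace, iomap;
};

int main(void)
{
    struct t tss;
    size_t by_ptr = (size_t)((char *)&tss.trace - (char *)&tss.eip);
    size_t by_off = offsetof(struct t, trace) - offsetof(struct t, eip);
    /* Same length either way; the offsetof form needs no casts at all. */
    printf("%zu == %zu\n", by_ptr, by_off);
    return 0;
}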

Either way, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 3/3] x86/HVM: correct error code writing during task switch
  2016-11-22 13:56 ` [PATCH 3/3] x86/HVM: correct error code writing " Jan Beulich
@ 2016-11-22 16:58   ` Andrew Cooper
  2016-11-23  8:33     ` Jan Beulich
  0 siblings, 1 reply; 12+ messages in thread
From: Andrew Cooper @ 2016-11-22 16:58 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Wei Liu

On 22/11/16 13:56, Jan Beulich wrote:
> Whether to write 32 or just 16 bits depends on the D bit of the target
> CS. The width of the stack pointer to use depends on the B bit of the
> target SS.
>
> Also avoid using the no-fault copying routine.
>
> Finally avoid using yet another struct segment_register variable here.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3033,9 +3033,6 @@ void hvm_task_switch(
>              goto out;
>      }
>  
> -    if ( (tss.trace & 1) && !exn_raised )
> -        hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
> -
>      tr.attr.fields.type = 0xb; /* busy 32-bit tss */
>      hvm_set_segment_register(v, x86_seg_tr, &tr);
>  
> @@ -3051,17 +3048,32 @@ void hvm_task_switch(
>  
>      if ( errcode >= 0 )
>      {
> -        struct segment_register reg;
>          unsigned long linear_addr;
> -        regs->esp -= 4;
> -        hvm_get_segment_register(current, x86_seg_ss, &reg);
> -        /* Todo: do not ignore access faults here. */
> -        if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp,
> -                                        4, hvm_access_write, 32,
> +        unsigned int opsz, sp;
> +
> +        hvm_get_segment_register(current, x86_seg_cs, &segr);

You already have current latched in v at this point.

Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

> +        opsz = segr.attr.fields.db ? 4 : 2;
> +        hvm_get_segment_register(current, x86_seg_ss, &segr);
> +        if ( segr.attr.fields.db )
> +            sp = regs->_esp -= opsz;
> +        else
> +            sp = *(uint16_t *)&regs->esp -= opsz;
> +        if ( hvm_virtual_to_linear_addr(x86_seg_ss, &segr, sp, opsz,
> +                                        hvm_access_write,
> +                                        16 << segr.attr.fields.db,
>                                          &linear_addr) )
> -            hvm_copy_to_guest_virt_nofault(linear_addr, &errcode, 4, 0);
> +        {
> +            rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0);
> +            if ( rc == HVMCOPY_bad_gva_to_gfn )
> +                exn_raised = 1;
> +            else if ( rc != HVMCOPY_okay )
> +                goto out;
> +        }
>      }
>  
> +    if ( (tss.trace & 1) && !exn_raised )
> +        hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
> +
>   out:
>      hvm_unmap_entry(optss_desc);
>      hvm_unmap_entry(nptss_desc);
>
>
>



* Re: [PATCH 1/3] x86/HVM: limit writes to incoming TSS during task switch
  2016-11-22 16:32   ` Andrew Cooper
@ 2016-11-23  8:27     ` Jan Beulich
  2016-11-23 10:59       ` Andrew Cooper
  0 siblings, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2016-11-23  8:27 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel, Wei Liu

>>> On 22.11.16 at 17:32, <andrew.cooper3@citrix.com> wrote:
> On 22/11/16 13:55, Jan Beulich wrote:
>> The only field modified (and even that conditionally) is the back link.
>> Write only that field, and only when it actually has been written to.
>>
>> Take the opportunity and also ditch the pointless initializer from the
>> "tss" local variable.
> 
> It would help to point out that tss is unconditionally filled completely
> from guest memory.
> 
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> As for the mechanical adjustments here, Reviewed-by: Andrew Cooper
> <andrew.cooper3@citrix.com>
> 
> However, is the position of the backlink write actually correct?  I'd
> have thought that all access to the old tss happen before switching cr3.

But the backlink gets written into the incoming TSS. And I think it
is being assumed anyway that both TSSes (just like the GDT) are
visible through either CR3, especially since the incoming TSS
necessarily is being read in the old address space context.

Jan



* Re: [PATCH 2/3] x86/HVM: limit writes to outgoing TSS during task switch
  2016-11-22 16:46   ` Andrew Cooper
@ 2016-11-23  8:30     ` Jan Beulich
  0 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2016-11-23  8:30 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel, Wei Liu

>>> On 22.11.16 at 17:46, <andrew.cooper3@citrix.com> wrote:
> On 22/11/16 13:55, Jan Beulich wrote:
>> The only fields modified are EIP, EFLAGS, GPRs, and segment selectors.
>> CR3 in particular is not supposed to be updated.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -2952,7 +2952,6 @@ void hvm_task_switch(
>>      if ( taskswitch_reason == TSW_iret )
>>          eflags &= ~X86_EFLAGS_NT;
>>  
>> -    tss.cr3    = v->arch.hvm_vcpu.guest_cr[3];
>>      tss.eip    = regs->eip;
>>      tss.eflags = eflags;
>>      tss.eax    = regs->eax;
>> @@ -2979,8 +2978,10 @@ void hvm_task_switch(
>>      hvm_get_segment_register(v, x86_seg_ldtr, &segr);
>>      tss.ldt = segr.sel;
>>  
>> -    rc = hvm_copy_to_guest_virt(
>> -        prev_tr.base, &tss, sizeof(tss), PFEC_page_present);
>> +    rc = hvm_copy_to_guest_virt(prev_tr.base + offsetof(typeof(tss), eip),
>> +                                &tss.eip,
>> +                                (void *)&tss.trace - (void *)&tss.eip,
> 
> How about offsetof(typeof(tss), trace) - offsetof(typeof(tss), eip)?
> 
> It avoids some casts and void pointer arithmetic.

Oh, yes, that's better.

> Either way, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Thanks, Jan



* Re: [PATCH 3/3] x86/HVM: correct error code writing during task switch
  2016-11-22 16:58   ` Andrew Cooper
@ 2016-11-23  8:33     ` Jan Beulich
  0 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2016-11-23  8:33 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel, Wei Liu

>>> On 22.11.16 at 17:58, <andrew.cooper3@citrix.com> wrote:
> On 22/11/16 13:56, Jan Beulich wrote:
>> Whether to write 32 or just 16 bits depends on the D bit of the target
>> CS. The width of the stack pointer to use depends on the B bit of the
>> target SS.
>>
>> Also avoid using the no-fault copying routine.
>>
>> Finally avoid using yet another struct segment_register variable here.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -3033,9 +3033,6 @@ void hvm_task_switch(
>>              goto out;
>>      }
>>  
>> -    if ( (tss.trace & 1) && !exn_raised )
>> -        hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
>> -
>>      tr.attr.fields.type = 0xb; /* busy 32-bit tss */
>>      hvm_set_segment_register(v, x86_seg_tr, &tr);
>>  
>> @@ -3051,17 +3048,32 @@ void hvm_task_switch(
>>  
>>      if ( errcode >= 0 )
>>      {
>> -        struct segment_register reg;
>>          unsigned long linear_addr;
>> -        regs->esp -= 4;
>> -        hvm_get_segment_register(current, x86_seg_ss, &reg);
>> -        /* Todo: do not ignore access faults here. */
>> -        if ( hvm_virtual_to_linear_addr(x86_seg_ss, &reg, regs->esp,
>> -                                        4, hvm_access_write, 32,
>> +        unsigned int opsz, sp;
>> +
>> +        hvm_get_segment_register(current, x86_seg_cs, &segr);
> 
> You already have current latched in v at this point.

Oh, right - I've blindly modified the old function invocation.

> Otherwise, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Thanks, Jan



* Re: [PATCH 1/3] x86/HVM: limit writes to incoming TSS during task switch
  2016-11-23  8:27     ` Jan Beulich
@ 2016-11-23 10:59       ` Andrew Cooper
  0 siblings, 0 replies; 12+ messages in thread
From: Andrew Cooper @ 2016-11-23 10:59 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Wei Liu

On 23/11/16 08:27, Jan Beulich wrote:
>>>> On 22.11.16 at 17:32, <andrew.cooper3@citrix.com> wrote:
>> On 22/11/16 13:55, Jan Beulich wrote:
>>> The only field modified (and even that conditionally) is the back link.
>>> Write only that field, and only when it actually has been written to.
>>>
>>> Take the opportunity and also ditch the pointless initializer from the
>>> "tss" local variable.
>> It would help to point out that tss is unconditionally filled completely
>> from guest memory.
>>
>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> As for the mechanical adjustments here, Reviewed-by: Andrew Cooper
>> <andrew.cooper3@citrix.com>
>>
>> However, is the position of the backlink write actually correct?  I'd
>> have thought that all accesses to the old tss happen before switching cr3.
> But the backlink gets written into the incoming TSS.

Ah - of course it does.  I was getting confused by the repeated use of
the tss structure.  Sorry for the noise.

~Andrew


* Re: [PATCH 0/3] x86/HVM: XSA-192 follow-ups
  2016-11-22 13:51 [PATCH 0/3] x86/HVM: XSA-192 follow-ups Jan Beulich
                   ` (2 preceding siblings ...)
  2016-11-22 13:56 ` [PATCH 3/3] x86/HVM: correct error code writing " Jan Beulich
@ 2016-11-23 14:14 ` Wei Liu
  3 siblings, 0 replies; 12+ messages in thread
From: Wei Liu @ 2016-11-23 14:14 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Wei Liu, Andrew Cooper

On Tue, Nov 22, 2016 at 06:51:27AM -0700, Jan Beulich wrote:
> 1: limit writes to incoming TSS during task switch
> 2: limit writes to outgoing TSS during task switch
> 3: correct error code writing during task switch
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Release-acked-by: Wei Liu <wei.liu2@citrix.com>


Thread overview: 12+ messages
2016-11-22 13:51 [PATCH 0/3] x86/HVM: XSA-192 follow-ups Jan Beulich
2016-11-22 13:55 ` [PATCH 1/3] x86/HVM: limit writes to incoming TSS during task switch Jan Beulich
2016-11-22 16:32   ` Andrew Cooper
2016-11-23  8:27     ` Jan Beulich
2016-11-23 10:59       ` Andrew Cooper
2016-11-22 13:55 ` [PATCH 2/3] x86/HVM: limit writes to outgoing " Jan Beulich
2016-11-22 16:46   ` Andrew Cooper
2016-11-23  8:30     ` Jan Beulich
2016-11-22 13:56 ` [PATCH 3/3] x86/HVM: correct error code writing " Jan Beulich
2016-11-22 16:58   ` Andrew Cooper
2016-11-23  8:33     ` Jan Beulich
2016-11-23 14:14 ` [PATCH 0/3] x86/HVM: XSA-192 follow-ups Wei Liu
