* [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations
@ 2017-07-10 10:39 Jan Beulich
  2017-08-10  7:19 ` Ping: " Jan Beulich
  2017-09-05 12:26 ` Andrew Cooper
  0 siblings, 2 replies; 9+ messages in thread
From: Jan Beulich @ 2017-07-10 10:39 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper

Real hardware wraps silently in most cases, so we should behave the
same. Also split real and VM86 mode handling, as the latter really
ought to have limit checks applied.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Restore 32-bit wrap check for AMD.
v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
    is_canonical_address() invocation.
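
Purely for illustration (the helper names below are invented, and the
protected / long mode paths are omitted): a stand-alone sketch of the
real / VM86 split this patch introduces, usable to poke at boundary
cases with concrete numbers.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative model only - not Xen code; the names are made up. */

/* REAL MODE: no checks at all ('bytes' deliberately unused); the
 * linear address simply wraps at 2^32. */
static bool v2l_real(uint32_t base, uint32_t offset, unsigned int bytes,
                     uint32_t *linear)
{
    *linear = base + offset;
    return true;
}

/* VM86 MODE: first and last byte must both lie below the 64k limit. */
static bool v2l_vm86(uint32_t base, uint32_t offset, unsigned int bytes,
                     uint32_t *linear)
{
    uint32_t last_byte = offset + bytes - !!bytes;

    if ( offset > 0xffff || last_byte > 0xffff )
        return false;            /* caller raises #GP (or #SS for stack) */

    *linear = base + offset;     /* still wraps at 2^32, like hardware */
    return true;
}

E.g. v2l_vm86(0, 0xffff, 2, &lin) now fails the limit check, while
v2l_real(0, 0xfffffffc, 8, &lin) silently yields lin == 0xfffffffc with
the last byte wrapping to 3, instead of raising #GP as before. As for
the single is_canonical_address() check in the long mode path: with
bit 63 of addr clear, last_byte cannot wrap past 2^64, so a canonical
last_byte implies a canonical addr; with bit 63 set, a canonical addr
means last_byte can only leave the canonical range by wrapping past
2^64 - and that wrap is precisely what this patch stops converting
into a fault.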

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2416,16 +2416,21 @@ bool_t hvm_virtual_to_linear_addr(
      */
     ASSERT(seg < x86_seg_none);
 
-    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
-         (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
+    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
     {
         /*
-         * REAL/VM86 MODE: Don't bother with segment access checks.
+         * REAL MODE: Don't bother with segment access checks.
          * Certain of them are not done in native real mode anyway.
          */
         addr = (uint32_t)(addr + reg->base);
-        last_byte = (uint32_t)addr + bytes - !!bytes;
-        if ( last_byte < addr )
+    }
+    else if ( (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) &&
+              is_x86_user_segment(seg) )
+    {
+        /* VM86 MODE: Fixed 64k limits on all user segments. */
+        addr = (uint32_t)(addr + reg->base);
+        last_byte = (uint32_t)offset + bytes - !!bytes;
+        if ( max(offset, last_byte) >> 16 )
             goto out;
     }
     else if ( hvm_long_mode_active(curr) &&
@@ -2447,8 +2452,7 @@ bool_t hvm_virtual_to_linear_addr(
             addr += reg->base;
 
         last_byte = addr + bytes - !!bytes;
-        if ( !is_canonical_address(addr) || last_byte < addr ||
-             !is_canonical_address(last_byte) )
+        if ( !is_canonical_address((long)addr < 0 ? addr : last_byte) )
             goto out;
     }
     else
@@ -2498,8 +2502,11 @@ bool_t hvm_virtual_to_linear_addr(
             if ( (offset <= reg->limit) || (last_byte < offset) )
                 goto out;
         }
-        else if ( (last_byte > reg->limit) || (last_byte < offset) )
-            goto out; /* last byte is beyond limit or wraps 0xFFFFFFFF */
+        else if ( last_byte > reg->limit )
+            goto out; /* last byte is beyond limit */
+        else if ( last_byte < offset &&
+                  curr->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD )
+            goto out; /* access wraps */
     }
 
     /* All checks ok. */




* Ping: [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations
  2017-07-10 10:39 [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations Jan Beulich
@ 2017-08-10  7:19 ` Jan Beulich
  2017-08-25 14:59   ` Ping#2: " Jan Beulich
  2017-12-04 10:16   ` Ping#3: " Jan Beulich
  2017-09-05 12:26 ` Andrew Cooper
  1 sibling, 2 replies; 9+ messages in thread
From: Jan Beulich @ 2017-08-10  7:19 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel

>>> On 10.07.17 at 12:39, <JBeulich@suse.com> wrote:
> Real hardware wraps silently in most cases, so we should behave the
> same. Also split real and VM86 mode handling, as the latter really
> ought to have limit checks applied.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v3: Restore 32-bit wrap check for AMD.
> v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
>     is_canonical_address() invocation.
> 
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2416,16 +2416,21 @@ bool_t hvm_virtual_to_linear_addr(
>       */
>      ASSERT(seg < x86_seg_none);
>  
> -    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
> -         (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
> +    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
>      {
>          /*
> -         * REAL/VM86 MODE: Don't bother with segment access checks.
> +         * REAL MODE: Don't bother with segment access checks.
>           * Certain of them are not done in native real mode anyway.
>           */
>          addr = (uint32_t)(addr + reg->base);
> -        last_byte = (uint32_t)addr + bytes - !!bytes;
> -        if ( last_byte < addr )
> +    }
> +    else if ( (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) &&
> +              is_x86_user_segment(seg) )
> +    {
> +        /* VM86 MODE: Fixed 64k limits on all user segments. */
> +        addr = (uint32_t)(addr + reg->base);
> +        last_byte = (uint32_t)offset + bytes - !!bytes;
> +        if ( max(offset, last_byte) >> 16 )
>              goto out;
>      }
>      else if ( hvm_long_mode_active(curr) &&
> @@ -2447,8 +2452,7 @@ bool_t hvm_virtual_to_linear_addr(
>              addr += reg->base;
>  
>          last_byte = addr + bytes - !!bytes;
> -        if ( !is_canonical_address(addr) || last_byte < addr ||
> -             !is_canonical_address(last_byte) )
> +        if ( !is_canonical_address((long)addr < 0 ? addr : last_byte) )
>              goto out;
>      }
>      else
> @@ -2498,8 +2502,11 @@ bool_t hvm_virtual_to_linear_addr(
>              if ( (offset <= reg->limit) || (last_byte < offset) )
>                  goto out;
>          }
> -        else if ( (last_byte > reg->limit) || (last_byte < offset) )
> -            goto out; /* last byte is beyond limit or wraps 0xFFFFFFFF */
> +        else if ( last_byte > reg->limit )
> +            goto out; /* last byte is beyond limit */
> +        else if ( last_byte < offset &&
> +                  curr->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD )
> +            goto out; /* access wraps */
>      }
>  
>      /* All checks ok. */





* Ping#2: [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations
  2017-08-10  7:19 ` Ping: " Jan Beulich
@ 2017-08-25 14:59   ` Jan Beulich
  2017-12-04 10:16   ` Ping#3: " Jan Beulich
  1 sibling, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2017-08-25 14:59 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel

>>> On 10.08.17 at 09:19, <JBeulich@suse.com> wrote:
>>>> On 10.07.17 at 12:39, <JBeulich@suse.com> wrote:
>> Real hardware wraps silently in most cases, so we should behave the
>> same. Also split real and VM86 mode handling, as the latter really
>> ought to have limit checks applied.
>> 
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> v3: Restore 32-bit wrap check for AMD.
>> v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
>>     is_canonical_address() invocation.
>> 
>> --- a/xen/arch/x86/hvm/hvm.c
>> +++ b/xen/arch/x86/hvm/hvm.c
>> @@ -2416,16 +2416,21 @@ bool_t hvm_virtual_to_linear_addr(
>>       */
>>      ASSERT(seg < x86_seg_none);
>>  
>> -    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
>> -         (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
>> +    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
>>      {
>>          /*
>> -         * REAL/VM86 MODE: Don't bother with segment access checks.
>> +         * REAL MODE: Don't bother with segment access checks.
>>           * Certain of them are not done in native real mode anyway.
>>           */
>>          addr = (uint32_t)(addr + reg->base);
>> -        last_byte = (uint32_t)addr + bytes - !!bytes;
>> -        if ( last_byte < addr )
>> +    }
>> +    else if ( (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) &&
>> +              is_x86_user_segment(seg) )
>> +    {
>> +        /* VM86 MODE: Fixed 64k limits on all user segments. */
>> +        addr = (uint32_t)(addr + reg->base);
>> +        last_byte = (uint32_t)offset + bytes - !!bytes;
>> +        if ( max(offset, last_byte) >> 16 )
>>              goto out;
>>      }
>>      else if ( hvm_long_mode_active(curr) &&
>> @@ -2447,8 +2452,7 @@ bool_t hvm_virtual_to_linear_addr(
>>              addr += reg->base;
>>  
>>          last_byte = addr + bytes - !!bytes;
>> -        if ( !is_canonical_address(addr) || last_byte < addr ||
>> -             !is_canonical_address(last_byte) )
>> +        if ( !is_canonical_address((long)addr < 0 ? addr : last_byte) )
>>              goto out;
>>      }
>>      else
>> @@ -2498,8 +2502,11 @@ bool_t hvm_virtual_to_linear_addr(
>>              if ( (offset <= reg->limit) || (last_byte < offset) )
>>                  goto out;
>>          }
>> -        else if ( (last_byte > reg->limit) || (last_byte < offset) )
>> -            goto out; /* last byte is beyond limit or wraps 0xFFFFFFFF */
>> +        else if ( last_byte > reg->limit )
>> +            goto out; /* last byte is beyond limit */
>> +        else if ( last_byte < offset &&
>> +                  curr->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD )
>> +            goto out; /* access wraps */
>>      }
>>  
>>      /* All checks ok. */




* Re: [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations
  2017-07-10 10:39 [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations Jan Beulich
  2017-08-10  7:19 ` Ping: " Jan Beulich
@ 2017-09-05 12:26 ` Andrew Cooper
  2017-09-05 13:30   ` Jan Beulich
  1 sibling, 1 reply; 9+ messages in thread
From: Andrew Cooper @ 2017-09-05 12:26 UTC (permalink / raw)
  To: Jan Beulich, xen-devel

On 10/07/17 11:39, Jan Beulich wrote:
> Real hardware wraps silently in most cases, so we should behave the
> same. Also split real and VM86 mode handling, as the latter really
> ought to have limit checks applied.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

The change looks ok, but this is a subtle adjustment with a lot of
changes in boundary cases.

ISTR you had an XTF test for some of these?  I'd feel rather more
confident if we could get that into automation.

~Andrew


* Re: [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations
  2017-09-05 12:26 ` Andrew Cooper
@ 2017-09-05 13:30   ` Jan Beulich
  0 siblings, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2017-09-05 13:30 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel

>>> On 05.09.17 at 14:26, <andrew.cooper3@citrix.com> wrote:
> On 10/07/17 11:39, Jan Beulich wrote:
>> Real hardware wraps silently in most cases, so we should behave the
>> same. Also split real and VM86 mode handling, as the latter really
>> ought to have limit checks applied.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> The change looks ok, but this is a subtle adjustment with a lot of
> changes in boundary cases.
> 
> ISTR you had an XTF test for some of these?  I'd feel rather more
> confident if we could get that into automation.

Yes, that was a test you had handed to me, which I then extended
and handed back to you ("Compatibility mode LLDT/LTR testing"). I
still have it, but it is still in the raw shape it was in back then
(i.e. unlikely to be ready to go in).

Jan



* Ping#3: [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations
  2017-08-10  7:19 ` Ping: " Jan Beulich
  2017-08-25 14:59   ` Ping#2: " Jan Beulich
@ 2017-12-04 10:16   ` Jan Beulich
  2017-12-04 16:39     ` Andrew Cooper
  1 sibling, 1 reply; 9+ messages in thread
From: Jan Beulich @ 2017-12-04 10:16 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel

>>> On 25.08.17 at 16:59,  wrote:
>>>> On 10.08.17 at 09:19, <JBeulich@suse.com> wrote:
> >>>> On 10.07.17 at 12:39, <JBeulich@suse.com> wrote:
> >> Real hardware wraps silently in most cases, so we should behave the
> >> same. Also split real and VM86 mode handling, as the latter really
> >> ought to have limit checks applied.
> >> 
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> ---
> >> v3: Restore 32-bit wrap check for AMD.
> >> v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
> >>     is_canonical_address() invocation.

Same here - I think I've been carrying this for long enough.

Jan

> >> --- a/xen/arch/x86/hvm/hvm.c
> >> +++ b/xen/arch/x86/hvm/hvm.c
> >> @@ -2416,16 +2416,21 @@ bool_t hvm_virtual_to_linear_addr(
> >>       */
> >>      ASSERT(seg < x86_seg_none);
> >>  
> >> -    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) ||
> >> -         (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
> >> +    if ( !(curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
> >>      {
> >>          /*
> >> -         * REAL/VM86 MODE: Don't bother with segment access checks.
> >> +         * REAL MODE: Don't bother with segment access checks.
> >>           * Certain of them are not done in native real mode anyway.
> >>           */
> >>          addr = (uint32_t)(addr + reg->base);
> >> -        last_byte = (uint32_t)addr + bytes - !!bytes;
> >> -        if ( last_byte < addr )
> >> +    }
> >> +    else if ( (guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) &&
> >> +              is_x86_user_segment(seg) )
> >> +    {
> >> +        /* VM86 MODE: Fixed 64k limits on all user segments. */
> >> +        addr = (uint32_t)(addr + reg->base);
> >> +        last_byte = (uint32_t)offset + bytes - !!bytes;
> >> +        if ( max(offset, last_byte) >> 16 )
> >>              goto out;
> >>      }
> >>      else if ( hvm_long_mode_active(curr) &&
> >> @@ -2447,8 +2452,7 @@ bool_t hvm_virtual_to_linear_addr(
> >>              addr += reg->base;
> >>  
> >>          last_byte = addr + bytes - !!bytes;
> >> -        if ( !is_canonical_address(addr) || last_byte < addr ||
> >> -             !is_canonical_address(last_byte) )
> >> +        if ( !is_canonical_address((long)addr < 0 ? addr : last_byte) )
> >>              goto out;
> >>      }
> >>      else
> >> @@ -2498,8 +2502,11 @@ bool_t hvm_virtual_to_linear_addr(
> >>              if ( (offset <= reg->limit) || (last_byte < offset) )
> >>                  goto out;
> >>          }
> >> -        else if ( (last_byte > reg->limit) || (last_byte < offset) )
> >> -            goto out; /* last byte is beyond limit or wraps 0xFFFFFFFF */
> >> +        else if ( last_byte > reg->limit )
> >> +            goto out; /* last byte is beyond limit */
> >> +        else if ( last_byte < offset &&
> >> +                  curr->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD )
> >> +            goto out; /* access wraps */
> >>      }
> >>  
> >>      /* All checks ok. */
> 
> 





* Re: Ping#3: [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations
  2017-12-04 10:16   ` Ping#3: " Jan Beulich
@ 2017-12-04 16:39     ` Andrew Cooper
  2017-12-06  7:44       ` Jan Beulich
  2018-02-28 14:51       ` Ping: " Jan Beulich
  0 siblings, 2 replies; 9+ messages in thread
From: Andrew Cooper @ 2017-12-04 16:39 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel

On 04/12/17 10:16, Jan Beulich wrote:
>>>> On 25.08.17 at 16:59,  wrote:
>>>>> On 10.08.17 at 09:19, <JBeulich@suse.com> wrote:
>>>>>> On 10.07.17 at 12:39, <JBeulich@suse.com> wrote:
>>>> Real hardware wraps silently in most cases, so we should behave the
>>>> same. Also split real and VM86 mode handling, as the latter really
>>>> ought to have limit checks applied.
>>>>
>>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>>> ---
>>>> v3: Restore 32-bit wrap check for AMD.
>>>> v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
>>>>     is_canonical_address() invocation.
> Same here - I think I've been carrying this for long enough.

I'm not sure what to say.  I'm not comfortable taking this change
without a regression test in place, which also serves to demonstrate the
correctness of the change.

It's simply a matter of time, not any other objection to the change.

~Andrew


* Re: Ping#3: [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations
  2017-12-04 16:39     ` Andrew Cooper
@ 2017-12-06  7:44       ` Jan Beulich
  2018-02-28 14:51       ` Ping: " Jan Beulich
  1 sibling, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2017-12-06  7:44 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel

>>> On 04.12.17 at 17:39, <andrew.cooper3@citrix.com> wrote:
> On 04/12/17 10:16, Jan Beulich wrote:
>>>>> On 25.08.17 at 16:59,  wrote:
>>>>>> On 10.08.17 at 09:19, <JBeulich@suse.com> wrote:
>>>>>>> On 10.07.17 at 12:39, <JBeulich@suse.com> wrote:
>>>>> Real hardware wraps silently in most cases, so we should behave the
>>>>> same. Also split real and VM86 mode handling, as the latter really
>>>>> ought to have limit checks applied.
>>>>>
>>>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>>>>> ---
>>>>> v3: Restore 32-bit wrap check for AMD.
>>>>> v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
>>>>>     is_canonical_address() invocation.
>> Same here - I think I've been carrying this for long enough.
> 
> I'm not sure what to say.  I'm not comfortable taking this change
> without a regression test in place, which also serves to demonstrate the
> correctness of the change.
> 
> It's simply a matter of time, not any other objection to the change.

Well, I had sent you a tentative XTF test long ago (non-publicly
at the time, I believe). Here it is again. I'll send a second change
in a minute, which iirc is necessary as a prereq to the one here.

Jan

add split memory access tests

Add tests to verify that accesses crossing the upper address boundary
are being handled similarly with and without the emulator involved.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Use FS overrides. Add 64-bit and PV tests. Remove stray '-s from
    log messages. Add "X" (ex_record_fault_eax) constraints.

--- /dev/null
+++ b/tests/split-access/Makefile
@@ -0,0 +1,9 @@
+include $(ROOT)/build/common.mk
+
+NAME      := split-access
+CATEGORY  := functional
+TEST-ENVS := $(ALL_ENVIRONMENTS)
+
+obj-perenv += main.o
+
+include $(ROOT)/build/gen.mk
--- /dev/null
+++ b/tests/split-access/main.c
@@ -0,0 +1,251 @@
+/**
+ * @file tests/split-access/main.c
+ * @ref test-split-access
+ *
+ * @page test-split-access split-access
+ *
+ * @todo Docs for test-split-access
+ *
+ * @see tests/split-access/main.c
+ */
+#include <xtf.h>
+
+#include <arch/decode.h>
+#include <arch/pagetable.h>
+
+const char test_title[] = "Split memory access insns";
+
+const void *volatile boundary = NULL;
+
+/* Keep the compiler from leveraging undefined behavior. */
+#define touch(x) ({ asm ( "" : "+g" (x) ); })
+
+void do_mov(bool force)
+{
+    const unsigned long *ptr = boundary;
+
+    touch(ptr);
+    for ( --ptr; ; )
+    {
+        unsigned long val;
+        exinfo_t fault = 0;
+
+        asm volatile ( "test %[fep], %[fep];"
+                       "jz 1f;"
+                       _ASM_XEN_FEP
+                       "1: mov %%fs:%[src],%[dst]; 2:"
+                       _ASM_EXTABLE_HANDLER(1b, 2b, ex_record_fault_eax)
+                       : [dst] "=r" (val), "+a" (fault)
+                       : [src] "m" (*ptr), [fep] "q" (force),
+                         "X" (ex_record_fault_eax) );
+        if ( fault )
+            xtf_warning("Got %pe for %p\n", _p(fault), ptr);
+        else if ( val != *ptr )
+            xtf_failure("%lx != %lx for %p\n", val, *ptr, ptr);
+
+        touch(ptr);
+        if ( ptr == boundary )
+            break;
+
+        ptr = (void *)(long)ptr + 1;
+    }
+}
+
+void do_lfs(bool force)
+{
+    const struct __packed { unsigned long off; uint16_t sel; } *ptr = boundary;
+
+    touch(ptr);
+    for ( --ptr; ; )
+    {
+        unsigned long off;
+        exinfo_t fault = 0;
+
+        asm volatile ( "test %[fep], %[fep];"
+                       "jz 1f;"
+                       _ASM_XEN_FEP
+                       "1: lfs %%fs:%[src],%[dst]; 2:"
+                       _ASM_EXTABLE_HANDLER(1b, 2b, ex_record_fault_eax)
+                       : [dst] "=r" (off), "+a" (fault)
+                       : [src] "m" (*ptr), [fep] "q" (force),
+                         "X" (ex_record_fault_eax) );
+        if ( fault )
+            xtf_warning("Got %pe for %p\n", _p(fault), ptr);
+        else if ( off != ptr->off )
+            xtf_failure("%lx != %lx for %p\n", off, ptr->off, ptr);
+
+        touch(ptr);
+        if ( ptr == boundary )
+            break;
+
+        ptr = (void *)(long)ptr + 1;
+    }
+}
+
+#ifdef CONFIG_HVM
+void do_lidt(bool force)
+{
+    const desc_ptr *ptr = boundary;
+
+    touch(ptr);
+    for ( --ptr; ; )
+    {
+        exinfo_t fault = 0;
+
+        asm volatile ( "test %[fep], %[fep];"
+                       "jz 1f;"
+                       _ASM_XEN_FEP
+                       "1: lidt %%fs:%[src]; 2:"
+                       _ASM_EXTABLE_HANDLER(1b, 2b, ex_record_fault_eax)
+                       : "+a" (fault)
+                       : [src] "m" (*ptr), [fep] "q" (force),
+                         "X" (ex_record_fault_eax) );
+        if ( fault )
+            xtf_warning("Got %pe for %p\n", _p(fault), ptr);
+        else
+            asm volatile ( "lidt %0" :: "m" (idt_ptr) );
+
+        touch(ptr);
+        if ( ptr == boundary )
+            break;
+
+        ptr = (void *)(long)ptr + 1;
+    }
+}
+#endif
+
+#ifndef __x86_64__
+void do_bound(bool force)
+{
+    const struct { unsigned long lo, hi; } *ptr = boundary;
+
+    touch(ptr);
+    for ( --ptr; ; )
+    {
+        exinfo_t fault = 0;
+
+        asm volatile ( "test %[fep], %[fep];"
+                       "jz 1f;"
+                       _ASM_XEN_FEP
+                       "1: bound %[off], %%fs:%[bnd]; 2:"
+                       _ASM_EXTABLE_HANDLER(1b, 2b, ex_record_fault_eax)
+                       : "+a" (fault)
+                       : [bnd] "m" (*ptr), [off] "r" (0), [fep] "q" (force),
+                         "X" (ex_record_fault_eax) );
+        if ( fault )
+            xtf_warning("Got %pe for %p\n", _p(fault), ptr);
+
+        touch(ptr);
+        if ( ptr == boundary )
+            break;
+
+        ptr = (void *)(long)ptr + 1;
+    }
+}
+#endif
+
+void run_tests(bool force)
+{
+    printk("Testing%s MOV\n", force ? " emulated" : "");
+    do_mov(force);
+
+    printk("Testing%s LFS\n", force ? " emulated" : "");
+    do_lfs(force);
+
+#ifdef CONFIG_HVM
+    printk("Testing%s LIDT\n", force ? " emulated" : "");
+    do_lidt(force);
+#endif
+
+#ifndef __x86_64__
+    printk("Testing%s BOUND\n", force ? " emulated" : "");
+    do_bound(force);
+#endif
+}
+
+void test_main(void)
+{
+#if defined(__x86_64__)
+    if ( !boundary )
+    {
+        asm volatile ( "push $0; pop %%fs" ::: "memory" );
+
+# if CONFIG_PAGING_LEVELS == 4
+        boundary = (void *)(1L << 47);
+# elif CONFIG_PAGING_LEVELS == 5
+        boundary = (void *)(1L << 56);
+# else
+#  error Unknown 64-bit paging mode!
+# endif
+        printk("Testing at lower canonical boundary\n");
+        test_main();
+
+        boundary = NULL;
+        printk("Testing at upper address boundary\n");
+    }
+#elif defined(CONFIG_PV)
+    /* Shrink %fs limit to below the compat limit. */
+    static struct seg_desc32 __page_aligned_data desc[] = {
+        [1] = {
+            .limit0 = 0x4fff, .limit1 = 0xf, .g = 1,
+            .p = 1, .s = 1, .type = 3, .dpl = 1,
+        },
+    };
+    unsigned long frame = virt_to_mfn(desc);
+    int rc;
+
+    rc = hypercall_update_va_mapping((unsigned long)desc,
+                                     pte_from_gfn(frame,
+                                                  _PAGE_PRESENT|_PAGE_AD),
+                                     0);
+    if ( !rc )
+        rc = HYPERCALL2(int, __HYPERVISOR_set_gdt, &frame, ARRAY_SIZE(desc));
+    if ( rc )
+    {
+        xtf_error("Cannot set GDT entry: %d\n", rc);
+        return;
+    }
+
+    asm volatile ( "mov %1, %%fs; lsl %1, %0"
+                   : "=r" (boundary)
+                   : "r" (sizeof(*desc) | 1)
+                   : "memory" );
+#else
+    /*
+     * To better tell actual hardware behavior, zap the mapping for the last
+     * (large) page below 4Gb. That'll make us see page faults on hardware
+     * when all segmentation checks pass, rather than observing #GP/#SS due to
+     * the emulator being invoked anyway due to accesses touching an unmapped
+     * MMIO range. This matches x86-64 behavior at the 2^^64 boundary.
+     */
+# if CONFIG_PAGING_LEVELS == 2
+    pse_l2_identmap[pse_l2_table_offset(~0UL)] = 0;
+# elif CONFIG_PAGING_LEVELS == 3
+    pae_l2_identmap[pae_l2_table_offset(~0UL)] = 0;
+# elif CONFIG_PAGING_LEVELS
+#  error Unknown 32-bit paging mode!
+# endif
+
+    invlpg((void *)~0UL);
+    asm volatile ( "push %%ds; pop %%fs" ::: "memory" );
+#endif
+
+    run_tests(false);
+
+    if ( !xtf_has_fep )
+        xtf_skip("FEP support not detected - some tests will be skipped\n");
+    else
+        run_tests(true);
+
+    xtf_success(NULL);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */




* Ping: Ping#3: [PATCH v3] x86/HVM: don't #GP/#SS on wrapping virt->linear translations
  2017-12-04 16:39     ` Andrew Cooper
  2017-12-06  7:44       ` Jan Beulich
@ 2018-02-28 14:51       ` Jan Beulich
  1 sibling, 0 replies; 9+ messages in thread
From: Jan Beulich @ 2018-02-28 14:51 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel

>>> On 06.12.17 at 08:44,  wrote:
>>>> On 04.12.17 at 17:39, <andrew.cooper3@citrix.com> wrote:
> > On 04/12/17 10:16, Jan Beulich wrote:
> >>>>> On 25.08.17 at 16:59,  wrote:
> >>>>>> On 10.08.17 at 09:19, <JBeulich@suse.com> wrote:
> >>>>>>> On 10.07.17 at 12:39, <JBeulich@suse.com> wrote:
> >>>>> Real hardware wraps silently in most cases, so we should behave the
> >>>>> same. Also split real and VM86 mode handling, as the latter really
> >>>>> ought to have limit checks applied.
> >>>>>
> >>>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >>>>> ---
> >>>>> v3: Restore 32-bit wrap check for AMD.
> >>>>> v2: Extend to non-64-bit modes. Reduce 64-bit check to a single
> >>>>>     is_canonical_address() invocation.
> >> Same here - I think I've been carrying this for long enough.
> > 
> > I'm not sure what to say.  I'm not comfortable taking this change
> > without a regression test in place, which also serves to demonstrate the
> > correctness of the change.
> > 
> > It's simply a matter of time, not any other objection to the change.
> 
> Well, I had sent you a tentative XTF test long ago (non-publicly
> at the time, I believe). Here it is again. I'll send a second change
> in a minute, which iirc is necessary as a prereq to the one here.
> 
> Jan

Even if hopefully no-one will exercise the cases we currently get
wrong, I'd still like to re-raise the fact that the original bug fix
in this thread has been pending for a really long time, and that this
XTF test was sent almost three months ago by now.

Jan

> [...]



