* [PATCH] x86: fold code in load_segments()
@ 2016-09-14 15:24 Jan Beulich
  2016-09-14 17:12 ` Andrew Cooper
  0 siblings, 1 reply; 4+ messages in thread
From: Jan Beulich @ 2016-09-14 15:24 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper

No need to have the same logic twice.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1745,22 +1745,22 @@ static void load_segments(struct vcpu *n
             (unsigned long *)pv->kernel_sp;
         unsigned long cs_and_mask, rflags;
 
+        /* Fold upcall mask and architectural IOPL into RFLAGS.IF. */
+        rflags  = regs->rflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
+        rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
+        if ( VM_ASSIST(n->domain, architectural_iopl) )
+            rflags |= n->arch.pv_vcpu.iopl;
+
         if ( is_pv_32bit_vcpu(n) )
         {
             unsigned int *esp = ring_1(regs) ?
                                 (unsigned int *)regs->rsp :
                                 (unsigned int *)pv->kernel_sp;
-            unsigned int cs_and_mask, eflags;
             int ret = 0;
 
             /* CS longword also contains full evtchn_upcall_mask. */
             cs_and_mask = (unsigned short)regs->cs |
                 ((unsigned int)vcpu_info(n, evtchn_upcall_mask) << 16);
-            /* Fold upcall mask into RFLAGS.IF. */
-            eflags  = regs->_eflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
-            eflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
-            if ( VM_ASSIST(n->domain, architectural_iopl) )
-                eflags |= n->arch.pv_vcpu.iopl;
 
             if ( !ring_1(regs) )
             {
@@ -1770,7 +1770,7 @@ static void load_segments(struct vcpu *n
             }
 
             if ( ret |
-                 put_user(eflags,              esp-1) |
+                 put_user(rflags,              esp-1) |
                  put_user(cs_and_mask,         esp-2) |
                  put_user(regs->_eip,          esp-3) |
                  put_user(uregs->gs,           esp-4) |
@@ -1805,12 +1805,6 @@ static void load_segments(struct vcpu *n
         cs_and_mask = (unsigned long)regs->cs |
             ((unsigned long)vcpu_info(n, evtchn_upcall_mask) << 32);
 
-        /* Fold upcall mask into RFLAGS.IF. */
-        rflags  = regs->rflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
-        rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
-        if ( VM_ASSIST(n->domain, architectural_iopl) )
-            rflags |= n->arch.pv_vcpu.iopl;
-
         if ( put_user(regs->ss,            rsp- 1) |
              put_user(regs->rsp,           rsp- 2) |
              put_user(rflags,              rsp- 3) |
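
As a self-contained illustration of the logic being consolidated above, here
is a minimal sketch of the RFLAGS folding. The flag constants match the
architectural bit layout; the helper and its parameter names are purely
illustrative and not part of Xen.

#include <stdbool.h>
#include <stdint.h>

#define X86_EFLAGS_IF    0x00000200u  /* interrupt enable flag, bit 9 */
#define X86_EFLAGS_IOPL  0x00003000u  /* I/O privilege level, bits 12-13 */

/*
 * Build the RFLAGS image presented to the guest: clear IF and IOPL,
 * derive IF from the (inverted) event channel upcall mask, and merge in
 * the guest's IOPL only when the architectural IOPL assist is enabled.
 * 'vcpu_iopl' is assumed to already hold IOPL shifted into bits 12-13,
 * as the corresponding per-vCPU field does.
 */
static uint64_t fold_guest_rflags(uint64_t hw_rflags, bool upcall_masked,
                                  bool arch_iopl_assist, uint64_t vcpu_iopl)
{
    uint64_t rflags = hw_rflags & ~(uint64_t)(X86_EFLAGS_IF | X86_EFLAGS_IOPL);

    rflags |= (uint64_t)!upcall_masked << 9;

    if ( arch_iopl_assist )
        rflags |= vcpu_iopl;

    return rflags;
}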





* Re: [PATCH] x86: fold code in load_segments()
  2016-09-14 15:24 [PATCH] x86: fold code in load_segments() Jan Beulich
@ 2016-09-14 17:12 ` Andrew Cooper
  2016-09-15  6:28   ` Jan Beulich
  0 siblings, 1 reply; 4+ messages in thread
From: Andrew Cooper @ 2016-09-14 17:12 UTC (permalink / raw)
  To: Jan Beulich, xen-devel


On 14/09/16 16:24, Jan Beulich wrote:
> No need to have the same logic twice.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -1745,22 +1745,22 @@ static void load_segments(struct vcpu *n
>              (unsigned long *)pv->kernel_sp;
>          unsigned long cs_and_mask, rflags;
>  
> +        /* Fold upcall mask and architectural IOPL into RFLAGS.IF. */
> +        rflags  = regs->rflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
> +        rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
> +        if ( VM_ASSIST(n->domain, architectural_iopl) )
> +            rflags |= n->arch.pv_vcpu.iopl;
> +
>          if ( is_pv_32bit_vcpu(n) )
>          {
>              unsigned int *esp = ring_1(regs) ?
>                                  (unsigned int *)regs->rsp :
>                                  (unsigned int *)pv->kernel_sp;
> -            unsigned int cs_and_mask, eflags;

The unshadowed cs_and_mask is unsigned long, not int, which means the
put_user() below will clobber a 32-bit PV guest's stack frame.

Other than that, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
for the intended change.

~Andrew

>              int ret = 0;
>  
>              /* CS longword also contains full evtchn_upcall_mask. */
>              cs_and_mask = (unsigned short)regs->cs |
>                  ((unsigned int)vcpu_info(n, evtchn_upcall_mask) << 16);
> -            /* Fold upcall mask into RFLAGS.IF. */
> -            eflags  = regs->_eflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
> -            eflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
> -            if ( VM_ASSIST(n->domain, architectural_iopl) )
> -                eflags |= n->arch.pv_vcpu.iopl;
>  
>              if ( !ring_1(regs) )
>              {
> @@ -1770,7 +1770,7 @@ static void load_segments(struct vcpu *n
>              }
>  
>              if ( ret |
> -                 put_user(eflags,              esp-1) |
> +                 put_user(rflags,              esp-1) |
>                   put_user(cs_and_mask,         esp-2) |
>                   put_user(regs->_eip,          esp-3) |
>                   put_user(uregs->gs,           esp-4) |
> @@ -1805,12 +1805,6 @@ static void load_segments(struct vcpu *n
>          cs_and_mask = (unsigned long)regs->cs |
>              ((unsigned long)vcpu_info(n, evtchn_upcall_mask) << 32);
>  
> -        /* Fold upcall mask into RFLAGS.IF. */
> -        rflags  = regs->rflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
> -        rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
> -        if ( VM_ASSIST(n->domain, architectural_iopl) )
> -            rflags |= n->arch.pv_vcpu.iopl;
> -
>          if ( put_user(regs->ss,            rsp- 1) |
>               put_user(regs->rsp,           rsp- 2) |
>               put_user(rflags,              rsp- 3) |

* Re: [PATCH] x86: fold code in load_segments()
  2016-09-14 17:12 ` Andrew Cooper
@ 2016-09-15  6:28   ` Jan Beulich
  2016-09-15  7:25     ` Andrew Cooper
  0 siblings, 1 reply; 4+ messages in thread
From: Jan Beulich @ 2016-09-15  6:28 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel

>>> On 14.09.16 at 19:12, <andrew.cooper3@citrix.com> wrote:
> On 14/09/16 16:24, Jan Beulich wrote:
>> --- a/xen/arch/x86/domain.c
>> +++ b/xen/arch/x86/domain.c
>> @@ -1745,22 +1745,22 @@ static void load_segments(struct vcpu *n
>>              (unsigned long *)pv->kernel_sp;
>>          unsigned long cs_and_mask, rflags;
>>  
>> +        /* Fold upcall mask and architectural IOPL into RFLAGS.IF. */
>> +        rflags  = regs->rflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
>> +        rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
>> +        if ( VM_ASSIST(n->domain, architectural_iopl) )
>> +            rflags |= n->arch.pv_vcpu.iopl;
>> +
>>          if ( is_pv_32bit_vcpu(n) )
>>          {
>>              unsigned int *esp = ring_1(regs) ?
>>                                  (unsigned int *)regs->rsp :
>>                                  (unsigned int *)pv->kernel_sp;
>> -            unsigned int cs_and_mask, eflags;
> 
> The unshadowed cs_and_mask is unsigned long, not int, which means the
> put_user() below will clobber a 32-bit PV guest's stack frame.

No, put_user() determines the access size from its second (pointer)
argument.
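
For illustration, a simplified model of how such a macro can take its store
width from the pointer operand rather than from the value. This is not the
actual Xen put_user(), which uses fault-safe accessors and returns nonzero
on a faulting guest access; the names below are made up for the example
(it relies on GCC extensions, as the real macro does):

#include <stdint.h>
#include <string.h>

/*
 * Simplified model only: the store width comes from sizeof(*(ptr)),
 * not from the type of 'val', so passing an unsigned long value
 * through an unsigned int * pointer still writes exactly 4 bytes.
 */
#define example_put_user(val, ptr) ({                                    \
    __typeof__(*(ptr)) _v = (val);  /* truncated to the target width */  \
    memcpy((ptr), &_v, sizeof(*(ptr)));                                  \
    0;  /* the real macro would return nonzero on a faulting access */   \
})

int main(void)
{
    unsigned int slot = 0;
    uint64_t wide = 0x1122334455667788ULL;

    example_put_user(wide, &slot);  /* writes 4 bytes: slot == 0x55667788 */
    return slot == 0x55667788u ? 0 : 1;
}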

> Other than that, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> for the intended change.

Well, with the above (it not being clear what change you would have
expected, or whether one is needed in the first place), I'll have to
wait for clarification.

Jan



* Re: [PATCH] x86: fold code in load_segments()
  2016-09-15  6:28   ` Jan Beulich
@ 2016-09-15  7:25     ` Andrew Cooper
  0 siblings, 0 replies; 4+ messages in thread
From: Andrew Cooper @ 2016-09-15  7:25 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel

On 15/09/2016 07:28, Jan Beulich wrote:
>>>> On 14.09.16 at 19:12, <andrew.cooper3@citrix.com> wrote:
>> On 14/09/16 16:24, Jan Beulich wrote:
>>> --- a/xen/arch/x86/domain.c
>>> +++ b/xen/arch/x86/domain.c
>>> @@ -1745,22 +1745,22 @@ static void load_segments(struct vcpu *n
>>>              (unsigned long *)pv->kernel_sp;
>>>          unsigned long cs_and_mask, rflags;
>>>  
>>> +        /* Fold upcall mask and architectural IOPL into RFLAGS.IF. */
>>> +        rflags  = regs->rflags & ~(X86_EFLAGS_IF|X86_EFLAGS_IOPL);
>>> +        rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;
>>> +        if ( VM_ASSIST(n->domain, architectural_iopl) )
>>> +            rflags |= n->arch.pv_vcpu.iopl;
>>> +
>>>          if ( is_pv_32bit_vcpu(n) )
>>>          {
>>>              unsigned int *esp = ring_1(regs) ?
>>>                                  (unsigned int *)regs->rsp :
>>>                                  (unsigned int *)pv->kernel_sp;
>>> -            unsigned int cs_and_mask, eflags;
>> The unshadowed cs_and_mask is unsigned long, not int, which means the
>> put_user() below will clobber a 32-bit PV guest's stack frame.
> No, put_user() determines the access size from its second (pointer)
> argument.

Oh - so it does.  Mind putting at least a note to that effect in the
commit message?  My first thought upon seeing it was to wonder whether
you had a stale patch which didn't compile.

With that, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

