* [PATCH] x86emul: relax asm() constraints
@ 2016-02-15 12:06 Jan Beulich
  2016-02-15 13:39 ` Andrew Cooper
  0 siblings, 1 reply; 3+ messages in thread
From: Jan Beulich @ 2016-02-15 12:06 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser

Let's give the compiler as much liberty in picking instruction operands
as possible. Also drop unnecessary size modifiers when the correct size
can already be derived from the asm() operands. Finally also drop an
"unsigned" from idiv_dbl()'s second parameter, allowing a cast to be
eliminated.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
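
As a standalone illustration of what the relaxed constraints buy (not part
of the patch itself; it assumes a GCC-style compiler targeting x86), the
even_parity() form below lets the compiler pick any byte register for the
input ("q") and any byte register or memory location for the output
("=qm"), instead of pinning both to %al via "=a"/"0":

#include <stdint.h>
#include <stdio.h>

/* Parity of a byte via TEST+SETP; the compiler chooses the operands. */
static uint8_t even_parity_relaxed(uint8_t v)
{
    /* uint8_t operands are already byte-sized, so no %b modifier is needed. */
    asm ( "test %1,%1; setp %0" : "=qm" (v) : "q" (v) );
    return v;
}

int main(void)
{
    /* 0x03 has two bits set (even parity), 0x07 has three (odd parity). */
    printf("%d %d\n", even_parity_relaxed(0x03), even_parity_relaxed(0x07));
    return 0;
}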

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -611,7 +611,7 @@ do {
  */
 static bool_t even_parity(uint8_t v)
 {
-    asm ( "test %b0,%b0; setp %b0" : "=a" (v) : "0" (v) );
+    asm ( "test %1,%1; setp %0" : "=qm" (v) : "q" (v) );
     return v;
 }
 
@@ -813,9 +813,9 @@ static int read_ulong(
  */
 static bool_t mul_dbl(unsigned long m[2])
 {
-    bool_t rc = 0;
-    asm ( "mul %1; seto %b2"
-          : "+a" (m[0]), "+d" (m[1]), "+q" (rc) );
+    bool_t rc;
+    asm ( "mul %1; seto %2"
+          : "+a" (m[0]), "+d" (m[1]), "=q" (rc) );
     return rc;
 }
 
@@ -826,9 +826,9 @@ static bool_t mul_dbl(unsigned long m[2]
  */
 static bool_t imul_dbl(unsigned long m[2])
 {
-    bool_t rc = 0;
+    bool_t rc;
     asm ( "imul %1; seto %b2"
-          : "+a" (m[0]), "+d" (m[1]), "+q" (rc) );
+          : "+a" (m[0]), "+d" (m[1]), "=q" (rc) );
     return rc;
 }
 
@@ -854,9 +854,9 @@ static bool_t div_dbl(unsigned long u[2]
  * NB. We don't use idiv directly as it's moderately hard to work out
  *     ahead of time whether it will #DE, which we cannot allow to happen.
  */
-static bool_t idiv_dbl(unsigned long u[2], unsigned long v)
+static bool_t idiv_dbl(unsigned long u[2], long v)
 {
-    bool_t negu = (long)u[1] < 0, negv = (long)v < 0;
+    bool_t negu = (long)u[1] < 0, negv = v < 0;
 
     /* u = abs(u) */
     if ( negu )
@@ -4542,9 +4542,10 @@ x86_emulate(
 
     case 0xbc: /* bsf or tzcnt */ {
         bool_t zf;
-        asm ( "bsf %2,%0; setz %b1"
+
+        asm ( "bsf %2,%0; setz %1"
               : "=r" (dst.val), "=q" (zf)
-              : "r" (src.val) );
+              : "rm" (src.val) );
         _regs.eflags &= ~EFLG_ZF;
         if ( (vex.pfx == vex_f3) && vcpu_has_bmi1() )
         {
@@ -4567,9 +4568,10 @@ x86_emulate(
 
     case 0xbd: /* bsr or lzcnt */ {
         bool_t zf;
-        asm ( "bsr %2,%0; setz %b1"
+
+        asm ( "bsr %2,%0; setz %1"
               : "=r" (dst.val), "=q" (zf)
-              : "r" (src.val) );
+              : "rm" (src.val) );
         _regs.eflags &= ~EFLG_ZF;
         if ( (vex.pfx == vex_f3) && vcpu_has_lzcnt() )
         {
@@ -4698,7 +4700,7 @@ x86_emulate(
             break;
         case 4:
 #ifdef __x86_64__
-            asm ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) );
+            asm ( "bswap %k0" : "=r" (dst.val) : "0" (*(uint32_t *)dst.reg) );
             break;
         case 8:
 #endif
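
A self-contained sketch of the widening-multiply pattern after the change
(an illustration only, assuming x86-64 and a GCC-style compiler): since
SETO writes rc unconditionally, rc can be a pure output ("=q") and the
dummy "rc = 0" initialisation becomes unnecessary.

#include <stdint.h>
#include <stdio.h>

typedef uint8_t bool_t;

static bool_t mul_dbl(unsigned long m[2])
{
    bool_t rc;

    /* MUL widens m[0]*m[1] into RDX:RAX; SETO captures the overflow flag. */
    asm ( "mul %1; seto %2"
          : "+a" (m[0]), "+d" (m[1]), "=q" (rc) );
    return rc;
}

int main(void)
{
    unsigned long m[2] = { 1UL << 63, 2 };
    int overflow = mul_dbl(m);

    /* (1 << 63) * 2 == 2^64: expect overflow=1 high=0x1 low=0. */
    printf("overflow=%d high=%#lx low=%#lx\n", overflow, m[1], m[0]);
    return 0;
}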





* Re: [PATCH] x86emul: relax asm() constraints
  2016-02-15 12:06 [PATCH] x86emul: relax asm() constraints Jan Beulich
@ 2016-02-15 13:39 ` Andrew Cooper
  2016-02-15 14:24   ` Jan Beulich
  0 siblings, 1 reply; 3+ messages in thread
From: Andrew Cooper @ 2016-02-15 13:39 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Keir Fraser

On 15/02/16 12:06, Jan Beulich wrote:
> Let's give the compiler as much liberty in picking instruction operands
> as possible. Also drop unnecessary size modifiers when the correct size
> can already be derived from the asm() operands. Finally also drop an
> "unsigned" from idiv_dbl()'s second parameter, allowing a cast to be
> eliminated.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -611,7 +611,7 @@ do {
>   */
>  static bool_t even_parity(uint8_t v)
>  {
> -    asm ( "test %b0,%b0; setp %b0" : "=a" (v) : "0" (v) );
> +    asm ( "test %1,%1; setp %0" : "=qm" (v) : "q" (v) );
>      return v;
>  }
>  
> @@ -813,9 +813,9 @@ static int read_ulong(
>   */
>  static bool_t mul_dbl(unsigned long m[2])
>  {
> -    bool_t rc = 0;
> -    asm ( "mul %1; seto %b2"
> -          : "+a" (m[0]), "+d" (m[1]), "+q" (rc) );
> +    bool_t rc;
> +    asm ( "mul %1; seto %2"
> +          : "+a" (m[0]), "+d" (m[1]), "=q" (rc) );
>      return rc;
>  }
>  
> @@ -826,9 +826,9 @@ static bool_t mul_dbl(unsigned long m[2]
>   */
>  static bool_t imul_dbl(unsigned long m[2])
>  {
> -    bool_t rc = 0;
> +    bool_t rc;
>      asm ( "imul %1; seto %b2"
> -          : "+a" (m[0]), "+d" (m[1]), "+q" (rc) );
> +          : "+a" (m[0]), "+d" (m[1]), "=q" (rc) );
>      return rc;
>  }
>  
> @@ -854,9 +854,9 @@ static bool_t div_dbl(unsigned long u[2]
>   * NB. We don't use idiv directly as it's moderately hard to work out
>   *     ahead of time whether it will #DE, which we cannot allow to happen.
>   */
> -static bool_t idiv_dbl(unsigned long u[2], unsigned long v)
> +static bool_t idiv_dbl(unsigned long u[2], long v)
>  {
> -    bool_t negu = (long)u[1] < 0, negv = (long)v < 0;
> +    bool_t negu = (long)u[1] < 0, negv = v < 0;
>  
>      /* u = abs(u) */
>      if ( negu )
> @@ -4542,9 +4542,10 @@ x86_emulate(
>  
>      case 0xbc: /* bsf or tzcnt */ {
>          bool_t zf;
> -        asm ( "bsf %2,%0; setz %b1"
> +
> +        asm ( "bsf %2,%0; setz %1"
>                : "=r" (dst.val), "=q" (zf)

This =q could become =qm, like the even_parity() change.

> -              : "r" (src.val) );
> +              : "rm" (src.val) );
>          _regs.eflags &= ~EFLG_ZF;
>          if ( (vex.pfx == vex_f3) && vcpu_has_bmi1() )
>          {
> @@ -4567,9 +4568,10 @@ x86_emulate(
>  
>      case 0xbd: /* bsr or lzcnt */ {
>          bool_t zf;
> -        asm ( "bsr %2,%0; setz %b1"
> +
> +        asm ( "bsr %2,%0; setz %1"
>                : "=r" (dst.val), "=q" (zf)
> -              : "r" (src.val) );
> +              : "rm" (src.val) );
>          _regs.eflags &= ~EFLG_ZF;
>          if ( (vex.pfx == vex_f3) && vcpu_has_lzcnt() )
>          {
> @@ -4698,7 +4700,7 @@ x86_emulate(
>              break;
>          case 4:
>  #ifdef __x86_64__
> -            asm ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) );
> +            asm ( "bswap %k0" : "=r" (dst.val) : "0" (*(uint32_t *)dst.reg) );

What is the purpose of both the explicit cast and the %k operand modifier?

~Andrew


* Re: [PATCH] x86emul: relax asm() constraints
  2016-02-15 13:39 ` Andrew Cooper
@ 2016-02-15 14:24   ` Jan Beulich
  0 siblings, 0 replies; 3+ messages in thread
From: Jan Beulich @ 2016-02-15 14:24 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel, Keir Fraser

>>> On 15.02.16 at 14:39, <andrew.cooper3@citrix.com> wrote:
> On 15/02/16 12:06, Jan Beulich wrote:
>> @@ -4542,9 +4542,10 @@ x86_emulate(
>>  
>>      case 0xbc: /* bsf or tzcnt */ {
>>          bool_t zf;
>> -        asm ( "bsf %2,%0; setz %b1"
>> +
>> +        asm ( "bsf %2,%0; setz %1"
>>                : "=r" (dst.val), "=q" (zf)
> 
> This =q could become =qm, like the even_parity() change.

Ah, indeed. And there are a couple more.
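
A minimal sketch of the suggested "=qm" form for the bsf/setz pattern (a
standalone illustration assuming x86-64 and a GCC-style compiler; bsf_zf()
is a hypothetical helper, not emulator code):

#include <stdint.h>
#include <stdio.h>

/* Returns the index of the lowest set bit of src; *zf records whether
 * src was zero (in which case the returned index is undefined). */
static unsigned long bsf_zf(unsigned long src, uint8_t *zf)
{
    unsigned long idx;

    /* "=qm" lets setz target a byte register or memory; "rm" lets bsf
     * read its source from a register or memory. */
    asm ( "bsf %2,%0; setz %1"
          : "=r" (idx), "=qm" (*zf)
          : "rm" (src) );
    return idx;
}

int main(void)
{
    uint8_t zf;
    unsigned long idx = bsf_zf(0x50, &zf);

    printf("idx=%lu zf=%d\n", idx, zf);   /* expect idx=4 zf=0 */
    return 0;
}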

>> @@ -4698,7 +4700,7 @@ x86_emulate(
>>              break;
>>          case 4:
>>  #ifdef __x86_64__
>> -            asm ( "bswap %k0" : "=r" (dst.val) : "0" (*dst.reg) );
>> +            asm ( "bswap %k0" : "=r" (dst.val) : "0" (*(uint32_t *)dst.reg) );
> 
> What is the purpose of both the explicit cast and k constraint?

Operand size gets (or at least may get) derived from the output
operand. While we could also constrain that one to 32 bits, it
seems better to have the whole of dst.val written, just in case. The
source operand, on the other hand, definitely only needs its low 32
bits loaded (possibly saving a REX prefix), and we also definitely
need to force the bswap to use 32-bit operand size.
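
A minimal standalone sketch of this reasoning (assuming x86-64 and a
GCC-style compiler; bswap32_example() is a hypothetical helper, not the
emulator code):

#include <stdint.h>
#include <stdio.h>

static unsigned long bswap32_example(const unsigned long *reg)
{
    unsigned long val;

    /* The uint32_t read (mirroring the patch's *(uint32_t *)dst.reg) keeps
     * the load to the low 32 bits, %k prints the 32-bit register name so
     * bswap gets 32-bit operand size, and the full 64-bit val is still
     * written (the upper half is zero-extended on x86-64). */
    asm ( "bswap %k0" : "=r" (val) : "0" (*(const uint32_t *)reg) );
    return val;
}

int main(void)
{
    unsigned long r = 0x1122334455667788UL;

    printf("%#lx\n", bswap32_example(&r));   /* expect 0x88776655 */
    return 0;
}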

Jan
