All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] x86: simplify a few macros / inline functions
@ 2015-05-08  6:42 Jan Beulich
  2015-05-08  8:25 ` Andrew Cooper
  0 siblings, 1 reply; 3+ messages in thread
From: Jan Beulich @ 2015-05-08  6:42 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser

[-- Attachment #1: Type: text/plain, Size: 6390 bytes --]

Drop pointless casts and write_atomic()'s bogus and unused "return
value".

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -17,12 +17,11 @@ static inline void name(volatile type *a
 build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
 build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
 build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
+build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
 
 build_write_atomic(write_u8_atomic, "b", uint8_t, "q", )
 build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
 build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
-
-build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
 build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
 
 #undef build_read_atomic
@@ -30,29 +29,28 @@ build_write_atomic(write_u64_atomic, "q"
 
 void __bad_atomic_size(void);
 
-#define read_atomic(p) ({                                               \
-    unsigned long x_;                                                   \
-    switch ( sizeof(*(p)) ) {                                           \
-    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;                 \
-    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break;               \
-    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break;               \
-    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break;               \
-    default: x_ = 0; __bad_atomic_size(); break;                        \
-    }                                                                   \
-    (typeof(*(p)))x_;                                                   \
+#define read_atomic(p) ({                                 \
+    unsigned long x_;                                     \
+    switch ( sizeof(*(p)) ) {                             \
+    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;   \
+    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \
+    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \
+    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \
+    default: x_ = 0; __bad_atomic_size(); break;          \
+    }                                                     \
+    (typeof(*(p)))x_;                                     \
 })
 
-#define write_atomic(p, x) ({                                           \
-    typeof(*(p)) __x = (x);                                             \
-    unsigned long x_ = (unsigned long)__x;                              \
-    switch ( sizeof(*(p)) ) {                                           \
-    case 1: write_u8_atomic((uint8_t *)(p), (uint8_t)x_); break;        \
-    case 2: write_u16_atomic((uint16_t *)(p), (uint16_t)x_); break;     \
-    case 4: write_u32_atomic((uint32_t *)(p), (uint32_t)x_); break;     \
-    case 8: write_u64_atomic((uint64_t *)(p), (uint64_t)x_); break;     \
-    default: __bad_atomic_size(); break;                                \
-    }                                                                   \
-    __x;                                                                \
+#define write_atomic(p, x) ({                             \
+    typeof(*(p)) __x = (x);                               \
+    unsigned long x_ = (unsigned long)__x;                \
+    switch ( sizeof(*(p)) ) {                             \
+    case 1: write_u8_atomic((uint8_t *)(p), x_); break;   \
+    case 2: write_u16_atomic((uint16_t *)(p), x_); break; \
+    case 4: write_u32_atomic((uint32_t *)(p), x_); break; \
+    case 8: write_u64_atomic((uint64_t *)(p), x_); break; \
+    default: __bad_atomic_size(); break;                  \
+    }                                                     \
 })
 
 /*
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -41,25 +41,25 @@ static always_inline unsigned long __xch
     case 1:
         asm volatile ( "xchgb %b0,%1"
                        : "=q" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 2:
         asm volatile ( "xchgw %w0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 4:
         asm volatile ( "xchgl %k0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 8:
         asm volatile ( "xchgq %0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     }
@@ -81,28 +81,28 @@ static always_inline unsigned long __cmp
     case 1:
         asm volatile ( "lock; cmpxchgb %b1,%2"
                        : "=a" (prev)
-                       : "q" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "q" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
     case 2:
         asm volatile ( "lock; cmpxchgw %w1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
     case 4:
         asm volatile ( "lock; cmpxchgl %k1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
     case 8:
         asm volatile ( "lock; cmpxchgq %1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;



[-- Attachment #2: x86-simplify-macros.patch --]
[-- Type: text/plain, Size: 6435 bytes --]

x86: simplify a few macros / inline functions

Drop pointless casts and write_atomic()'s bogus and unused "return
value".

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/include/asm-x86/atomic.h
+++ b/xen/include/asm-x86/atomic.h
@@ -17,12 +17,11 @@ static inline void name(volatile type *a
 build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
 build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
 build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
+build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
 
 build_write_atomic(write_u8_atomic, "b", uint8_t, "q", )
 build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
 build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
-
-build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
 build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
 
 #undef build_read_atomic
@@ -30,29 +29,28 @@ build_write_atomic(write_u64_atomic, "q"
 
 void __bad_atomic_size(void);
 
-#define read_atomic(p) ({                                               \
-    unsigned long x_;                                                   \
-    switch ( sizeof(*(p)) ) {                                           \
-    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;                 \
-    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break;               \
-    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break;               \
-    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break;               \
-    default: x_ = 0; __bad_atomic_size(); break;                        \
-    }                                                                   \
-    (typeof(*(p)))x_;                                                   \
+#define read_atomic(p) ({                                 \
+    unsigned long x_;                                     \
+    switch ( sizeof(*(p)) ) {                             \
+    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;   \
+    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \
+    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \
+    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \
+    default: x_ = 0; __bad_atomic_size(); break;          \
+    }                                                     \
+    (typeof(*(p)))x_;                                     \
 })
 
-#define write_atomic(p, x) ({                                           \
-    typeof(*(p)) __x = (x);                                             \
-    unsigned long x_ = (unsigned long)__x;                              \
-    switch ( sizeof(*(p)) ) {                                           \
-    case 1: write_u8_atomic((uint8_t *)(p), (uint8_t)x_); break;        \
-    case 2: write_u16_atomic((uint16_t *)(p), (uint16_t)x_); break;     \
-    case 4: write_u32_atomic((uint32_t *)(p), (uint32_t)x_); break;     \
-    case 8: write_u64_atomic((uint64_t *)(p), (uint64_t)x_); break;     \
-    default: __bad_atomic_size(); break;                                \
-    }                                                                   \
-    __x;                                                                \
+#define write_atomic(p, x) ({                             \
+    typeof(*(p)) __x = (x);                               \
+    unsigned long x_ = (unsigned long)__x;                \
+    switch ( sizeof(*(p)) ) {                             \
+    case 1: write_u8_atomic((uint8_t *)(p), x_); break;   \
+    case 2: write_u16_atomic((uint16_t *)(p), x_); break; \
+    case 4: write_u32_atomic((uint32_t *)(p), x_); break; \
+    case 8: write_u64_atomic((uint64_t *)(p), x_); break; \
+    default: __bad_atomic_size(); break;                  \
+    }                                                     \
 })
 
 /*
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -41,25 +41,25 @@ static always_inline unsigned long __xch
     case 1:
         asm volatile ( "xchgb %b0,%1"
                        : "=q" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 2:
         asm volatile ( "xchgw %w0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 4:
         asm volatile ( "xchgl %k0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     case 8:
         asm volatile ( "xchgq %0,%1"
                        : "=r" (x)
-                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
+                       : "m" (*__xg(ptr)), "0" (x)
                        : "memory" );
         break;
     }
@@ -81,28 +81,28 @@ static always_inline unsigned long __cmp
     case 1:
         asm volatile ( "lock; cmpxchgb %b1,%2"
                        : "=a" (prev)
-                       : "q" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "q" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
     case 2:
         asm volatile ( "lock; cmpxchgw %w1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
     case 4:
         asm volatile ( "lock; cmpxchgl %k1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;
     case 8:
         asm volatile ( "lock; cmpxchgq %1,%2"
                        : "=a" (prev)
-                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
+                       : "r" (new), "m" (*__xg(ptr)),
                        "0" (old)
                        : "memory" );
         return prev;

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] x86: simplify a few macros / inline functions
  2015-05-08  6:42 [PATCH] x86: simplify a few macros / inline functions Jan Beulich
@ 2015-05-08  8:25 ` Andrew Cooper
  2015-05-08  8:29   ` Jan Beulich
  0 siblings, 1 reply; 3+ messages in thread
From: Andrew Cooper @ 2015-05-08  8:25 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Keir Fraser

On 08/05/15 07:42, Jan Beulich wrote:
> Drop pointless casts and write_atomic()'s bogus and unused "return
> value".
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>, given
confirmation of one query...

>
> --- a/xen/include/asm-x86/atomic.h
> +++ b/xen/include/asm-x86/atomic.h
> @@ -17,12 +17,11 @@ static inline void name(volatile type *a
>  build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
>  build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
>  build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
> +build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
>  
>  build_write_atomic(write_u8_atomic, "b", uint8_t, "q", )
>  build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
>  build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
> -
> -build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
>  build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
>  
>  #undef build_read_atomic
> @@ -30,29 +29,28 @@ build_write_atomic(write_u64_atomic, "q"
>  
>  void __bad_atomic_size(void);
>  
> -#define read_atomic(p) ({                                               \
> -    unsigned long x_;                                                   \
> -    switch ( sizeof(*(p)) ) {                                           \
> -    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;                 \
> -    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break;               \
> -    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break;               \
> -    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break;               \
> -    default: x_ = 0; __bad_atomic_size(); break;                        \
> -    }                                                                   \
> -    (typeof(*(p)))x_;                                                   \
> +#define read_atomic(p) ({                                 \
> +    unsigned long x_;                                     \
> +    switch ( sizeof(*(p)) ) {                             \
> +    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;   \
> +    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \
> +    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \
> +    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \
> +    default: x_ = 0; __bad_atomic_size(); break;          \
> +    }                                                     \
> +    (typeof(*(p)))x_;                                     \
>  })

I can't spot a change in read_atomic(), other than the alignment of the
\'s.  I just want to double check that I haven't missed something.

~Andrew

>  
> -#define write_atomic(p, x) ({                                           \
> -    typeof(*(p)) __x = (x);                                             \
> -    unsigned long x_ = (unsigned long)__x;                              \
> -    switch ( sizeof(*(p)) ) {                                           \
> -    case 1: write_u8_atomic((uint8_t *)(p), (uint8_t)x_); break;        \
> -    case 2: write_u16_atomic((uint16_t *)(p), (uint16_t)x_); break;     \
> -    case 4: write_u32_atomic((uint32_t *)(p), (uint32_t)x_); break;     \
> -    case 8: write_u64_atomic((uint64_t *)(p), (uint64_t)x_); break;     \
> -    default: __bad_atomic_size(); break;                                \
> -    }                                                                   \
> -    __x;                                                                \
> +#define write_atomic(p, x) ({                             \
> +    typeof(*(p)) __x = (x);                               \
> +    unsigned long x_ = (unsigned long)__x;                \
> +    switch ( sizeof(*(p)) ) {                             \
> +    case 1: write_u8_atomic((uint8_t *)(p), x_); break;   \
> +    case 2: write_u16_atomic((uint16_t *)(p), x_); break; \
> +    case 4: write_u32_atomic((uint32_t *)(p), x_); break; \
> +    case 8: write_u64_atomic((uint64_t *)(p), x_); break; \
> +    default: __bad_atomic_size(); break;                  \
> +    }                                                     \
>  })
>  
>  /*
> --- a/xen/include/asm-x86/system.h
> +++ b/xen/include/asm-x86/system.h
> @@ -41,25 +41,25 @@ static always_inline unsigned long __xch
>      case 1:
>          asm volatile ( "xchgb %b0,%1"
>                         : "=q" (x)
> -                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
> +                       : "m" (*__xg(ptr)), "0" (x)
>                         : "memory" );
>          break;
>      case 2:
>          asm volatile ( "xchgw %w0,%1"
>                         : "=r" (x)
> -                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
> +                       : "m" (*__xg(ptr)), "0" (x)
>                         : "memory" );
>          break;
>      case 4:
>          asm volatile ( "xchgl %k0,%1"
>                         : "=r" (x)
> -                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
> +                       : "m" (*__xg(ptr)), "0" (x)
>                         : "memory" );
>          break;
>      case 8:
>          asm volatile ( "xchgq %0,%1"
>                         : "=r" (x)
> -                       : "m" (*__xg((volatile void *)ptr)), "0" (x)
> +                       : "m" (*__xg(ptr)), "0" (x)
>                         : "memory" );
>          break;
>      }
> @@ -81,28 +81,28 @@ static always_inline unsigned long __cmp
>      case 1:
>          asm volatile ( "lock; cmpxchgb %b1,%2"
>                         : "=a" (prev)
> -                       : "q" (new), "m" (*__xg((volatile void *)ptr)),
> +                       : "q" (new), "m" (*__xg(ptr)),
>                         "0" (old)
>                         : "memory" );
>          return prev;
>      case 2:
>          asm volatile ( "lock; cmpxchgw %w1,%2"
>                         : "=a" (prev)
> -                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
> +                       : "r" (new), "m" (*__xg(ptr)),
>                         "0" (old)
>                         : "memory" );
>          return prev;
>      case 4:
>          asm volatile ( "lock; cmpxchgl %k1,%2"
>                         : "=a" (prev)
> -                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
> +                       : "r" (new), "m" (*__xg(ptr)),
>                         "0" (old)
>                         : "memory" );
>          return prev;
>      case 8:
>          asm volatile ( "lock; cmpxchgq %1,%2"
>                         : "=a" (prev)
> -                       : "r" (new), "m" (*__xg((volatile void *)ptr)),
> +                       : "r" (new), "m" (*__xg(ptr)),
>                         "0" (old)
>                         : "memory" );
>          return prev;
>
>

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] x86: simplify a few macros / inline functions
  2015-05-08  8:25 ` Andrew Cooper
@ 2015-05-08  8:29   ` Jan Beulich
  0 siblings, 0 replies; 3+ messages in thread
From: Jan Beulich @ 2015-05-08  8:29 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel, Keir Fraser

>>> On 08.05.15 at 10:25, <andrew.cooper3@citrix.com> wrote:
> On 08/05/15 07:42, Jan Beulich wrote:
>> @@ -30,29 +29,28 @@ build_write_atomic(write_u64_atomic, "q"
>>  
>>  void __bad_atomic_size(void);
>>  
>> -#define read_atomic(p) ({                                               \
>> -    unsigned long x_;                                                   \
>> -    switch ( sizeof(*(p)) ) {                                           \
>> -    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;                 \
>> -    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break;               \
>> -    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break;               \
>> -    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break;               \
>> -    default: x_ = 0; __bad_atomic_size(); break;                        \
>> -    }                                                                   \
>> -    (typeof(*(p)))x_;                                                   \
>> +#define read_atomic(p) ({                                 \
>> +    unsigned long x_;                                     \
>> +    switch ( sizeof(*(p)) ) {                             \
>> +    case 1: x_ = read_u8_atomic((uint8_t *)(p)); break;   \
>> +    case 2: x_ = read_u16_atomic((uint16_t *)(p)); break; \
>> +    case 4: x_ = read_u32_atomic((uint32_t *)(p)); break; \
>> +    case 8: x_ = read_u64_atomic((uint64_t *)(p)); break; \
>> +    default: x_ = 0; __bad_atomic_size(); break;          \
>> +    }                                                     \
>> +    (typeof(*(p)))x_;                                     \
>>  })
> 
> I can't spot a change in read_atomic(), other than the alignment of the
> \'s.  I just want to double check that I haven't missed something.

That's correct - I just wanted them to align with the write_atomic()
ones again without needlessly padding the now shorter lines there.

Jan

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2015-05-08  8:29 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-05-08  6:42 [PATCH] x86: simplify a few macros / inline functions Jan Beulich
2015-05-08  8:25 ` Andrew Cooper
2015-05-08  8:29   ` Jan Beulich

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.