All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3 0/2] x86: bitops adjustments
@ 2015-03-20 14:47 Jan Beulich
  2015-03-20 14:53 ` [PATCH v3 1/2] x86: simplify non‑atomic bitops Jan Beulich
  2015-03-20 14:54 ` [PATCH v3 2/2] x86: make atomic bitops consistent with non‑atomic ones Jan Beulich
  0 siblings, 2 replies; 6+ messages in thread
From: Jan Beulich @ 2015-03-20 14:47 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser

1: simplify non-atomic bitops
2: make atomic bitops consistent with non-atomic ones

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Replace ADDR uses by *(int *)addr to not re-gain the volatile
    qualifier. New patch 2.

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v3 1/2] x86: simplify non‑atomic bitops
  2015-03-20 14:47 [PATCH v3 0/2] x86: bitops adjustments Jan Beulich
@ 2015-03-20 14:53 ` Jan Beulich
  2015-03-25 17:49   ` Andrew Cooper
  2015-03-20 14:54 ` [PATCH v3 2/2] x86: make atomic bitops consistent with non‑atomic ones Jan Beulich
  1 sibling, 1 reply; 6+ messages in thread
From: Jan Beulich @ 2015-03-20 14:53 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser

[-- Attachment #1: Type: text/plain, Size: 4904 bytes --]

- being non-atomic, their pointer arguments shouldn't be volatile-
  qualified
- their (half fake) memory operands can be a single "+m" instead of
  being both an output and an input

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Replace ADDR uses by *(int *)addr to not re-gain the volatile
    qualifier.
v2: Drop "+m" related sentence from comment at the top of the file as
    being wrong (the referenced indication in gcc's documentation got
    removed quite some time ago too).

--- a/xen/include/asm-x86/bitops.h
+++ b/xen/include/asm-x86/bitops.h
@@ -14,8 +14,7 @@
  * operand is both read from and written to. Since the operand is in fact a
  * word array, we also specify "memory" in the clobbers list to indicate that
  * words other than the one directly addressed by the memory operand may be
- * modified. We don't use "+m" because the gcc manual says that it should be
- * used only when the constraint allows the operand to reside in a register.
+ * modified.
  */
 
 #define ADDR (*(volatile long *) addr)
@@ -55,12 +54,9 @@ static inline void set_bit(int nr, volat
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, void *addr)
 {
-    asm volatile (
-        "btsl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "btsl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" );
 }
 #define __set_bit(nr, addr) ({                          \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -95,12 +91,9 @@ static inline void clear_bit(int nr, vol
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(int nr, void *addr)
 {
-    asm volatile (
-        "btrl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "btrl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" );
 }
 #define __clear_bit(nr, addr) ({                        \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -116,12 +109,9 @@ static inline void __clear_bit(int nr, v
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(int nr, void *addr)
 {
-    asm volatile (
-        "btcl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "btcl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" );
 }
 #define __change_bit(nr, addr) ({                       \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -181,14 +171,14 @@ static inline int test_and_set_bit(int n
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile void *addr)
+static inline int __test_and_set_bit(int nr, void *addr)
 {
     int oldbit;
 
     asm volatile (
         "btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+        : "=r" (oldbit), "+m" (*(int *)addr)
+        : "Ir" (nr) : "memory" );
     return oldbit;
 }
 #define __test_and_set_bit(nr, addr) ({                 \
@@ -228,14 +218,14 @@ static inline int test_and_clear_bit(int
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
+static inline int __test_and_clear_bit(int nr, void *addr)
 {
     int oldbit;
 
     asm volatile (
         "btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+        : "=r" (oldbit), "+m" (*(int *)addr)
+        : "Ir" (nr) : "memory" );
     return oldbit;
 }
 #define __test_and_clear_bit(nr, addr) ({               \
@@ -244,14 +234,14 @@ static inline int __test_and_clear_bit(i
 })
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(int nr, void *addr)
 {
     int oldbit;
 
     asm volatile (
         "btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+        : "=r" (oldbit), "+m" (*(int *)addr)
+        : "Ir" (nr) : "memory" );
     return oldbit;
 }
 #define __test_and_change_bit(nr, addr) ({              \



[-- Attachment #2: x86-non-atomic-bitops.patch --]
[-- Type: text/plain, Size: 4935 bytes --]

x86: simplify non-atomic bitops

- being non-atomic, their pointer arguments shouldn't be volatile-
  qualified
- their (half fake) memory operands can be a single "+m" instead of
  being both an output and an input

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Replace ADDR uses by *(int *)addr to not re-gain the volatile
    qualifier.
v2: Drop "+m" related sentence from comment at the top of the file as
    being wrong (the referenced indication in gcc's documentation got
    removed quite some time ago too).

--- a/xen/include/asm-x86/bitops.h
+++ b/xen/include/asm-x86/bitops.h
@@ -14,8 +14,7 @@
  * operand is both read from and written to. Since the operand is in fact a
  * word array, we also specify "memory" in the clobbers list to indicate that
  * words other than the one directly addressed by the memory operand may be
- * modified. We don't use "+m" because the gcc manual says that it should be
- * used only when the constraint allows the operand to reside in a register.
+ * modified.
  */
 
 #define ADDR (*(volatile long *) addr)
@@ -55,12 +54,9 @@ static inline void set_bit(int nr, volat
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(int nr, volatile void *addr)
+static inline void __set_bit(int nr, void *addr)
 {
-    asm volatile (
-        "btsl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "btsl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" );
 }
 #define __set_bit(nr, addr) ({                          \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -95,12 +91,9 @@ static inline void clear_bit(int nr, vol
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(int nr, void *addr)
 {
-    asm volatile (
-        "btrl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "btrl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" );
 }
 #define __clear_bit(nr, addr) ({                        \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -116,12 +109,9 @@ static inline void __clear_bit(int nr, v
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(int nr, void *addr)
 {
-    asm volatile (
-        "btcl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "btcl %1,%0" : "+m" (*(int *)addr) : "Ir" (nr) : "memory" );
 }
 #define __change_bit(nr, addr) ({                       \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -181,14 +171,14 @@ static inline int test_and_set_bit(int n
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(int nr, volatile void *addr)
+static inline int __test_and_set_bit(int nr, void *addr)
 {
     int oldbit;
 
     asm volatile (
         "btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+        : "=r" (oldbit), "+m" (*(int *)addr)
+        : "Ir" (nr) : "memory" );
     return oldbit;
 }
 #define __test_and_set_bit(nr, addr) ({                 \
@@ -228,14 +218,14 @@ static inline int test_and_clear_bit(int
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_clear_bit(int nr, volatile void *addr)
+static inline int __test_and_clear_bit(int nr, void *addr)
 {
     int oldbit;
 
     asm volatile (
         "btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+        : "=r" (oldbit), "+m" (*(int *)addr)
+        : "Ir" (nr) : "memory" );
     return oldbit;
 }
 #define __test_and_clear_bit(nr, addr) ({               \
@@ -244,14 +234,14 @@ static inline int __test_and_clear_bit(i
 })
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(int nr, void *addr)
 {
     int oldbit;
 
     asm volatile (
         "btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+        : "=r" (oldbit), "+m" (*(int *)addr)
+        : "Ir" (nr) : "memory" );
     return oldbit;
 }
 #define __test_and_change_bit(nr, addr) ({              \

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v3 2/2] x86: make atomic bitops consistent with non‑atomic ones
  2015-03-20 14:47 [PATCH v3 0/2] x86: bitops adjustments Jan Beulich
  2015-03-20 14:53 ` [PATCH v3 1/2] x86: simplify non‑atomic bitops Jan Beulich
@ 2015-03-20 14:54 ` Jan Beulich
  1 sibling, 0 replies; 6+ messages in thread
From: Jan Beulich @ 2015-03-20 14:54 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Keir Fraser

[-- Attachment #1: Type: text/plain, Size: 3373 bytes --]

- use int instead of long pointers (matching the 'l' suffix on insns)
- use "+m" instead of a pair of "=m" and "m" in asm() constraints

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/include/asm-x86/bitops.h
+++ b/xen/include/asm-x86/bitops.h
@@ -17,8 +17,8 @@
  * modified.
  */
 
-#define ADDR (*(volatile long *) addr)
-#define CONST_ADDR (*(const volatile long *) addr)
+#define ADDR (*(volatile int *) addr)
+#define CONST_ADDR (*(const volatile int *) addr)
 
 extern void __bitop_bad_size(void);
 #define bitop_bad_size(addr) (sizeof(*(addr)) < 4)
@@ -35,10 +35,8 @@ extern void __bitop_bad_size(void);
  */
 static inline void set_bit(int nr, volatile void *addr)
 {
-    asm volatile (
-        "lock; btsl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btsl %1,%0"
+                   : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define set_bit(nr, addr) ({                            \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -72,10 +70,8 @@ static inline void __set_bit(int nr, voi
  */
 static inline void clear_bit(int nr, volatile void *addr)
 {
-    asm volatile (
-        "lock; btrl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btrl %1,%0"
+                   : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define clear_bit(nr, addr) ({                          \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -129,10 +125,8 @@ static inline void __change_bit(int nr, 
  */
 static inline void change_bit(int nr, volatile void *addr)
 {
-    asm volatile (
-        "lock; btcl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btcl %1,%0"
+                    : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define change_bit(nr, addr) ({                         \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -151,10 +145,8 @@ static inline int test_and_set_bit(int n
 {
     int oldbit;
 
-    asm volatile (
-        "lock; btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btsl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
     return oldbit;
 }
 #define test_and_set_bit(nr, addr) ({                   \
@@ -198,10 +190,8 @@ static inline int test_and_clear_bit(int
 {
     int oldbit;
 
-    asm volatile (
-        "lock; btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btrl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
     return oldbit;
 }
 #define test_and_clear_bit(nr, addr) ({                 \
@@ -261,10 +251,8 @@ static inline int test_and_change_bit(in
 {
     int oldbit;
 
-    asm volatile (
-        "lock; btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btcl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
     return oldbit;
 }
 #define test_and_change_bit(nr, addr) ({                \




[-- Attachment #2: x86-consistent-atomic-bitops.patch --]
[-- Type: text/plain, Size: 3426 bytes --]

x86: make atomic bitops consistent with non-atomic ones

- use int instead of long pointers (matching the 'l' suffix on insns)
- use "+m" instead of a pair of "=m" and "m" in asm() constraints

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/include/asm-x86/bitops.h
+++ b/xen/include/asm-x86/bitops.h
@@ -17,8 +17,8 @@
  * modified.
  */
 
-#define ADDR (*(volatile long *) addr)
-#define CONST_ADDR (*(const volatile long *) addr)
+#define ADDR (*(volatile int *) addr)
+#define CONST_ADDR (*(const volatile int *) addr)
 
 extern void __bitop_bad_size(void);
 #define bitop_bad_size(addr) (sizeof(*(addr)) < 4)
@@ -35,10 +35,8 @@ extern void __bitop_bad_size(void);
  */
 static inline void set_bit(int nr, volatile void *addr)
 {
-    asm volatile (
-        "lock; btsl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btsl %1,%0"
+                   : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define set_bit(nr, addr) ({                            \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -72,10 +70,8 @@ static inline void __set_bit(int nr, voi
  */
 static inline void clear_bit(int nr, volatile void *addr)
 {
-    asm volatile (
-        "lock; btrl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btrl %1,%0"
+                   : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define clear_bit(nr, addr) ({                          \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -129,10 +125,8 @@ static inline void __change_bit(int nr, 
  */
 static inline void change_bit(int nr, volatile void *addr)
 {
-    asm volatile (
-        "lock; btcl %1,%0"
-        : "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btcl %1,%0"
+                    : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define change_bit(nr, addr) ({                         \
     if ( bitop_bad_size(addr) ) __bitop_bad_size();     \
@@ -151,10 +145,8 @@ static inline int test_and_set_bit(int n
 {
     int oldbit;
 
-    asm volatile (
-        "lock; btsl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btsl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
     return oldbit;
 }
 #define test_and_set_bit(nr, addr) ({                   \
@@ -198,10 +190,8 @@ static inline int test_and_clear_bit(int
 {
     int oldbit;
 
-    asm volatile (
-        "lock; btrl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btrl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
     return oldbit;
 }
 #define test_and_clear_bit(nr, addr) ({                 \
@@ -261,10 +251,8 @@ static inline int test_and_change_bit(in
 {
     int oldbit;
 
-    asm volatile (
-        "lock; btcl %2,%1\n\tsbbl %0,%0"
-        : "=r" (oldbit), "=m" (ADDR)
-        : "Ir" (nr), "m" (ADDR) : "memory");
+    asm volatile ( "lock; btcl %2,%1\n\tsbbl %0,%0"
+                   : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
     return oldbit;
 }
 #define test_and_change_bit(nr, addr) ({                \

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 1/2] x86: simplify non‑atomic bitops
  2015-03-20 14:53 ` [PATCH v3 1/2] x86: simplify non‑atomic bitops Jan Beulich
@ 2015-03-25 17:49   ` Andrew Cooper
  2015-03-26  8:00     ` Jan Beulich
  0 siblings, 1 reply; 6+ messages in thread
From: Andrew Cooper @ 2015-03-25 17:49 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Keir Fraser

On 20/03/15 14:53, Jan Beulich wrote:
> - being non-atomic, their pointer arguments shouldn't be volatile-
>    qualified
> - their (half fake) memory operands can be a single "+m" instead of
>    being both an output and an input
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

After further consideration, would it not be better to change the 
non-atomic variants to being straight C.

e.g.

static inline void __set_bit(int nr, void *_addr)
{
     int *addr = _addr;
     addr[nr / sizeof(int)] |= (1U << (nr % sizeof(int)));
}

This would drop the memory clobber from the asm statement and allow the 
compiler to optimise repeated __set_bit() calls to the same word into a 
single action.

~Andrew

(On a separate note, I feel that all of these operations should be 
acting on unsigned rather than signed ints, but that applies to all of 
these operations, not just the non-atomic ones)

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 1/2] x86: simplify non‑atomic bitops
  2015-03-25 17:49   ` Andrew Cooper
@ 2015-03-26  8:00     ` Jan Beulich
  2015-03-26  9:54       ` Andrew Cooper
  0 siblings, 1 reply; 6+ messages in thread
From: Jan Beulich @ 2015-03-26  8:00 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel, Keir Fraser

>>> On 25.03.15 at 18:49, <andrew.cooper3@citrix.com> wrote:
> On 20/03/15 14:53, Jan Beulich wrote:
>> - being non-atomic, their pointer arguments shouldn't be volatile-
>>    qualified
>> - their (half fake) memory operands can be a single "+m" instead of
>>    being both an output and an input
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> After further consideration, would it not be better to change the 
> non-atomic variants to being straight C.
> 
> e.g.
> 
> static inline void __set_bit(int nr, void *_addr)
> {
>      int *addr = _addr;
>      addr[nr / sizeof(int)] |= (1U << (nr % sizeof(int)));
> }
> 
> This would drop the memory clobber from the asm statement and allow the 
> compiler to optimise repeated __set_bit() calls to the same word into a 
> single action.

I wouldn't want to do this in this patch - a similar change that I
proposed years ago for Linux (not eliminating the asm(), but
dropping the clobbers) caused not really understood regressions and
hence needed to be reverted. We could still try whether this works in
Xen, but in another patch. Apart from that I doubt the operation
above would reliably get converted to BTS by the compiler
(independent of version).

> (On a separate note, I feel that all of these operations should be 
> acting on unsigned rather than signed ints, but that applies to all of 
> these operations, not just the non-atomic ones)

That makes zero difference for these ops - all that really matters
is the width. In the C version above however I agree that using
"unsigned int" would be more natural.

Jan

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v3 1/2] x86: simplify non‑atomic bitops
  2015-03-26  8:00     ` Jan Beulich
@ 2015-03-26  9:54       ` Andrew Cooper
  0 siblings, 0 replies; 6+ messages in thread
From: Andrew Cooper @ 2015-03-26  9:54 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Keir Fraser

On 26/03/15 08:00, Jan Beulich wrote:
>>>> On 25.03.15 at 18:49, <andrew.cooper3@citrix.com> wrote:
>> On 20/03/15 14:53, Jan Beulich wrote:
>>> - being non-atomic, their pointer arguments shouldn't be volatile-
>>>     qualified
>>> - their (half fake) memory operands can be a single "+m" instead of
>>>     being both an output and an input
>>>
>>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> After further consideration, would it not be better to change the
>> non-atomic variants to being straight C.
>>
>> e.g.
>>
>> static inline void __set_bit(int nr, void *_addr)
>> {
>>       int *addr = _addr;
>>       addr[nr / sizeof(int)] |= (1U << (nr % sizeof(int)));
>> }
>>
>> This would drop the memory clobber from the asm statement and allow the
>> compiler to optimise repeated __set_bit() calls to the same word into a
>> single action.
> I wouldn't want to do this in this patch - a similar change that I
> proposed years ago for Linux (not eliminating the asm(), but
> dropping the clobbers) caused not really understood regressions and
> hence needed to be reverted.

Presumably some code was lacking barrier()s

>   We could still try whether this works in
> Xen, but in another patch. Apart from that I doubt the operation
> above would reliably get converted to BTS by the compiler
> (independent of version).

Ok - let's leave that for v2.

Both patches Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

>
>> (On a separate note, I feel that all of these operations should be
>> acting on unsigned rather than signed ints, but that applies to all of
>> these operations, not just the non-atomic ones)
> That makes zero difference for these ops - all that really matters
> is the width. In the C version above however I agree that using
> "unsigned int" would be more natural.
>
> Jan
>

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2015-03-26 10:03 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-03-20 14:47 [PATCH v3 0/2] x86: bitops adjustments Jan Beulich
2015-03-20 14:53 ` [PATCH v3 1/2] x86: simplify non‑atomic bitops Jan Beulich
2015-03-25 17:49   ` Andrew Cooper
2015-03-26  8:00     ` Jan Beulich
2015-03-26  9:54       ` Andrew Cooper
2015-03-20 14:54 ` [PATCH v3 2/2] x86: make atomic bitops consistent with non‑atomic ones Jan Beulich

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.