* [RFC PATCH] arm64: make bitops 32-bit tolerant
@ 2014-04-27 9:21 Vladimir Murzin
2014-04-28 8:45 ` Will Deacon
2014-04-28 8:45 ` Will Deacon
0 siblings, 2 replies; 6+ messages in thread
From: Vladimir Murzin @ 2014-04-27 9:21 UTC (permalink / raw)
To: linux-arm-kernel
Xen assumes that bit operations are able to operate on 32-bit size and
alignment [1]. On arm64, bitops are based on atomic exclusive load/store
instructions to guarantee that changes are made atomically. However, these
instructions require the address to be aligned to the data size. Because, by
default, bitops operate on a 64-bit size, the address must be aligned
appropriately. All of this breaks Xen's assumptions about the properties
of bitops.
This patch makes bitops operate on a 32-bit size/alignment.
[1] http://www.gossamer-threads.com/lists/xen/devel/325613
Cc: catalin.marinas at arm.com
Cc: will.deacon at arm.com
Cc: david.vrabel at citrix.com
Cc: Ian.Campbell at citrix.com
Signed-off-by: Vladimir Murzin <murzin.v@gmail.com>
---
With this patch applied I was able to boot both Dom0 and DomU.
@Pranav: Could I have your Tested-by on this patch?
arch/arm64/lib/bitops.S | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index 7dac371..a6acfff 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -26,14 +26,14 @@
*/
.macro bitop, name, instr
ENTRY( \name )
- and w3, w0, #63 // Get bit offset
+ and w3, w0, #31 // Get bit offset
eor w0, w0, w3 // Clear low bits
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x3, x2, x3 // Create mask
-1: ldxr x2, [x1]
- \instr x2, x2, x3
- stxr w0, x2, [x1]
+1: ldxr w2, [x1]
+ \instr w2, w2, w3
+ stxr w0, w2, [x1]
cbnz w0, 1b
ret
ENDPROC(\name )
@@ -41,15 +41,15 @@ ENDPROC(\name )
.macro testop, name, instr
ENTRY( \name )
- and w3, w0, #63 // Get bit offset
+ and w3, w0, #31 // Get bit offset
eor w0, w0, w3 // Clear low bits
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x4, x2, x3 // Create mask
-1: ldxr x2, [x1]
+1: ldxr w2, [x1]
lsr x0, x2, x3 // Save old value of bit
- \instr x2, x2, x4 // toggle bit
- stlxr w5, x2, [x1]
+ \instr w2, w2, w4 // toggle bit
+ stlxr w5, w2, [x1]
cbnz w5, 1b
dmb ish
and x0, x0, #1
--
1.8.3.2
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [RFC PATCH] arm64: make bitops 32-bit tolerant
2014-04-27 9:21 [RFC PATCH] arm64: make bitops 32-bit tolerant Vladimir Murzin
2014-04-28 8:45 ` Will Deacon
@ 2014-04-28 8:45 ` Will Deacon
2014-04-28 9:15 ` Vladimir Murzin
2014-04-28 9:15 ` Vladimir Murzin
1 sibling, 2 replies; 6+ messages in thread
From: Will Deacon @ 2014-04-28 8:45 UTC (permalink / raw)
To: linux-arm-kernel
On Sun, Apr 27, 2014 at 10:21:17AM +0100, Vladimir Murzin wrote:
> Xen assumes that bit operations are able to operate on 32-bit size and
> alignment [1]. For arm64 bitops are based on atomic exclusive load/store
> instructions to guarantee that changes are made atomically. However, these
> instructions require that address to be aligned to the data size. Because, by
> default, bitops operates on 64-bit size it implies that address should be
> aligned appropriately. All these lead to breakage of Xen assumption for bitops
> properties.
>
> This patch turns bitops to operate on 32-bit size/alignment.
Please stop sending this patch. I have _absolutely_ _no_ intention of
butchering our bitops in this way, and I don't think the Xen guys require
changing the core bitops like this either (certainly, Ian's last reply to
you suggested you should be doing this another way).
NAK.
Will
> [1] http://www.gossamer-threads.com/lists/xen/devel/325613
>
> Cc: catalin.marinas at arm.com
> Cc: will.deacon at arm.com
> Cc: david.vrabel at citrix.com
> Cc: Ian.Campbell at citrix.com
>
> Signed-off-by: Vladimir Murzin <murzin.v@gmail.com>
> ---
> With this patch applied I was able to boot both Dom0 and DomU.
>
> @Pranav: Could I have your Tested-by on this patch?
>
> arch/arm64/lib/bitops.S | 16 ++++++++--------
> 1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
> index 7dac371..a6acfff 100644
> --- a/arch/arm64/lib/bitops.S
> +++ b/arch/arm64/lib/bitops.S
> @@ -26,14 +26,14 @@
> */
> .macro bitop, name, instr
> ENTRY( \name )
> - and w3, w0, #63 // Get bit offset
> + and w3, w0, #31 // Get bit offset
> eor w0, w0, w3 // Clear low bits
> mov x2, #1
> add x1, x1, x0, lsr #3 // Get word offset
> lsl x3, x2, x3 // Create mask
> -1: ldxr x2, [x1]
> - \instr x2, x2, x3
> - stxr w0, x2, [x1]
> +1: ldxr w2, [x1]
> + \instr w2, w2, w3
> + stxr w0, w2, [x1]
> cbnz w0, 1b
> ret
> ENDPROC(\name )
> @@ -41,15 +41,15 @@ ENDPROC(\name )
>
> .macro testop, name, instr
> ENTRY( \name )
> - and w3, w0, #63 // Get bit offset
> + and w3, w0, #31 // Get bit offset
> eor w0, w0, w3 // Clear low bits
> mov x2, #1
> add x1, x1, x0, lsr #3 // Get word offset
> lsl x4, x2, x3 // Create mask
> -1: ldxr x2, [x1]
> +1: ldxr w2, [x1]
> lsr x0, x2, x3 // Save old value of bit
> - \instr x2, x2, x4 // toggle bit
> - stlxr w5, x2, [x1]
> + \instr w2, w2, w4 // toggle bit
> + stlxr w5, w2, [x1]
> cbnz w5, 1b
> dmb ish
> and x0, x0, #1
> --
> 1.8.3.2
>
>
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [RFC PATCH] arm64: make bitops 32-bit tolerant
2014-04-27 9:21 [RFC PATCH] arm64: make bitops 32-bit tolerant Vladimir Murzin
@ 2014-04-28 8:45 ` Will Deacon
2014-04-28 8:45 ` Will Deacon
1 sibling, 0 replies; 6+ messages in thread
From: Will Deacon @ 2014-04-28 8:45 UTC (permalink / raw)
To: Vladimir Murzin
Cc: Ian.Campbell, Catalin Marinas, david.vrabel, xen-devel,
linux-arm-kernel, pranavkumar
On Sun, Apr 27, 2014 at 10:21:17AM +0100, Vladimir Murzin wrote:
> Xen assumes that bit operations are able to operate on 32-bit size and
> alignment [1]. For arm64 bitops are based on atomic exclusive load/store
> instructions to guarantee that changes are made atomically. However, these
> instructions require that address to be aligned to the data size. Because, by
> default, bitops operates on 64-bit size it implies that address should be
> aligned appropriately. All these lead to breakage of Xen assumption for bitops
> properties.
>
> This patch turns bitops to operate on 32-bit size/alignment.
Please stop sending this patch. I have _absolutely_ _no_ intention of
butchering our bitops in this way, and I don't think the Xen guys require
changing the core bitops like this either (certainly, Ian's last reply to
you suggested you should be doing this another way).
NAK.
Will
> [1] http://www.gossamer-threads.com/lists/xen/devel/325613
>
> Cc: catalin.marinas@arm.com
> Cc: will.deacon@arm.com
> Cc: david.vrabel@citrix.com
> Cc: Ian.Campbell@citrix.com
>
> Signed-off-by: Vladimir Murzin <murzin.v@gmail.com>
> ---
> With this patch applied I was able to boot both Dom0 and DomU.
>
> @Pranav: Could I have your Tested-by on this patch?
>
> arch/arm64/lib/bitops.S | 16 ++++++++--------
> 1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
> index 7dac371..a6acfff 100644
> --- a/arch/arm64/lib/bitops.S
> +++ b/arch/arm64/lib/bitops.S
> @@ -26,14 +26,14 @@
> */
> .macro bitop, name, instr
> ENTRY( \name )
> - and w3, w0, #63 // Get bit offset
> + and w3, w0, #31 // Get bit offset
> eor w0, w0, w3 // Clear low bits
> mov x2, #1
> add x1, x1, x0, lsr #3 // Get word offset
> lsl x3, x2, x3 // Create mask
> -1: ldxr x2, [x1]
> - \instr x2, x2, x3
> - stxr w0, x2, [x1]
> +1: ldxr w2, [x1]
> + \instr w2, w2, w3
> + stxr w0, w2, [x1]
> cbnz w0, 1b
> ret
> ENDPROC(\name )
> @@ -41,15 +41,15 @@ ENDPROC(\name )
>
> .macro testop, name, instr
> ENTRY( \name )
> - and w3, w0, #63 // Get bit offset
> + and w3, w0, #31 // Get bit offset
> eor w0, w0, w3 // Clear low bits
> mov x2, #1
> add x1, x1, x0, lsr #3 // Get word offset
> lsl x4, x2, x3 // Create mask
> -1: ldxr x2, [x1]
> +1: ldxr w2, [x1]
> lsr x0, x2, x3 // Save old value of bit
> - \instr x2, x2, x4 // toggle bit
> - stlxr w5, x2, [x1]
> + \instr w2, w2, w4 // toggle bit
> + stlxr w5, w2, [x1]
> cbnz w5, 1b
> dmb ish
> and x0, x0, #1
> --
> 1.8.3.2
>
>
^ permalink raw reply [flat|nested] 6+ messages in thread
* [RFC PATCH] arm64: make bitops 32-bit tolerant
2014-04-28 8:45 ` Will Deacon
2014-04-28 9:15 ` Vladimir Murzin
@ 2014-04-28 9:15 ` Vladimir Murzin
1 sibling, 0 replies; 6+ messages in thread
From: Vladimir Murzin @ 2014-04-28 9:15 UTC (permalink / raw)
To: linux-arm-kernel
On Mon, Apr 28, 2014 at 9:45 AM, Will Deacon <will.deacon@arm.com> wrote:
> On Sun, Apr 27, 2014 at 10:21:17AM +0100, Vladimir Murzin wrote:
>> Xen assumes that bit operations are able to operate on 32-bit size and
>> alignment [1]. For arm64 bitops are based on atomic exclusive load/store
>> instructions to guarantee that changes are made atomically. However, these
>> instructions require that address to be aligned to the data size. Because, by
>> default, bitops operates on 64-bit size it implies that address should be
>> aligned appropriately. All these lead to breakage of Xen assumption for bitops
>> properties.
>>
>> This patch turns bitops to operate on 32-bit size/alignment.
>
> Please stop sending this patch. I have _absolutely_ _no_ intention of
> butchering our bitops in this way, and I don't think the Xen guys require
> changing the core bitops like this either (certainly, Ian's last reply to
> you suggested you should be doing this another way).
>
> NAK.
>
> Will
>
Hi Will
Thanks for making your point clear!
Vladimir
>> [1] http://www.gossamer-threads.com/lists/xen/devel/325613
>>
>> Cc: catalin.marinas at arm.com
>> Cc: will.deacon at arm.com
>> Cc: david.vrabel at citrix.com
>> Cc: Ian.Campbell at citrix.com
>>
>> Signed-off-by: Vladimir Murzin <murzin.v@gmail.com>
>> ---
>> With this patch applied I was able to boot both Dom0 and DomU.
>>
>> @Pranav: Could I have your Tested-by on this patch?
>>
>> arch/arm64/lib/bitops.S | 16 ++++++++--------
>> 1 file changed, 8 insertions(+), 8 deletions(-)
>>
>> diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
>> index 7dac371..a6acfff 100644
>> --- a/arch/arm64/lib/bitops.S
>> +++ b/arch/arm64/lib/bitops.S
>> @@ -26,14 +26,14 @@
>> */
>> .macro bitop, name, instr
>> ENTRY( \name )
>> - and w3, w0, #63 // Get bit offset
>> + and w3, w0, #31 // Get bit offset
>> eor w0, w0, w3 // Clear low bits
>> mov x2, #1
>> add x1, x1, x0, lsr #3 // Get word offset
>> lsl x3, x2, x3 // Create mask
>> -1: ldxr x2, [x1]
>> - \instr x2, x2, x3
>> - stxr w0, x2, [x1]
>> +1: ldxr w2, [x1]
>> + \instr w2, w2, w3
>> + stxr w0, w2, [x1]
>> cbnz w0, 1b
>> ret
>> ENDPROC(\name )
>> @@ -41,15 +41,15 @@ ENDPROC(\name )
>>
>> .macro testop, name, instr
>> ENTRY( \name )
>> - and w3, w0, #63 // Get bit offset
>> + and w3, w0, #31 // Get bit offset
>> eor w0, w0, w3 // Clear low bits
>> mov x2, #1
>> add x1, x1, x0, lsr #3 // Get word offset
>> lsl x4, x2, x3 // Create mask
>> -1: ldxr x2, [x1]
>> +1: ldxr w2, [x1]
>> lsr x0, x2, x3 // Save old value of bit
>> - \instr x2, x2, x4 // toggle bit
>> - stlxr w5, x2, [x1]
>> + \instr w2, w2, w4 // toggle bit
>> + stlxr w5, w2, [x1]
>> cbnz w5, 1b
>> dmb ish
>> and x0, x0, #1
>> --
>> 1.8.3.2
>>
>>
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [RFC PATCH] arm64: make bitops 32-bit tolerant
2014-04-28 8:45 ` Will Deacon
@ 2014-04-28 9:15 ` Vladimir Murzin
2014-04-28 9:15 ` Vladimir Murzin
1 sibling, 0 replies; 6+ messages in thread
From: Vladimir Murzin @ 2014-04-28 9:15 UTC (permalink / raw)
To: Will Deacon
Cc: Ian.Campbell, Catalin Marinas, david.vrabel, xen-devel,
linux-arm-kernel, pranavkumar
On Mon, Apr 28, 2014 at 9:45 AM, Will Deacon <will.deacon@arm.com> wrote:
> On Sun, Apr 27, 2014 at 10:21:17AM +0100, Vladimir Murzin wrote:
>> Xen assumes that bit operations are able to operate on 32-bit size and
>> alignment [1]. For arm64 bitops are based on atomic exclusive load/store
>> instructions to guarantee that changes are made atomically. However, these
>> instructions require that address to be aligned to the data size. Because, by
>> default, bitops operates on 64-bit size it implies that address should be
>> aligned appropriately. All these lead to breakage of Xen assumption for bitops
>> properties.
>>
>> This patch turns bitops to operate on 32-bit size/alignment.
>
> Please stop sending this patch. I have _absolutely_ _no_ intention of
> butchering our bitops in this way, and I don't think the Xen guys require
> changing the core bitops like this either (certainly, Ian's last reply to
> you suggested you should be doing this another way).
>
> NAK.
>
> Will
>
Hi Will
Thanks for making your point clear!
Vladimir
>> [1] http://www.gossamer-threads.com/lists/xen/devel/325613
>>
>> Cc: catalin.marinas@arm.com
>> Cc: will.deacon@arm.com
>> Cc: david.vrabel@citrix.com
>> Cc: Ian.Campbell@citrix.com
>>
>> Signed-off-by: Vladimir Murzin <murzin.v@gmail.com>
>> ---
>> With this patch applied I was able to boot both Dom0 and DomU.
>>
>> @Pranav: Could I have your Tested-by on this patch?
>>
>> arch/arm64/lib/bitops.S | 16 ++++++++--------
>> 1 file changed, 8 insertions(+), 8 deletions(-)
>>
>> diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
>> index 7dac371..a6acfff 100644
>> --- a/arch/arm64/lib/bitops.S
>> +++ b/arch/arm64/lib/bitops.S
>> @@ -26,14 +26,14 @@
>> */
>> .macro bitop, name, instr
>> ENTRY( \name )
>> - and w3, w0, #63 // Get bit offset
>> + and w3, w0, #31 // Get bit offset
>> eor w0, w0, w3 // Clear low bits
>> mov x2, #1
>> add x1, x1, x0, lsr #3 // Get word offset
>> lsl x3, x2, x3 // Create mask
>> -1: ldxr x2, [x1]
>> - \instr x2, x2, x3
>> - stxr w0, x2, [x1]
>> +1: ldxr w2, [x1]
>> + \instr w2, w2, w3
>> + stxr w0, w2, [x1]
>> cbnz w0, 1b
>> ret
>> ENDPROC(\name )
>> @@ -41,15 +41,15 @@ ENDPROC(\name )
>>
>> .macro testop, name, instr
>> ENTRY( \name )
>> - and w3, w0, #63 // Get bit offset
>> + and w3, w0, #31 // Get bit offset
>> eor w0, w0, w3 // Clear low bits
>> mov x2, #1
>> add x1, x1, x0, lsr #3 // Get word offset
>> lsl x4, x2, x3 // Create mask
>> -1: ldxr x2, [x1]
>> +1: ldxr w2, [x1]
>> lsr x0, x2, x3 // Save old value of bit
>> - \instr x2, x2, x4 // toggle bit
>> - stlxr w5, x2, [x1]
>> + \instr w2, w2, w4 // toggle bit
>> + stlxr w5, w2, [x1]
>> cbnz w5, 1b
>> dmb ish
>> and x0, x0, #1
>> --
>> 1.8.3.2
>>
>>
^ permalink raw reply [flat|nested] 6+ messages in thread
* [RFC PATCH] arm64: make bitops 32-bit tolerant
@ 2014-04-27 9:21 Vladimir Murzin
0 siblings, 0 replies; 6+ messages in thread
From: Vladimir Murzin @ 2014-04-27 9:21 UTC (permalink / raw)
To: xen-devel, linux-arm-kernel
Cc: Ian.Campbell, catalin.marinas, Vladimir Murzin, will.deacon,
david.vrabel, pranavkumar
Xen assumes that bit operations are able to operate on 32-bit size and
alignment [1]. On arm64, bitops are based on atomic exclusive load/store
instructions to guarantee that changes are made atomically. However, these
instructions require the address to be aligned to the data size. Because, by
default, bitops operate on a 64-bit size, the address must be aligned
appropriately. All of this breaks Xen's assumptions about the properties
of bitops.
This patch makes bitops operate on a 32-bit size/alignment.
[1] http://www.gossamer-threads.com/lists/xen/devel/325613
Cc: catalin.marinas@arm.com
Cc: will.deacon@arm.com
Cc: david.vrabel@citrix.com
Cc: Ian.Campbell@citrix.com
Signed-off-by: Vladimir Murzin <murzin.v@gmail.com>
---
With this patch applied I was able to boot both Dom0 and DomU.
@Pranav: Could I have your Tested-by on this patch?
arch/arm64/lib/bitops.S | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index 7dac371..a6acfff 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -26,14 +26,14 @@
*/
.macro bitop, name, instr
ENTRY( \name )
- and w3, w0, #63 // Get bit offset
+ and w3, w0, #31 // Get bit offset
eor w0, w0, w3 // Clear low bits
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x3, x2, x3 // Create mask
-1: ldxr x2, [x1]
- \instr x2, x2, x3
- stxr w0, x2, [x1]
+1: ldxr w2, [x1]
+ \instr w2, w2, w3
+ stxr w0, w2, [x1]
cbnz w0, 1b
ret
ENDPROC(\name )
@@ -41,15 +41,15 @@ ENDPROC(\name )
.macro testop, name, instr
ENTRY( \name )
- and w3, w0, #63 // Get bit offset
+ and w3, w0, #31 // Get bit offset
eor w0, w0, w3 // Clear low bits
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x4, x2, x3 // Create mask
-1: ldxr x2, [x1]
+1: ldxr w2, [x1]
lsr x0, x2, x3 // Save old value of bit
- \instr x2, x2, x4 // toggle bit
- stlxr w5, x2, [x1]
+ \instr w2, w2, w4 // toggle bit
+ stlxr w5, w2, [x1]
cbnz w5, 1b
dmb ish
and x0, x0, #1
--
1.8.3.2
^ permalink raw reply related [flat|nested] 6+ messages in thread
end of thread, other threads:[~2014-04-28 9:15 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-04-27 9:21 [RFC PATCH] arm64: make bitops 32-bit tolerant Vladimir Murzin
2014-04-28 8:45 ` Will Deacon
2014-04-28 8:45 ` Will Deacon
2014-04-28 9:15 ` Vladimir Murzin
2014-04-28 9:15 ` Vladimir Murzin
2014-04-27 9:21 Vladimir Murzin
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.