From: Oliver Swede <oli.swede@arm.com>
To: Will Deacon <will@kernel.org>, Catalin Marinas <catalin.marinas@arm.com>
Cc: Robin Murphy <robin.murphy@arm.com>,
linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org
Subject: [PATCH v4 14/14] arm64: Improve accuracy of fixup for UAO cases
Date: Tue, 30 Jun 2020 19:48:22 +0000 [thread overview]
Message-ID: <20200630194822.1082-15-oli.swede@arm.com> (raw)
In-Reply-To: <20200630194822.1082-1-oli.swede@arm.com>
This accounts for variations in the number of bytes copied to the
destination buffer that can result when each STP instruction is
substituted with two unprivileged STTR variants, which occurs if UAO
is supported and enabled.
Rather than duplicating each store fixup routine with a modified copy
for the UAO case, the relevant alternatives are inserted in-line.
Signed-off-by: Oliver Swede <oli.swede@arm.com>
---
arch/arm64/lib/copy_user_fixup.S | 47 ++++++++++++++++++++++++++++----
1 file changed, 41 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/lib/copy_user_fixup.S b/arch/arm64/lib/copy_user_fixup.S
index 37ca3d99a02a..2d413f9ba5d3 100644
--- a/arch/arm64/lib/copy_user_fixup.S
+++ b/arch/arm64/lib/copy_user_fixup.S
@@ -205,7 +205,12 @@ addr .req x15
/* 32 < count < 128 -> count - ((addr-dst)&15) */
cmp count, 128
sub x0, addr, dst // relative fault offset
+ /* fault offset within dest. buffer */
+ alternative_if ARM64_HAS_UAO
+ bic x0, x0, 7 // stp subst. for 2x sttr
+ alternative_else
bic x0, x0, 15 // bytes already copied (steps of 16B stores)
+ alternative_endif
sub x0, count, x0 // bytes yet to copy
b.le L(end_fixup)
/* 128 < count -> count */
@@ -265,7 +270,12 @@ addr .req x15
sub tmp1, count, tmp1 // remaining bytes after non-overlapping section
sub x0, dstend, 64
sub x0, addr, x0
- bic x0, x0, 15 // fault offset within dest. buffer
+ /* fault offset within dest. buffer */
+ alternative_if ARM64_HAS_UAO
+ bic x0, x0, 7 // stp subst. for 2x sttr
+ alternative_else
+ bic x0, x0, 15 // bytes already copied (steps of 16B stores)
+ alternative_endif
add x0, dstend, x0
sub x0, x0, 64
sub x0, dstend, x0 // remaining bytes in final (overlapping) 64B
@@ -295,7 +305,12 @@ addr .req x15
*/
sub tmp1, dstend, 32
sub tmp1, addr, tmp1
- bic tmp1, tmp1, 15
+ /* fault offset */
+ alternative_if ARM64_HAS_UAO
+ bic tmp1, tmp1, 7 // stp subst. for 2x sttr
+ alternative_else
+ bic tmp1, tmp1, 15 // bytes already copied (steps of 16B stores)
+ alternative_endif
mov x0, 32
sub tmp1, x0, tmp1
sub x0, count, 32
@@ -309,7 +324,12 @@ addr .req x15
*/
sub tmp1, dstend, 32
sub tmp1, addr, tmp1
- bic tmp1, tmp1, 15
+ /* fault offset */
+ alternative_if ARM64_HAS_UAO
+ bic tmp1, tmp1, 7 // stp subst. for 2x sttr
+ alternative_else
+ bic tmp1, tmp1, 15 // bytes already copied (steps of 16B stores)
+ alternative_endif
mov x0, 32
sub tmp1, x0, tmp1
sub x0, count, 64
@@ -324,7 +344,12 @@ addr .req x15
*/
sub tmp1, dstend, 64
sub tmp1, addr, tmp1
- bic tmp1, tmp1, 15
+ /* fault offset */
+ alternative_if ARM64_HAS_UAO
+ bic tmp1, tmp1, 7 // stp subst. for 2x sttr
+ alternative_else
+ bic tmp1, tmp1, 15 // bytes already copied (steps of 16B stores)
+ alternative_endif
mov x0, 64
sub tmp1, x0, tmp1
cmp count, 128
@@ -378,10 +403,20 @@ addr .req x15
/* Take the min from {16,(fault_addr&15)-(dst&15)}
* and subtract from count to obtain the return value */
bic tmp1, dst, 15 // aligned dst
- bic x0, addr, 15
+ /* fault offset */
+ alternative_if ARM64_HAS_UAO
+ bic x0, addr, 7 // stp subst. for 2x sttr
+ alternative_else
+ bic x0, addr, 15 // bytes already copied (steps of 16B stores)
+ alternative_endif
sub x0, x0, tmp1 // relative fault offset
cmp x0, 16
- bic x0, addr, 15
+ /* fault offset */
+ alternative_if ARM64_HAS_UAO
+ bic x0, addr, 7 // stp subst. for 2x sttr
+ alternative_else
+ bic x0, addr, 15 // bytes already copied (steps of 16B stores)
+ alternative_endif
sub x0, x0, dst
sub x0, count, x0
b.gt L(end_fixup)
--
2.17.1
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
next prev parent reply other threads:[~2020-06-30 19:52 UTC|newest]
Thread overview: 19+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-06-30 19:48 [PATCH v4 00/14] arm64: Optimise and update memcpy, user copy and string routines Oliver Swede
2020-06-30 19:48 ` [PATCH v4 01/14] arm64: Allow passing fault address to fixup handlers Oliver Swede
2020-06-30 19:48 ` [PATCH v4 02/14] arm64: kprobes: Drop open-coded exception fixup Oliver Swede
2020-06-30 19:48 ` [PATCH v4 03/14] arm64: Import latest version of Cortex Strings' memcmp Oliver Swede
2020-06-30 19:48 ` [PATCH v4 04/14] arm64: Import latest version of Cortex Strings' memmove Oliver Swede
2020-06-30 19:48 ` [PATCH v4 05/14] arm64: Import latest version of Cortex Strings' strcmp Oliver Swede
2020-06-30 19:48 ` [PATCH v4 06/14] arm64: Import latest version of Cortex Strings' strlen Oliver Swede
2020-06-30 19:48 ` [PATCH v4 07/14] arm64: Import latest version of Cortex Strings' strncmp Oliver Swede
2020-06-30 19:48 ` [PATCH v4 08/14] arm64: Import latest optimization of memcpy Oliver Swede
2020-06-30 19:48 ` [PATCH v4 09/14] arm64: Tidy up _asm_extable_faultaddr usage Oliver Swede
2020-06-30 19:48 ` [PATCH v4 10/14] arm64: Store the arguments to copy_*_user on the stack Oliver Swede
2020-06-30 19:48 ` [PATCH v4 11/14] arm64: Use additional memcpy macros and fixups Oliver Swede
2020-06-30 19:48 ` [PATCH v4 12/14] arm64: Add fixup routines for usercopy load exceptions Oliver Swede
2020-06-30 19:48 ` [PATCH v4 13/14] arm64: Add fixup routines for usercopy store exceptions Oliver Swede
2020-06-30 19:48 ` Oliver Swede [this message]
2020-07-01 8:12 ` [PATCH v4 00/14] arm64: Optimise and update memcpy, user copy and string routines Oli Swede
2020-09-07 10:10 ` Will Deacon
2020-09-11 11:29 ` Catalin Marinas
2020-09-11 15:14 ` Oli Swede
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200630194822.1082-15-oli.swede@arm.com \
--to=oli.swede@arm.com \
--cc=catalin.marinas@arm.com \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=robin.murphy@arm.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).