* [PATCH V4 0/2] arm64: copy to/in/from user optimization
@ 2015-08-21 22:00 Feng Kan
2015-08-21 22:01 ` [PATCH V4 1/2] arm64: copy_to-from-in_user optimization using copy template Feng Kan
2015-08-21 22:01 ` [PATCH V4 2/2] arm64: Change memcpy in kernel to use the copy template file Feng Kan
From: Feng Kan @ 2015-08-21 22:00 UTC
To: patches, linux-arm-kernel, linux-kernel, philipp.tomsich,
dann.frazier, tim.gardner, craig.magina, soni.trilok.oss
Cc: Feng Kan
This converts all of the copy to/in/from user files to use the copy
template file. The copy template file is based on memcpy.S, and the
second patch converts memcpy itself to use the copy template as well.
An overnight trinity run and a 10G iperf test were used to verify
correctness and performance; iperf showed a noticeable improvement
(~30%) on the receive side.
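The sharing pattern is that each copy*.S file defines a small set of
load/store macros and then includes the template body; schematically
(a sketch condensed from the patches below, omitting the PAN
alternatives):

	/* copy_to_user.S: only stores to the user buffer may fault */
	.macro stp1 label, ptr, regB, regC, val
	USER(\label, stp \ptr, \regB, [\regC], \val)
	.endm
	/* ... the other ldrb1/strb1/ldrh1/strh1/ldr1/str1/ldp1 macros ... */

ENTRY(__copy_to_user)
#include "copy_template.S"	// body parameterized by the macros above
	mov	x0, #0		// 0 bytes not copied
	ret
ENDPROC(__copy_to_user)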
V4 Changes:
- base the copy template file on the existing memcpy.S
- convert the copy*.S files to use the copy template
- convert memcpy to use the copy template
Feng Kan (2):
arm64: copy_to-from-in_user optimization using copy template
arm64: Change memcpy in kernel to use the copy template file
arch/arm64/lib/copy_from_user.S | 78 +++++++++-------
arch/arm64/lib/copy_in_user.S | 66 ++++++++------
arch/arm64/lib/copy_template.S | 196 ++++++++++++++++++++++++++++++++++++++++
arch/arm64/lib/copy_to_user.S | 66 ++++++++------
arch/arm64/lib/memcpy.S | 179 ++++++------------------------------
5 files changed, 340 insertions(+), 245 deletions(-)
create mode 100644 arch/arm64/lib/copy_template.S
--
1.9.1
* [PATCH V4 1/2] arm64: copy_to-from-in_user optimization using copy template
@ 2015-08-21 22:01 ` Feng Kan
2015-09-07 16:54 ` Catalin Marinas
From: Feng Kan @ 2015-08-21 22:01 UTC
To: patches, linux-arm-kernel, linux-kernel, philipp.tomsich,
dann.frazier, tim.gardner, craig.magina, soni.trilok.oss
Cc: Feng Kan, Balamurugan Shanmugam
This patch optimizes copy_to/from/in_user for the arm64 architecture.
The copy template uses memcpy.S as a base, which allows the template
to be shared by all of the copy*.S files.
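Fault handling continues to rely on the arm64 USER() helper, which
tags a single instruction with an exception-table entry pointing at a
fixup label. For context, it looks roughly like this (a simplified
sketch of the definition in arch/arm64/include/asm/assembler.h; not
part of this patch, and the exact form may differ):

#define USER(l, x...)			\
9999:	x;				\
	.section __ex_table,"a";	\
	.align	3;			\
	.quad	9999b,l;		\
	.previous

Each ld*/st* macro below passes its fixup label straight through to
USER(), so the shared template never needs to know which accesses can
fault.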
Signed-off-by: Feng Kan <fkan@apm.com>
Signed-off-by: Balamurugan Shanmugam <bshanmugam@apm.com>
---
arch/arm64/lib/copy_from_user.S | 78 +++++++++-------
arch/arm64/lib/copy_in_user.S | 66 ++++++++------
arch/arm64/lib/copy_template.S | 196 ++++++++++++++++++++++++++++++++++++++++
arch/arm64/lib/copy_to_user.S | 66 ++++++++------
4 files changed, 314 insertions(+), 92 deletions(-)
create mode 100644 arch/arm64/lib/copy_template.S
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 1be9ef2..cb085cf 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -18,6 +18,7 @@
#include <asm/alternative.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
@@ -31,49 +32,58 @@
* Returns:
* x0 - bytes not copied
*/
+
+ .macro ldrb1 label, ptr, regB, val
+ USER(\label, ldrb \ptr, [\regB], \val)
+ .endm
+
+ .macro strb1 label, ptr, regB, val
+ strb \ptr, [\regB], \val
+ .endm
+
+ .macro ldrh1 label, ptr, regB, val
+ USER(\label, ldrh \ptr, [\regB], \val)
+ .endm
+
+ .macro strh1 label, ptr, regB, val
+ strh \ptr, [\regB], \val
+ .endm
+
+ .macro ldr1 label, ptr, regB, val
+ USER(\label, ldr \ptr, [\regB], \val)
+ .endm
+
+ .macro str1 label, ptr, regB, val
+ str \ptr, [\regB], \val
+ .endm
+
+ .macro ldp1 label, ptr, regB, regC, val
+ USER(\label, ldp \ptr, \regB, [\regC], \val)
+ .endm
+
+ .macro stp1 label, ptr, regB, regC, val
+ stp \ptr, \regB, [\regC], \val
+ .endm
+
ENTRY(__copy_from_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
- add x5, x1, x2 // upper user buffer boundary
- subs x2, x2, #16
- b.mi 1f
-0:
-USER(9f, ldp x3, x4, [x1], #16)
- subs x2, x2, #16
- stp x3, x4, [x0], #16
- b.pl 0b
-1: adds x2, x2, #8
- b.mi 2f
-USER(9f, ldr x3, [x1], #8 )
- sub x2, x2, #8
- str x3, [x0], #8
-2: adds x2, x2, #4
- b.mi 3f
-USER(9f, ldr w3, [x1], #4 )
- sub x2, x2, #4
- str w3, [x0], #4
-3: adds x2, x2, #2
- b.mi 4f
-USER(9f, ldrh w3, [x1], #2 )
- sub x2, x2, #2
- strh w3, [x0], #2
-4: adds x2, x2, #1
- b.mi 5f
-USER(9f, ldrb w3, [x1] )
- strb w3, [x0]
-5: mov x0, #0
+#include "copy_template.S"
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
+ mov x0, #0 // Nothing to copy
ret
ENDPROC(__copy_from_user)
.section .fixup,"ax"
.align 2
-9: sub x2, x5, x1
- mov x3, x2
-10: strb wzr, [x0], #1 // zero remaining buffer space
- subs x3, x3, #1
- b.ne 10b
- mov x0, x2 // bytes not copied
+11:
+ sub x4, tmp3, dst
+ mov x0, x4
+ sub dst, tmp3, x4
+
+20: strb wzr, [dst], #1 // zero remaining buffer space
+ subs x4, x4, #1
+ b.ne 20b
ret
.previous
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 1b94661e..b54d44e 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -20,6 +20,7 @@
#include <asm/alternative.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
@@ -33,44 +34,51 @@
* Returns:
* x0 - bytes not copied
*/
+ .macro ldrb1 label, ptr, regB, val
+ USER(\label, ldrb \ptr, [\regB], \val)
+ .endm
+
+ .macro strb1 label, ptr, regB, val
+ USER(\label, strb \ptr, [\regB], \val)
+ .endm
+
+ .macro ldrh1 label, ptr, regB, val
+ USER(\label, ldrh \ptr, [\regB], \val)
+ .endm
+
+ .macro strh1 label, ptr, regB, val
+ USER(\label, strh \ptr, [\regB], \val)
+ .endm
+
+ .macro ldr1 label, ptr, regB, val
+ USER(\label, ldr \ptr, [\regB], \val)
+ .endm
+
+ .macro str1 label, ptr, regB, val
+ USER(\label, str \ptr, [\regB], \val)
+ .endm
+
+ .macro ldp1 label, ptr, regB, regC, val
+ USER(\label, ldp \ptr, \regB, [\regC], \val)
+ .endm
+
+ .macro stp1 label, ptr, regB, regC, val
+ USER(\label, stp \ptr, \regB, [\regC], \val)
+ .endm
+
ENTRY(__copy_in_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
- add x5, x0, x2 // upper user buffer boundary
- subs x2, x2, #16
- b.mi 1f
-0:
-USER(9f, ldp x3, x4, [x1], #16)
- subs x2, x2, #16
-USER(9f, stp x3, x4, [x0], #16)
- b.pl 0b
-1: adds x2, x2, #8
- b.mi 2f
-USER(9f, ldr x3, [x1], #8 )
- sub x2, x2, #8
-USER(9f, str x3, [x0], #8 )
-2: adds x2, x2, #4
- b.mi 3f
-USER(9f, ldr w3, [x1], #4 )
- sub x2, x2, #4
-USER(9f, str w3, [x0], #4 )
-3: adds x2, x2, #2
- b.mi 4f
-USER(9f, ldrh w3, [x1], #2 )
- sub x2, x2, #2
-USER(9f, strh w3, [x0], #2 )
-4: adds x2, x2, #1
- b.mi 5f
-USER(9f, ldrb w3, [x1] )
-USER(9f, strb w3, [x0] )
-5: mov x0, #0
+#include "copy_template.S"
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
+ mov x0, #0
ret
ENDPROC(__copy_in_user)
.section .fixup,"ax"
.align 2
-9: sub x0, x5, x0 // bytes not copied
+11: sub tmp3, tmp3, dst // bytes not copied
+ mov x0, tmp3
ret
.previous
diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S
new file mode 100644
index 0000000..c9ece2f
--- /dev/null
+++ b/arch/arm64/lib/copy_template.S
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+/*
+ * Copy a buffer from src to dest (alignment handled by the hardware)
+ *
+ * Parameters:
+ * x0 - dest
+ * x1 - src
+ * x2 - n
+ * Returns:
+ * x0 - dest
+ */
+dstin .req x0
+src .req x1
+count .req x2
+tmp1 .req x3
+tmp1w .req w3
+tmp2 .req x4
+tmp2w .req w4
+tmp3 .req x5
+tmp3w .req w5
+dst .req x6
+
+A_l .req x7
+A_h .req x8
+B_l .req x9
+B_h .req x10
+C_l .req x11
+C_h .req x12
+D_l .req x13
+D_h .req x14
+
+ mov dst, dstin
+ add tmp3, dst, count
+ cmp count, #16
+	/* When the length is less than 16, the accesses are not aligned. */
+ b.lo .Ltiny15
+
+ neg tmp2, src
+	ands	tmp2, tmp2, #15		/* Bytes to reach alignment. */
+ b.eq .LSrcAligned
+ sub count, count, tmp2
+ /*
+	 * Copy the leading memory data from src to dst in increasing
+	 * address order. This way, the risk of overwriting the source
+	 * data is eliminated when the distance between src and dst is
+	 * less than 16. The memory accesses here are aligned.
+ */
+ tbz tmp2, #0, 1f
+ ldrb1 11f, tmp1w, src, #1
+ strb1 11f, tmp1w, dst, #1
+1:
+ tbz tmp2, #1, 2f
+ ldrh1 11f, tmp1w, src, #2
+ strh1 11f, tmp1w, dst, #2
+2:
+ tbz tmp2, #2, 3f
+ ldr1 11f, tmp1w, src, #4
+ str1 11f, tmp1w, dst, #4
+3:
+ tbz tmp2, #3, .LSrcAligned
+ ldr1 11f, tmp1, src, #8
+ str1 11f, tmp1, dst, #8
+
+.LSrcAligned:
+ cmp count, #64
+ b.ge .Lcpy_over64
+ /*
+ * Deal with small copies quickly by dropping straight into the
+ * exit block.
+ */
+.Ltail63:
+ /*
+ * Copy up to 48 bytes of data. At this point we only need the
+ * bottom 6 bits of count to be accurate.
+ */
+ ands tmp1, count, #0x30
+ b.eq .Ltiny15
+ cmp tmp1w, #0x20
+ b.eq 1f
+ b.lt 2f
+ ldp1 11f, A_l, A_h, src, #16
+ stp1 11f, A_l, A_h, dst, #16
+1:
+ ldp1 11f, A_l, A_h, src, #16
+ stp1 11f, A_l, A_h, dst, #16
+2:
+ ldp1 11f, A_l, A_h, src, #16
+ stp1 11f, A_l, A_h, dst, #16
+.Ltiny15:
+ /*
+	 * Prefer to break one ldp/stp into several loads/stores that
+	 * access memory in increasing address order, rather than
+	 * loading/storing 16 bytes from (src-16) to (dst-16) and moving
+	 * src backward to an aligned address, as the original cortex
+	 * memcpy does. If the original approach were kept here, memmove
+	 * would need the precondition that the src address is at least
+	 * 16 bytes above dst, otherwise some source data would be
+	 * overwritten when memmove calls memcpy directly. To keep memmove
+	 * simpler and decouple memcpy from memmove, that approach was
+	 * withdrawn.
+ */
+ tbz count, #3, 1f
+ ldr1 11f, tmp1, src, #8
+ str1 11f, tmp1, dst, #8
+1:
+ tbz count, #2, 2f
+ ldr1 11f, tmp1w, src, #4
+ str1 11f, tmp1w, dst, #4
+2:
+ tbz count, #1, 3f
+ ldrh1 11f, tmp1w, src, #2
+ strh1 11f, tmp1w, dst, #2
+3:
+ tbz count, #0, .Lexitfunc
+ ldrb1 11f, tmp1w, src, #1
+ strb1 11f, tmp1w, dst, #1
+
+ b .Lexitfunc
+
+.Lcpy_over64:
+ subs count, count, #128
+ b.ge .Lcpy_body_large
+ /*
+ * Less than 128 bytes to copy, so handle 64 here and then jump
+ * to the tail.
+ */
+ ldp1 11f, A_l, A_h, src, #16
+ stp1 11f, A_l, A_h, dst, #16
+ ldp1 11f, B_l, B_h, src, #16
+ ldp1 11f, C_l, C_h, src, #16
+ stp1 11f, B_l, B_h, dst, #16
+ stp1 11f, C_l, C_h, dst, #16
+ ldp1 11f, D_l, D_h, src, #16
+ stp1 11f, D_l, D_h, dst, #16
+
+ tst count, #0x3f
+ b.ne .Ltail63
+ b .Lexitfunc
+
+ /*
+ * Critical loop. Start at a new cache line boundary. Assuming
+ * 64 bytes per line this ensures the entire loop is in one line.
+ */
+ .p2align L1_CACHE_SHIFT
+.Lcpy_body_large:
+	/* Preload the first 64 bytes of data. */
+ ldp1 11f, A_l, A_h, src, #16
+ ldp1 11f, B_l, B_h, src, #16
+ ldp1 11f, C_l, C_h, src, #16
+ ldp1 11f, D_l, D_h, src, #16
+1:
+ /*
+	 * Interleave the load of the next 64-byte block with the store
+	 * of the previously loaded 64 bytes.
+ */
+ stp1 11f, A_l, A_h, dst, #16
+ ldp1 11f, A_l, A_h, src, #16
+ stp1 11f, B_l, B_h, dst, #16
+ ldp1 11f, B_l, B_h, src, #16
+ stp1 11f, C_l, C_h, dst, #16
+ ldp1 11f, C_l, C_h, src, #16
+ stp1 11f, D_l, D_h, dst, #16
+ ldp1 11f, D_l, D_h, src, #16
+ subs count, count, #64
+ b.ge 1b
+ stp1 11f, A_l, A_h, dst, #16
+ stp1 11f, B_l, B_h, dst, #16
+ stp1 11f, C_l, C_h, dst, #16
+ stp1 11f, D_l, D_h, dst, #16
+
+ tst count, #0x3f
+ b.ne .Ltail63
+.Lexitfunc:
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index a257b47..0ef3eb2 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -18,6 +18,7 @@
#include <asm/alternative.h>
#include <asm/assembler.h>
+#include <asm/cache.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
@@ -31,44 +32,51 @@
* Returns:
* x0 - bytes not copied
*/
+ .macro ldrb1 label, ptr, regB, val
+ ldrb \ptr, [\regB], \val
+ .endm
+
+ .macro strb1 label, ptr, regB, val
+ USER(\label, strb \ptr, [\regB], \val)
+ .endm
+
+ .macro ldrh1 label, ptr, regB, val
+ ldrh \ptr, [\regB], \val
+ .endm
+
+ .macro strh1 label, ptr, regB, val
+ USER(\label, strh \ptr, [\regB], \val)
+ .endm
+
+ .macro ldr1 label, ptr, regB, val
+ ldr \ptr, [\regB], \val
+ .endm
+
+ .macro str1 label, ptr, regB, val
+ USER(\label, str \ptr, [\regB], \val)
+ .endm
+
+ .macro ldp1 label, ptr, regB, regC, val
+ ldp \ptr, \regB, [\regC], \val
+ .endm
+
+ .macro stp1 label, ptr, regB, regC, val
+ USER(\label, stp \ptr, \regB, [\regC], \val)
+ .endm
+
ENTRY(__copy_to_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
- add x5, x0, x2 // upper user buffer boundary
- subs x2, x2, #16
- b.mi 1f
-0:
- ldp x3, x4, [x1], #16
- subs x2, x2, #16
-USER(9f, stp x3, x4, [x0], #16)
- b.pl 0b
-1: adds x2, x2, #8
- b.mi 2f
- ldr x3, [x1], #8
- sub x2, x2, #8
-USER(9f, str x3, [x0], #8 )
-2: adds x2, x2, #4
- b.mi 3f
- ldr w3, [x1], #4
- sub x2, x2, #4
-USER(9f, str w3, [x0], #4 )
-3: adds x2, x2, #2
- b.mi 4f
- ldrh w3, [x1], #2
- sub x2, x2, #2
-USER(9f, strh w3, [x0], #2 )
-4: adds x2, x2, #1
- b.mi 5f
- ldrb w3, [x1]
-USER(9f, strb w3, [x0] )
-5: mov x0, #0
+#include "copy_template.S"
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
+ mov x0, #0
ret
ENDPROC(__copy_to_user)
.section .fixup,"ax"
.align 2
-9: sub x0, x5, x0 // bytes not copied
+11: sub tmp3, tmp3, dst // bytes not copied
+ mov x0, tmp3
ret
.previous
--
1.9.1
* [PATCH V4 2/2] arm64: Change memcpy in kernel to use the copy template file
@ 2015-08-21 22:01 ` Feng Kan
2015-09-07 11:13 ` Catalin Marinas
From: Feng Kan @ 2015-08-21 22:01 UTC
To: patches, linux-arm-kernel, linux-kernel, philipp.tomsich,
dann.frazier, tim.gardner, craig.magina, soni.trilok.oss
Cc: Feng Kan
This converts memcpy.S to use the copy template file. The copy
template file was originally based on memcpy.S; minor changes were
made to it to accommodate the copy to/from/in user files.
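Concretely, those minor changes amount to routing every load/store
through a macro that carries a fixup label; the user-copy variants
wrap the access in USER() while memcpy's macros simply ignore the
label (a sketch condensed from the diffs):

	/* faulting variant, e.g. copy_from_user.S */
	.macro ldp1 label, ptr, regB, regC, val
	USER(\label, ldp \ptr, \regB, [\regC], \val)
	.endm

	/* non-faulting variant, memcpy.S: \label is unused */
	.macro ldp1 label, ptr, regB, regC, val
	ldp \ptr, \regB, [\regC], \val
	.endm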
Signed-off-by: Feng Kan <fkan@apm.com>
---
arch/arm64/lib/memcpy.S | 179 +++++++-----------------------------------------
1 file changed, 26 insertions(+), 153 deletions(-)
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index 8a9a96d..761acd7 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -36,166 +36,39 @@
* Returns:
* x0 - dest
*/
-dstin .req x0
-src .req x1
-count .req x2
-tmp1 .req x3
-tmp1w .req w3
-tmp2 .req x4
-tmp2w .req w4
-tmp3 .req x5
-tmp3w .req w5
-dst .req x6
+ .macro ldrb1 label, ptr, regB, val
+ ldrb \ptr, [\regB], \val
+ .endm
-A_l .req x7
-A_h .req x8
-B_l .req x9
-B_h .req x10
-C_l .req x11
-C_h .req x12
-D_l .req x13
-D_h .req x14
+ .macro strb1 label, ptr, regB, val
+ strb \ptr, [\regB], \val
+ .endm
-ENTRY(memcpy)
- mov dst, dstin
- cmp count, #16
- /*When memory length is less than 16, the accessed are not aligned.*/
- b.lo .Ltiny15
+ .macro ldrh1 label, ptr, regB, val
+ ldrh \ptr, [\regB], \val
+ .endm
- neg tmp2, src
- ands tmp2, tmp2, #15/* Bytes to reach alignment. */
- b.eq .LSrcAligned
- sub count, count, tmp2
- /*
- * Copy the leading memory data from src to dst in an increasing
- * address order.By this way,the risk of overwritting the source
- * memory data is eliminated when the distance between src and
- * dst is less than 16. The memory accesses here are alignment.
- */
- tbz tmp2, #0, 1f
- ldrb tmp1w, [src], #1
- strb tmp1w, [dst], #1
-1:
- tbz tmp2, #1, 2f
- ldrh tmp1w, [src], #2
- strh tmp1w, [dst], #2
-2:
- tbz tmp2, #2, 3f
- ldr tmp1w, [src], #4
- str tmp1w, [dst], #4
-3:
- tbz tmp2, #3, .LSrcAligned
- ldr tmp1, [src],#8
- str tmp1, [dst],#8
+ .macro strh1 label, ptr, regB, val
+ strh \ptr, [\regB], \val
+ .endm
-.LSrcAligned:
- cmp count, #64
- b.ge .Lcpy_over64
- /*
- * Deal with small copies quickly by dropping straight into the
- * exit block.
- */
-.Ltail63:
- /*
- * Copy up to 48 bytes of data. At this point we only need the
- * bottom 6 bits of count to be accurate.
- */
- ands tmp1, count, #0x30
- b.eq .Ltiny15
- cmp tmp1w, #0x20
- b.eq 1f
- b.lt 2f
- ldp A_l, A_h, [src], #16
- stp A_l, A_h, [dst], #16
-1:
- ldp A_l, A_h, [src], #16
- stp A_l, A_h, [dst], #16
-2:
- ldp A_l, A_h, [src], #16
- stp A_l, A_h, [dst], #16
-.Ltiny15:
- /*
- * Prefer to break one ldp/stp into several load/store to access
- * memory in an increasing address order,rather than to load/store 16
- * bytes from (src-16) to (dst-16) and to backward the src to aligned
- * address,which way is used in original cortex memcpy. If keeping
- * the original memcpy process here, memmove need to satisfy the
- * precondition that src address is at least 16 bytes bigger than dst
- * address,otherwise some source data will be overwritten when memove
- * call memcpy directly. To make memmove simpler and decouple the
- * memcpy's dependency on memmove, withdrew the original process.
- */
- tbz count, #3, 1f
- ldr tmp1, [src], #8
- str tmp1, [dst], #8
-1:
- tbz count, #2, 2f
- ldr tmp1w, [src], #4
- str tmp1w, [dst], #4
-2:
- tbz count, #1, 3f
- ldrh tmp1w, [src], #2
- strh tmp1w, [dst], #2
-3:
- tbz count, #0, .Lexitfunc
- ldrb tmp1w, [src]
- strb tmp1w, [dst]
+ .macro ldr1 label, ptr, regB, val
+ ldr \ptr, [\regB], \val
+ .endm
-.Lexitfunc:
- ret
+ .macro str1 label, ptr, regB, val
+ str \ptr, [\regB], \val
+ .endm
-.Lcpy_over64:
- subs count, count, #128
- b.ge .Lcpy_body_large
- /*
- * Less than 128 bytes to copy, so handle 64 here and then jump
- * to the tail.
- */
- ldp A_l, A_h, [src],#16
- stp A_l, A_h, [dst],#16
- ldp B_l, B_h, [src],#16
- ldp C_l, C_h, [src],#16
- stp B_l, B_h, [dst],#16
- stp C_l, C_h, [dst],#16
- ldp D_l, D_h, [src],#16
- stp D_l, D_h, [dst],#16
+ .macro ldp1 label, ptr, regB, regC, val
+ ldp \ptr, \regB, [\regC], \val
+ .endm
- tst count, #0x3f
- b.ne .Ltail63
- ret
+ .macro stp1 label, ptr, regB, regC, val
+ stp \ptr, \regB, [\regC], \val
+ .endm
- /*
- * Critical loop. Start at a new cache line boundary. Assuming
- * 64 bytes per line this ensures the entire loop is in one line.
- */
- .p2align L1_CACHE_SHIFT
-.Lcpy_body_large:
- /* pre-get 64 bytes data. */
- ldp A_l, A_h, [src],#16
- ldp B_l, B_h, [src],#16
- ldp C_l, C_h, [src],#16
- ldp D_l, D_h, [src],#16
-1:
- /*
- * interlace the load of next 64 bytes data block with store of the last
- * loaded 64 bytes data.
- */
- stp A_l, A_h, [dst],#16
- ldp A_l, A_h, [src],#16
- stp B_l, B_h, [dst],#16
- ldp B_l, B_h, [src],#16
- stp C_l, C_h, [dst],#16
- ldp C_l, C_h, [src],#16
- stp D_l, D_h, [dst],#16
- ldp D_l, D_h, [src],#16
- subs count, count, #64
- b.ge 1b
- stp A_l, A_h, [dst],#16
- stp B_l, B_h, [dst],#16
- stp C_l, C_h, [dst],#16
- stp D_l, D_h, [dst],#16
-
- tst count, #0x3f
- b.ne .Ltail63
+ENTRY(memcpy)
+#include "copy_template.S"
ret
ENDPROC(memcpy)
--
1.9.1
* Re: [PATCH V4 2/2] arm64: Change memcpy in kernel to use the copy template file
2015-08-21 22:01 ` [PATCH V4 2/2] arm64: Change memcpy in kernel to use the copy template file Feng Kan
@ 2015-09-07 11:13 ` Catalin Marinas
From: Catalin Marinas @ 2015-09-07 11:13 UTC
To: Feng Kan
Cc: patches, linux-arm-kernel, linux-kernel, philipp.tomsich,
dann.frazier, tim.gardner, craig.magina, soni.trilok.oss
On Fri, Aug 21, 2015 at 03:01:41PM -0700, Feng Kan wrote:
> This converts memcpy.S to use the copy template file. The copy
> template file was originally based on memcpy.S; minor changes were
> made to it to accommodate the copy to/from/in user files.
I think it will be easier to follow if you make this patch the first,
just a simple move of code without any functional change.
>
> Signed-off-by: Feng Kan <fkan@apm.com>
> ---
> arch/arm64/lib/memcpy.S | 179 +++++++-----------------------------------------
> 1 file changed, 26 insertions(+), 153 deletions(-)
>
> diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
> index 8a9a96d..761acd7 100644
> --- a/arch/arm64/lib/memcpy.S
> +++ b/arch/arm64/lib/memcpy.S
[...]
> + .macro strh1 label, ptr, regB, val
> + strh \ptr, [\regB], \val
> + .endm
There is a problem with the tab/space conversion, either in your editor
or the email server. The above indentation should use tabs (as should
the subsequent ones).
--
Catalin
* Re: [PATCH V4 1/2] arm64: copy_to-from-in_user optimization using copy template
2015-08-21 22:01 ` [PATCH V4 1/2] arm64: copy_to-from-in_user optimization using copy template Feng Kan
@ 2015-09-07 16:54 ` Catalin Marinas
From: Catalin Marinas @ 2015-09-07 16:54 UTC
To: Feng Kan
Cc: patches, linux-arm-kernel, linux-kernel, philipp.tomsich,
dann.frazier, tim.gardner, craig.magina, soni.trilok.oss,
Balamurugan Shanmugam
On Fri, Aug 21, 2015 at 03:01:33PM -0700, Feng Kan wrote:
> diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
> index 1be9ef2..cb085cf 100644
> --- a/arch/arm64/lib/copy_from_user.S
> +++ b/arch/arm64/lib/copy_from_user.S
> @@ -18,6 +18,7 @@
>
> #include <asm/alternative.h>
> #include <asm/assembler.h>
> +#include <asm/cache.h>
> #include <asm/cpufeature.h>
> #include <asm/sysreg.h>
>
> @@ -31,49 +32,58 @@
> * Returns:
> * x0 - bytes not copied
> */
> +
> + .macro ldrb1 label, ptr, regB, val
> + USER(\label, ldrb \ptr, [\regB], \val)
> + .endm
> +
> + .macro strb1 label, ptr, regB, val
> + strb \ptr, [\regB], \val
> + .endm
> +
> + .macro ldrh1 label, ptr, regB, val
> + USER(\label, ldrh \ptr, [\regB], \val)
> + .endm
> +
> + .macro strh1 label, ptr, regB, val
> + strh \ptr, [\regB], \val
> + .endm
> +
> + .macro ldr1 label, ptr, regB, val
> + USER(\label, ldr \ptr, [\regB], \val)
> + .endm
> +
> + .macro str1 label, ptr, regB, val
> + str \ptr, [\regB], \val
> + .endm
> +
> + .macro ldp1 label, ptr, regB, regC, val
> + USER(\label, ldp \ptr, \regB, [\regC], \val)
> + .endm
> +
> + .macro stp1 label, ptr, regB, regC, val
> + stp \ptr, \regB, [\regC], \val
> + .endm
Since only the user access functions care about the abort label, and
the label is always the same (11f as far as I can see), we can drop it
from copy_template.S entirely. Just use a large numeric label in the
macros, something like:
.macro ldp1 ptr, regB, regC, val
USER(9998f, ldp \ptr, \regB, [\regC], \val)
.endm
.macro stp1 ptr, regB, regC, val
stp \ptr, \regB, [\regC], \val
.endm
> ENTRY(__copy_from_user)
> ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
> CONFIG_ARM64_PAN)
> - add x5, x1, x2 // upper user buffer boundary
> - subs x2, x2, #16
> - b.mi 1f
> -0:
> -USER(9f, ldp x3, x4, [x1], #16)
> - subs x2, x2, #16
> - stp x3, x4, [x0], #16
> - b.pl 0b
> -1: adds x2, x2, #8
> - b.mi 2f
> -USER(9f, ldr x3, [x1], #8 )
> - sub x2, x2, #8
> - str x3, [x0], #8
> -2: adds x2, x2, #4
> - b.mi 3f
> -USER(9f, ldr w3, [x1], #4 )
> - sub x2, x2, #4
> - str w3, [x0], #4
> -3: adds x2, x2, #2
> - b.mi 4f
> -USER(9f, ldrh w3, [x1], #2 )
> - sub x2, x2, #2
> - strh w3, [x0], #2
> -4: adds x2, x2, #1
> - b.mi 5f
> -USER(9f, ldrb w3, [x1] )
> - strb w3, [x0]
> -5: mov x0, #0
> +#include "copy_template.S"
> ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
> CONFIG_ARM64_PAN)
> + mov x0, #0 // Nothing to copy
> ret
> ENDPROC(__copy_from_user)
>
> .section .fixup,"ax"
> .align 2
> -9: sub x2, x5, x1
> - mov x3, x2
> -10: strb wzr, [x0], #1 // zero remaining buffer space
> - subs x3, x3, #1
> - b.ne 10b
> - mov x0, x2 // bytes not copied
> +11:
> + sub x4, tmp3, dst
> + mov x0, x4
> + sub dst, tmp3, x4
Here you would have the 9998: label
> +
> +20: strb wzr, [dst], #1 // zero remaining buffer space
> + subs x4, x4, #1
> + b.ne 20b
and 9999 here.
BTW, you should use tmp1 instead of x4 to avoid clashes in case we
rename the register aliases. And you can probably write this with
fewer instructions:
9998:
sub x0, tmp3, dst
9999:
strb wzr, [dst], #1 // zero remaining buffer space
cmp dst, tmp3
b.lo 9999b
> ENTRY(__copy_in_user)
> ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
> CONFIG_ARM64_PAN)
> - add x5, x0, x2 // upper user buffer boundary
> - subs x2, x2, #16
> - b.mi 1f
> -0:
> -USER(9f, ldp x3, x4, [x1], #16)
> - subs x2, x2, #16
> -USER(9f, stp x3, x4, [x0], #16)
> - b.pl 0b
> -1: adds x2, x2, #8
> - b.mi 2f
> -USER(9f, ldr x3, [x1], #8 )
> - sub x2, x2, #8
> -USER(9f, str x3, [x0], #8 )
> -2: adds x2, x2, #4
> - b.mi 3f
> -USER(9f, ldr w3, [x1], #4 )
> - sub x2, x2, #4
> -USER(9f, str w3, [x0], #4 )
> -3: adds x2, x2, #2
> - b.mi 4f
> -USER(9f, ldrh w3, [x1], #2 )
> - sub x2, x2, #2
> -USER(9f, strh w3, [x0], #2 )
> -4: adds x2, x2, #1
> - b.mi 5f
> -USER(9f, ldrb w3, [x1] )
> -USER(9f, strb w3, [x0] )
> -5: mov x0, #0
> +#include "copy_template.S"
> ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
> CONFIG_ARM64_PAN)
> + mov x0, #0
> ret
> ENDPROC(__copy_in_user)
>
> .section .fixup,"ax"
> .align 2
> -9: sub x0, x5, x0 // bytes not copied
> +11: sub tmp3, tmp3, dst // bytes not copied
> + mov x0, tmp3
Why not "sub x0, tmp3, dst" directly?
> diff --git a/arch/arm64/lib/copy_template.S b/arch/arm64/lib/copy_template.S
> new file mode 100644
> index 0000000..c9ece2f
> --- /dev/null
> +++ b/arch/arm64/lib/copy_template.S
> @@ -0,0 +1,196 @@
[...]
> +/*
> + * Copy a buffer from src to dest (alignment handled by the hardware)
> + *
> + * Parameters:
> + * x0 - dest
> + * x1 - src
> + * x2 - n
> + * Returns:
> + * x0 - dest
> + */
> +dstin .req x0
> +src .req x1
> +count .req x2
> +tmp1 .req x3
> +tmp1w .req w3
> +tmp2 .req x4
> +tmp2w .req w4
> +tmp3 .req x5
> +tmp3w .req w5
> +dst .req x6
> +
> +A_l .req x7
> +A_h .req x8
> +B_l .req x9
> +B_h .req x10
> +C_l .req x11
> +C_h .req x12
> +D_l .req x13
> +D_h .req x14
> +
> + mov dst, dstin
> + add tmp3, dst, count
You could keep this in the copy_from_user.S etc. files to avoid
another addition for memcpy where it's not needed. And you could keep
the .req in there as well (together with the "end" alias):
end .req x5 // same as tmp3
ENTRY(__copy_from_user)
...
add end, x0, x2
#include "copy_template.S"
...
> ENTRY(__copy_to_user)
> ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
> CONFIG_ARM64_PAN)
> - add x5, x0, x2 // upper user buffer boundary
> - subs x2, x2, #16
> - b.mi 1f
> -0:
> - ldp x3, x4, [x1], #16
> - subs x2, x2, #16
> -USER(9f, stp x3, x4, [x0], #16)
> - b.pl 0b
> -1: adds x2, x2, #8
> - b.mi 2f
> - ldr x3, [x1], #8
> - sub x2, x2, #8
> -USER(9f, str x3, [x0], #8 )
> -2: adds x2, x2, #4
> - b.mi 3f
> - ldr w3, [x1], #4
> - sub x2, x2, #4
> -USER(9f, str w3, [x0], #4 )
> -3: adds x2, x2, #2
> - b.mi 4f
> - ldrh w3, [x1], #2
> - sub x2, x2, #2
> -USER(9f, strh w3, [x0], #2 )
> -4: adds x2, x2, #1
> - b.mi 5f
> - ldrb w3, [x1]
> -USER(9f, strb w3, [x0] )
> -5: mov x0, #0
> +#include "copy_template.S"
> ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
> CONFIG_ARM64_PAN)
> + mov x0, #0
> ret
> ENDPROC(__copy_to_user)
>
> .section .fixup,"ax"
> .align 2
> -9: sub x0, x5, x0 // bytes not copied
> +11: sub tmp3, tmp3, dst // bytes not copied
> + mov x0, tmp3
Same here, "sub x0, end, dst" directly.
--
Catalin