* [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
@ 2012-05-31  6:22 Anton Blanchard
  2012-12-07 23:20 ` Jimi Xenidis
  0 siblings, 1 reply; 10+ messages in thread
From: Anton Blanchard @ 2012-05-31  6:22 UTC (permalink / raw)
  To: benh, paulus, michael; +Cc: linuxppc-dev


Implement a POWER7 optimised memcpy using VMX and enhanced prefetch
instructions.

This is a copy of the POWER7 optimised copy_to_user/copy_from_user
loop. Detailed implementation and performance details can be found in
commit a66086b8197d (powerpc: POWER7 optimised
copy_to_user/copy_from_user using VMX).

I noticed memcpy issues when profiling a RAID6 workload:

	.memcpy
	.async_memcpy
	.async_copy_data
	.__raid_run_ops
	.handle_stripe
	.raid5d
	.md_thread

I created a simplified testcase by building a RAID6 array with 4 1GB
ramdisks (booting with brd.rd_size=1048576):

# mdadm -CR -e 1.2 /dev/md0 --level=6 -n4 /dev/ram[0-3]

I then timed how long it took to write to the entire array:

# dd if=/dev/zero of=/dev/md0 bs=1M

Before: 892 MB/s
After:  999 MB/s

A 12% improvement.

Signed-off-by: Anton Blanchard <anton@samba.org>
---

Index: linux-build/arch/powerpc/lib/Makefile
===================================================================
--- linux-build.orig/arch/powerpc/lib/Makefile	2012-05-30 15:27:30.000000000 +1000
+++ linux-build/arch/powerpc/lib/Makefile	2012-05-31 09:12:27.574372864 +1000
@@ -17,7 +17,8 @@ obj-$(CONFIG_HAS_IOMEM)	+= devres.o
 obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o \
 			   checksum_wrappers_64.o hweight_64.o \
-			   copyuser_power7.o string_64.o copypage_power7.o
+			   copyuser_power7.o string_64.o copypage_power7.o \
+			   memcpy_power7.o
 obj-$(CONFIG_XMON)	+= sstep.o ldstfp.o
 obj-$(CONFIG_KPROBES)	+= sstep.o ldstfp.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= sstep.o ldstfp.o
Index: linux-build/arch/powerpc/lib/memcpy_64.S
===================================================================
--- linux-build.orig/arch/powerpc/lib/memcpy_64.S	2012-05-30 09:39:59.000000000 +1000
+++ linux-build/arch/powerpc/lib/memcpy_64.S	2012-05-31 09:12:00.093876936 +1000
@@ -11,7 +11,11 @@
 
 	.align	7
 _GLOBAL(memcpy)
+BEGIN_FTR_SECTION
 	std	r3,48(r1)	/* save destination pointer for return value */
+FTR_SECTION_ELSE
+	b	memcpy_power7
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
 	PPC_MTOCRF(0x01,r5)
 	cmpldi	cr1,r5,16
 	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
Index: linux-build/arch/powerpc/lib/memcpy_power7.S
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-build/arch/powerpc/lib/memcpy_power7.S	2012-05-31 15:28:03.495781127 +1000
@@ -0,0 +1,650 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/ppc_asm.h>
+
+#define STACKFRAMESIZE	256
+#define STK_REG(i)	(112 + ((i)-14)*8)
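+/* Save slots above the 112B ABI stack frame header for non-volatile GPRs */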
+
+_GLOBAL(memcpy_power7)
+#ifdef CONFIG_ALTIVEC
+	cmpldi	r5,16
+	cmpldi	cr1,r5,4096
+
+	std	r3,48(r1)
+
+	blt	.Lshort_copy
+	bgt	cr1,.Lvmx_copy
+#else
+	cmpldi	r5,16
+
+	std	r3,48(r1)
+
+	blt	.Lshort_copy
+#endif
+
+.Lnonvmx_copy:
+	/* Get the source 8B aligned */
+	neg	r6,r4
+	mtocrf	0x01,r6
+	clrldi	r6,r6,(64-3)
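+	/*
+	 * r6 = -src, so its low three bits are the byte count needed to
+	 * reach 8B alignment; mtocrf exposes them in cr7 for the 1, 2 and
+	 * 4 byte copies below.
+	 */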
+
+	bf	cr7*4+3,1f
+	lbz	r0,0(r4)
+	addi	r4,r4,1
+	stb	r0,0(r3)
+	addi	r3,r3,1
+
+1:	bf	cr7*4+2,2f
+	lhz	r0,0(r4)
+	addi	r4,r4,2
+	sth	r0,0(r3)
+	addi	r3,r3,2
+
+2:	bf	cr7*4+1,3f
+	lwz	r0,0(r4)
+	addi	r4,r4,4
+	stw	r0,0(r3)
+	addi	r3,r3,4
+
+3:	sub	r5,r5,r6
+	cmpldi	r5,128
+	blt	5f
+
+	mflr	r0
+	stdu	r1,-STACKFRAMESIZE(r1)
+	std	r14,STK_REG(r14)(r1)
+	std	r15,STK_REG(r15)(r1)
+	std	r16,STK_REG(r16)(r1)
+	std	r17,STK_REG(r17)(r1)
+	std	r18,STK_REG(r18)(r1)
+	std	r19,STK_REG(r19)(r1)
+	std	r20,STK_REG(r20)(r1)
+	std	r21,STK_REG(r21)(r1)
+	std	r22,STK_REG(r22)(r1)
+	std	r0,STACKFRAMESIZE+16(r1)
+
+	srdi	r6,r5,7
+	mtctr	r6
+
+	/* Now do cacheline (128B) sized loads and stores. */
+	.align	5
+4:
+	ld	r0,0(r4)
+	ld	r6,8(r4)
+	ld	r7,16(r4)
+	ld	r8,24(r4)
+	ld	r9,32(r4)
+	ld	r10,40(r4)
+	ld	r11,48(r4)
+	ld	r12,56(r4)
+	ld	r14,64(r4)
+	ld	r15,72(r4)
+	ld	r16,80(r4)
+	ld	r17,88(r4)
+	ld	r18,96(r4)
+	ld	r19,104(r4)
+	ld	r20,112(r4)
+	ld	r21,120(r4)
+	addi	r4,r4,128
+	std	r0,0(r3)
+	std	r6,8(r3)
+	std	r7,16(r3)
+	std	r8,24(r3)
+	std	r9,32(r3)
+	std	r10,40(r3)
+	std	r11,48(r3)
+	std	r12,56(r3)
+	std	r14,64(r3)
+	std	r15,72(r3)
+	std	r16,80(r3)
+	std	r17,88(r3)
+	std	r18,96(r3)
+	std	r19,104(r3)
+	std	r20,112(r3)
+	std	r21,120(r3)
+	addi	r3,r3,128
+	bdnz	4b
+
+	clrldi	r5,r5,(64-7)
+
+	ld	r14,STK_REG(r14)(r1)
+	ld	r15,STK_REG(r15)(r1)
+	ld	r16,STK_REG(r16)(r1)
+	ld	r17,STK_REG(r17)(r1)
+	ld	r18,STK_REG(r18)(r1)
+	ld	r19,STK_REG(r19)(r1)
+	ld	r20,STK_REG(r20)(r1)
+	ld	r21,STK_REG(r21)(r1)
+	ld	r22,STK_REG(r22)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+
+	/* Up to 127B to go */
+5:	srdi	r6,r5,4
+	mtocrf	0x01,r6
+
+6:	bf	cr7*4+1,7f
+	ld	r0,0(r4)
+	ld	r6,8(r4)
+	ld	r7,16(r4)
+	ld	r8,24(r4)
+	ld	r9,32(r4)
+	ld	r10,40(r4)
+	ld	r11,48(r4)
+	ld	r12,56(r4)
+	addi	r4,r4,64
+	std	r0,0(r3)
+	std	r6,8(r3)
+	std	r7,16(r3)
+	std	r8,24(r3)
+	std	r9,32(r3)
+	std	r10,40(r3)
+	std	r11,48(r3)
+	std	r12,56(r3)
+	addi	r3,r3,64
+
+	/* Up to 63B to go */
+7:	bf	cr7*4+2,8f
+	ld	r0,0(r4)
+	ld	r6,8(r4)
+	ld	r7,16(r4)
+	ld	r8,24(r4)
+	addi	r4,r4,32
+	std	r0,0(r3)
+	std	r6,8(r3)
+	std	r7,16(r3)
+	std	r8,24(r3)
+	addi	r3,r3,32
+
+	/* Up to 31B to go */
+8:	bf	cr7*4+3,9f
+	ld	r0,0(r4)
+	ld	r6,8(r4)
+	addi	r4,r4,16
+	std	r0,0(r3)
+	std	r6,8(r3)
+	addi	r3,r3,16
+
+9:	clrldi	r5,r5,(64-4)
+
+	/* Up to 15B to go */
+.Lshort_copy:
+	mtocrf	0x01,r5
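+	/*
+	 * cr7 now holds the low 4 bits of the remaining length, so bits
+	 * cr7*4+0..3 select the 8, 4, 2 and 1 byte tail copies.
+	 */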
+	bf	cr7*4+0,12f
+	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
+	lwz	r6,4(r4)
+	addi	r4,r4,8
+	stw	r0,0(r3)
+	stw	r6,4(r3)
+	addi	r3,r3,8
+
+12:	bf	cr7*4+1,13f
+	lwz	r0,0(r4)
+	addi	r4,r4,4
+	stw	r0,0(r3)
+	addi	r3,r3,4
+
+13:	bf	cr7*4+2,14f
+	lhz	r0,0(r4)
+	addi	r4,r4,2
+	sth	r0,0(r3)
+	addi	r3,r3,2
+
+14:	bf	cr7*4+3,15f
+	lbz	r0,0(r4)
+	stb	r0,0(r3)
+
+15:	ld	r3,48(r1)
+	blr
+
+.Lunwind_stack_nonvmx_copy:
+	addi	r1,r1,STACKFRAMESIZE
+	b	.Lnonvmx_copy
+
+#ifdef CONFIG_ALTIVEC
+.Lvmx_copy:
+	mflr	r0
+	std	r4,56(r1)
+	std	r5,64(r1)
+	std	r0,16(r1)
+	stdu	r1,-STACKFRAMESIZE(r1)
+	bl	.enter_vmx_copy
+	cmpwi	r3,0
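+	/*
+	 * enter_vmx_copy returns 0 if VMX could not be used; the result is
+	 * tested by the beq below, after the stream prefetch has been set
+	 * up so that either copy path benefits from it.
+	 */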
+	ld	r0,STACKFRAMESIZE+16(r1)
+	ld	r3,STACKFRAMESIZE+48(r1)
+	ld	r4,STACKFRAMESIZE+56(r1)
+	ld	r5,STACKFRAMESIZE+64(r1)
+	mtlr	r0
+
+	/*
+	 * We prefetch both the source and destination using enhanced touch
+	 * instructions. We use a stream ID of 0 for the load side and
+	 * 1 for the store side.
+	 */
+	clrrdi	r6,r4,7
+	clrrdi	r9,r3,7
+	ori	r9,r9,1		/* stream=1 */
+
+	srdi	r7,r5,7		/* length in cachelines, capped at 0x3FF */
+	cmpldi	cr1,r7,0x3FF
+	ble	cr1,1f
+	li	r7,0x3FF
+1:	lis	r0,0x0E00	/* depth=7 */
+	sldi	r7,r7,7
+	or	r7,r7,r0
+	ori	r10,r7,1	/* stream=1 */
+
+	lis	r8,0x8000	/* GO=1 */
+	clrldi	r8,r8,32
+
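+	/*
+	 * dcbt/dcbtst with TH=0b01000 nominate the starting address of a
+	 * load/store stream; TH=0b01010 supplies the stream descriptor
+	 * built above (unit count, depth, stream ID). The final dcbt, with
+	 * only GO set, starts all nominated streams.
+	 */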
+.machine push
+.machine "power4"
+	dcbt	r0,r6,0b01000
+	dcbt	r0,r7,0b01010
+	dcbtst	r0,r9,0b01000
+	dcbtst	r0,r10,0b01010
+	eieio
+	dcbt	r0,r8,0b01010	/* GO */
+.machine pop
+
+	beq	.Lunwind_stack_nonvmx_copy
+
+	/*
+	 * If source and destination are not relatively aligned we use a
+	 * slower permute loop.
+	 */
+	xor	r6,r4,r3
+	rldicl.	r6,r6,0,(64-4)
+	bne	.Lvmx_unaligned_copy
+
+	/* Get the destination 16B aligned */
+	neg	r6,r3
+	mtocrf	0x01,r6
+	clrldi	r6,r6,(64-4)
+
+	bf	cr7*4+3,1f
+	lbz	r0,0(r4)
+	addi	r4,r4,1
+	stb	r0,0(r3)
+	addi	r3,r3,1
+
+1:	bf	cr7*4+2,2f
+	lhz	r0,0(r4)
+	addi	r4,r4,2
+	sth	r0,0(r3)
+	addi	r3,r3,2
+
+2:	bf	cr7*4+1,3f
+	lwz	r0,0(r4)
+	addi	r4,r4,4
+	stw	r0,0(r3)
+	addi	r3,r3,4
+
+3:	bf	cr7*4+0,4f
+	ld	r0,0(r4)
+	addi	r4,r4,8
+	std	r0,0(r3)
+	addi	r3,r3,8
+
+4:	sub	r5,r5,r6
+
+	/* Get the destination 128B aligned */
+	neg	r6,r3
+	srdi	r7,r6,4
+	mtocrf	0x01,r7
+	clrldi	r6,r6,(64-7)
+
+	li	r9,16
+	li	r10,32
+	li	r11,48
+
+	bf	cr7*4+3,5f
+	lvx	vr1,r0,r4
+	addi	r4,r4,16
+	stvx	vr1,r0,r3
+	addi	r3,r3,16
+
+5:	bf	cr7*4+2,6f
+	lvx	vr1,r0,r4
+	lvx	vr0,r4,r9
+	addi	r4,r4,32
+	stvx	vr1,r0,r3
+	stvx	vr0,r3,r9
+	addi	r3,r3,32
+
+6:	bf	cr7*4+1,7f
+	lvx	vr3,r0,r4
+	lvx	vr2,r4,r9
+	lvx	vr1,r4,r10
+	lvx	vr0,r4,r11
+	addi	r4,r4,64
+	stvx	vr3,r0,r3
+	stvx	vr2,r3,r9
+	stvx	vr1,r3,r10
+	stvx	vr0,r3,r11
+	addi	r3,r3,64
+
+7:	sub	r5,r5,r6
+	srdi	r6,r5,7
+
+	std	r14,STK_REG(r14)(r1)
+	std	r15,STK_REG(r15)(r1)
+	std	r16,STK_REG(r16)(r1)
+
+	li	r12,64
+	li	r14,80
+	li	r15,96
+	li	r16,112
+
+	mtctr	r6
+
+	/*
+	 * Now do cacheline sized loads and stores. By this stage the
+	 * cacheline stores are also cacheline aligned.
+	 */
+	.align	5
+8:
+	lvx	vr7,r0,r4
+	lvx	vr6,r4,r9
+	lvx	vr5,r4,r10
+	lvx	vr4,r4,r11
+	lvx	vr3,r4,r12
+	lvx	vr2,r4,r14
+	lvx	vr1,r4,r15
+	lvx	vr0,r4,r16
+	addi	r4,r4,128
+	stvx	vr7,r0,r3
+	stvx	vr6,r3,r9
+	stvx	vr5,r3,r10
+	stvx	vr4,r3,r11
+	stvx	vr3,r3,r12
+	stvx	vr2,r3,r14
+	stvx	vr1,r3,r15
+	stvx	vr0,r3,r16
+	addi	r3,r3,128
+	bdnz	8b
+
+	ld	r14,STK_REG(r14)(r1)
+	ld	r15,STK_REG(r15)(r1)
+	ld	r16,STK_REG(r16)(r1)
+
+	/* Up to 127B to go */
+	clrldi	r5,r5,(64-7)
+	srdi	r6,r5,4
+	mtocrf	0x01,r6
+
+	bf	cr7*4+1,9f
+	lvx	vr3,r0,r4
+	lvx	vr2,r4,r9
+	lvx	vr1,r4,r10
+	lvx	vr0,r4,r11
+	addi	r4,r4,64
+	stvx	vr3,r0,r3
+	stvx	vr2,r3,r9
+	stvx	vr1,r3,r10
+	stvx	vr0,r3,r11
+	addi	r3,r3,64
+
+9:	bf	cr7*4+2,10f
+	lvx	vr1,r0,r4
+	lvx	vr0,r4,r9
+	addi	r4,r4,32
+	stvx	vr1,r0,r3
+	stvx	vr0,r3,r9
+	addi	r3,r3,32
+
+10:	bf	cr7*4+3,11f
+	lvx	vr1,r0,r4
+	addi	r4,r4,16
+	stvx	vr1,r0,r3
+	addi	r3,r3,16
+
+	/* Up to 15B to go */
+11:	clrldi	r5,r5,(64-4)
+	mtocrf	0x01,r5
+	bf	cr7*4+0,12f
+	ld	r0,0(r4)
+	addi	r4,r4,8
+	std	r0,0(r3)
+	addi	r3,r3,8
+
+12:	bf	cr7*4+1,13f
+	lwz	r0,0(r4)
+	addi	r4,r4,4
+	stw	r0,0(r3)
+	addi	r3,r3,4
+
+13:	bf	cr7*4+2,14f
+	lhz	r0,0(r4)
+	addi	r4,r4,2
+	sth	r0,0(r3)
+	addi	r3,r3,2
+
+14:	bf	cr7*4+3,15f
+	lbz	r0,0(r4)
+	stb	r0,0(r3)
+
+15:	addi	r1,r1,STACKFRAMESIZE
+	ld	r3,48(r1)
+	b	.exit_vmx_copy		/* tail call optimise */
+
+.Lvmx_unaligned_copy:
+	/* Get the destination 16B aligned */
+	neg	r6,r3
+	mtocrf	0x01,r6
+	clrldi	r6,r6,(64-4)
+
+	bf	cr7*4+3,1f
+	lbz	r0,0(r4)
+	addi	r4,r4,1
+	stb	r0,0(r3)
+	addi	r3,r3,1
+
+1:	bf	cr7*4+2,2f
+	lhz	r0,0(r4)
+	addi	r4,r4,2
+	sth	r0,0(r3)
+	addi	r3,r3,2
+
+2:	bf	cr7*4+1,3f
+	lwz	r0,0(r4)
+	addi	r4,r4,4
+	stw	r0,0(r3)
+	addi	r3,r3,4
+
+3:	bf	cr7*4+0,4f
+	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
+	lwz	r7,4(r4)
+	addi	r4,r4,8
+	stw	r0,0(r3)
+	stw	r7,4(r3)
+	addi	r3,r3,8
+
+4:	sub	r5,r5,r6
+
+	/* Get the destination 128B aligned */
+	neg	r6,r3
+	srdi	r7,r6,4
+	mtocrf	0x01,r7
+	clrldi	r6,r6,(64-7)
+
+	li	r9,16
+	li	r10,32
+	li	r11,48
+
+	lvsl	vr16,0,r4	/* Setup permute control vector */
+	lvx	vr0,0,r4
+	addi	r4,r4,16
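+	/*
+	 * Each aligned 16B store below is built by vperm-combining two
+	 * adjacent misaligned loads; vr0 carries the previous load across
+	 * to the next combine.
+	 */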
+
+	bf	cr7*4+3,5f
+	lvx	vr1,r0,r4
+	vperm	vr8,vr0,vr1,vr16
+	addi	r4,r4,16
+	stvx	vr8,r0,r3
+	addi	r3,r3,16
+	vor	vr0,vr1,vr1
+
+5:	bf	cr7*4+2,6f
+	lvx	vr1,r0,r4
+	vperm	vr8,vr0,vr1,vr16
+	lvx	vr0,r4,r9
+	vperm	vr9,vr1,vr0,vr16
+	addi	r4,r4,32
+	stvx	vr8,r0,r3
+	stvx	vr9,r3,r9
+	addi	r3,r3,32
+
+6:	bf	cr7*4+1,7f
+	lvx	vr3,r0,r4
+	vperm	vr8,vr0,vr3,vr16
+	lvx	vr2,r4,r9
+	vperm	vr9,vr3,vr2,vr16
+	lvx	vr1,r4,r10
+	vperm	vr10,vr2,vr1,vr16
+	lvx	vr0,r4,r11
+	vperm	vr11,vr1,vr0,vr16
+	addi	r4,r4,64
+	stvx	vr8,r0,r3
+	stvx	vr9,r3,r9
+	stvx	vr10,r3,r10
+	stvx	vr11,r3,r11
+	addi	r3,r3,64
+
+7:	sub	r5,r5,r6
+	srdi	r6,r5,7
+
+	std	r14,STK_REG(r14)(r1)
+	std	r15,STK_REG(r15)(r1)
+	std	r16,STK_REG(r16)(r1)
+
+	li	r12,64
+	li	r14,80
+	li	r15,96
+	li	r16,112
+
+	mtctr	r6
+
+	/*
+	 * Now do cacheline sized loads and stores. By this stage the
+	 * cacheline stores are also cacheline aligned.
+	 */
+	.align	5
+8:
+	lvx	vr7,r0,r4
+	vperm	vr8,vr0,vr7,vr16
+	lvx	vr6,r4,r9
+	vperm	vr9,vr7,vr6,vr16
+	lvx	vr5,r4,r10
+	vperm	vr10,vr6,vr5,vr16
+	lvx	vr4,r4,r11
+	vperm	vr11,vr5,vr4,vr16
+	lvx	vr3,r4,r12
+	vperm	vr12,vr4,vr3,vr16
+	lvx	vr2,r4,r14
+	vperm	vr13,vr3,vr2,vr16
+	lvx	vr1,r4,r15
+	vperm	vr14,vr2,vr1,vr16
+	lvx	vr0,r4,r16
+	vperm	vr15,vr1,vr0,vr16
+	addi	r4,r4,128
+	stvx	vr8,r0,r3
+	stvx	vr9,r3,r9
+	stvx	vr10,r3,r10
+	stvx	vr11,r3,r11
+	stvx	vr12,r3,r12
+	stvx	vr13,r3,r14
+	stvx	vr14,r3,r15
+	stvx	vr15,r3,r16
+	addi	r3,r3,128
+	bdnz	8b
+
+	ld	r14,STK_REG(r14)(r1)
+	ld	r15,STK_REG(r15)(r1)
+	ld	r16,STK_REG(r16)(r1)
+
+	/* Up to 127B to go */
+	clrldi	r5,r5,(64-7)
+	srdi	r6,r5,4
+	mtocrf	0x01,r6
+
+	bf	cr7*4+1,9f
+	lvx	vr3,r0,r4
+	vperm	vr8,vr0,vr3,vr16
+	lvx	vr2,r4,r9
+	vperm	vr9,vr3,vr2,vr16
+	lvx	vr1,r4,r10
+	vperm	vr10,vr2,vr1,vr16
+	lvx	vr0,r4,r11
+	vperm	vr11,vr1,vr0,vr16
+	addi	r4,r4,64
+	stvx	vr8,r0,r3
+	stvx	vr9,r3,r9
+	stvx	vr10,r3,r10
+	stvx	vr11,r3,r11
+	addi	r3,r3,64
+
+9:	bf	cr7*4+2,10f
+	lvx	vr1,r0,r4
+	vperm	vr8,vr0,vr1,vr16
+	lvx	vr0,r4,r9
+	vperm	vr9,vr1,vr0,vr16
+	addi	r4,r4,32
+	stvx	vr8,r0,r3
+	stvx	vr9,r3,r9
+	addi	r3,r3,32
+
+10:	bf	cr7*4+3,11f
+	lvx	vr1,r0,r4
+	vperm	vr8,vr0,vr1,vr16
+	addi	r4,r4,16
+	stvx	vr8,r0,r3
+	addi	r3,r3,16
+
+	/* Up to 15B to go */
+11:	clrldi	r5,r5,(64-4)
+	addi	r4,r4,-16	/* Unwind the +16 load offset */
+	mtocrf	0x01,r5
+	bf	cr7*4+0,12f
+	lwz	r0,0(r4)	/* Less chance of a reject with word ops */
+	lwz	r6,4(r4)
+	addi	r4,r4,8
+	stw	r0,0(r3)
+	stw	r6,4(r3)
+	addi	r3,r3,8
+
+12:	bf	cr7*4+1,13f
+	lwz	r0,0(r4)
+	addi	r4,r4,4
+	stw	r0,0(r3)
+	addi	r3,r3,4
+
+13:	bf	cr7*4+2,14f
+	lhz	r0,0(r4)
+	addi	r4,r4,2
+	sth	r0,0(r3)
+	addi	r3,r3,2
+
+14:	bf	cr7*4+3,15f
+	lbz	r0,0(r4)
+	stb	r0,0(r3)
+
+15:	addi	r1,r1,STACKFRAMESIZE
+	ld	r3,48(r1)
+	b	.exit_vmx_copy		/* tail call optimise */
+#endif /* CONFIG_ALTIVEC */


* Re: [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
  2012-05-31  6:22 [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch Anton Blanchard
@ 2012-12-07 23:20 ` Jimi Xenidis
  2012-12-17 11:33   ` Anton Blanchard
  0 siblings, 1 reply; 10+ messages in thread
From: Jimi Xenidis @ 2012-12-07 23:20 UTC (permalink / raw)
  To: Anton Blanchard; +Cc: Kumar Gala, paulus, linuxppc-dev


On May 31, 2012, at 1:22 AM, Anton Blanchard <anton@samba.org> wrote:

>
> Implement a POWER7 optimised memcpy using VMX and enhanced prefetch
> instructions.

<<snip>>

>
> Index: linux-build/arch/powerpc/lib/Makefile
> ===================================================================
> --- linux-build.orig/arch/powerpc/lib/Makefile	2012-05-30 15:27:30.000000000 +1000
> +++ linux-build/arch/powerpc/lib/Makefile	2012-05-31 09:12:27.574372864 +1000
> @@ -17,7 +17,8 @@ obj-$(CONFIG_HAS_IOMEM)	+= devres.o
> obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
> 			   memcpy_64.o usercopy_64.o mem_64.o string.o \
> 			   checksum_wrappers_64.o hweight_64.o \
> -			   copyuser_power7.o string_64.o copypage_power7.o
> +			   copyuser_power7.o string_64.o copypage_power7.o \
> +			   memcpy_power7.o

Hi,
I know this is a little late, but shouldn't these power7 specific
thingies be in "obj-$(CONFIG_PPC_BOOK3S_64)"?
The reason I ask is that my compiler pukes on "dcbtst" and as I deal
with that I wanted to point this out.

-jx


<<snip>>


* Re: [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
  2012-12-07 23:20 ` Jimi Xenidis
@ 2012-12-17 11:33   ` Anton Blanchard
  2012-12-18  0:26     ` Peter Bergner
  2012-12-18 13:21     ` Jimi Xenidis
  0 siblings, 2 replies; 10+ messages in thread
From: Anton Blanchard @ 2012-12-17 11:33 UTC (permalink / raw)
  To: Jimi Xenidis; +Cc: Kumar Gala, paulus, linuxppc-dev


Hi Jimi,

> I know this is a little late, but shouldn't these power7 specific
> thingies be in "obj-$(CONFIG_PPC_BOOK3S_64)". The reason I ask is
> that my compiler pukes on "dcbtst" and as I deal with that I wanted
> to point this out.

I guess we could do that. It's a bit strange your assembler is
complaining about the dcbtst instructions since we wrap them with
power4:

.machine push
.machine "power4"
        dcbt    r0,r4,0b01000
        dcbt    r0,r7,0b01010
        dcbtst  r0,r9,0b01000
        dcbtst  r0,r10,0b01010
        eieio
        dcbt    r0,r8,0b01010   /* GO */
.machine pop

Anton


* Re: [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
  2012-12-17 11:33   ` Anton Blanchard
@ 2012-12-18  0:26     ` Peter Bergner
  2012-12-18 13:28       ` Jimi Xenidis
  2012-12-18 13:21     ` Jimi Xenidis
  1 sibling, 1 reply; 10+ messages in thread
From: Peter Bergner @ 2012-12-18  0:26 UTC (permalink / raw)
  To: Jimi Xenidis; +Cc: paulus, linuxppc-dev, Kumar Gala, Anton Blanchard

On Mon, 2012-12-17 at 22:33 +1100, Anton Blanchard wrote:
> Hi Jimi,
> 
> > I know this is a little late, but shouldn't these power7 specific
> > thingies be in "obj-$(CONFIG_PPC_BOOK3S_64)". The reason I ask is
> > that my compiler pukes on "dcbtst" and as I deal with that I wanted
> > to point this out.
> 
> I guess we could do that. It's a bit strange your assembler is
> complaining about the dcbtst instructions since we wrap them with
> power4:
> 
> .machine push
> .machine "power4"
>         dcbt    r0,r4,0b01000
>         dcbt    r0,r7,0b01010
>         dcbtst  r0,r9,0b01000
>         dcbtst  r0,r10,0b01010
>         eieio
>         dcbt    r0,r8,0b01010   /* GO */
> .machine pop

Jimi, are you using an "old" binutils from before my patch that
changed the operand order for these types of instructions?

    http://sourceware.org/ml/binutils/2009-02/msg00044.html

Peter


* Re: [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
  2012-12-17 11:33   ` Anton Blanchard
  2012-12-18  0:26     ` Peter Bergner
@ 2012-12-18 13:21     ` Jimi Xenidis
  1 sibling, 0 replies; 10+ messages in thread
From: Jimi Xenidis @ 2012-12-18 13:21 UTC (permalink / raw)
  To: Anton Blanchard; +Cc: Kumar Gala, paulus, linuxppc-dev


On Dec 17, 2012, at 5:33 AM, Anton Blanchard <anton@samba.org> wrote:

>
> Hi Jimi,
>
>> I know this is a little late, but shouldn't these power7 specific
>> thingies be in "obj-$(CONFIG_PPC_BOOK3S_64)". The reason I ask is
>> that my compiler pukes on "dcbtst" and as I deal with that I wanted
>> to point this out.
>
> I guess we could do that.

I think it is the right idea since it is unclear that your optimizations
would actually help an embedded system where most of these cache
prefetches are NOPs and only waste decode/dispatch cycles.

> It's a bit strange your assembler is
> complaining about the dcbtst instructions since we wrap them with
> power4:

Not really, the binutils is a little old (RHEL 6.2); unfortunately it
_is_ the toolchain most people are using at the moment.
It will take me a while to get everyone using newer ones since most are
scientists using the packages they get.

My suggestion was really for correctness. My current patches for BG/Q
introduce a macro replacement.
-jx


>
> .machine push
> .machine "power4"
>        dcbt    r0,r4,0b01000
>        dcbt    r0,r7,0b01010
>        dcbtst  r0,r9,0b01000
>        dcbtst  r0,r10,0b01010
>        eieio
>        dcbt    r0,r8,0b01010   /* GO */
> .machine pop
>
> Anton


* Re: [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
  2012-12-18  0:26     ` Peter Bergner
@ 2012-12-18 13:28       ` Jimi Xenidis
  2012-12-18 14:14         ` David Laight
  2012-12-18 16:31         ` Peter Bergner
  0 siblings, 2 replies; 10+ messages in thread
From: Jimi Xenidis @ 2012-12-18 13:28 UTC (permalink / raw)
  To: Peter Bergner; +Cc: paulus, linuxppc-dev, Kumar Gala, Anton Blanchard


On Dec 17, 2012, at 6:26 PM, Peter Bergner <bergner@vnet.ibm.com> wrote:

> On Mon, 2012-12-17 at 22:33 +1100, Anton Blanchard wrote:
>> Hi Jimi,
>>
>>> I know this is a little late, but shouldn't these power7 specific
>>> thingies be in "obj-$(CONFIG_PPC_BOOK3S_64)". The reason I ask is
>>> that my compiler pukes on "dcbtst" and as I deal with that I wanted
>>> to point this out.
>>
>> I guess we could do that. It's a bit strange your assembler is
>> complaining about the dcbtst instructions since we wrap them with
>> power4:
>>
>> .machine push
>> .machine "power4"
>>        dcbt    r0,r4,0b01000
>>        dcbt    r0,r7,0b01010
>>        dcbtst  r0,r9,0b01000
>>        dcbtst  r0,r10,0b01010
>>        eieio
>>        dcbt    r0,r8,0b01010   /* GO */
>> .machine pop
>
> Jimi, are you using an "old" binutils from before my patch that
> changed the operand order for these types of instructions?
>
>    http://sourceware.org/ml/binutils/2009-02/msg00044.html

Actually, this confused me as well, that embedded has the same
instruction encoding but different mnemonic.
I was under the impression that the assembler made no instruction
decisions based on CPU.
So your only hint would be that '0b' prefix.
Does AS even see that?

If not, then without a _normalizing_ macro, I think we will need that
obj-$(CONFIG_PPC_BOOK3S_64) and .S files with the two can never be
shared.

-jx


>
> Peter


* RE: [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
  2012-12-18 13:28       ` Jimi Xenidis
@ 2012-12-18 14:14         ` David Laight
  2012-12-18 16:31         ` Peter Bergner
  1 sibling, 0 replies; 10+ messages in thread
From: David Laight @ 2012-12-18 14:14 UTC (permalink / raw)
  To: Jimi Xenidis, Peter Bergner
  Cc: Kumar Gala, linuxppc-dev, paulus, Anton Blanchard

> >>        dcbt    r0,r8,0b01010   /* GO */
> >> .machine pop
> >
> > Jimi, are you using an "old" binutils from before my patch that
> > changed the operand order for these types of instructions?
> >
> >    http://sourceware.org/ml/binutils/2009-02/msg00044.html
>
> Actually, this confused me as well, that embedded has the same
> instruction encoding but different mnemonic.

That is utterly horrid!

> I was under the impression that the assembler made no instruction
> decisions based on CPU.
> So your only hint would be that '0b' prefix.
> Does AS even see that?

Or maybe see the 'r' prefix.
I know they tend to be absent, making ppc asm even more unreadable.
It isn't as though the mnemonics were designed at a time when
the source file size or difference in decode time (or code space)
would be significant.

Otherwise it is a complete recipe for disaster.

	David


* Re: [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
  2012-12-18 13:28       ` Jimi Xenidis
  2012-12-18 14:14         ` David Laight
@ 2012-12-18 16:31         ` Peter Bergner
  2013-01-09 22:19           ` Jimi Xenidis
  1 sibling, 1 reply; 10+ messages in thread
From: Peter Bergner @ 2012-12-18 16:31 UTC (permalink / raw)
  To: Jimi Xenidis; +Cc: paulus, linuxppc-dev, Kumar Gala, Anton Blanchard

On Tue, 2012-12-18 at 07:28 -0600, Jimi Xenidis wrote:
> On Dec 17, 2012, at 6:26 PM, Peter Bergner <bergner@vnet.ibm.com> wrote:
> > Jimi, are you using an "old" binutils from before my patch that
> > changed the operand order for these types of instructions?
> > 
> >    http://sourceware.org/ml/binutils/2009-02/msg00044.html
> 
> Actually, this confused me as well, that embedded has the same instruction
> encoding but different mnemonic.

The mnemonic is the same (ie, dcbtst), and yes, the encoding is the same.
All that is different is the accepted operand ordering...and yes, it is
very unfortunate the operand ordering is different between embedded and
server. :(


> I was under the impression that the assembler made no instruction decisions
> based on CPU.  So your only hint would be that '0b' prefix.
> Does AS even see that?

GAS definitely makes decisions based on CPU (ie, -m<cpu> option).  Below is
the GAS code used in recognizing the dcbtst instruction.  This shows that
the "server" operand ordering is enabled for POWER4 and later cpus while
the "embedded" operand ordering is enabled for pre POWER4 cpus (yes, not
exactly a server versus embedded trigger, but that's what we agreed on to
mitigate breaking any old asm code out there).

{"dcbtst",	X(31,246),	X_MASK,      POWER4,	PPCNONE,	{RA0, RB, CT}},
{"dcbtst",	X(31,246),	X_MASK,      PPC|PPCVLE, POWER4,	{CT, RA0, RB}},

GAS doesn't look at how the operands are written to try and guess what
operand ordering you are attempting to use.  Rather, it knows what ordering
it expects and the values had better match that ordering.
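
For example, taking the CT=0b01010 form from Anton's patch, the same
operation would be written (illustrative values only):

	dcbtst	r0,r10,0b01010	# server ordering:   RA0, RB, CT
	dcbtst	0b01010,r0,r10	# embedded ordering: CT, RA0, RB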


Peter


* Re: [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
  2012-12-18 16:31         ` Peter Bergner
@ 2013-01-09 22:19           ` Jimi Xenidis
  2013-01-09 23:06             ` Peter Bergner
  0 siblings, 1 reply; 10+ messages in thread
From: Jimi Xenidis @ 2013-01-09 22:19 UTC (permalink / raw)
  To: Peter Bergner
  Cc: Paul Mackerras <paulus@samba.org>, linuxppc-dev, Kumar Gala, Anton Blanchard


On Dec 18, 2012, at 10:31 AM, Peter Bergner <bergner@vnet.ibm.com> wrote:

> On Tue, 2012-12-18 at 07:28 -0600, Jimi Xenidis wrote:
>> On Dec 17, 2012, at 6:26 PM, Peter Bergner <bergner@vnet.ibm.com> wrote:
>>> Jimi, are you using an "old" binutils from before my patch that
>>> changed the operand order for these types of instructions?
>>>
>>>   http://sourceware.org/ml/binutils/2009-02/msg00044.html
>>
>> Actually, this confused me as well, that embedded has the same instruction
>> encoding but different mnemonic.
>
> The mnemonic is the same (ie, dcbtst), and yes, the encoding is the same.
> All that is different is the accepted operand ordering...and yes, it is
> very unfortunate the operand ordering is different between embedded and
> server. :(
>
>
>> I was under the impression that the assembler made no instruction decisions
>> based on CPU.  So your only hint would be that '0b' prefix.
>> Does AS even see that?
>
> GAS definitely makes decisions based on CPU (ie, -m<cpu> option).  Below is
> the GAS code used in recognizing the dcbtst instruction.  This shows that
> the "server" operand ordering is enabled for POWER4 and later cpus while
> the "embedded" operand ordering is enabled for pre POWER4 cpus (yes, not
> exactly a server versus embedded trigger, but that's what we agreed on to
> mitigate breaking any old asm code out there).
>
> {"dcbtst",	X(31,246),	X_MASK,      POWER4,	PPCNONE,	{RA0, RB, CT}},
> {"dcbtst",	X(31,246),	X_MASK,      PPC|PPCVLE, POWER4,	{CT, RA0, RB}},
>
> GAS doesn't look at how the operands are written to try and guess what
> operand ordering you are attempting to use.  Rather, it knows what ordering
> it expects and the values had better match that ordering.
>

I agree, but that means it is impossible for the same .S file to be
compiled with both -mcpu=e500mc and -mcpu=powerpc?
So either these files have to be Book3S versus Book3E --or-- we use a
CPP macro to get them right.
FWIW, I prefer the latter, which allows more code reuse.

-jx


>
> Peter


* Re: [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch
  2013-01-09 22:19           ` Jimi Xenidis
@ 2013-01-09 23:06             ` Peter Bergner
  0 siblings, 0 replies; 10+ messages in thread
From: Peter Bergner @ 2013-01-09 23:06 UTC (permalink / raw)
  To: Jimi Xenidis
  Cc: Paul Mackerras <paulus@samba.org>, linuxppc-dev, Kumar Gala, Anton Blanchard

On Wed, 2013-01-09 at 16:19 -0600, Jimi Xenidis wrote:
> I agree, but that means it is impossible for the same .S file to be compiled
> with both -mcpu=e500mc and -mcpu=powerpc?  So either these files have to be
> Book3S versus Book3E --or-- we use a CPP macro to get them right.
> FWIW, I prefer the latter, which allows more code reuse.

I agree using a CPP macro - like we do for "new" instructions which some
older assemblers might not support yet - is probably the best solution.
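
Something along these lines, perhaps (an untested sketch; the macro name
is made up, and the config symbol follows Jimi's suggestion):

#ifdef CONFIG_PPC_BOOK3S_64			/* server operand order */
#define DCBTST(ct, ra, rb)	dcbtst	ra, rb, ct
#else						/* embedded operand order */
#define DCBTST(ct, ra, rb)	dcbtst	ct, ra, rb
#endif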

Peter


Thread overview: 10+ messages
2012-05-31  6:22 [PATCH] powerpc: POWER7 optimised memcpy using VMX and enhanced prefetch Anton Blanchard
2012-12-07 23:20 ` Jimi Xenidis
2012-12-17 11:33   ` Anton Blanchard
2012-12-18  0:26     ` Peter Bergner
2012-12-18 13:28       ` Jimi Xenidis
2012-12-18 14:14         ` David Laight
2012-12-18 16:31         ` Peter Bergner
2013-01-09 22:19           ` Jimi Xenidis
2013-01-09 23:06             ` Peter Bergner
2012-12-18 13:21     ` Jimi Xenidis
