* [PATCH] lib/raid6: Add AVX2 optimized xor_syndrome functions
@ 2016-09-30 18:14 Gayatri Kammela
  2016-10-05 23:11 ` Shaohua Li
From: Gayatri Kammela @ 2016-09-30 18:14 UTC (permalink / raw)
  To: linux-raid
  Cc: linux-kernel, shli, ravi.v.shankar, Gayatri Kammela,
	H . Peter Anvin, Yuanhan Liu, Fenghua Yu

Implement the AVX2 optimization of the RAID6 xor_syndrome functions,
which are based on the SSE2 versions in sse2.c written by hpa.

Cc: H. Peter Anvin <hpa@linux.intel.com>
Cc: Yuanhan Liu <yuanhan.liu@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Gayatri Kammela <gayatri.kammela@intel.com>
---
 lib/raid6/avx2.c | 232 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 229 insertions(+), 3 deletions(-)
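
For reference, a scalar C sketch of what these xor_syndrome routines
compute; this is an illustrative sketch only (gf2_mul2() and
xor_syndrome_ref() are hypothetical names, not kernel API), and the
real work is done by the inline assembly in the patch below:

#include <stddef.h>
#include <stdint.h>

typedef uint8_t u8;

/*
 * Multiply one GF(2^8) element by 2 (RAID6 generator polynomial
 * 0x11d).  This is the scalar equivalent of the SIMD sequence
 * vpcmpgtb/vpaddb/vpand/vpxor: vpcmpgtb against zero yields 0xff for
 * bytes whose top bit is set (signed x < 0), vpaddb doubles each
 * byte, vpand masks the result with 0x1d, and vpxor folds the
 * reduction back in.
 */
static inline u8 gf2_mul2(u8 x)
{
        u8 mask = (x & 0x80) ? 0xff : 0x00;     /* vpcmpgtb */
        return (u8)(x << 1) ^ (mask & 0x1d);    /* vpaddb, vpand, vpxor */
}

/*
 * Per byte, for the dirty data disks in [start, stop]:
 *
 *      P ^= D[start] ^ ... ^ D[stop]
 *      Q ^= 2^start*D[start] ^ ... ^ 2^stop*D[stop]    (GF(2^8))
 *
 * Disks right of stop contribute nothing (right side optimization);
 * disks left of start only double the running Q term (left side
 * optimization).
 */
static void xor_syndrome_ref(int disks, int start, int stop,
                             size_t bytes, void **ptrs)
{
        u8 **d = (u8 **)ptrs;
        u8 *p = d[disks - 2];   /* XOR parity */
        u8 *q = d[disks - 1];   /* RS syndrome */
        size_t i;
        int z;

        for (i = 0; i < bytes; i++) {
                u8 wp = d[stop][i];
                u8 wq = d[stop][i];

                for (z = stop - 1; z >= start; z--) {
                        wq = gf2_mul2(wq) ^ d[z][i];
                        wp ^= d[z][i];
                }
                for (z = start - 1; z >= 0; z--)
                        wq = gf2_mul2(wq);

                p[i] ^= wp;
                q[i] ^= wq;
        }
}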

diff --git a/lib/raid6/avx2.c b/lib/raid6/avx2.c
index 76734004358d..20bca3d44f67 100644
--- a/lib/raid6/avx2.c
+++ b/lib/raid6/avx2.c
@@ -87,9 +87,57 @@ static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	kernel_fpu_end();
 }
 
+static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
+				     size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+
+	for (d = 0 ; d < bytes ; d += 32) {
+		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+		/* P/Q data pages */
+		for (z = z0-1 ; z >= start ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+		}
+		/* P/Q left side optimization */
+		for (z = start-1 ; z >= 0 ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+		}
+		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+		/* Don't use movntdq for r/w memory area < cache line */
+		asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
+		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
+	}
+
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
 const struct raid6_calls raid6_avx2x1 = {
 	raid6_avx21_gen_syndrome,
-	NULL,			/* XOR not yet implemented */
+	raid6_avx21_xor_syndrome,
 	raid6_have_avx2,
 	"avx2x1",
 	1			/* Has cache hints */
@@ -149,9 +197,77 @@ static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	kernel_fpu_end();
 }
 
+static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
+				     size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
+
+	for (d = 0 ; d < bytes ; d += 64) {
+		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+		asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
+		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
+		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+		asm volatile("vpxor %ymm6,%ymm3,%ymm3");
+		/* P/Q data pages */
+		for (z = z0-1 ; z >= start ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+			asm volatile("vmovdqa %0,%%ymm7"
+				     :: "m" (dptr[z][d+32]));
+			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+		}
+		/* P/Q left side optimization */
+		for (z = start-1 ; z >= 0 ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+		}
+		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+		asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
+		/* Don't use movntdq for r/w memory area < cache line */
+		asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
+		asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
+		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
+		asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
+	}
+
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
 const struct raid6_calls raid6_avx2x2 = {
 	raid6_avx22_gen_syndrome,
-	NULL,			/* XOR not yet implemented */
+	raid6_avx22_xor_syndrome,
 	raid6_have_avx2,
 	"avx2x2",
 	1			/* Has cache hints */
@@ -242,9 +358,119 @@ static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
 	kernel_fpu_end();
 }
 
+static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
+				     size_t bytes, void **ptrs)
+{
+	u8 **dptr = (u8 **)ptrs;
+	u8 *p, *q;
+	int d, z, z0;
+
+	z0 = stop;		/* P/Q right side optimization */
+	p = dptr[disks-2];	/* XOR parity */
+	q = dptr[disks-1];	/* RS syndrome */
+
+	kernel_fpu_begin();
+
+	asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0]));
+
+	for (d = 0 ; d < bytes ; d += 128) {
+		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
+		asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
+		asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64]));
+		asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96]));
+		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
+		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
+		asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
+		asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
+		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
+		asm volatile("vpxor %ymm6,%ymm3,%ymm3");
+		asm volatile("vpxor %ymm12,%ymm10,%ymm10");
+		asm volatile("vpxor %ymm14,%ymm11,%ymm11");
+		/* P/Q data pages */
+		for (z = z0-1 ; z >= start ; z--) {
+			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
+			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64]));
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+			asm volatile("vpxor %ymm13,%ymm13,%ymm13");
+			asm volatile("vpxor %ymm15,%ymm15,%ymm15");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+			asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
+			asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpand %ymm0,%ymm13,%ymm13");
+			asm volatile("vpand %ymm0,%ymm15,%ymm15");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
+			asm volatile("vmovdqa %0,%%ymm7"
+				     :: "m" (dptr[z][d+32]));
+			asm volatile("vmovdqa %0,%%ymm13"
+				     :: "m" (dptr[z][d+64]));
+			asm volatile("vmovdqa %0,%%ymm15"
+				     :: "m" (dptr[z][d+96]));
+			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
+			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
+			asm volatile("vpxor %ymm13,%ymm10,%ymm10");
+			asm volatile("vpxor %ymm15,%ymm11,%ymm11");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+		}
+		asm volatile("prefetchnta %0" :: "m" (q[d]));
+		asm volatile("prefetchnta %0" :: "m" (q[d+64]));
+		/* P/Q left side optimization */
+		for (z = start-1 ; z >= 0 ; z--) {
+			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
+			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
+			asm volatile("vpxor %ymm13,%ymm13,%ymm13");
+			asm volatile("vpxor %ymm15,%ymm15,%ymm15");
+			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
+			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
+			asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
+			asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
+			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
+			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
+			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
+			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
+			asm volatile("vpand %ymm0,%ymm5,%ymm5");
+			asm volatile("vpand %ymm0,%ymm7,%ymm7");
+			asm volatile("vpand %ymm0,%ymm13,%ymm13");
+			asm volatile("vpand %ymm0,%ymm15,%ymm15");
+			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
+			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
+			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
+			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
+		}
+		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
+		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
+		asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
+		asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
+		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
+		asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
+		asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
+		asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
+		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
+		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
+		asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
+		asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
+	}
+	asm volatile("sfence" : : : "memory");
+	kernel_fpu_end();
+}
+
 const struct raid6_calls raid6_avx2x4 = {
 	raid6_avx24_gen_syndrome,
-	NULL,			/* XOR not yet implemented */
+	raid6_avx24_xor_syndrome,
 	raid6_have_avx2,
 	"avx2x4",
 	1			/* Has cache hints */
-- 
2.7.4
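
A note on the store strategy in the patch above: the x1 and x2
variants read-modify-write at most one cache line of P and of Q per
iteration (hence the "Don't use movntdq for r/w memory area < cache
line" comments), so they use ordinary cached vmovdqa stores, while
the x4 variant writes two full cache lines of each per iteration and
uses non-temporal vmovntdq stores, prefetching the sources with
prefetchnta and draining the write-combining buffers with sfence.  A
rough intrinsics sketch of the distinction (illustrative only; the
kernel code uses inline assembly, and store32() is a hypothetical
helper):

#include <immintrin.h>
#include <stdint.h>

/* Cached store vs. streaming store; dst must be 32-byte aligned. */
static inline void store32(uint8_t *dst, __m256i v, int whole_lines)
{
        if (whole_lines)
                _mm256_stream_si256((__m256i *)dst, v); /* vmovntdq */
        else
                _mm256_store_si256((__m256i *)dst, v);  /* vmovdqa */
}

/*
 * Streaming stores must be ordered with _mm_sfence() before the data
 * can be assumed visible to other agents, which is what the sfence at
 * the end of each routine does.
 */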


* Re: [PATCH] lib/raid6: Add AVX2 optimized xor_syndrome functions
  2016-09-30 18:14 [PATCH] lib/raid6: Add AVX2 optimized xor_syndrome functions Gayatri Kammela
@ 2016-10-05 23:11 ` Shaohua Li
  2016-10-06 20:56   ` H. Peter Anvin
From: Shaohua Li @ 2016-10-05 23:11 UTC (permalink / raw)
  To: Gayatri Kammela
  Cc: linux-raid, linux-kernel, ravi.v.shankar, H . Peter Anvin,
	Yuanhan Liu, Fenghua Yu

On Fri, Sep 30, 2016 at 11:14:49AM -0700, Gayatri Kammela wrote:
> Implement the AVX2 optimization of the RAID6 xor_syndrome functions,
> which are based on the SSE2 versions in sse2.c written by hpa.

Applied, thanks! Could you please add xor_syndrome for sse1/mmx too?

Thanks,
Shaohua


* Re: [PATCH] lib/raid6: Add AVX2 optimized xor_syndrome functions
  2016-10-05 23:11 ` Shaohua Li
@ 2016-10-06 20:56   ` H. Peter Anvin
From: H. Peter Anvin @ 2016-10-06 20:56 UTC (permalink / raw)
  To: Shaohua Li, Gayatri Kammela
  Cc: linux-raid, linux-kernel, ravi.v.shankar, Yuanhan Liu, Fenghua Yu

On 10/05/16 16:11, Shaohua Li wrote:
> On Fri, Sep 30, 2016 at 11:14:49AM -0700, Gayatri Kammela wrote:
>> Implement the AVX2 optimization of the RAID6 xor_syndrome functions,
>> which are based on the SSE2 versions in sse2.c written by hpa.
> 
> Applied, thanks! Could you please add xor_syndrome for sse1/mmx too?
> 

Not that it would be a bad thing to have, but do you think anyone
actually cares?  We are talking about hardware over a decade old.

	-hpa

