* [PATCH RFC] x86: Improve memset with general 64bit instruction
From: ling.ma.program @ 2014-04-07 14:50 UTC
  To: mingo; +Cc: tglx, hpa, neleai, linux-kernel, Ling Ma

From: Ling Ma <ling.ml@alibaba-inc.com>

This patch reduces branch mispredictions by avoiding branch
instructions, and it forces the destination to be aligned using only
general-purpose 64-bit instructions.
The comparison below shows a performance improvement of up to 1.8x.
(We modified the test suite from Ondra; it will be sent as a follow-up
to this patch.)

Bytes: ORG_TIME: NEW_TIME: ORG vs NEW:
7       0.51    0.48    1.06
16      0.55    0.38    1.44
18      0.61    0.44    1.38
21      0.62    0.47    1.31
25      0.64    0.45    1.42
30      0.65    0.45    1.44
36      0.66    0.44    1.50
38      0.67    0.46    1.45
62      0.70    0.44    1.59
75      0.71    0.44    1.61
85      0.73    0.46    1.58
120     0.78    0.44    1.77
193     0.81    0.46    1.76
245     0.84    0.52    1.61
256     0.83    0.45    1.84
356     0.86    0.55    1.56
601     0.98    0.65    1.50
958     1.14    0.81    1.40
1024    1.19    0.86    1.38
2048    1.69    1.34    1.26
Signed-off-by: Ling Ma <ling.ml@alibaba-inc.com>
---
 arch/x86/include/asm/alternative-asm.h |   4 +-
 arch/x86/lib/memset_64.S               | 173 +++++++++++++++++++++------------
 2 files changed, 111 insertions(+), 66 deletions(-)

diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 372231c..aaac545 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -22,8 +22,8 @@
 	.long \orig - .
 	.long \alt - .
 	.word \feature
-	.byte \orig_len
-	.byte \alt_len
+	.word \orig_len
+	.word \alt_len
 .endm
 
 #endif  /*  __ASSEMBLY__  */
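
For context on this hunk: the two fields widened here hold the lengths
of the original and replacement code of one .altinstructions entry; a
.byte field caps a replacement at 255 bytes, which the enlarged memset
presumably no longer fits once alternatives are applied, hence the move
to .word. The same widening has to be mirrored in the C-side struct
that walks this table. Below is a sketch of that record, reconstructed
from memory for this kernel era; the authoritative definition lives in
arch/x86/include/asm/alternative.h and its names may differ:

#include <stdint.h>

/*
 * Sketch of one .altinstructions entry as the C code sees it (field
 * names here are an assumption, not quoted from the tree).  With this
 * patch the assembler emits 16-bit length fields, so the last two
 * members must grow from u8 to u16 as well, or the code applying
 * alternatives would walk the table with the wrong stride.
 */
struct alt_instr_sketch {
	int32_t  instr_offset;    /* original instruction, PC-relative */
	int32_t  repl_offset;     /* replacement instruction, PC-relative */
	uint16_t cpuid;           /* CPUID feature bit for the replacement */
	uint16_t instrlen;        /* was u8: length of the original code */
	uint16_t replacementlen;  /* was u8: length of the replacement */
};
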
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 2dcb380..3eca27c 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -59,77 +59,122 @@
 ENTRY(memset)
 ENTRY(__memset)
 	CFI_STARTPROC
-	movq %rdi,%r10
-
-	/* expand byte value  */
 	movzbl %sil,%ecx
-	movabs $0x0101010101010101,%rax
-	imulq  %rcx,%rax
-
-	/* align dst */
-	movl  %edi,%r9d
-	andl  $7,%r9d
-	jnz  .Lbad_alignment
-	CFI_REMEMBER_STATE
-.Lafter_bad_alignment:
-
-	movq  %rdx,%rcx
-	shrq  $6,%rcx
-	jz	 .Lhandle_tail
-
+	mov $0x0101010101010101,%rsi
+	imulq  %rsi,%rcx
+	movq %rdi,%rax
+	lea	(%rdi, %rdx), %r8
+	cmp	$128, %rdx
+	ja	.Lmore128bytes
+	cmp	$64, %edx
+	jb	.Lless_64bytes
+	/*
+	 * Set 64 to 128 bytes; the head and tail stores may overlap.
+	 */
+	mov %rcx, 0x00(%rdi)
+	mov %rcx, 0x08(%rdi)
+	mov %rcx, 0x10(%rdi)
+	mov %rcx, 0x18(%rdi)
+	mov %rcx, 0x20(%rdi)
+	mov %rcx, 0x28(%rdi)
+	mov %rcx, 0x30(%rdi)
+	mov %rcx, 0x38(%rdi)
+	mov %rcx, -0x40(%r8)
+	mov %rcx, -0x38(%r8)
+	mov %rcx, -0x30(%r8)
+	mov %rcx, -0x28(%r8)
+	mov %rcx, -0x20(%r8)
+	mov %rcx, -0x18(%r8)
+	mov %rcx, -0x10(%r8)
+	mov %rcx, -0x08(%r8)
+	ret
 	.p2align 4
-.Lloop_64:
-	decq  %rcx
-	movq  %rax,(%rdi)
-	movq  %rax,8(%rdi)
-	movq  %rax,16(%rdi)
-	movq  %rax,24(%rdi)
-	movq  %rax,32(%rdi)
-	movq  %rax,40(%rdi)
-	movq  %rax,48(%rdi)
-	movq  %rax,56(%rdi)
-	leaq  64(%rdi),%rdi
-	jnz    .Lloop_64
-
-	/* Handle tail in loops. The loops should be faster than hard
-	   to predict jump tables. */
+.Lless_64bytes:
+	cmp	$32, %edx
+	jb	.Lless_32bytes
+	/*
+	 * Set 32 to 63 bytes; the head and tail stores may overlap.
+	 */
+	mov %rcx, 0x00(%rdi)
+	mov %rcx, 0x08(%rdi)
+	mov %rcx, 0x10(%rdi)
+	mov %rcx, 0x18(%rdi)
+	mov %rcx, -0x20(%r8)
+	mov %rcx, -0x18(%r8)
+	mov %rcx, -0x10(%r8)
+	mov %rcx, -0x08(%r8)
+	ret
 	.p2align 4
-.Lhandle_tail:
-	movl	%edx,%ecx
-	andl    $63&(~7),%ecx
-	jz 		.Lhandle_7
-	shrl	$3,%ecx
+.Lless_32bytes:
+	cmp	$16, %edx
+	jb	.Lless_16bytes
+	mov %rcx, 0x00(%rdi)
+	mov %rcx, 0x08(%rdi)
+	mov %rcx, -0x10(%r8)
+	mov %rcx, -0x08(%r8)
+	ret
 	.p2align 4
-.Lloop_8:
-	decl   %ecx
-	movq  %rax,(%rdi)
-	leaq  8(%rdi),%rdi
-	jnz    .Lloop_8
-
-.Lhandle_7:
-	andl	$7,%edx
-	jz      .Lende
+.Lless_16bytes:
+	cmp	$8, %edx
+	jb	.Lless_8bytes
+	mov %rcx, (%rdi)
+	mov %rcx, -0x08(%r8)
+	ret
 	.p2align 4
-.Lloop_1:
-	decl    %edx
-	movb 	%al,(%rdi)
-	leaq	1(%rdi),%rdi
-	jnz     .Lloop_1
-
-.Lende:
-	movq	%r10,%rax
+.Lless_8bytes:
+	cmp	$4, %edx
+	jb	.Lless_4bytes
+	mov %ecx, (%rdi)
+	mov %ecx, -0x04(%r8)
+	ret
+	.p2align 4
+.Lless_4bytes:
+	cmp	$2, %edx
+	jb	.Lless_2bytes
+	mov	%cx, (%rdi)
+	mov	%cx, -0x02(%r8)
+	ret
+	.p2align 4
+.Lless_2bytes:
+	cmp	$1, %edx
+	jb	.Lless_1bytes
+	mov	%cl, (%rdi)
+.Lless_1bytes:
 	ret
 
-	CFI_RESTORE_STATE
-.Lbad_alignment:
-	cmpq $7,%rdx
-	jbe	.Lhandle_7
-	movq %rax,(%rdi)	/* unaligned store */
-	movq $8,%r8
-	subq %r9,%r8
-	addq %r8,%rdi
-	subq %r8,%rdx
-	jmp .Lafter_bad_alignment
+	.p2align 4
+.Lmore128bytes:
+	mov	%rcx, (%rdi)
+	mov	%rdi, %r9
+	and	$-0x08, %rdi
+	add	$0x08, %rdi
+	sub	%rdi, %r9
+	add	%r9, %rdx
+	sub	$0x40, %rdx
+.Lgobble_64_loop:
+	mov		%rcx, 0x00(%rdi)
+	mov		%rcx, 0x08(%rdi)
+	mov		%rcx, 0x10(%rdi)
+	mov		%rcx, 0x18(%rdi)
+	mov		%rcx, 0x20(%rdi)
+	mov		%rcx, 0x28(%rdi)
+	mov		%rcx, 0x30(%rdi)
+	mov		%rcx, 0x38(%rdi)
+	lea	0x40(%rdi), %rdi
+	sub	$0x40, %rdx
+	jae	.Lgobble_64_loop
+	/*
+	 * Set the 0 to 63 byte tail by rewriting the last 64 bytes.
+	 */
+	mov		%rcx, -0x40(%r8)
+	mov		%rcx, -0x38(%r8)
+	mov		%rcx, -0x30(%r8)
+	mov		%rcx, -0x28(%r8)
+	mov		%rcx, -0x20(%r8)
+	mov		%rcx, -0x18(%r8)
+	mov		%rcx, -0x10(%r8)
+	mov		%rcx, -0x08(%r8)
+	ret
 .Lfinal:
 	CFI_ENDPROC
 ENDPROC(memset)
-- 
1.8.1.4
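
A note for readers following the assembly above: the central trick is
to expand the fill byte into a 64-bit pattern and then cover each size
class with two groups of stores, one addressed forward from the start
of the buffer and one backward from its end, so the groups may overlap
and no per-byte tail loop or hard-to-predict branch chain is needed.
A minimal C sketch of that idea follows; it is an illustration only,
assuming nothing beyond the standard library, not the kernel code:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void *memset_sketch(void *dst, int c, size_t n)
{
	/* Expand the byte into a 64-bit pattern, as the patch does
	 * with imulq by 0x0101010101010101. */
	uint64_t pat = (uint8_t)c * 0x0101010101010101ULL;
	unsigned char *p = dst, *end = p + n;

	if (n >= 8) {
		/* 8-byte stores forward from the start... */
		for (size_t i = 0; i + 8 <= n; i += 8)
			memcpy(p + i, &pat, 8);
		/* ...plus one store ending exactly at the end; the
		 * overlap with the last forward store is harmless. */
		memcpy(end - 8, &pat, 8);
	} else if (n >= 4) {
		uint32_t v = (uint32_t)pat;
		memcpy(p, &v, 4);
		memcpy(end - 4, &v, 4);	/* may overlap the first store */
	} else if (n >= 2) {
		uint16_t v = (uint16_t)pat;
		memcpy(p, &v, 2);
		memcpy(end - 2, &v, 2);
	} else if (n == 1) {
		*p = (unsigned char)c;
	}
	return dst;
}

The >128-byte path in the patch adds one more step: after a single
unaligned 8-byte head store, it rounds the destination up to the next
8-byte boundary (the and $-0x08 / add $0x08 pair) and adjusts the
count, so the 64-byte gobble loop always runs on an aligned pointer.
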



* Re: [PATCH RFC] x86: Improve memset with general 64bit instruction
From: Ling Ma @ 2014-04-07 14:52 UTC
  To: mingo; +Cc: tglx, hpa, neleai, linux-kernel, Ling Ma

[-- Attachment #1: Type: text/plain, Size: 5824 bytes --]

The test suite is attached; after untarring it, please run ./test.

Thanks
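
The tarball itself is not reproduced in this archive; as a rough idea
of what such a harness does, here is a minimal cycle-counting sketch.
The buffer size, fill value, and iteration count are invented for
illustration, and the attached suite may measure quite differently.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read the x86 time-stamp counter. */
static inline uint64_t rdtsc(void)
{
	uint32_t lo, hi;
	__asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	static unsigned char buf[4096];
	const size_t sizes[] = { 7, 16, 64, 256, 1024, 2048 };
	const int iters = 1000000;

	for (size_t s = 0; s < sizeof(sizes) / sizeof(sizes[0]); s++) {
		uint64_t start = rdtsc();
		for (int i = 0; i < iters; i++) {
			memset(buf, 0x5a, sizes[s]);
			/* Compiler barrier so the call is not elided. */
			__asm__ __volatile__("" ::: "memory");
		}
		printf("%5zu  %.2f cycles/call\n", sizes[s],
		       (double)(rdtsc() - start) / iters);
	}
	return 0;
}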

2014-04-07 22:50 GMT+08:00, ling.ma.program@gmail.com
<ling.ma.program@gmail.com>:
> [the full patch quoted verbatim; snipped -- identical to the original
> message above]

[-- Attachment #2: memset_kernel.tar --]
[-- Type: application/x-tar, Size: 20480 bytes --]


* Re: [PATCH RFC] x86: Improve memset with general 64bit instruction
From: Andi Kleen @ 2014-04-07 16:42 UTC
  To: ling.ma.program; +Cc: mingo, tglx, hpa, neleai, linux-kernel, Ling Ma

ling.ma.program@gmail.com writes:

> From: Ling Ma <ling.ml@alibaba-inc.com>
>
> This patch reduces branch mispredictions by avoiding branch
> instructions, and it forces the destination to be aligned using only
> general-purpose 64-bit instructions.
> The comparison below shows a performance improvement of up to 1.8x.
> (We modified the test suite from Ondra; it will be sent as a follow-up
> to this patch.)

You didn't specify the CPU?

I assume it's some Atom, as nothing else uses these open coded functions
anymore?

-Andi

-- 
ak@linux.intel.com -- Speaking for myself only


* Re: [PATCH RFC] x86: Improve memset with general 64bit instruction
From: Ling Ma @ 2014-04-08 14:00 UTC
  To: Andi Kleen; +Cc: mingo, tglx, hpa, neleai, linux-kernel, Ling Ma

[-- Attachment #1: Type: text/plain, Size: 1397 bytes --]

Andi,

Below are the comparison results on an older machine (cpuinfo is
attached); they show the new code performing up to 1.6x better.

Bytes: ORG_TIME: NEW_TIME: ORG vs NEW:
7       0.87    0.76    1.14
16      0.99    0.68    1.45
18      1.07    0.77    1.38
21      1.09    0.78    1.39
25      1.11    0.77    1.44
30      1.12    0.73    1.53
36      1.15    0.75    1.53
38      1.12    0.75    1.49
62      1.18    0.77    1.53
75      1.25    0.79    1.58
85      1.28    0.80    1.60
120     1.33    0.82    1.62
193     1.45    0.88    1.64
245     1.48    0.96    1.54
256     1.45    0.90    1.61
356     1.61    1.02    1.57
601     1.78    1.22    1.45
958     2.04    1.47    1.38
1024    2.07    1.48    1.39
2048    2.80    2.21    1.26

Thanks
Ling

2014-04-08 0:42 GMT+08:00, Andi Kleen <andi@firstfloor.org>:
> ling.ma.program@gmail.com writes:
>
>> From: Ling Ma <ling.ml@alibaba-inc.com>
>>
>> This patch reduces branch mispredictions by avoiding branch
>> instructions, and it forces the destination to be aligned using only
>> general-purpose 64-bit instructions.
>> The comparison below shows a performance improvement of up to 1.8x.
>> (We modified the test suite from Ondra; it will be sent as a follow-up
>> to this patch.)
>
> You didn't specify the CPU?
>
> I assume it's some Atom, as nothing else uses these open coded functions
> anymore?
>
> -Andi
>
> --
> ak@linux.intel.com -- Speaking for myself only
>

[-- Attachment #2: cpu-info --]
[-- Type: text/plain, Size: 4992 bytes --]

processor	: 0
vendor_id	: GenuineIntel
cpu family	: 6
model		: 23
model name	: Intel(R) Xeon(R) CPU           E5410  @ 2.33GHz
stepping	: 10
cpu MHz		: 2327.506
cache size	: 6144 KB
physical id	: 0
siblings	: 4
core id		: 0
cpu cores	: 4
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc pni monitor ds_cpl vmx est tm2 cx16 xtpr lahf_lm
bogomips	: 4658.27
clflush size	: 64
cache_alignment	: 64
address sizes	: 38 bits physical, 48 bits virtual
power management:

processor	: 1
vendor_id	: GenuineIntel
cpu family	: 6
model		: 23
model name	: Intel(R) Xeon(R) CPU           E5410  @ 2.33GHz
stepping	: 10
cpu MHz		: 2327.506
cache size	: 6144 KB
physical id	: 1
siblings	: 4
core id		: 0
cpu cores	: 4
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc pni monitor ds_cpl vmx est tm2 cx16 xtpr lahf_lm
bogomips	: 4655.03
clflush size	: 64
cache_alignment	: 64
address sizes	: 38 bits physical, 48 bits virtual
power management:

processor	: 2
vendor_id	: GenuineIntel
cpu family	: 6
model		: 23
model name	: Intel(R) Xeon(R) CPU           E5410  @ 2.33GHz
stepping	: 10
cpu MHz		: 2327.506
cache size	: 6144 KB
physical id	: 0
siblings	: 4
core id		: 2
cpu cores	: 4
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc pni monitor ds_cpl vmx est tm2 cx16 xtpr lahf_lm
bogomips	: 4655.00
clflush size	: 64
cache_alignment	: 64
address sizes	: 38 bits physical, 48 bits virtual
power management:

processor	: 3
vendor_id	: GenuineIntel
cpu family	: 6
model		: 23
model name	: Intel(R) Xeon(R) CPU           E5410  @ 2.33GHz
stepping	: 10
cpu MHz		: 2327.506
cache size	: 6144 KB
physical id	: 1
siblings	: 4
core id		: 2
cpu cores	: 4
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc pni monitor ds_cpl vmx est tm2 cx16 xtpr lahf_lm
bogomips	: 4654.53
clflush size	: 64
cache_alignment	: 64
address sizes	: 38 bits physical, 48 bits virtual
power management:

processor	: 4
vendor_id	: GenuineIntel
cpu family	: 6
model		: 23
model name	: Intel(R) Xeon(R) CPU           E5410  @ 2.33GHz
stepping	: 10
cpu MHz		: 2327.506
cache size	: 6144 KB
physical id	: 0
siblings	: 4
core id		: 1
cpu cores	: 4
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc pni monitor ds_cpl vmx est tm2 cx16 xtpr lahf_lm
bogomips	: 4655.02
clflush size	: 64
cache_alignment	: 64
address sizes	: 38 bits physical, 48 bits virtual
power management:

processor	: 5
vendor_id	: GenuineIntel
cpu family	: 6
model		: 23
model name	: Intel(R) Xeon(R) CPU           E5410  @ 2.33GHz
stepping	: 10
cpu MHz		: 2327.506
cache size	: 6144 KB
physical id	: 1
siblings	: 4
core id		: 1
cpu cores	: 4
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc pni monitor ds_cpl vmx est tm2 cx16 xtpr lahf_lm
bogomips	: 4654.97
clflush size	: 64
cache_alignment	: 64
address sizes	: 38 bits physical, 48 bits virtual
power management:

processor	: 6
vendor_id	: GenuineIntel
cpu family	: 6
model		: 23
model name	: Intel(R) Xeon(R) CPU           E5410  @ 2.33GHz
stepping	: 10
cpu MHz		: 2327.506
cache size	: 6144 KB
physical id	: 0
siblings	: 4
core id		: 3
cpu cores	: 4
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc pni monitor ds_cpl vmx est tm2 cx16 xtpr lahf_lm
bogomips	: 4655.00
clflush size	: 64
cache_alignment	: 64
address sizes	: 38 bits physical, 48 bits virtual
power management:

processor	: 7
vendor_id	: GenuineIntel
cpu family	: 6
model		: 23
model name	: Intel(R) Xeon(R) CPU           E5410  @ 2.33GHz
stepping	: 10
cpu MHz		: 2327.506
cache size	: 6144 KB
physical id	: 1
siblings	: 4
core id		: 3
cpu cores	: 4
fpu		: yes
fpu_exception	: yes
cpuid level	: 13
wp		: yes
flags		: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm syscall nx lm constant_tsc pni monitor ds_cpl vmx est tm2 cx16 xtpr lahf_lm
bogomips	: 4655.01
clflush size	: 64
cache_alignment	: 64
address sizes	: 38 bits physical, 48 bits virtual
power management:



* Re: [PATCH RFC] x86: Improve memset with general 64bit instruction
From: Ling Ma @ 2014-04-13 15:11 UTC
  To: Andi Kleen; +Cc: mingo, tglx, hpa, neleai, linux-kernel, Ling Ma

Any further comments?

Thanks
Ling


2014-04-08 22:00 GMT+08:00, Ling Ma <ling.ma.program@gmail.com>:
> [earlier message with the benchmark table quoted in full; snipped --
> see the previous messages]


* Re: [PATCH RFC] x86: Improve memset with general 64bit instruction
From: Andi Kleen @ 2014-04-13 22:03 UTC
  To: Ling Ma; +Cc: Andi Kleen, mingo, tglx, hpa, neleai, linux-kernel, Ling Ma

On Sun, Apr 13, 2014 at 11:11:59PM +0800, Ling Ma wrote:
> Any further comments?

It would be good to test on some more machines to make sure you don't
cause regressions.

But I'm not aware of any workload that is doing a lot of memset (not
counting clear_page).  Copies likely matter much more.

-Andi


* Re: [PATCH RFC] x86: Improve memset with general 64bit instruction
From: Ling Ma @ 2014-04-14 13:31 UTC
  To: Andi Kleen; +Cc: mingo, tglx, hpa, neleai, linux-kernel, Ling Ma

In the 3.14 kernel source, memcpy and memset occur 19622 and 14189
times respectively, so memset is still important for us, correct?

Thanks
Ling

2014-04-14 6:03 GMT+08:00, Andi Kleen <andi@firstfloor.org>:
> On Sun, Apr 13, 2014 at 11:11:59PM +0800, Ling Ma wrote:
>> Any further comments?
>
> It would be good to test on some more machines to make sure you don't
> cause regressions.
>
> But I'm not aware of any workload that is doing a lot of memset (not
> counting clear_page).  Copies likely matter much more.
>
> -Andi
>


* Re: [PATCH RFC] x86: Improve memset with general 64bit instruction
From: Andi Kleen @ 2014-04-15  5:01 UTC
  To: Ling Ma; +Cc: Andi Kleen, mingo, tglx, hpa, neleai, linux-kernel, Ling Ma

On Mon, Apr 14, 2014 at 09:31:16PM +0800, Ling Ma wrote:
> In the 3.14 kernel source, memcpy and memset occur 19622 and 14189
> times respectively, so memset is still important for us, correct?

Did you ever see it in a profile log as being hot?
I haven't. Static counts don't mean much.

-Andi
