linux-crypto.vger.kernel.org archive mirror
* [PATCH] crypto: x86/chacha-sse3 - use unaligned loads for state array
@ 2020-07-08  9:11 Ard Biesheuvel
  2020-07-08 11:56 ` Martin Willi
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Ard Biesheuvel @ 2020-07-08  9:11 UTC (permalink / raw)
  To: linux-crypto; +Cc: Ard Biesheuvel, Martin Willi, Herbert Xu, Eric Biggers

Due to the fact that the x86 port does not support allocating objects
on the stack with an alignment that exceeds 8 bytes, we have a rather
ugly hack in the x86 code for ChaCha to ensure that the state array is
aligned to 16 bytes, allowing the SSE3 implementation of the algorithm
to use aligned loads.
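
For context, a minimal sketch of the pattern being removed (simplified
and illustrative only: the kernel's PTR_ALIGN preserves the pointer
type, and the helper name chacha_example is made up). The glue code
over-allocates the stack buffer by two words and rounds the pointer up
to the next 16-byte boundary:

#include <stdint.h>

#define CHACHA_STATE_ALIGN	16

/* Round p up to the next multiple of a (a must be a power of two). */
#define PTR_ALIGN(p, a) \
	((uint32_t *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

static void chacha_example(void)
{
	/*
	 * The stack guarantees only 8-byte alignment, so reserve two
	 * spare 32-bit words (8 bytes); rounding up then stays inside
	 * the buffer and leaves 16 properly aligned state words.
	 */
	uint32_t state_buf[16 + 2] __attribute__((aligned(8)));
	uint32_t *state = PTR_ALIGN(state_buf, CHACHA_STATE_ALIGN);

	(void)state;	/* aligned for the movdqa loads below */
}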

Given that the performance benefit of using aligned loads appears to
be limited (~0.25% for 1k blocks using tcrypt on a Core i7-8650U), and
the fact that this hack has leaked into generic ChaCha code, let's just
remove it.

Cc: Martin Willi <martin@strongswan.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/crypto/chacha-ssse3-x86_64.S | 16 ++++++++--------
 arch/x86/crypto/chacha_glue.c         | 17 ++---------------
 include/crypto/chacha.h               |  4 ----
 3 files changed, 10 insertions(+), 27 deletions(-)

diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S
index a38ab2512a6f..ca1788bfee16 100644
--- a/arch/x86/crypto/chacha-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S
@@ -120,10 +120,10 @@ SYM_FUNC_START(chacha_block_xor_ssse3)
 	FRAME_BEGIN
 
 	# x0..3 = s0..3
-	movdqa		0x00(%rdi),%xmm0
-	movdqa		0x10(%rdi),%xmm1
-	movdqa		0x20(%rdi),%xmm2
-	movdqa		0x30(%rdi),%xmm3
+	movdqu		0x00(%rdi),%xmm0
+	movdqu		0x10(%rdi),%xmm1
+	movdqu		0x20(%rdi),%xmm2
+	movdqu		0x30(%rdi),%xmm3
 	movdqa		%xmm0,%xmm8
 	movdqa		%xmm1,%xmm9
 	movdqa		%xmm2,%xmm10
@@ -205,10 +205,10 @@ SYM_FUNC_START(hchacha_block_ssse3)
 	# %edx: nrounds
 	FRAME_BEGIN
 
-	movdqa		0x00(%rdi),%xmm0
-	movdqa		0x10(%rdi),%xmm1
-	movdqa		0x20(%rdi),%xmm2
-	movdqa		0x30(%rdi),%xmm3
+	movdqu		0x00(%rdi),%xmm0
+	movdqu		0x10(%rdi),%xmm1
+	movdqu		0x20(%rdi),%xmm2
+	movdqu		0x30(%rdi),%xmm3
 
 	mov		%edx,%r8d
 	call		chacha_permute
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
index 22250091cdbe..e67a59130025 100644
--- a/arch/x86/crypto/chacha_glue.c
+++ b/arch/x86/crypto/chacha_glue.c
@@ -14,8 +14,6 @@
 #include <linux/module.h>
 #include <asm/simd.h>
 
-#define CHACHA_STATE_ALIGN 16
-
 asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
 				       unsigned int len, int nrounds);
 asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
@@ -124,8 +122,6 @@ static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
 
 void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) {
 		hchacha_block_generic(state, stream, nrounds);
 	} else {
@@ -138,8 +134,6 @@ EXPORT_SYMBOL(hchacha_block_arch);
 
 void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	chacha_init_generic(state, key, iv);
 }
 EXPORT_SYMBOL(chacha_init_arch);
@@ -147,8 +141,6 @@ EXPORT_SYMBOL(chacha_init_arch);
 void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
 		       int nrounds)
 {
-	state = PTR_ALIGN(state, CHACHA_STATE_ALIGN);
-
 	if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() ||
 	    bytes <= CHACHA_BLOCK_SIZE)
 		return chacha_crypt_generic(state, dst, src, bytes, nrounds);
@@ -170,15 +162,12 @@ EXPORT_SYMBOL(chacha_crypt_arch);
 static int chacha_simd_stream_xor(struct skcipher_request *req,
 				  const struct chacha_ctx *ctx, const u8 *iv)
 {
-	u32 *state, state_buf[16 + 2] __aligned(8);
+	u32 state[CHACHA_STATE_WORDS] __aligned(8);
 	struct skcipher_walk walk;
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, false);
 
-	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
-
 	chacha_init_generic(state, ctx->key, iv);
 
 	while (walk.nbytes > 0) {
@@ -217,12 +206,10 @@ static int xchacha_simd(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
-	u32 *state, state_buf[16 + 2] __aligned(8);
+	u32 state[CHACHA_STATE_WORDS] __aligned(8);
 	struct chacha_ctx subctx;
 	u8 real_iv[16];
 
-	BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
-	state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
 	chacha_init_generic(state, ctx->key, req->iv);
 
 	if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) {
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index 2676f4fbd4c1..3a1c72fdb7cf 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -25,11 +25,7 @@
 #define CHACHA_BLOCK_SIZE	64
 #define CHACHAPOLY_IV_SIZE	12
 
-#ifdef CONFIG_X86_64
-#define CHACHA_STATE_WORDS	((CHACHA_BLOCK_SIZE + 12) / sizeof(u32))
-#else
 #define CHACHA_STATE_WORDS	(CHACHA_BLOCK_SIZE / sizeof(u32))
-#endif
 
 /* 192-bit nonce, then 64-bit stream position */
 #define XCHACHA_IV_SIZE		32
-- 
2.17.1
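
The movdqa -> movdqu change above swaps aligned loads for unaligned
ones. As a rough user-space illustration with SSE2 intrinsics (a
sketch, not kernel code; the helper names are made up): movdqa
(_mm_load_si128) raises #GP unless its address is 16-byte aligned,
while movdqu (_mm_loadu_si128) accepts any address and, on recent x86
cores, runs just as fast when the data happens to be aligned, which is
why the measured cost of this change is so small.

#include <emmintrin.h>	/* SSE2: _mm_load_si128 / _mm_loadu_si128 */
#include <stdint.h>

/* movdqu: any address is fine; this is what the patched code emits. */
static inline __m128i load_row_unaligned(const uint32_t *state)
{
	return _mm_loadu_si128((const __m128i *)state);
}

/* movdqa: faults on a misaligned address; what the old code emitted. */
static inline __m128i load_row_aligned(const uint32_t *state)
{
	return _mm_load_si128((const __m128i *)state);
}

With the hack gone, callers simply declare the state as
u32 state[CHACHA_STATE_WORDS] __aligned(8), as the updated glue code
above does.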



* Re: [PATCH] crypto: x86/chacha-sse3 - use unaligned loads for state array
  2020-07-08  9:11 [PATCH] crypto: x86/chacha-sse3 - use unaligned loads for state array Ard Biesheuvel
@ 2020-07-08 11:56 ` Martin Willi
  2020-07-08 16:19 ` Eric Biggers
  2020-07-16 11:54 ` Herbert Xu
  2 siblings, 0 replies; 4+ messages in thread
From: Martin Willi @ 2020-07-08 11:56 UTC (permalink / raw)
  To: Ard Biesheuvel, linux-crypto; +Cc: Herbert Xu, Eric Biggers


> Due to the fact that the x86 port does not support allocating objects
> on the stack with an alignment that exceeds 8 bytes, we have a rather
> ugly hack in the x86 code for ChaCha to ensure that the state array
> is aligned to 16 bytes, allowing the SSE3 implementation of the
> algorithm to use aligned loads.
> 
> Given that the performance benefit of using aligned loads appears
> to be limited (~0.25% for 1k blocks using tcrypt on a Core i7-8650U),
> and the fact that this hack has leaked into generic ChaCha code,
> let's just remove it.

Reviewed-by: Martin Willi <martin@strongswan.org>

Thanks,
Martin



* Re: [PATCH] crypto: x86/chacha-sse3 - use unaligned loads for state array
  2020-07-08  9:11 [PATCH] crypto: x86/chacha-sse3 - use unaligned loads for state array Ard Biesheuvel
  2020-07-08 11:56 ` Martin Willi
@ 2020-07-08 16:19 ` Eric Biggers
  2020-07-16 11:54 ` Herbert Xu
  2 siblings, 0 replies; 4+ messages in thread
From: Eric Biggers @ 2020-07-08 16:19 UTC (permalink / raw)
  To: Ard Biesheuvel; +Cc: linux-crypto, Martin Willi, Herbert Xu

On Wed, Jul 08, 2020 at 12:11:18PM +0300, Ard Biesheuvel wrote:
> Due to the fact that the x86 port does not support allocating objects
> on the stack with an alignment that exceeds 8 bytes, we have a rather
> ugly hack in the x86 code for ChaCha to ensure that the state array is
> aligned to 16 bytes, allowing the SSE3 implementation of the algorithm
> to use aligned loads.
> 
> Given that the performance benefit of using aligned loads appears to
> be limited (~0.25% for 1k blocks using tcrypt on a Core i7-8650U), and
> the fact that this hack has leaked into generic ChaCha code, let's just
> remove it.
> 
> Cc: Martin Willi <martin@strongswan.org>
> Cc: Herbert Xu <herbert@gondor.apana.org.au>
> Cc: Eric Biggers <ebiggers@kernel.org>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

Reviewed-by: Eric Biggers <ebiggers@google.com>


* Re: [PATCH] crypto: x86/chacha-sse3 - use unaligned loads for state array
  2020-07-08  9:11 [PATCH] crypto: x86/chacha-sse3 - use unaligned loads for state array Ard Biesheuvel
  2020-07-08 11:56 ` Martin Willi
  2020-07-08 16:19 ` Eric Biggers
@ 2020-07-16 11:54 ` Herbert Xu
  2 siblings, 0 replies; 4+ messages in thread
From: Herbert Xu @ 2020-07-16 11:54 UTC (permalink / raw)
  To: Ard Biesheuvel; +Cc: linux-crypto, Martin Willi, Eric Biggers

On Wed, Jul 08, 2020 at 12:11:18PM +0300, Ard Biesheuvel wrote:
> Due to the fact that the x86 port does not support allocating objects
> on the stack with an alignment that exceeds 8 bytes, we have a rather
> ugly hack in the x86 code for ChaCha to ensure that the state array is
> aligned to 16 bytes, allowing the SSE3 implementation of the algorithm
> to use aligned loads.
> 
> Given that the performance benefit of using aligned loads appears to
> be limited (~0.25% for 1k blocks using tcrypt on a Core i7-8650U), and
> the fact that this hack has leaked into generic ChaCha code, let's just
> remove it.
> 
> Cc: Martin Willi <martin@strongswan.org>
> Cc: Herbert Xu <herbert@gondor.apana.org.au>
> Cc: Eric Biggers <ebiggers@kernel.org>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
>  arch/x86/crypto/chacha-ssse3-x86_64.S | 16 ++++++++--------
>  arch/x86/crypto/chacha_glue.c         | 17 ++---------------
>  include/crypto/chacha.h               |  4 ----
>  3 files changed, 10 insertions(+), 27 deletions(-)

Patch applied.  Thanks.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


