* [RFC PATCH 0/7] crypto: Add helpers for allocating with DMA alignment
       [not found]                 ` <Yl0jPdfdUkaStDN5@gondor.apana.org.au>
@ 2022-05-10 11:03                   ` Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 1/7] crypto: Prepare to move crypto_tfm_ctx Herbert Xu
                                       ` (6 more replies)
  0 siblings, 7 replies; 10+ messages in thread
From: Herbert Xu @ 2022-05-10 11:03 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: Ard Biesheuvel, Will Deacon, Marc Zyngier, Arnd Bergmann,
	Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

This patch series adds helpers to allow drivers to explicitly
request ARCH_DMA_MINALIGN when allocating memory through the
Crypto API.

Note that I've only converted one file in one driver, as this
series is only meant to show how the conversion is done and to
find out what else we may need.
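
For illustration, here is a minimal sketch of the intended usage
(a hypothetical driver with invented names; patch 7 shows the real
conversion for caam):

	#include <crypto/internal/aead.h>

	struct xyz_ctx {
		dma_addr_t sh_desc_dma;	/* descriptor read by the device */
	};

	struct xyz_req_ctx {
		u32 hw_desc[16];	/* per-request descriptor for the device */
	};

	static int xyz_aead_init(struct crypto_aead *tfm)
	{
		/*
		 * Aligned to ARCH_DMA_MINALIGN rather than just
		 * kmalloc's minimum, provided cra_alignmask was set
		 * to ARCH_DMA_MINALIGN - 1 at registration time.
		 */
		struct xyz_ctx *ctx = crypto_aead_ctx_dma(tfm);

		ctx->sh_desc_dma = 0;

		/* Pad reqsize so the request ctx can be DMA-aligned too. */
		crypto_aead_set_reqsize_dma(tfm, sizeof(struct xyz_req_ctx));

		return 0;
	}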

Thanks,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


* [RFC PATCH 1/7] crypto: Prepare to move crypto_tfm_ctx
  2022-05-10 11:03                   ` [RFC PATCH 0/7] crypto: Add helpers for allocating with DMA alignment Herbert Xu
@ 2022-05-10 11:07                     ` Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 2/7] crypto: api - Add crypto_tfm_ctx_dma Herbert Xu
                                       ` (5 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Herbert Xu @ 2022-05-10 11:07 UTC (permalink / raw)
  To: Catalin Marinas, Ard Biesheuvel, Will Deacon, Marc Zyngier,
	Arnd Bergmann, Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

The helper crypto_tfm_ctx is only used by the Crypto API algorithm
code and should really be in algapi.h.  However, for historical
reasons many files have relied on it being available in crypto.h.
This patch changes those files to use algapi.h instead, in
preparation for the move.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

 arch/arm/crypto/aes-cipher-glue.c      |    2 +-
 arch/arm64/crypto/aes-ce-glue.c        |    2 +-
 arch/arm64/crypto/aes-cipher-glue.c    |    2 +-
 arch/arm64/crypto/sm4-ce-cipher-glue.c |    2 +-
 arch/x86/crypto/twofish_glue.c         |    2 +-
 crypto/aes_generic.c                   |    2 +-
 crypto/aes_ti.c                        |    2 +-
 crypto/anubis.c                        |    2 +-
 crypto/blowfish_common.c               |    3 ++-
 crypto/blowfish_generic.c              |    3 ++-
 crypto/camellia_generic.c              |    2 +-
 crypto/cast5_generic.c                 |    2 +-
 crypto/cast6_generic.c                 |    2 +-
 crypto/des_generic.c                   |    2 +-
 crypto/fcrypt.c                        |    2 +-
 crypto/khazad.c                        |    2 +-
 crypto/seed.c                          |    2 +-
 crypto/serpent_generic.c               |    2 +-
 crypto/sm4_generic.c                   |    2 +-
 crypto/tea.c                           |    2 +-
 crypto/twofish_common.c                |    2 +-
 crypto/twofish_generic.c               |    2 +-
 drivers/crypto/nx/nx-842.h             |    2 +-
 include/crypto/internal/acompress.h    |    2 ++
 include/crypto/internal/scompress.h    |    3 ++-
 25 files changed, 29 insertions(+), 24 deletions(-)

diff --git a/arch/arm/crypto/aes-cipher-glue.c b/arch/arm/crypto/aes-cipher-glue.c
index 8cd00f56800e7..6dfaef2d8f913 100644
--- a/arch/arm/crypto/aes-cipher-glue.c
+++ b/arch/arm/crypto/aes-cipher-glue.c
@@ -7,7 +7,7 @@
  */
 
 #include <crypto/aes.h>
-#include <linux/crypto.h>
+#include <crypto/algapi.h>
 #include <linux/module.h>
 
 asmlinkage void __aes_arm_encrypt(u32 *rk, int rounds, const u8 *in, u8 *out);
diff --git a/arch/arm64/crypto/aes-ce-glue.c b/arch/arm64/crypto/aes-ce-glue.c
index 56a5f6f0b0c12..e921823ca103a 100644
--- a/arch/arm64/crypto/aes-ce-glue.c
+++ b/arch/arm64/crypto/aes-ce-glue.c
@@ -9,9 +9,9 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 #include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
-#include <linux/crypto.h>
 #include <linux/module.h>
 
 #include "aes-ce-setkey.h"
diff --git a/arch/arm64/crypto/aes-cipher-glue.c b/arch/arm64/crypto/aes-cipher-glue.c
index 8caf6dfefce88..4ec55e568941c 100644
--- a/arch/arm64/crypto/aes-cipher-glue.c
+++ b/arch/arm64/crypto/aes-cipher-glue.c
@@ -6,7 +6,7 @@
  */
 
 #include <crypto/aes.h>
-#include <linux/crypto.h>
+#include <crypto/algapi.h>
 #include <linux/module.h>
 
 asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
diff --git a/arch/arm64/crypto/sm4-ce-cipher-glue.c b/arch/arm64/crypto/sm4-ce-cipher-glue.c
index 76a34ef4abbbf..c31d76fb5a177 100644
--- a/arch/arm64/crypto/sm4-ce-cipher-glue.c
+++ b/arch/arm64/crypto/sm4-ce-cipher-glue.c
@@ -2,11 +2,11 @@
 
 #include <asm/neon.h>
 #include <asm/simd.h>
+#include <crypto/algapi.h>
 #include <crypto/sm4.h>
 #include <crypto/internal/simd.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 
 MODULE_ALIAS_CRYPTO("sm4");
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index f9c4adc274040..0614beece2793 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -38,8 +38,8 @@
  * Third Edition.
  */
 
+#include <crypto/algapi.h>
 #include <crypto/twofish.h>
-#include <linux/crypto.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 27ab279318137..666474b81c6aa 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -48,11 +48,11 @@
  */
 
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 205c2c257d492..a3b342f92fab6 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -6,7 +6,7 @@
  */
 
 #include <crypto/aes.h>
-#include <linux/crypto.h>
+#include <crypto/algapi.h>
 #include <linux/module.h>
 
 static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key,
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 5da0241ef453c..9f0cf61bbc6e2 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -29,11 +29,11 @@
  *
  */
 
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/byteorder.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 
 #define ANUBIS_MIN_KEY_SIZE	16
diff --git a/crypto/blowfish_common.c b/crypto/blowfish_common.c
index 1c072012baff4..c0208ce269a33 100644
--- a/crypto/blowfish_common.c
+++ b/crypto/blowfish_common.c
@@ -14,11 +14,12 @@
  * Copyright (c) Kyle McMartin <kyle@debian.org>
  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
  */
+
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/byteorder.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 #include <crypto/blowfish.h>
 
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 003b52c6880ea..0e74c7242e77e 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -11,11 +11,12 @@
  * Copyright (c) Kyle McMartin <kyle@debian.org>
  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
  */
+
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/unaligned.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 #include <crypto/blowfish.h>
 
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index fd1a88af9e77f..c04670cf51acf 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -9,7 +9,7 @@
  *  https://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
  */
 
-#include <linux/crypto.h>
+#include <crypto/algapi.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 0257c14cefc25..085a1eedae03b 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -14,8 +14,8 @@
 
 
 #include <asm/unaligned.h>
+#include <crypto/algapi.h>
 #include <linux/init.h>
-#include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/string.h>
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index 75346380aa0bc..34f1ab53e3a71 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -11,8 +11,8 @@
 
 
 #include <asm/unaligned.h>
+#include <crypto/algapi.h>
 #include <linux/init.h>
-#include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/string.h>
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index c85354a5e94c7..1274e18d3eb90 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -8,11 +8,11 @@
  */
 
 #include <asm/byteorder.h>
+#include <crypto/algapi.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 
 #include <crypto/internal/des.h>
 
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 76a04d000c0d3..95a16e88899ba 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -43,10 +43,10 @@
  */
 
 #include <asm/byteorder.h>
+#include <crypto/algapi.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 
 #define ROUNDS 16
 
diff --git a/crypto/khazad.c b/crypto/khazad.c
index f19339954c89e..70cafe73f9740 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -19,11 +19,11 @@
  *
  */
 
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/byteorder.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 
 #define KHAZAD_KEY_SIZE		16
diff --git a/crypto/seed.c b/crypto/seed.c
index 27720140820ef..d0506ade2a5f8 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -8,11 +8,11 @@
  * Copyright (C) 2007 Korea Information Security Agency (KISA).
  */
 
+#include <crypto/algapi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <asm/byteorder.h>
 
 #define SEED_NUM_KCONSTANTS	16
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 45f98b750053e..c6bca47931e21 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -7,11 +7,11 @@
  * Copyright (C) 2002 Dag Arne Osvik <osvik@ii.uib.no>
  */
 
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <asm/unaligned.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 #include <crypto/serpent.h>
 
diff --git a/crypto/sm4_generic.c b/crypto/sm4_generic.c
index 4a6480a27fee5..560eba37dc55e 100644
--- a/crypto/sm4_generic.c
+++ b/crypto/sm4_generic.c
@@ -7,12 +7,12 @@
  * All rights reserved.
  */
 
+#include <crypto/algapi.h>
 #include <crypto/sm4.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <asm/byteorder.h>
 #include <asm/unaligned.h>
 
diff --git a/crypto/tea.c b/crypto/tea.c
index 02efc5d816903..896f863f3067c 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -14,11 +14,11 @@
  * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com
  */
 
+#include <crypto/algapi.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/byteorder.h>
-#include <linux/crypto.h>
 #include <linux/types.h>
 
 #define TEA_KEY_SIZE		16
diff --git a/crypto/twofish_common.c b/crypto/twofish_common.c
index d23fa531b91f1..3a95e232aa236 100644
--- a/crypto/twofish_common.c
+++ b/crypto/twofish_common.c
@@ -25,9 +25,9 @@
  * Third Edition.
  */
 
+#include <crypto/algapi.h>
 #include <crypto/twofish.h>
 #include <linux/bitops.h>
-#include <linux/crypto.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 86b2f067a4162..557915e4062d7 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -25,12 +25,12 @@
  */
 
 #include <asm/unaligned.h>
+#include <crypto/algapi.h>
 #include <crypto/twofish.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/crypto.h>
 #include <linux/bitops.h>
 
 /* Macros to compute the g() function in the encryption and decryption
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index b66f19ac600f2..7590bfb24d79b 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -3,10 +3,10 @@
 #ifndef __NX_842_H__
 #define __NX_842_H__
 
+#include <crypto/algapi.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/crypto.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/io.h>
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index cfc47e18820fb..49339003bd2ce 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -8,7 +8,9 @@
  */
 #ifndef _CRYPTO_ACOMP_INT_H
 #define _CRYPTO_ACOMP_INT_H
+
 #include <crypto/acompress.h>
+#include <crypto/algapi.h>
 
 /*
  * Transform internal helpers.
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index f834274c2493f..252cc949d4ee5 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -8,7 +8,8 @@
  */
 #ifndef _CRYPTO_SCOMP_INT_H
 #define _CRYPTO_SCOMP_INT_H
-#include <linux/crypto.h>
+
+#include <crypto/algapi.h>
 
 #define SCOMP_SCRATCH_SIZE	131072
 


* [RFC PATCH 2/7] crypto: api - Add crypto_tfm_ctx_dma
  2022-05-10 11:03                   ` [RFC PATCH 0/7] crypto: Add helpers for allocating with DMA alignment Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 1/7] crypto: Prepare to move crypto_tfm_ctx Herbert Xu
@ 2022-05-10 11:07                     ` Herbert Xu
  2022-05-10 17:10                       ` Catalin Marinas
  2022-05-10 11:07                     ` [RFC PATCH 3/7] crypto: aead - Add ctx helpers with DMA alignment Herbert Xu
                                       ` (4 subsequent siblings)
  6 siblings, 1 reply; 10+ messages in thread
From: Herbert Xu @ 2022-05-10 11:07 UTC (permalink / raw)
  To: Catalin Marinas, Ard Biesheuvel, Will Deacon, Marc Zyngier,
	Arnd Bergmann, Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

This patch adds the helper crypto_tfm_ctx_dma.  It is similar to
crypto_tfm_ctx_aligned, but the alignment is hard-coded to the
macro ARCH_DMA_MINALIGN.

This patch also moves crypto_tfm_ctx into algapi.h.
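
Note that a user of crypto_tfm_ctx_dma must also ensure that the
extra space consumed by the PTR_ALIGN is actually allocated.  The
way to do that (as patch 7 does for caam) is to declare the
alignment at registration time:

	#ifdef ARCH_DMA_MINALIGN
		alg->base.cra_alignmask = ARCH_DMA_MINALIGN - 1;
	#endif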

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

 include/crypto/algapi.h |   28 ++++++++++++++++++++++++++--
 include/linux/crypto.h  |    5 -----
 2 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index f50c5d1725da5..cdf12e51c53a0 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -189,10 +189,34 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
 	}
 }
 
+static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
+{
+	return tfm->__crt_ctx;
+}
+
+static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
+					 unsigned int align)
+{
+	if (align <= crypto_tfm_ctx_alignment())
+		align = 1;
+
+	return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
+}
+
 static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
 {
-	return PTR_ALIGN(crypto_tfm_ctx(tfm),
-			 crypto_tfm_alg_alignmask(tfm) + 1);
+	return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1);
+}
+
+static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
+{
+	unsigned int align = 1;
+
+#ifdef ARCH_DMA_MINALIGN
+	align = ARCH_DMA_MINALIGN;
+#endif
+
+	return crypto_tfm_ctx_align(tfm, align);
 }
 
 static inline struct crypto_instance *crypto_tfm_alg_instance(
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 2324ab6f1846b..5d1e961f810ec 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -714,11 +714,6 @@ static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
 	tfm->crt_flags &= ~flags;
 }
 
-static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
-{
-	return tfm->__crt_ctx;
-}
-
 static inline unsigned int crypto_tfm_ctx_alignment(void)
 {
 	struct crypto_tfm *tfm;


* [RFC PATCH 3/7] crypto: aead - Add ctx helpers with DMA alignment
  2022-05-10 11:03                   ` [RFC PATCH 0/7] crypto: Add helpers for allocating with DMA alignment Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 1/7] crypto: Prepare to move crypto_tfm_ctx Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 2/7] crypto: api - Add crypto_tfm_ctx_dma Herbert Xu
@ 2022-05-10 11:07                     ` Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 4/7] crypto: hash " Herbert Xu
                                       ` (3 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Herbert Xu @ 2022-05-10 11:07 UTC (permalink / raw)
  To: Catalin Marinas, Ard Biesheuvel, Will Deacon, Marc Zyngier,
	Arnd Bergmann, Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

This patch adds helpers to access the aead context structure and
request context structure with an added alignment for DMA access.
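
To make the reqsize padding concrete (a sketch assuming arm64's
ARCH_DMA_MINALIGN of 128 and a base context alignment of 8):

	reqsize += ARCH_DMA_MINALIGN & ~(crypto_tfm_ctx_alignment() - 1);
		/* 128 & ~7 => 128 extra bytes */

which leaves aead_request_ctx_dma() enough slack to PTR_ALIGN the
request context up to a 128-byte boundary.  If the base context
alignment already exceeds ARCH_DMA_MINALIGN, the expression
evaluates to 0 and no padding is added.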

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

 include/crypto/internal/aead.h |   28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 27b7b0224ea6f..be225f3f7fdbf 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -39,6 +39,11 @@ static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
 	return crypto_tfm_ctx(&tfm->base);
 }
 
+static inline void *crypto_aead_ctx_dma(struct crypto_aead *tfm)
+{
+	return crypto_tfm_ctx_dma(&tfm->base);
+}
+
 static inline struct crypto_instance *aead_crypto_instance(
 	struct aead_instance *inst)
 {
@@ -65,6 +70,20 @@ static inline void *aead_request_ctx(struct aead_request *req)
 	return req->__ctx;
 }
 
+static inline void *aead_request_ctx_dma(struct aead_request *req)
+{
+	unsigned int align = 1;
+
+#ifdef ARCH_DMA_MINALIGN
+	align = ARCH_DMA_MINALIGN;
+#endif
+
+	if (align <= crypto_tfm_ctx_alignment())
+		align = 1;
+
+	return PTR_ALIGN(aead_request_ctx(req), align);
+}
+
 static inline void aead_request_complete(struct aead_request *req, int err)
 {
 	req->base.complete(&req->base, err);
@@ -108,6 +127,15 @@ static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
 	aead->reqsize = reqsize;
 }
 
+static inline void crypto_aead_set_reqsize_dma(struct crypto_aead *aead,
+					       unsigned int reqsize)
+{
+#ifdef ARCH_DMA_MINALIGN
+	reqsize += ARCH_DMA_MINALIGN & ~(crypto_tfm_ctx_alignment() - 1);
+#endif
+	aead->reqsize = reqsize;
+}
+
 static inline void aead_init_queue(struct aead_queue *queue,
 				   unsigned int max_qlen)
 {


* [RFC PATCH 4/7] crypto: hash - Add ctx helpers with DMA alignment
  2022-05-10 11:03                   ` [RFC PATCH 0/7] crypto: Add helpers for allocating with DMA alignment Herbert Xu
                                       ` (2 preceding siblings ...)
  2022-05-10 11:07                     ` [RFC PATCH 3/7] crypto: aead - Add ctx helpers with DMA alignment Herbert Xu
@ 2022-05-10 11:07                     ` Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 5/7] crypto: skcipher " Herbert Xu
                                       ` (2 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Herbert Xu @ 2022-05-10 11:07 UTC (permalink / raw)
  To: Catalin Marinas, Ard Biesheuvel, Will Deacon, Marc Zyngier,
	Arnd Bergmann, Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

This patch adds helpers to access the ahash context structure and
request context structure with an added alignment for DMA access.
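
As a usage sketch (hypothetical driver, invented names), a request
callback would then fetch its state with the DMA-aligned accessor:

	static int xyz_ahash_init(struct ahash_request *req)
	{
		/*
		 * DMA-aligned request state; requires the tfm to have
		 * used crypto_ahash_set_reqsize_dma() so that the
		 * padding for the alignment actually exists.
		 */
		struct xyz_hash_req_ctx *rctx = ahash_request_ctx_dma(req);

		memset(rctx, 0, sizeof(*rctx));
		return 0;
	}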

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

 include/crypto/internal/hash.h |   28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 25806141db591..f635a61fd1a0e 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -134,6 +134,11 @@ static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
 	return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
 }
 
+static inline void *crypto_ahash_ctx_dma(struct crypto_ahash *tfm)
+{
+	return crypto_tfm_ctx_dma(crypto_ahash_tfm(tfm));
+}
+
 static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
 {
 	return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
@@ -146,6 +151,15 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
 	tfm->reqsize = reqsize;
 }
 
+static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash,
+						unsigned int reqsize)
+{
+#ifdef ARCH_DMA_MINALIGN
+	reqsize += ARCH_DMA_MINALIGN & ~(crypto_tfm_ctx_alignment() - 1);
+#endif
+	ahash->reqsize = reqsize;
+}
+
 static inline struct crypto_instance *ahash_crypto_instance(
 	struct ahash_instance *inst)
 {
@@ -169,6 +183,20 @@ static inline void *ahash_instance_ctx(struct ahash_instance *inst)
 	return crypto_instance_ctx(ahash_crypto_instance(inst));
 }
 
+static inline void *ahash_request_ctx_dma(struct ahash_request *req)
+{
+	unsigned int align = 1;
+
+#ifdef ARCH_DMA_MINALIGN
+	align = ARCH_DMA_MINALIGN;
+#endif
+
+	if (align <= crypto_tfm_ctx_alignment())
+		align = 1;
+
+	return PTR_ALIGN(ahash_request_ctx(req), align);
+}
+
 static inline void ahash_request_complete(struct ahash_request *req, int err)
 {
 	req->base.complete(&req->base, err);


* [RFC PATCH 5/7] crypto: skcipher - Add ctx helpers with DMA alignment
  2022-05-10 11:03                   ` [RFC PATCH 0/7] crypto: Add helpers for allocating with DMA alignment Herbert Xu
                                       ` (3 preceding siblings ...)
  2022-05-10 11:07                     ` [RFC PATCH 4/7] crypto: hash " Herbert Xu
@ 2022-05-10 11:07                     ` Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 6/7] crypto: api - Increase MAX_ALGAPI_ALIGNMASK to 127 Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 7/7] crypto: caam - Explicitly request DMA alignment Herbert Xu
  6 siblings, 0 replies; 10+ messages in thread
From: Herbert Xu @ 2022-05-10 11:07 UTC (permalink / raw)
  To: Catalin Marinas, Ard Biesheuvel, Will Deacon, Marc Zyngier,
	Arnd Bergmann, Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

This patch adds helpers to access the skcipher context structure and
request context structure with an added alignment for DMA access.
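
Patch 7 converts caam to these helpers; its skcipher init then
reads:

	crypto_skcipher_set_reqsize_dma(
		tfm, sizeof(struct caam_skcipher_req_ctx) +
		     crypto_skcipher_reqsize(fallback));

with every crypto_skcipher_ctx()/skcipher_request_ctx() call
switched to the _dma variant.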

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

 include/crypto/internal/skcipher.h |   28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index a2339f80a6159..719822e42dc46 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -122,6 +122,15 @@ static inline void crypto_skcipher_set_reqsize(
 	skcipher->reqsize = reqsize;
 }
 
+static inline void crypto_skcipher_set_reqsize_dma(
+	struct crypto_skcipher *skcipher, unsigned int reqsize)
+{
+#ifdef ARCH_DMA_MINALIGN
+	reqsize += ARCH_DMA_MINALIGN & ~(crypto_tfm_ctx_alignment() - 1);
+#endif
+	skcipher->reqsize = reqsize;
+}
+
 int crypto_register_skcipher(struct skcipher_alg *alg);
 void crypto_unregister_skcipher(struct skcipher_alg *alg);
 int crypto_register_skciphers(struct skcipher_alg *algs, int count);
@@ -151,11 +160,30 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
 	return crypto_tfm_ctx(&tfm->base);
 }
 
+static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_ctx_dma(&tfm->base);
+}
+
 static inline void *skcipher_request_ctx(struct skcipher_request *req)
 {
 	return req->__ctx;
 }
 
+static inline void *skcipher_request_ctx_dma(struct skcipher_request *req)
+{
+	unsigned int align = 1;
+
+#ifdef ARCH_DMA_MINALIGN
+	align = ARCH_DMA_MINALIGN;
+#endif
+
+	if (align <= crypto_tfm_ctx_alignment())
+		align = 1;
+
+	return PTR_ALIGN(skcipher_request_ctx(req), align);
+}
+
 static inline u32 skcipher_request_flags(struct skcipher_request *req)
 {
 	return req->base.flags;


* [RFC PATCH 6/7] crypto: api - Increase MAX_ALGAPI_ALIGNMASK to 127
  2022-05-10 11:03                   ` [RFC PATCH 0/7] crypto: Add helpers for allocating with DMA alignment Herbert Xu
                                       ` (4 preceding siblings ...)
  2022-05-10 11:07                     ` [RFC PATCH 5/7] crypto: skcipher " Herbert Xu
@ 2022-05-10 11:07                     ` Herbert Xu
  2022-05-10 11:07                     ` [RFC PATCH 7/7] crypto: caam - Explicitly request DMA alignment Herbert Xu
  6 siblings, 0 replies; 10+ messages in thread
From: Herbert Xu @ 2022-05-10 11:07 UTC (permalink / raw)
  To: Catalin Marinas, Ard Biesheuvel, Will Deacon, Marc Zyngier,
	Arnd Bergmann, Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

Previously we limited the maximum alignment mask to 63, mostly
because of the on-stack buffers used by shash.  This patch
introduces a separate limit for shash algorithms and increases
the general limit to 127, which is the value we need for DMA
allocations on arm64.
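
For reference, the arithmetic (using arm64's ARCH_DMA_MINALIGN of
128):

	cra_alignmask = ARCH_DMA_MINALIGN - 1;	/* 127 */

while shash keeps the lower bound so that its on-stack buffers
stay small, e.g. u8 ubuf[MAX_SHASH_ALIGNMASK * 2] remains 126
bytes instead of growing to 254.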

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

 crypto/shash.c          |    9 +++++++--
 include/crypto/algapi.h |    2 +-
 2 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/crypto/shash.c b/crypto/shash.c
index 0a0a50cb694f0..0a10477eac060 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -18,6 +18,8 @@
 
 #include "internal.h"
 
+#define MAX_SHASH_ALIGNMASK 63
+
 static const struct crypto_type crypto_shash_type;
 
 static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -100,7 +102,7 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
 	 * We cannot count on __aligned() working for large values:
 	 * https://patchwork.kernel.org/patch/9507697/
 	 */
-	u8 ubuf[MAX_ALGAPI_ALIGNMASK * 2];
+	u8 ubuf[MAX_SHASH_ALIGNMASK * 2];
 	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
 	int err;
 
@@ -142,7 +144,7 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
 	 * We cannot count on __aligned() working for large values:
 	 * https://patchwork.kernel.org/patch/9507697/
 	 */
-	u8 ubuf[MAX_ALGAPI_ALIGNMASK + HASH_MAX_DIGESTSIZE];
+	u8 ubuf[MAX_SHASH_ALIGNMASK + HASH_MAX_DIGESTSIZE];
 	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
 	int err;
 
@@ -530,6 +532,9 @@ static int shash_prepare_alg(struct shash_alg *alg)
 	    alg->statesize > HASH_MAX_STATESIZE)
 		return -EINVAL;
 
+	if (base->cra_alignmask > MAX_SHASH_ALIGNMASK)
+		return -EINVAL;
+
 	if ((alg->export && !alg->import) || (alg->import && !alg->export))
 		return -EINVAL;
 
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index cdf12e51c53a0..16cfd823ee911 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -21,7 +21,7 @@
  * algs and architectures. Ciphers have a lower maximum size.
  */
 #define MAX_ALGAPI_BLOCKSIZE		160
-#define MAX_ALGAPI_ALIGNMASK		63
+#define MAX_ALGAPI_ALIGNMASK		127
 #define MAX_CIPHER_BLOCKSIZE		16
 #define MAX_CIPHER_ALIGNMASK		15
 


* [RFC PATCH 7/7] crypto: caam - Explicitly request DMA alignment
  2022-05-10 11:03                   ` [RFC PATCH 0/7] crypto: Add helpers for allocating with DMA alignment Herbert Xu
                                       ` (5 preceding siblings ...)
  2022-05-10 11:07                     ` [RFC PATCH 6/7] crypto: api - Increase MAX_ALGAPI_ALIGNMASK to 127 Herbert Xu
@ 2022-05-10 11:07                     ` Herbert Xu
  6 siblings, 0 replies; 10+ messages in thread
From: Herbert Xu @ 2022-05-10 11:07 UTC (permalink / raw)
  To: Catalin Marinas, Ard Biesheuvel, Will Deacon, Marc Zyngier,
	Arnd Bergmann, Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

This patch uses the API helpers to explicitly request DMA alignment
when allocating memory through the Crypto API.  Previously it was
implicitly assumed that all kmalloc memory is aligned for DMA.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

 drivers/crypto/caam/caamalg.c |  101 ++++++++++++++++++++++--------------------
 1 file changed, 55 insertions(+), 46 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d3d8bb0a69900..c12c678dcb0fd 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -131,7 +131,7 @@ struct caam_aead_req_ctx {
 
 static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 	u32 *desc;
@@ -184,7 +184,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
 						 struct caam_aead_alg, aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 	u32 ctx1_iv_off = 0;
@@ -312,7 +312,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 static int aead_setauthsize(struct crypto_aead *authenc,
 				    unsigned int authsize)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
 
 	ctx->authsize = authsize;
 	aead_set_sh_desc(authenc);
@@ -322,7 +322,7 @@ static int aead_setauthsize(struct crypto_aead *authenc,
 
 static int gcm_set_sh_desc(struct crypto_aead *aead)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	u32 *desc;
@@ -372,7 +372,7 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 
 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
 	int err;
 
 	err = crypto_gcm_check_authsize(authsize);
@@ -387,7 +387,7 @@ static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
 
 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	u32 *desc;
@@ -440,7 +440,7 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 static int rfc4106_setauthsize(struct crypto_aead *authenc,
 			       unsigned int authsize)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
 	int err;
 
 	err = crypto_rfc4106_check_authsize(authsize);
@@ -455,7 +455,7 @@ static int rfc4106_setauthsize(struct crypto_aead *authenc,
 
 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	u32 *desc;
@@ -508,7 +508,7 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 static int rfc4543_setauthsize(struct crypto_aead *authenc,
 			       unsigned int authsize)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
 
 	if (authsize != 16)
 		return -EINVAL;
@@ -521,7 +521,7 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
 
 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	u32 *desc;
@@ -547,7 +547,7 @@ static int chachapoly_set_sh_desc(struct crypto_aead *aead)
 static int chachapoly_setauthsize(struct crypto_aead *aead,
 				  unsigned int authsize)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 
 	if (authsize != POLY1305_DIGEST_SIZE)
 		return -EINVAL;
@@ -559,7 +559,7 @@ static int chachapoly_setauthsize(struct crypto_aead *aead,
 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
 			     unsigned int keylen)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
 
@@ -575,7 +575,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
 static int aead_setkey(struct crypto_aead *aead,
 			       const u8 *key, unsigned int keylen)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 	struct crypto_authenc_keys keys;
@@ -656,7 +656,7 @@ static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 static int gcm_setkey(struct crypto_aead *aead,
 		      const u8 *key, unsigned int keylen)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	int err;
 
@@ -677,7 +677,7 @@ static int gcm_setkey(struct crypto_aead *aead,
 static int rfc4106_setkey(struct crypto_aead *aead,
 			  const u8 *key, unsigned int keylen)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	int err;
 
@@ -703,7 +703,7 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 static int rfc4543_setkey(struct crypto_aead *aead,
 			  const u8 *key, unsigned int keylen)
 {
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	int err;
 
@@ -729,7 +729,7 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 			   unsigned int keylen, const u32 ctx1_iv_off)
 {
-	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
 	struct caam_skcipher_alg *alg =
 		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
 			     skcipher);
@@ -832,7 +832,7 @@ static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 			       unsigned int keylen)
 {
-	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 	u32 *desc;
@@ -970,7 +970,7 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
 			    void *context)
 {
 	struct aead_request *req = context;
-	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+	struct caam_aead_req_ctx *rctx = aead_request_ctx_dma(req);
 	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
 	struct aead_edesc *edesc;
 	int ecode = 0;
@@ -1003,7 +1003,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
 {
 	struct skcipher_request *req = context;
 	struct skcipher_edesc *edesc;
-	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
 	int ivsize = crypto_skcipher_ivsize(skcipher);
@@ -1057,7 +1057,7 @@ static void init_aead_job(struct aead_request *req,
 			  bool all_contig, bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	int authsize = ctx->authsize;
 	u32 *desc = edesc->hw_desc;
 	u32 out_options, in_options;
@@ -1118,7 +1118,7 @@ static void init_gcm_job(struct aead_request *req,
 			 bool all_contig, bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	u32 *desc = edesc->hw_desc;
 	bool generic_gcm = (ivsize == GCM_AES_IV_SIZE);
@@ -1185,7 +1185,7 @@ static void init_authenc_job(struct aead_request *req,
 	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
 						 struct caam_aead_alg, aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
 	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
@@ -1234,7 +1234,7 @@ static void init_skcipher_job(struct skcipher_request *req,
 			      const bool encrypt)
 {
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	int ivsize = crypto_skcipher_ivsize(skcipher);
 	u32 *desc = edesc->hw_desc;
@@ -1290,9 +1290,9 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 					   bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
-	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+	struct caam_aead_req_ctx *rctx = aead_request_ctx_dma(req);
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
@@ -1429,7 +1429,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
 {
 	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
-	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+	struct caam_aead_req_ctx *rctx = aead_request_ctx_dma(req);
 	struct aead_edesc *edesc = rctx->edesc;
 	u32 *desc = edesc->hw_desc;
 	int ret;
@@ -1457,7 +1457,7 @@ static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
 {
 	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	bool all_contig;
 	u32 *desc;
@@ -1491,7 +1491,7 @@ static inline int aead_crypt(struct aead_request *req, bool encrypt)
 {
 	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	bool all_contig;
 
@@ -1524,8 +1524,8 @@ static int aead_decrypt(struct aead_request *req)
 static int aead_do_one_req(struct crypto_engine *engine, void *areq)
 {
 	struct aead_request *req = aead_request_cast(areq);
-	struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
-	struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req));
+	struct caam_aead_req_ctx *rctx = aead_request_ctx_dma(req);
 	u32 *desc = rctx->edesc->hw_desc;
 	int ret;
 
@@ -1550,7 +1550,7 @@ static inline int gcm_crypt(struct aead_request *req, bool encrypt)
 {
 	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
 	struct device *jrdev = ctx->jrdev;
 	bool all_contig;
 
@@ -1597,8 +1597,8 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 						   int desc_bytes)
 {
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	struct device *jrdev = ctx->jrdev;
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
@@ -1756,8 +1756,8 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
 {
 	struct skcipher_request *req = skcipher_request_cast(areq);
-	struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req));
+	struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx_dma(req);
 	u32 *desc = rctx->edesc->hw_desc;
 	int ret;
 
@@ -1790,7 +1790,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 {
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
@@ -1807,7 +1807,8 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 
 	if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
 			      ctx->xts_key_fallback)) {
-		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+		struct caam_skcipher_req_ctx *rctx =
+			skcipher_request_ctx_dma(req);
 
 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 		skcipher_request_set_callback(&rctx->fallback_req,
@@ -3397,7 +3398,7 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct caam_skcipher_alg *caam_alg =
 		container_of(alg, typeof(*caam_alg), skcipher);
-	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
 	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
 	int ret = 0;
 
@@ -3416,10 +3417,12 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
 		}
 
 		ctx->fallback = fallback;
-		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
-					    crypto_skcipher_reqsize(fallback));
+		crypto_skcipher_set_reqsize_dma(
+			tfm, sizeof(struct caam_skcipher_req_ctx) +
+			     crypto_skcipher_reqsize(fallback));
 	} else {
-		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+		crypto_skcipher_set_reqsize_dma(
+			tfm, sizeof(struct caam_skcipher_req_ctx));
 	}
 
 	ret = caam_init_common(ctx, &caam_alg->caam, false);
@@ -3434,9 +3437,9 @@ static int caam_aead_init(struct crypto_aead *tfm)
 	struct aead_alg *alg = crypto_aead_alg(tfm);
 	struct caam_aead_alg *caam_alg =
 		 container_of(alg, struct caam_aead_alg, aead);
-	struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+	struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
 
-	crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
+	crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_aead_req_ctx));
 
 	ctx->enginectx.op.do_one_request = aead_do_one_req;
 
@@ -3454,7 +3457,7 @@ static void caam_exit_common(struct caam_ctx *ctx)
 
 static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
 
 	if (ctx->fallback)
 		crypto_free_skcipher(ctx->fallback);
@@ -3463,7 +3466,7 @@ static void caam_cra_exit(struct crypto_skcipher *tfm)
 
 static void caam_aead_exit(struct crypto_aead *tfm)
 {
-	caam_exit_common(crypto_aead_ctx(tfm));
+	caam_exit_common(crypto_aead_ctx_dma(tfm));
 }
 
 void caam_algapi_exit(void)
@@ -3492,6 +3495,9 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+#ifdef ARCH_DMA_MINALIGN
+	alg->base.cra_alignmask = ARCH_DMA_MINALIGN - 1;
+#endif
 	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
 			      CRYPTO_ALG_KERN_DRIVER_ONLY);
 
@@ -3506,6 +3512,9 @@ static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+#ifdef ARCH_DMA_MINALIGN
+	alg->base.cra_alignmask = ARCH_DMA_MINALIGN - 1;
+#endif
 	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
 			      CRYPTO_ALG_KERN_DRIVER_ONLY;
 


* Re: [RFC PATCH 2/7] crypto: api - Add crypto_tfm_ctx_dma
  2022-05-10 11:07                     ` [RFC PATCH 2/7] crypto: api - Add crypto_tfm_ctx_dma Herbert Xu
@ 2022-05-10 17:10                       ` Catalin Marinas
  2022-05-12  3:57                         ` Herbert Xu
  0 siblings, 1 reply; 10+ messages in thread
From: Catalin Marinas @ 2022-05-10 17:10 UTC (permalink / raw)
  To: Herbert Xu
  Cc: Ard Biesheuvel, Will Deacon, Marc Zyngier, Arnd Bergmann,
	Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

Hi Herbert,

Thanks for putting this together.

On Tue, May 10, 2022 at 07:07:10PM +0800, Herbert Xu wrote:
> diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
> index f50c5d1725da5..cdf12e51c53a0 100644
> --- a/include/crypto/algapi.h
> +++ b/include/crypto/algapi.h
> @@ -189,10 +189,34 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
>  	}
>  }
>  
> +static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
> +{
> +	return tfm->__crt_ctx;
> +}
> +
> +static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm,
> +					 unsigned int align)
> +{
> +	if (align <= crypto_tfm_ctx_alignment())
> +		align = 1;
> +
> +	return PTR_ALIGN(crypto_tfm_ctx(tfm), align);
> +}
> +
>  static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
>  {
> -	return PTR_ALIGN(crypto_tfm_ctx(tfm),
> -			 crypto_tfm_alg_alignmask(tfm) + 1);
> +	return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1);
> +}
> +
> +static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm)
> +{
> +	unsigned int align = 1;
> +
> +#ifdef ARCH_DMA_MINALIGN
> +	align = ARCH_DMA_MINALIGN;
> +#endif
> +
> +	return crypto_tfm_ctx_align(tfm, align);
>  }

Is there a case where a driver needs the minimum alignment between
ARCH_DMA_MINALIGN and crypto_tfm_alg_alignmask()+1? Maybe for platforms
where ARCH_DMA_MINALIGN is 8 (fully coherent) but the device's bus
master alignment requirements are higher.

My plan is to have ARCH_DMA_MINALIGN always defined but potentially
higher than ARCH_KMALLOC_MINALIGN on specific architectures. I think
crypto_tfm_ctx_dma() should use ARCH_KMALLOC_MINALIGN (and no #ifdefs)
until I get my patches sorted and I'll replace it with ARCH_DMA_MINALIGN
once it's defined globally (still no #ifdefs). Currently in mainline
it's ARCH_KMALLOC_MINALIGN that gives the static DMA alignment.

With the explicit crypto_tfm_ctx_dma(), can CRYPTO_MINALIGN_ATTR be
dropped entirely? This may be beneficial in reducing the structure size
when no DMA is required.

-- 
Catalin


* Re: [RFC PATCH 2/7] crypto: api - Add crypto_tfm_ctx_dma
  2022-05-10 17:10                       ` Catalin Marinas
@ 2022-05-12  3:57                         ` Herbert Xu
  0 siblings, 0 replies; 10+ messages in thread
From: Herbert Xu @ 2022-05-12  3:57 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: Ard Biesheuvel, Will Deacon, Marc Zyngier, Arnd Bergmann,
	Greg Kroah-Hartman, Andrew Morton, Linus Torvalds,
	Linux Memory Management List, Linux ARM,
	Linux Kernel Mailing List, David S. Miller,
	Linux Crypto Mailing List

On Tue, May 10, 2022 at 06:10:34PM +0100, Catalin Marinas wrote:
>
> Is there a case where a driver needs the minimum alignment between
> ARCH_DMA_MINALIGN and crypto_tfm_alg_alignmask()+1? Maybe for platforms
> where ARCH_DMA_MINALIGN is 8 (fully coherent) but the device's bus
> master alignment requirements are higher.

Yes, for example on x86 aesni requires 16-byte alignment.

> My plan is to have ARCH_DMA_MINALIGN always defined but potentially
> higher than ARCH_KMALLOC_MINALIGN on specific architectures. I think
> crypto_tfm_ctx_dma() should use ARCH_KMALLOC_MINALIGN (and no #ifdefs)
> until I get my patches sorted and I'll replace it with ARCH_DMA_MINALIGN
> once it's defined globally (still no #ifdefs). Currently in mainline
> it's ARCH_KMALLOC_MINALIGN that gives the static DMA alignment.
> 
> With the explicit crypto_tfm_ctx_dma(), can CRYPTO_MINALIGN_ATTR be
> dropped entirely? This may be beneficial in reducing the structure size
> when no DMA is required.

We always need CRYPTO_MINALIGN to reflect the alignment that
kmalloc guarantees.  It is used to minimise the amount of extra
padding for users such as aesni.

This shouldn't have any impact on your plans, though: once the
drivers in question switch over to the DMA helpers, you can
safely lower ARCH_KMALLOC_MINALIGN.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

