linux-crypto.vger.kernel.org archive mirror

* [PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path
@ 2020-09-01  6:28 Herbert Xu
  2020-09-01  6:31 ` [PATCH 2/2] crypto: cbc - Remove cbc.h Herbert Xu
  2020-09-01  8:47 ` [PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path Ard Biesheuvel
  0 siblings, 2 replies; 7+ messages in thread
From: Herbert Xu @ 2020-09-01  6:28 UTC
  To: Ard Biesheuvel, Linux Crypto Mailing List

Since commit b56f5cbc7e08ec7d31c42fc41e5247677f20b143 ("crypto:
arm/aes-neonbs - resolve fallback cipher at runtime") the CBC
encryption path in aes-neonbs is now identical to that obtained
through the cbc template.  This means that it can simply call
the generic cbc template instead of doing its own thing.

This patch removes the custom encryption path and simply invokes
the generic cbc template.
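
The fallback skcipher_request is embedded in the caller's request
context (hence the crypto_skcipher_set_reqsize() call below), so
forwarding a request to the template needs no extra allocation.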

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index e6fd32919c81..b324c5500846 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -8,7 +8,6 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/aes.h>
-#include <crypto/cbc.h>
 #include <crypto/ctr.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
@@ -49,7 +48,7 @@ struct aesbs_ctx {
 
 struct aesbs_cbc_ctx {
 	struct aesbs_ctx	key;
-	struct crypto_cipher	*enc_tfm;
+	struct crypto_skcipher	*enc_tfm;
 };
 
 struct aesbs_xts_ctx {
@@ -140,19 +139,23 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 	kernel_neon_end();
 	memzero_explicit(&rk, sizeof(rk));
 
-	return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
+	return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
 }
 
-static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
+static int cbc_encrypt(struct skcipher_request *req)
 {
+	struct skcipher_request *subreq = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
-}
+	skcipher_request_set_tfm(subreq, ctx->enc_tfm);
+	skcipher_request_set_callback(subreq,
+				      skcipher_request_flags(req),
+				      NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+				   req->cryptlen, req->iv);
 
-static int cbc_encrypt(struct skcipher_request *req)
-{
-	return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
+	return crypto_skcipher_encrypt(subreq);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -183,20 +186,27 @@ static int cbc_decrypt(struct skcipher_request *req)
 	return err;
 }
 
-static int cbc_init(struct crypto_tfm *tfm)
+static int cbc_init(struct crypto_skcipher *tfm)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	unsigned int reqsize;
+
+	ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+	if (IS_ERR(ctx->enc_tfm))
+		return PTR_ERR(ctx->enc_tfm);
 
-	ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
+	reqsize = sizeof(struct skcipher_request);
+	reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
+	crypto_skcipher_set_reqsize(tfm, reqsize);
 
-	return PTR_ERR_OR_ZERO(ctx->enc_tfm);
+	return 0;
 }
 
-static void cbc_exit(struct crypto_tfm *tfm)
+static void cbc_exit(struct crypto_skcipher *tfm)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_free_cipher(ctx->enc_tfm);
+	crypto_free_skcipher(ctx->enc_tfm);
 }
 
 static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -432,8 +442,6 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctx),
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
-	.base.cra_init		= cbc_init,
-	.base.cra_exit		= cbc_exit,
 
 	.min_keysize		= AES_MIN_KEY_SIZE,
 	.max_keysize		= AES_MAX_KEY_SIZE,
@@ -442,6 +450,8 @@ static struct skcipher_alg aes_algs[] = { {
 	.setkey			= aesbs_cbc_setkey,
 	.encrypt		= cbc_encrypt,
 	.decrypt		= cbc_decrypt,
+	.init			= cbc_init,
+	.exit			= cbc_exit,
 }, {
 	.base.cra_name		= "__ctr(aes)",
 	.base.cra_driver_name	= "__ctr-aes-neonbs",
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

* [PATCH 2/2] crypto: cbc - Remove cbc.h
  2020-09-01  6:28 [PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path Herbert Xu
@ 2020-09-01  6:31 ` Herbert Xu
  2020-09-01  8:47 ` [PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path Ard Biesheuvel
  1 sibling, 0 replies; 7+ messages in thread
From: Herbert Xu @ 2020-09-01  6:31 UTC
  To: Ard Biesheuvel, Linux Crypto Mailing List

Now that crypto/cbc.h is only used by the generic cbc template,
we can merge it back into the CBC code.
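
For reference, the recurrences the template implements are

	C[i] = E(K, P[i] XOR C[i-1])	(encryption)
	P[i] = D(K, C[i]) XOR C[i-1]	(decryption, C[0] = IV)

which is also why the in-place decryption path below walks the
buffer from the last block backwards: decrypting block i in a
forward pass would destroy the ciphertext C[i] that block i+1
still needs as its chaining value.  The final ciphertext block
is saved up front since it becomes the IV for the next walk step.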

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

diff --git a/crypto/cbc.c b/crypto/cbc.c
index e6f6273a7d39..0d9509dff891 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -6,7 +6,6 @@
  */
 
 #include <crypto/algapi.h>
-#include <crypto/cbc.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -14,34 +13,157 @@
 #include <linux/log2.h>
 #include <linux/module.h>
 
-static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
-					  const u8 *src, u8 *dst)
+static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
+				      struct crypto_skcipher *skcipher)
 {
-	crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
+	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	struct crypto_cipher *cipher;
+	struct crypto_tfm *tfm;
+	u8 *iv = walk->iv;
+
+	cipher = skcipher_cipher_simple(skcipher);
+	tfm = crypto_cipher_tfm(cipher);
+	fn = crypto_cipher_alg(cipher)->cia_encrypt;
+
+	do {
+		crypto_xor(iv, src, bsize);
+		fn(tfm, dst, iv);
+		memcpy(iv, dst, bsize);
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	return nbytes;
+}
+
+static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
+				      struct crypto_skcipher *skcipher)
+{
+	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	struct crypto_cipher *cipher;
+	struct crypto_tfm *tfm;
+	u8 *iv = walk->iv;
+
+	cipher = skcipher_cipher_simple(skcipher);
+	tfm = crypto_cipher_tfm(cipher);
+	fn = crypto_cipher_alg(cipher)->cia_encrypt;
+
+	do {
+		crypto_xor(src, iv, bsize);
+		fn(tfm, src, src);
+		iv = src;
+
+		src += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
 }
 
 static int crypto_cbc_encrypt(struct skcipher_request *req)
 {
-	return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct skcipher_walk walk;
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	while (walk.nbytes) {
+		if (walk.src.virt.addr == walk.dst.virt.addr)
+			err = crypto_cbc_encrypt_inplace(&walk, skcipher);
+		else
+			err = crypto_cbc_encrypt_segment(&walk, skcipher);
+		err = skcipher_walk_done(&walk, err);
+	}
+
+	return err;
+}
+
+static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
+				      struct crypto_skcipher *skcipher)
+{
+	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	struct crypto_cipher *cipher;
+	struct crypto_tfm *tfm;
+	u8 *iv = walk->iv;
+
+	cipher = skcipher_cipher_simple(skcipher);
+	tfm = crypto_cipher_tfm(cipher);
+	fn = crypto_cipher_alg(cipher)->cia_decrypt;
+
+	do {
+		fn(tfm, dst, src);
+		crypto_xor(dst, iv, bsize);
+		iv = src;
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
 }
 
-static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
-					  const u8 *src, u8 *dst)
+static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
+				      struct crypto_skcipher *skcipher)
 {
-	crypto_cipher_decrypt_one(skcipher_cipher_simple(tfm), dst, src);
+	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 last_iv[MAX_CIPHER_BLOCKSIZE];
+	struct crypto_cipher *cipher;
+	struct crypto_tfm *tfm;
+
+	cipher = skcipher_cipher_simple(skcipher);
+	tfm = crypto_cipher_tfm(cipher);
+	fn = crypto_cipher_alg(cipher)->cia_decrypt;
+
+	/* Start of the last block. */
+	src += nbytes - (nbytes & (bsize - 1)) - bsize;
+	memcpy(last_iv, src, bsize);
+
+	for (;;) {
+		fn(tfm, src, src);
+		if ((nbytes -= bsize) < bsize)
+			break;
+		crypto_xor(src, src - bsize, bsize);
+		src -= bsize;
+	}
+
+	crypto_xor(src, walk->iv, bsize);
+	memcpy(walk->iv, last_iv, bsize);
+
+	return nbytes;
 }
 
 static int crypto_cbc_decrypt(struct skcipher_request *req)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct skcipher_walk walk;
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, false);
 
 	while (walk.nbytes) {
-		err = crypto_cbc_decrypt_blocks(&walk, tfm,
-						crypto_cbc_decrypt_one);
+		if (walk.src.virt.addr == walk.dst.virt.addr)
+			err = crypto_cbc_decrypt_inplace(&walk, skcipher);
+		else
+			err = crypto_cbc_decrypt_segment(&walk, skcipher);
 		err = skcipher_walk_done(&walk, err);
 	}
 
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
deleted file mode 100644
index 2b6422db42e2..000000000000
--- a/include/crypto/cbc.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CBC: Cipher Block Chaining mode
- *
- * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#ifndef _CRYPTO_CBC_H
-#define _CRYPTO_CBC_H
-
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-static inline int crypto_cbc_encrypt_segment(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	unsigned int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		crypto_xor(iv, src, bsize);
-		fn(tfm, iv, dst);
-		memcpy(iv, dst, bsize);
-
-		src += bsize;
-		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_inplace(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	unsigned int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		crypto_xor(src, iv, bsize);
-		fn(tfm, src, src);
-		iv = src;
-
-		src += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	memcpy(walk->iv, iv, bsize);
-
-	return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
-					  void (*fn)(struct crypto_skcipher *,
-						     const u8 *, u8 *))
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct skcipher_walk walk;
-	int err;
-
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while (walk.nbytes) {
-		if (walk.src.virt.addr == walk.dst.virt.addr)
-			err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
-		else
-			err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
-		err = skcipher_walk_done(&walk, err);
-	}
-
-	return err;
-}
-
-static inline int crypto_cbc_decrypt_segment(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	unsigned int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		fn(tfm, src, dst);
-		crypto_xor(dst, iv, bsize);
-		iv = src;
-
-		src += bsize;
-		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	memcpy(walk->iv, iv, bsize);
-
-	return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_inplace(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	unsigned int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 last_iv[MAX_CIPHER_BLOCKSIZE];
-
-	/* Start of the last block. */
-	src += nbytes - (nbytes & (bsize - 1)) - bsize;
-	memcpy(last_iv, src, bsize);
-
-	for (;;) {
-		fn(tfm, src, src);
-		if ((nbytes -= bsize) < bsize)
-			break;
-		crypto_xor(src, src - bsize, bsize);
-		src -= bsize;
-	}
-
-	crypto_xor(src, walk->iv, bsize);
-	memcpy(walk->iv, last_iv, bsize);
-
-	return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_blocks(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	if (walk->src.virt.addr == walk->dst.virt.addr)
-		return crypto_cbc_decrypt_inplace(walk, tfm, fn);
-	else
-		return crypto_cbc_decrypt_segment(walk, tfm, fn);
-}
-
-#endif	/* _CRYPTO_CBC_H */
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

* Re: [PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path
  2020-09-01  6:28 [PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path Herbert Xu
  2020-09-01  6:31 ` [PATCH 2/2] crypto: cbc - Remove cbc.h Herbert Xu
@ 2020-09-01  8:47 ` Ard Biesheuvel
  2020-09-01 11:45   ` Herbert Xu
  1 sibling, 1 reply; 7+ messages in thread
From: Ard Biesheuvel @ 2020-09-01  8:47 UTC
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Tue, 1 Sep 2020 at 09:28, Herbert Xu <herbert@gondor.apana.org.au> wrote:
>
> Since commit b56f5cbc7e08ec7d31c42fc41e5247677f20b143 ("crypto:
> arm/aes-neonbs - resolve fallback cipher at runtime") the CBC
> encryption path in aes-neonbs is now identical to that obtained
> through the cbc template.  This means that it can simply call
> the generic cbc template instead of doing its own thing.
>
> This patch removes the custom encryption path and simply invokes
> the generic cbc template.
>
> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
>

Aren't we ending up with a cbc(aes) implementation that allocates a
cbc(aes) implementation as a fallback?

> diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
> index e6fd32919c81..b324c5500846 100644
> --- a/arch/arm/crypto/aes-neonbs-glue.c
> +++ b/arch/arm/crypto/aes-neonbs-glue.c
> @@ -8,7 +8,6 @@
>  #include <asm/neon.h>
>  #include <asm/simd.h>
>  #include <crypto/aes.h>
> -#include <crypto/cbc.h>
>  #include <crypto/ctr.h>
>  #include <crypto/internal/simd.h>
>  #include <crypto/internal/skcipher.h>
> @@ -49,7 +48,7 @@ struct aesbs_ctx {
>
>  struct aesbs_cbc_ctx {
>         struct aesbs_ctx        key;
> -       struct crypto_cipher    *enc_tfm;
> +       struct crypto_skcipher  *enc_tfm;
>  };
>
>  struct aesbs_xts_ctx {
> @@ -140,19 +139,23 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
>         kernel_neon_end();
>         memzero_explicit(&rk, sizeof(rk));
>
> -       return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
> +       return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
>  }
>
> -static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
> +static int cbc_encrypt(struct skcipher_request *req)
>  {
> +       struct skcipher_request *subreq = skcipher_request_ctx(req);
> +       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
>         struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
>
> -       crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
> -}
> +       skcipher_request_set_tfm(subreq, ctx->enc_tfm);
> +       skcipher_request_set_callback(subreq,
> +                                     skcipher_request_flags(req),
> +                                     NULL, NULL);
> +       skcipher_request_set_crypt(subreq, req->src, req->dst,
> +                                  req->cryptlen, req->iv);
>
> -static int cbc_encrypt(struct skcipher_request *req)
> -{
> -       return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
> +       return crypto_skcipher_encrypt(subreq);
>  }
>
>  static int cbc_decrypt(struct skcipher_request *req)
> @@ -183,20 +186,27 @@ static int cbc_decrypt(struct skcipher_request *req)
>         return err;
>  }
>
> -static int cbc_init(struct crypto_tfm *tfm)
> +static int cbc_init(struct crypto_skcipher *tfm)
>  {
> -       struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
> +       struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
> +       unsigned int reqsize;
> +
> +       ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
> +       if (IS_ERR(ctx->enc_tfm))
> +               return PTR_ERR(ctx->enc_tfm);
>
> -       ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
> +       reqsize = sizeof(struct skcipher_request);
> +       reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
> +       crypto_skcipher_set_reqsize(tfm, reqsize);
>
> -       return PTR_ERR_OR_ZERO(ctx->enc_tfm);
> +       return 0;
>  }
>
> -static void cbc_exit(struct crypto_tfm *tfm)
> +static void cbc_exit(struct crypto_skcipher *tfm)
>  {
> -       struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
> +       struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
>
> -       crypto_free_cipher(ctx->enc_tfm);
> +       crypto_free_skcipher(ctx->enc_tfm);
>  }
>
>  static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
> @@ -432,8 +442,6 @@ static struct skcipher_alg aes_algs[] = { {
>         .base.cra_ctxsize       = sizeof(struct aesbs_cbc_ctx),
>         .base.cra_module        = THIS_MODULE,
>         .base.cra_flags         = CRYPTO_ALG_INTERNAL,
> -       .base.cra_init          = cbc_init,
> -       .base.cra_exit          = cbc_exit,
>
>         .min_keysize            = AES_MIN_KEY_SIZE,
>         .max_keysize            = AES_MAX_KEY_SIZE,
> @@ -442,6 +450,8 @@ static struct skcipher_alg aes_algs[] = { {
>         .setkey                 = aesbs_cbc_setkey,
>         .encrypt                = cbc_encrypt,
>         .decrypt                = cbc_decrypt,
> +       .init                   = cbc_init,
> +       .exit                   = cbc_exit,
>  }, {
>         .base.cra_name          = "__ctr(aes)",
>         .base.cra_driver_name   = "__ctr-aes-neonbs",
> --
> Email: Herbert Xu <herbert@gondor.apana.org.au>
> Home Page: http://gondor.apana.org.au/~herbert/
> PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

* Re: [PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path
  2020-09-01  8:47 ` [PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path Ard Biesheuvel
@ 2020-09-01 11:45   ` Herbert Xu
  2020-09-01 11:48     ` [v2 PATCH " Herbert Xu
  0 siblings, 1 reply; 7+ messages in thread
From: Herbert Xu @ 2020-09-01 11:45 UTC
  To: Ard Biesheuvel; +Cc: Linux Crypto Mailing List

On Tue, Sep 01, 2020 at 11:47:03AM +0300, Ard Biesheuvel wrote:
> On Tue, 1 Sep 2020 at 09:28, Herbert Xu <herbert@gondor.apana.org.au> wrote:
> >
> > Since commit b56f5cbc7e08ec7d31c42fc41e5247677f20b143 ("crypto:
> > arm/aes-neonbs - resolve fallback cipher at runtime") the CBC
> > encryption path in aes-neonbs is now identical to that obtained
> > through the cbc template.  This means that it can simply call
> > the generic cbc template instead of doing its own thing.
> >
> > This patch removes the custom encryption path and simply invokes
> > the generic cbc template.
> >
> > Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
> >
> 
> Aren't we ending up with a cbc(aes) implementation that allocates a
> cbc(aes) implementation as a fallback?

Good catch, I meant to make the fallback sync only.
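
That is, CRYPTO_ALG_ASYNC should be set in the allocation mask so
that only a synchronous cbc(aes) implementation can be selected,
ruling out this driver's own async instance:

	ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0,
					     CRYPTO_ALG_ASYNC);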

Thanks,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

* [v2 PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path
  2020-09-01 11:45   ` Herbert Xu
@ 2020-09-01 11:48     ` Herbert Xu
  2020-09-01 11:49       ` [v2 PATCH 2/2] crypto: cbc - Remove cbc.h Herbert Xu
  2020-09-01 12:15       ` [v2 PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path Ard Biesheuvel
  0 siblings, 2 replies; 7+ messages in thread
From: Herbert Xu @ 2020-09-01 11:48 UTC
  To: Ard Biesheuvel; +Cc: Linux Crypto Mailing List

Since commit b56f5cbc7e08ec7d31c42fc41e5247677f20b143 ("crypto:
arm/aes-neonbs - resolve fallback cipher at runtime") the CBC
encryption path in aes-neonbs is now identical to that obtained
through the cbc template.  This means that it can simply call
the generic cbc template instead of doing its own thing.

This patch removes the custom encryption path and simply invokes
the generic cbc template.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
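
v2: allocate the fallback with CRYPTO_ALG_ASYNC in the mask so
that a synchronous implementation is selected and the driver
cannot end up as its own fallback.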

diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index e6fd32919c81..b324c5500846 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -8,7 +8,6 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/aes.h>
-#include <crypto/cbc.h>
 #include <crypto/ctr.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
@@ -49,7 +48,7 @@ struct aesbs_ctx {
 
 struct aesbs_cbc_ctx {
 	struct aesbs_ctx	key;
-	struct crypto_cipher	*enc_tfm;
+	struct crypto_skcipher	*enc_tfm;
 };
 
 struct aesbs_xts_ctx {
@@ -140,19 +139,23 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 	kernel_neon_end();
 	memzero_explicit(&rk, sizeof(rk));
 
-	return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
+	return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
 }
 
-static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
+static int cbc_encrypt(struct skcipher_request *req)
 {
+	struct skcipher_request *subreq = skcipher_request_ctx(req);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
-}
+	skcipher_request_set_tfm(subreq, ctx->enc_tfm);
+	skcipher_request_set_callback(subreq,
+				      skcipher_request_flags(req),
+				      NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+				   req->cryptlen, req->iv);
 
-static int cbc_encrypt(struct skcipher_request *req)
-{
-	return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
+	return crypto_skcipher_encrypt(subreq);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -183,20 +186,27 @@ static int cbc_decrypt(struct skcipher_request *req)
 	return err;
 }
 
-static int cbc_init(struct crypto_tfm *tfm)
+static int cbc_init(struct crypto_skcipher *tfm)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+	unsigned int reqsize;
+
+	ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(ctx->enc_tfm))
+		return PTR_ERR(ctx->enc_tfm);
 
-	ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
+	reqsize = sizeof(struct skcipher_request);
+	reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
+	crypto_skcipher_set_reqsize(tfm, reqsize);
 
-	return PTR_ERR_OR_ZERO(ctx->enc_tfm);
+	return 0;
 }
 
-static void cbc_exit(struct crypto_tfm *tfm)
+static void cbc_exit(struct crypto_skcipher *tfm)
 {
-	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	crypto_free_cipher(ctx->enc_tfm);
+	crypto_free_skcipher(ctx->enc_tfm);
 }
 
 static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -432,8 +442,6 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctx),
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
-	.base.cra_init		= cbc_init,
-	.base.cra_exit		= cbc_exit,
 
 	.min_keysize		= AES_MIN_KEY_SIZE,
 	.max_keysize		= AES_MAX_KEY_SIZE,
@@ -442,6 +450,8 @@ static struct skcipher_alg aes_algs[] = { {
 	.setkey			= aesbs_cbc_setkey,
 	.encrypt		= cbc_encrypt,
 	.decrypt		= cbc_decrypt,
+	.init			= cbc_init,
+	.exit			= cbc_exit,
 }, {
 	.base.cra_name		= "__ctr(aes)",
 	.base.cra_driver_name	= "__ctr-aes-neonbs",
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

* [v2 PATCH 2/2] crypto: cbc - Remove cbc.h
  2020-09-01 11:48     ` [v2 PATCH " Herbert Xu
@ 2020-09-01 11:49       ` Herbert Xu
  2020-09-01 12:15       ` [v2 PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path Ard Biesheuvel
  1 sibling, 0 replies; 7+ messages in thread
From: Herbert Xu @ 2020-09-01 11:49 UTC
  To: Ard Biesheuvel; +Cc: Linux Crypto Mailing List

Now that crypto/cbc.h is only used by the generic cbc template,
we can merge it back into the CBC code.
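
Note that the formerly-inline helpers keep their calling
convention: each returns the residual byte count (less than one
block), which crypto_cbc_encrypt() and crypto_cbc_decrypt() then
pass to skcipher_walk_done().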

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

diff --git a/crypto/cbc.c b/crypto/cbc.c
index e6f6273a7d39..0d9509dff891 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -6,7 +6,6 @@
  */
 
 #include <crypto/algapi.h>
-#include <crypto/cbc.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -14,34 +13,157 @@
 #include <linux/log2.h>
 #include <linux/module.h>
 
-static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
-					  const u8 *src, u8 *dst)
+static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
+				      struct crypto_skcipher *skcipher)
 {
-	crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
+	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	struct crypto_cipher *cipher;
+	struct crypto_tfm *tfm;
+	u8 *iv = walk->iv;
+
+	cipher = skcipher_cipher_simple(skcipher);
+	tfm = crypto_cipher_tfm(cipher);
+	fn = crypto_cipher_alg(cipher)->cia_encrypt;
+
+	do {
+		crypto_xor(iv, src, bsize);
+		fn(tfm, dst, iv);
+		memcpy(iv, dst, bsize);
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	return nbytes;
+}
+
+static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
+				      struct crypto_skcipher *skcipher)
+{
+	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	struct crypto_cipher *cipher;
+	struct crypto_tfm *tfm;
+	u8 *iv = walk->iv;
+
+	cipher = skcipher_cipher_simple(skcipher);
+	tfm = crypto_cipher_tfm(cipher);
+	fn = crypto_cipher_alg(cipher)->cia_encrypt;
+
+	do {
+		crypto_xor(src, iv, bsize);
+		fn(tfm, src, src);
+		iv = src;
+
+		src += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
 }
 
 static int crypto_cbc_encrypt(struct skcipher_request *req)
 {
-	return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct skcipher_walk walk;
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	while (walk.nbytes) {
+		if (walk.src.virt.addr == walk.dst.virt.addr)
+			err = crypto_cbc_encrypt_inplace(&walk, skcipher);
+		else
+			err = crypto_cbc_encrypt_segment(&walk, skcipher);
+		err = skcipher_walk_done(&walk, err);
+	}
+
+	return err;
+}
+
+static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
+				      struct crypto_skcipher *skcipher)
+{
+	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 *dst = walk->dst.virt.addr;
+	struct crypto_cipher *cipher;
+	struct crypto_tfm *tfm;
+	u8 *iv = walk->iv;
+
+	cipher = skcipher_cipher_simple(skcipher);
+	tfm = crypto_cipher_tfm(cipher);
+	fn = crypto_cipher_alg(cipher)->cia_decrypt;
+
+	do {
+		fn(tfm, dst, src);
+		crypto_xor(dst, iv, bsize);
+		iv = src;
+
+		src += bsize;
+		dst += bsize;
+	} while ((nbytes -= bsize) >= bsize);
+
+	memcpy(walk->iv, iv, bsize);
+
+	return nbytes;
 }
 
-static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
-					  const u8 *src, u8 *dst)
+static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
+				      struct crypto_skcipher *skcipher)
 {
-	crypto_cipher_decrypt_one(skcipher_cipher_simple(tfm), dst, src);
+	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+	unsigned int nbytes = walk->nbytes;
+	u8 *src = walk->src.virt.addr;
+	u8 last_iv[MAX_CIPHER_BLOCKSIZE];
+	struct crypto_cipher *cipher;
+	struct crypto_tfm *tfm;
+
+	cipher = skcipher_cipher_simple(skcipher);
+	tfm = crypto_cipher_tfm(cipher);
+	fn = crypto_cipher_alg(cipher)->cia_decrypt;
+
+	/* Start of the last block. */
+	src += nbytes - (nbytes & (bsize - 1)) - bsize;
+	memcpy(last_iv, src, bsize);
+
+	for (;;) {
+		fn(tfm, src, src);
+		if ((nbytes -= bsize) < bsize)
+			break;
+		crypto_xor(src, src - bsize, bsize);
+		src -= bsize;
+	}
+
+	crypto_xor(src, walk->iv, bsize);
+	memcpy(walk->iv, last_iv, bsize);
+
+	return nbytes;
 }
 
 static int crypto_cbc_decrypt(struct skcipher_request *req)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct skcipher_walk walk;
 	int err;
 
 	err = skcipher_walk_virt(&walk, req, false);
 
 	while (walk.nbytes) {
-		err = crypto_cbc_decrypt_blocks(&walk, tfm,
-						crypto_cbc_decrypt_one);
+		if (walk.src.virt.addr == walk.dst.virt.addr)
+			err = crypto_cbc_decrypt_inplace(&walk, skcipher);
+		else
+			err = crypto_cbc_decrypt_segment(&walk, skcipher);
 		err = skcipher_walk_done(&walk, err);
 	}
 
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
deleted file mode 100644
index 2b6422db42e2..000000000000
--- a/include/crypto/cbc.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CBC: Cipher Block Chaining mode
- *
- * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#ifndef _CRYPTO_CBC_H
-#define _CRYPTO_CBC_H
-
-#include <crypto/internal/skcipher.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-static inline int crypto_cbc_encrypt_segment(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	unsigned int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		crypto_xor(iv, src, bsize);
-		fn(tfm, iv, dst);
-		memcpy(iv, dst, bsize);
-
-		src += bsize;
-		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_inplace(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	unsigned int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		crypto_xor(src, iv, bsize);
-		fn(tfm, src, src);
-		iv = src;
-
-		src += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	memcpy(walk->iv, iv, bsize);
-
-	return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
-					  void (*fn)(struct crypto_skcipher *,
-						     const u8 *, u8 *))
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct skcipher_walk walk;
-	int err;
-
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while (walk.nbytes) {
-		if (walk.src.virt.addr == walk.dst.virt.addr)
-			err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
-		else
-			err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
-		err = skcipher_walk_done(&walk, err);
-	}
-
-	return err;
-}
-
-static inline int crypto_cbc_decrypt_segment(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	unsigned int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	u8 *iv = walk->iv;
-
-	do {
-		fn(tfm, src, dst);
-		crypto_xor(dst, iv, bsize);
-		iv = src;
-
-		src += bsize;
-		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
-
-	memcpy(walk->iv, iv, bsize);
-
-	return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_inplace(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	unsigned int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 last_iv[MAX_CIPHER_BLOCKSIZE];
-
-	/* Start of the last block. */
-	src += nbytes - (nbytes & (bsize - 1)) - bsize;
-	memcpy(last_iv, src, bsize);
-
-	for (;;) {
-		fn(tfm, src, src);
-		if ((nbytes -= bsize) < bsize)
-			break;
-		crypto_xor(src, src - bsize, bsize);
-		src -= bsize;
-	}
-
-	crypto_xor(src, walk->iv, bsize);
-	memcpy(walk->iv, last_iv, bsize);
-
-	return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_blocks(
-	struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-	void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-	if (walk->src.virt.addr == walk->dst.virt.addr)
-		return crypto_cbc_decrypt_inplace(walk, tfm, fn);
-	else
-		return crypto_cbc_decrypt_segment(walk, tfm, fn);
-}
-
-#endif	/* _CRYPTO_CBC_H */
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

* Re: [v2 PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption path
  2020-09-01 11:48     ` [v2 PATCH " Herbert Xu
  2020-09-01 11:49       ` [v2 PATCH 2/2] crypto: cbc - Remove cbc.h Herbert Xu
@ 2020-09-01 12:15       ` Ard Biesheuvel
  1 sibling, 0 replies; 7+ messages in thread
From: Ard Biesheuvel @ 2020-09-01 12:15 UTC
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Tue, 1 Sep 2020 at 14:48, Herbert Xu <herbert@gondor.apana.org.au> wrote:
>
> Since commit b56f5cbc7e08ec7d31c42fc41e5247677f20b143 ("crypto:
> arm/aes-neonbs - resolve fallback cipher at runtime") the CBC
> encryption path in aes-neonbs is now identical to that obtained
> through the cbc template.  This means that it can simply call
> the generic cbc template instead of doing its own thing.
>
> This patch removes the custom encryption path and simply invokes
> the generic cbc template.
>
> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

For the series,

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>

>
> diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
> index e6fd32919c81..b324c5500846 100644
> --- a/arch/arm/crypto/aes-neonbs-glue.c
> +++ b/arch/arm/crypto/aes-neonbs-glue.c
> @@ -8,7 +8,6 @@
>  #include <asm/neon.h>
>  #include <asm/simd.h>
>  #include <crypto/aes.h>
> -#include <crypto/cbc.h>
>  #include <crypto/ctr.h>
>  #include <crypto/internal/simd.h>
>  #include <crypto/internal/skcipher.h>
> @@ -49,7 +48,7 @@ struct aesbs_ctx {
>
>  struct aesbs_cbc_ctx {
>         struct aesbs_ctx        key;
> -       struct crypto_cipher    *enc_tfm;
> +       struct crypto_skcipher  *enc_tfm;
>  };
>
>  struct aesbs_xts_ctx {
> @@ -140,19 +139,23 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
>         kernel_neon_end();
>         memzero_explicit(&rk, sizeof(rk));
>
> -       return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
> +       return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
>  }
>
> -static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
> +static int cbc_encrypt(struct skcipher_request *req)
>  {
> +       struct skcipher_request *subreq = skcipher_request_ctx(req);
> +       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
>         struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
>
> -       crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
> -}
> +       skcipher_request_set_tfm(subreq, ctx->enc_tfm);
> +       skcipher_request_set_callback(subreq,
> +                                     skcipher_request_flags(req),
> +                                     NULL, NULL);
> +       skcipher_request_set_crypt(subreq, req->src, req->dst,
> +                                  req->cryptlen, req->iv);
>
> -static int cbc_encrypt(struct skcipher_request *req)
> -{
> -       return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
> +       return crypto_skcipher_encrypt(subreq);
>  }
>
>  static int cbc_decrypt(struct skcipher_request *req)
> @@ -183,20 +186,27 @@ static int cbc_decrypt(struct skcipher_request *req)
>         return err;
>  }
>
> -static int cbc_init(struct crypto_tfm *tfm)
> +static int cbc_init(struct crypto_skcipher *tfm)
>  {
> -       struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
> +       struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
> +       unsigned int reqsize;
> +
> +       ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
> +       if (IS_ERR(ctx->enc_tfm))
> +               return PTR_ERR(ctx->enc_tfm);
>
> -       ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
> +       reqsize = sizeof(struct skcipher_request);
> +       reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
> +       crypto_skcipher_set_reqsize(tfm, reqsize);
>
> -       return PTR_ERR_OR_ZERO(ctx->enc_tfm);
> +       return 0;
>  }
>
> -static void cbc_exit(struct crypto_tfm *tfm)
> +static void cbc_exit(struct crypto_skcipher *tfm)
>  {
> -       struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
> +       struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
>
> -       crypto_free_cipher(ctx->enc_tfm);
> +       crypto_free_skcipher(ctx->enc_tfm);
>  }
>
>  static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
> @@ -432,8 +442,6 @@ static struct skcipher_alg aes_algs[] = { {
>         .base.cra_ctxsize       = sizeof(struct aesbs_cbc_ctx),
>         .base.cra_module        = THIS_MODULE,
>         .base.cra_flags         = CRYPTO_ALG_INTERNAL,
> -       .base.cra_init          = cbc_init,
> -       .base.cra_exit          = cbc_exit,
>
>         .min_keysize            = AES_MIN_KEY_SIZE,
>         .max_keysize            = AES_MAX_KEY_SIZE,
> @@ -442,6 +450,8 @@ static struct skcipher_alg aes_algs[] = { {
>         .setkey                 = aesbs_cbc_setkey,
>         .encrypt                = cbc_encrypt,
>         .decrypt                = cbc_decrypt,
> +       .init                   = cbc_init,
> +       .exit                   = cbc_exit,
>  }, {
>         .base.cra_name          = "__ctr(aes)",
>         .base.cra_driver_name   = "__ctr-aes-neonbs",
> --
> Email: Herbert Xu <herbert@gondor.apana.org.au>
> Home Page: http://gondor.apana.org.au/~herbert/
> PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
