From: Ard Biesheuvel <ardb@kernel.org>
To: linux-crypto@vger.kernel.org
Cc: herbert@gondor.apana.org.au, Ard Biesheuvel <ardb@kernel.org>
Subject: [PATCH 1/5] crypto: arm64/ghash - drop PMULL based shash
Date: Mon, 29 Jun 2020 09:39:21 +0200
Message-ID: <20200629073925.127538-2-ardb@kernel.org>
In-Reply-To: <20200629073925.127538-1-ardb@kernel.org>

There are two ways to implement SIMD accelerated GCM on arm64:
- using the PMULL instructions for carryless 64x64->128 bit multiplication,
  in which case the architecture guarantees that the AES instructions are
  available as well, so we can use the AEAD implementation that combines
  both;
- using the PMULL instructions for carryless 8x8->16 bit polynomial
  multiplication, which is implemented as a shash and can be combined with
  any ctr(aes) implementation by the generic GCM AEAD template driver (a
  sketch of this composition follows below).
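
For illustration, a minimal sketch (not part of this patch) of how a
kernel user picks up the second composition; gcm_alloc_example() is a
hypothetical helper, and "gcm(aes)" is simply the generic algorithm
name, resolved by the crypto API to the highest priority candidate:

  #include <crypto/aead.h>

  /*
   * Minimal sketch: users never name the ghash driver directly; they
   * request "gcm(aes)" and let the crypto API pick the best candidate,
   * i.e. the combined PMULL/AES AEAD when the Crypto Extensions are
   * present, or the generic GCM template wrapped around ctr(aes) and
   * the ghash-neon shash otherwise. The caller checks IS_ERR() on the
   * returned transform.
   */
  static struct crypto_aead *gcm_alloc_example(void)
  {
          return crypto_alloc_aead("gcm(aes)", 0, 0);
  }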

So let's drop the 64x64->128 bit shash driver, which is never needed for
GCM and is not suitable for use anywhere else.
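
As a sanity check (again hypothetical, and assuming the generic ghash
driver is built in), users of the bare "ghash" shash are unaffected:
allocating it by its generic name still succeeds, resolving to
ghash-neon on ASIMD-only systems, and to ghash-generic on systems where
this module now registers only the AEAD:

  #include <crypto/hash.h>
  #include <linux/err.h>

  /*
   * Hypothetical check: allocate the bare "ghash" shash by its generic
   * name. After this patch it can no longer resolve to "ghash-ce", but
   * the allocation itself still succeeds via ghash-neon or
   * ghash-generic.
   */
  static int ghash_lookup_example(void)
  {
          struct crypto_shash *tfm = crypto_alloc_shash("ghash", 0, 0);

          if (IS_ERR(tfm))
                  return PTR_ERR(tfm);
          crypto_free_shash(tfm);
          return 0;
  }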

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/crypto/ghash-ce-glue.c | 90 +++-----------------
 1 file changed, 12 insertions(+), 78 deletions(-)

diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 22831d3b7f62..be63d8b5152c 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -113,12 +113,8 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
 /* avoid hogging the CPU for too long */
 #define MAX_BLOCKS	(SZ_64K / GHASH_BLOCK_SIZE)
 
-static int __ghash_update(struct shash_desc *desc, const u8 *src,
-			  unsigned int len,
-			  void (*simd_update)(int blocks, u64 dg[],
-					      const char *src,
-					      struct ghash_key const *k,
-					      const char *head))
+static int ghash_update(struct shash_desc *desc, const u8 *src,
+			unsigned int len)
 {
 	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
 	unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
@@ -145,7 +141,7 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src,
 
 			ghash_do_update(chunk, ctx->digest, src, key,
 					partial ? ctx->buf : NULL,
-					simd_update);
+					pmull_ghash_update_p8);
 
 			blocks -= chunk;
 			src += chunk * GHASH_BLOCK_SIZE;
@@ -157,19 +153,7 @@ static int __ghash_update(struct shash_desc *desc, const u8 *src,
 	return 0;
 }
 
-static int ghash_update_p8(struct shash_desc *desc, const u8 *src,
-			   unsigned int len)
-{
-	return __ghash_update(desc, src, len, pmull_ghash_update_p8);
-}
-
-static int ghash_update_p64(struct shash_desc *desc, const u8 *src,
-			    unsigned int len)
-{
-	return __ghash_update(desc, src, len, pmull_ghash_update_p64);
-}
-
-static int ghash_final_p8(struct shash_desc *desc, u8 *dst)
+static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
 	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
 	unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
@@ -189,26 +173,6 @@ static int ghash_final_p8(struct shash_desc *desc, u8 *dst)
 	return 0;
 }
 
-static int ghash_final_p64(struct shash_desc *desc, u8 *dst)
-{
-	struct ghash_desc_ctx *ctx = shash_desc_ctx(desc);
-	unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;
-
-	if (partial) {
-		struct ghash_key *key = crypto_shash_ctx(desc->tfm);
-
-		memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
-
-		ghash_do_update(1, ctx->digest, ctx->buf, key, NULL,
-				pmull_ghash_update_p64);
-	}
-	put_unaligned_be64(ctx->digest[1], dst);
-	put_unaligned_be64(ctx->digest[0], dst + 8);
-
-	*ctx = (struct ghash_desc_ctx){};
-	return 0;
-}
-
 static void ghash_reflect(u64 h[], const be128 *k)
 {
 	u64 carry = be64_to_cpu(k->a) & BIT(63) ? 1 : 0;
@@ -254,7 +218,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
 	return __ghash_setkey(key, inkey, keylen);
 }
 
-static struct shash_alg ghash_alg[] = {{
+static struct shash_alg ghash_alg = {
 	.base.cra_name		= "ghash",
 	.base.cra_driver_name	= "ghash-neon",
 	.base.cra_priority	= 150,
@@ -264,25 +228,11 @@ static struct shash_alg ghash_alg[] = {{
 
 	.digestsize		= GHASH_DIGEST_SIZE,
 	.init			= ghash_init,
-	.update			= ghash_update_p8,
-	.final			= ghash_final_p8,
-	.setkey			= ghash_setkey,
-	.descsize		= sizeof(struct ghash_desc_ctx),
-}, {
-	.base.cra_name		= "ghash",
-	.base.cra_driver_name	= "ghash-ce",
-	.base.cra_priority	= 200,
-	.base.cra_blocksize	= GHASH_BLOCK_SIZE,
-	.base.cra_ctxsize	= sizeof(struct ghash_key),
-	.base.cra_module	= THIS_MODULE,
-
-	.digestsize		= GHASH_DIGEST_SIZE,
-	.init			= ghash_init,
-	.update			= ghash_update_p64,
-	.final			= ghash_final_p64,
+	.update			= ghash_update,
+	.final			= ghash_final,
 	.setkey			= ghash_setkey,
 	.descsize		= sizeof(struct ghash_desc_ctx),
-}};
+};
 
 static int num_rounds(struct crypto_aes_ctx *ctx)
 {
@@ -641,37 +591,21 @@ static struct aead_alg gcm_aes_alg = {
 
 static int __init ghash_ce_mod_init(void)
 {
-	int ret;
-
 	if (!cpu_have_named_feature(ASIMD))
 		return -ENODEV;
 
 	if (cpu_have_named_feature(PMULL))
-		ret = crypto_register_shashes(ghash_alg,
-					      ARRAY_SIZE(ghash_alg));
-	else
-		/* only register the first array element */
-		ret = crypto_register_shash(ghash_alg);
+		return crypto_register_aead(&gcm_aes_alg);
 
-	if (ret)
-		return ret;
-
-	if (cpu_have_named_feature(PMULL)) {
-		ret = crypto_register_aead(&gcm_aes_alg);
-		if (ret)
-			crypto_unregister_shashes(ghash_alg,
-						  ARRAY_SIZE(ghash_alg));
-	}
-	return ret;
+	return crypto_register_shash(&ghash_alg);
 }
 
 static void __exit ghash_ce_mod_exit(void)
 {
 	if (cpu_have_named_feature(PMULL))
-		crypto_unregister_shashes(ghash_alg, ARRAY_SIZE(ghash_alg));
+		crypto_unregister_aead(&gcm_aes_alg);
 	else
-		crypto_unregister_shash(ghash_alg);
-	crypto_unregister_aead(&gcm_aes_alg);
+		crypto_unregister_shash(&ghash_alg);
 }
 
 static const struct cpu_feature ghash_cpu_feature[] = {
-- 
2.20.1

