From: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
To: Herbert Xu <herbert@gondor.apana.org.au>,
	"David S. Miller" <davem@davemloft.net>,
	Vitaly Chikunov <vt@altlinux.org>,
	Eric Biggers <ebiggers@google.com>,
	Eric Biggers <ebiggers@kernel.org>,
	Gilad Ben-Yossef <gilad@benyossef.com>,
	Ard Biesheuvel <ardb@kernel.org>,
	"Markku-Juhani O . Saarinen" <mjos@iki.fi>,
	Jussi Kivilinna <jussi.kivilinna@iki.fi>,
	Will Deacon <will@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	"H. Peter Anvin" <hpa@zytor.com>,
	x86@kernel.org, linux-crypto@vger.kernel.org,
	linux-kernel@vger.kernel.org,
	Jia Zhang <zhang.jia@linux.alibaba.com>,
	"YiLin . Li" <YiLin.Li@linux.alibaba.com>
Cc: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Subject: [PATCH 1/2] crypto: x86/sm4 - export reusable AESNI/AVX functions
Date: Wed, 18 Aug 2021 11:31:16 +0800	[thread overview]
Message-ID: <20210818033117.91717-2-tianjia.zhang@linux.alibaba.com> (raw)
In-Reply-To: <20210818033117.91717-1-tianjia.zhang@linux.alibaba.com>

Export the reusable functions of the SM4 AESNI/AVX implementation,
mainly the ECB, CBC, CFB and CTR mode helpers, so that the upcoming
SM4 AESNI/AVX2 implementation can build on them instead of duplicating
the code.

To make the exported helpers generic, the bulk-processing helpers
(CBC/CFB decryption and CTR) now take the bulk block size and the
underlying assembly routine as parameters rather than hard-coding the
8-block AVX functions, and the tail handling is adjusted accordingly.
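
As an example (the blk16 names below are only illustrative; the actual
AVX2 routines are introduced in the follow-up patch), AVX2 glue code
can reuse the exported CBC helper simply by passing its own bulk size
and assembly function:

	static int cbc_decrypt(struct skcipher_request *req)
	{
		/*
		 * Process 16 blocks per assembly call; blocks that do
		 * not fill a full bulk chunk are handled by the tail
		 * loop inside sm4_avx_cbc_decrypt().
		 */
		return sm4_avx_cbc_decrypt(req, SM4_BLOCK_SIZE * 16,
					   sm4_aesni_avx2_cbc_dec_blk16);
	}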

Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
---
 arch/x86/crypto/sm4-avx.h            | 24 ++++++++
 arch/x86/crypto/sm4_aesni_avx_glue.c | 92 ++++++++++++++++++----------
 2 files changed, 84 insertions(+), 32 deletions(-)
 create mode 100644 arch/x86/crypto/sm4-avx.h

diff --git a/arch/x86/crypto/sm4-avx.h b/arch/x86/crypto/sm4-avx.h
new file mode 100644
index 000000000000..1bceab7516aa
--- /dev/null
+++ b/arch/x86/crypto/sm4-avx.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef ASM_X86_SM4_AVX_H
+#define ASM_X86_SM4_AVX_H
+
+#include <linux/types.h>
+#include <crypto/sm4.h>
+
+typedef void (*sm4_crypt_func)(const u32 *rk, u8 *dst, const u8 *src, u8 *iv);
+
+int sm4_avx_ecb_encrypt(struct skcipher_request *req);
+int sm4_avx_ecb_decrypt(struct skcipher_request *req);
+
+int sm4_cbc_encrypt(struct skcipher_request *req);
+int sm4_avx_cbc_decrypt(struct skcipher_request *req,
+			unsigned int bsize, sm4_crypt_func func);
+
+int sm4_cfb_encrypt(struct skcipher_request *req);
+int sm4_avx_cfb_decrypt(struct skcipher_request *req,
+			unsigned int bsize, sm4_crypt_func func);
+
+int sm4_avx_ctr_crypt(struct skcipher_request *req,
+			unsigned int bsize, sm4_crypt_func func);
+
+#endif
diff --git a/arch/x86/crypto/sm4_aesni_avx_glue.c b/arch/x86/crypto/sm4_aesni_avx_glue.c
index c1f5728efd1d..7800f77d68ad 100644
--- a/arch/x86/crypto/sm4_aesni_avx_glue.c
+++ b/arch/x86/crypto/sm4_aesni_avx_glue.c
@@ -15,6 +15,7 @@
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/sm4.h>
+#include "sm4-avx.h"
 
 #define SM4_CRYPT8_BLOCK_SIZE	(SM4_BLOCK_SIZE * 8)
 
@@ -71,23 +72,25 @@ static int ecb_do_crypt(struct skcipher_request *req, const u32 *rkey)
 	return err;
 }
 
-static int ecb_encrypt(struct skcipher_request *req)
+int sm4_avx_ecb_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	return ecb_do_crypt(req, ctx->rkey_enc);
 }
+EXPORT_SYMBOL_GPL(sm4_avx_ecb_encrypt);
 
-static int ecb_decrypt(struct skcipher_request *req)
+int sm4_avx_ecb_decrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	return ecb_do_crypt(req, ctx->rkey_dec);
 }
+EXPORT_SYMBOL_GPL(sm4_avx_ecb_decrypt);
 
-static int cbc_encrypt(struct skcipher_request *req)
+int sm4_cbc_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -118,8 +121,10 @@ static int cbc_encrypt(struct skcipher_request *req)
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(sm4_cbc_encrypt);
 
-static int cbc_decrypt(struct skcipher_request *req)
+int sm4_avx_cbc_decrypt(struct skcipher_request *req,
+			unsigned int bsize, sm4_crypt_func func)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -135,15 +140,14 @@ static int cbc_decrypt(struct skcipher_request *req)
 
 		kernel_fpu_begin();
 
-		while (nbytes >= SM4_CRYPT8_BLOCK_SIZE) {
-			sm4_aesni_avx_cbc_dec_blk8(ctx->rkey_dec, dst,
-						src, walk.iv);
-			dst += SM4_CRYPT8_BLOCK_SIZE;
-			src += SM4_CRYPT8_BLOCK_SIZE;
-			nbytes -= SM4_CRYPT8_BLOCK_SIZE;
+		while (nbytes >= bsize) {
+			func(ctx->rkey_dec, dst, src, walk.iv);
+			dst += bsize;
+			src += bsize;
+			nbytes -= bsize;
 		}
 
-		if (nbytes >= SM4_BLOCK_SIZE) {
+		while (nbytes >= SM4_BLOCK_SIZE) {
 			u8 keystream[SM4_BLOCK_SIZE * 8];
 			u8 iv[SM4_BLOCK_SIZE];
 			unsigned int nblocks = min(nbytes >> 4, 8u);
@@ -165,6 +169,8 @@ static int cbc_decrypt(struct skcipher_request *req)
 			}
 			crypto_xor_cpy(dst, walk.iv, keystream, SM4_BLOCK_SIZE);
 			memcpy(walk.iv, iv, SM4_BLOCK_SIZE);
+			dst += nblocks * SM4_BLOCK_SIZE;
+			src += (nblocks + 1) * SM4_BLOCK_SIZE;
 			nbytes -= nblocks * SM4_BLOCK_SIZE;
 		}
 
@@ -174,8 +180,15 @@ static int cbc_decrypt(struct skcipher_request *req)
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(sm4_avx_cbc_decrypt);
+
+static int cbc_decrypt(struct skcipher_request *req)
+{
+	return sm4_avx_cbc_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
+				sm4_aesni_avx_cbc_dec_blk8);
+}
 
-static int cfb_encrypt(struct skcipher_request *req)
+int sm4_cfb_encrypt(struct skcipher_request *req)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -214,8 +227,10 @@ static int cfb_encrypt(struct skcipher_request *req)
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(sm4_cfb_encrypt);
 
-static int cfb_decrypt(struct skcipher_request *req)
+int sm4_avx_cfb_decrypt(struct skcipher_request *req,
+			unsigned int bsize, sm4_crypt_func func)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -231,15 +246,14 @@ static int cfb_decrypt(struct skcipher_request *req)
 
 		kernel_fpu_begin();
 
-		while (nbytes >= SM4_CRYPT8_BLOCK_SIZE) {
-			sm4_aesni_avx_cfb_dec_blk8(ctx->rkey_enc, dst,
-						src, walk.iv);
-			dst += SM4_CRYPT8_BLOCK_SIZE;
-			src += SM4_CRYPT8_BLOCK_SIZE;
-			nbytes -= SM4_CRYPT8_BLOCK_SIZE;
+		while (nbytes >= bsize) {
+			func(ctx->rkey_enc, dst, src, walk.iv);
+			dst += bsize;
+			src += bsize;
+			nbytes -= bsize;
 		}
 
-		if (nbytes >= SM4_BLOCK_SIZE) {
+		while (nbytes >= SM4_BLOCK_SIZE) {
 			u8 keystream[SM4_BLOCK_SIZE * 8];
 			unsigned int nblocks = min(nbytes >> 4, 8u);
 
@@ -276,8 +290,16 @@ static int cfb_decrypt(struct skcipher_request *req)
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(sm4_avx_cfb_decrypt);
 
-static int ctr_crypt(struct skcipher_request *req)
+static int cfb_decrypt(struct skcipher_request *req)
+{
+	return sm4_avx_cfb_decrypt(req, SM4_CRYPT8_BLOCK_SIZE,
+				sm4_aesni_avx_cfb_dec_blk8);
+}
+
+int sm4_avx_ctr_crypt(struct skcipher_request *req,
+			unsigned int bsize, sm4_crypt_func func)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm);
@@ -293,15 +315,14 @@ static int ctr_crypt(struct skcipher_request *req)
 
 		kernel_fpu_begin();
 
-		while (nbytes >= SM4_CRYPT8_BLOCK_SIZE) {
-			sm4_aesni_avx_ctr_enc_blk8(ctx->rkey_enc, dst,
-						src, walk.iv);
-			dst += SM4_CRYPT8_BLOCK_SIZE;
-			src += SM4_CRYPT8_BLOCK_SIZE;
-			nbytes -= SM4_CRYPT8_BLOCK_SIZE;
+		while (nbytes >= bsize) {
+			func(ctx->rkey_enc, dst, src, walk.iv);
+			dst += bsize;
+			src += bsize;
+			nbytes -= bsize;
 		}
 
-		if (nbytes >= SM4_BLOCK_SIZE) {
+		while (nbytes >= SM4_BLOCK_SIZE) {
 			u8 keystream[SM4_BLOCK_SIZE * 8];
 			unsigned int nblocks = min(nbytes >> 4, 8u);
 			int i;
@@ -343,6 +364,13 @@ static int ctr_crypt(struct skcipher_request *req)
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(sm4_avx_ctr_crypt);
+
+static int ctr_crypt(struct skcipher_request *req)
+{
+	return sm4_avx_ctr_crypt(req, SM4_CRYPT8_BLOCK_SIZE,
+				sm4_aesni_avx_ctr_enc_blk8);
+}
 
 static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
 	{
@@ -359,8 +387,8 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
 		.max_keysize	= SM4_KEY_SIZE,
 		.walksize	= 8 * SM4_BLOCK_SIZE,
 		.setkey		= sm4_skcipher_setkey,
-		.encrypt	= ecb_encrypt,
-		.decrypt	= ecb_decrypt,
+		.encrypt	= sm4_avx_ecb_encrypt,
+		.decrypt	= sm4_avx_ecb_decrypt,
 	}, {
 		.base = {
 			.cra_name		= "__cbc(sm4)",
@@ -376,7 +404,7 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
 		.ivsize		= SM4_BLOCK_SIZE,
 		.walksize	= 8 * SM4_BLOCK_SIZE,
 		.setkey		= sm4_skcipher_setkey,
-		.encrypt	= cbc_encrypt,
+		.encrypt	= sm4_cbc_encrypt,
 		.decrypt	= cbc_decrypt,
 	}, {
 		.base = {
@@ -394,7 +422,7 @@ static struct skcipher_alg sm4_aesni_avx_skciphers[] = {
 		.chunksize	= SM4_BLOCK_SIZE,
 		.walksize	= 8 * SM4_BLOCK_SIZE,
 		.setkey		= sm4_skcipher_setkey,
-		.encrypt	= cfb_encrypt,
+		.encrypt	= sm4_cfb_encrypt,
 		.decrypt	= cfb_decrypt,
 	}, {
 		.base = {
-- 
2.19.1.3.ge56e4f7

