linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/4] crypto: hisilicon - supports new aeads for new hardware
@ 2021-06-04  1:31 Kai Ye
  2021-06-04  1:31 ` [PATCH 1/4] crypto: hisilicon/sec - add new algorithm mode for AEAD Kai Ye
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: Kai Ye @ 2021-06-04  1:31 UTC
  To: herbert; +Cc: linux-crypto, linux-kernel, wangzhou1, yekai13

This series adds new AEAD algorithms to the driver and adds fallback
tfm support, modifying the driver as needed. The crypto fuzz tests pass.

Kai Ye (4):
  crypto: hisilicon/sec - add new algorithm mode for AEAD
  crypto: hisilicon/sec - add fallback tfm supporting for aeads
  crypto: hisilicon/sec - add hardware integrity check value process
  crypto: hisilicon/sec - modify the SEC request structure

 drivers/crypto/hisilicon/sec2/sec.h        |  13 +-
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 575 ++++++++++++++++++++++++-----
 drivers/crypto/hisilicon/sec2/sec_crypto.h |   9 +
 3 files changed, 507 insertions(+), 90 deletions(-)

-- 
2.8.1



* [PATCH 1/4] crypto: hisilicon/sec - add new algorithm mode for AEAD
  2021-06-04  1:31 [PATCH 0/4] crypto: hisilicon - supports new aeads for new hardware Kai Ye
@ 2021-06-04  1:31 ` Kai Ye
  2021-06-04  1:31 ` [PATCH 2/4] crypto: hisilicon/sec - add fallback tfm supporting for aeads Kai Ye
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Kai Ye @ 2021-06-04  1:31 UTC
  To: herbert; +Cc: linux-crypto, linux-kernel, wangzhou1, yekai13

Add new algorithm mode for AEAD:
CCM(AES), GCM(AES), CCM(SM4), GCM(SM4).

Signed-off-by: Kai Ye <yekai13@huawei.com>
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
---
 drivers/crypto/hisilicon/sec2/sec.h        |   4 +
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 367 ++++++++++++++++++++++++++---
 drivers/crypto/hisilicon/sec2/sec_crypto.h |   8 +
 3 files changed, 345 insertions(+), 34 deletions(-)

diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 935d8d9..2960fae 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -13,6 +13,8 @@ struct sec_alg_res {
 	dma_addr_t pbuf_dma;
 	u8 *c_ivin;
 	dma_addr_t c_ivin_dma;
+	u8 *a_ivin;
+	dma_addr_t a_ivin_dma;
 	u8 *out_mac;
 	dma_addr_t out_mac_dma;
 };
@@ -33,6 +35,8 @@ struct sec_cipher_req {
 struct sec_aead_req {
 	u8 *out_mac;
 	dma_addr_t out_mac_dma;
+	u8 *a_ivin;
+	dma_addr_t a_ivin_dma;
 	struct aead_request *aead_req;
 };
 
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 5926b64..f2ab9ff 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -79,10 +79,24 @@
 #define SEC_SQE_CFLAG		2
 #define SEC_SQE_AEAD_FLAG	3
 #define SEC_SQE_DONE		0x1
+#define MIN_MAC_LEN		4
+#define MAC_LEN_MASK		0x1U
 #define MAX_INPUT_DATA_LEN	0xFFFE00
 #define BITS_MASK		0xFF
 #define BYTE_BITS		0x8
 #define SEC_XTS_NAME_SZ		0x3
+#define IV_CM_CAL_NUM		2
+#define IV_CL_MASK		0x7
+#define IV_CL_MIN		2
+#define IV_CL_MID		4
+#define IV_CL_MAX		8
+#define IV_FLAGS_OFFSET	0x6
+#define IV_CM_OFFSET		0x3
+#define IV_LAST_BYTE1		1
+#define IV_LAST_BYTE2		2
+#define IV_LAST_BYTE_MASK	0xFF
+#define IV_CTR_INIT		0x1
+#define IV_BYTE_OFFSET		0x8
 
 /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
 static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
@@ -316,6 +330,30 @@ static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
 				  res->c_ivin, res->c_ivin_dma);
 }
 
+static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
+{
+	int i;
+
+	res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+					 &res->a_ivin_dma, GFP_KERNEL);
+	if (!res->a_ivin)
+		return -ENOMEM;
+
+	for (i = 1; i < QM_Q_DEPTH; i++) {
+		res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
+		res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
+	}
+
+	return 0;
+}
+
+static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
+{
+	if (res->a_ivin)
+		dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+				  res->a_ivin, res->a_ivin_dma);
+}
+
 static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
 {
 	int i;
@@ -398,9 +436,13 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
 		return ret;
 
 	if (ctx->alg_type == SEC_AEAD) {
+		ret = sec_alloc_aiv_resource(dev, res);
+		if (ret)
+			goto alloc_aiv_fail;
+
 		ret = sec_alloc_mac_resource(dev, res);
 		if (ret)
-			goto alloc_fail;
+			goto alloc_mac_fail;
 	}
 	if (ctx->pbuf_supported) {
 		ret = sec_alloc_pbuf_resource(dev, res);
@@ -415,7 +457,10 @@ static int sec_alg_resource_alloc(struct sec_ctx *ctx,
 alloc_pbuf_fail:
 	if (ctx->alg_type == SEC_AEAD)
 		sec_free_mac_resource(dev, qp_ctx->res);
-alloc_fail:
+alloc_mac_fail:
+	if (ctx->alg_type == SEC_AEAD)
+		sec_free_aiv_resource(dev, res);
+alloc_aiv_fail:
 	sec_free_civ_resource(dev, res);
 	return ret;
 }
@@ -871,6 +916,8 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
 		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
 		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
 		if (ctx->alg_type == SEC_AEAD) {
+			a_req->a_ivin = res->a_ivin;
+			a_req->a_ivin_dma = res->a_ivin_dma;
 			a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
 			a_req->out_mac_dma = res->pbuf_dma +
 					SEC_PBUF_MAC_OFFSET;
@@ -881,6 +928,8 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
 	c_req->c_ivin = res->c_ivin;
 	c_req->c_ivin_dma = res->c_ivin_dma;
 	if (ctx->alg_type == SEC_AEAD) {
+		a_req->a_ivin = res->a_ivin;
+		a_req->a_ivin_dma = res->a_ivin_dma;
 		a_req->out_mac = res->out_mac;
 		a_req->out_mac_dma = res->out_mac_dma;
 	}
@@ -1012,6 +1061,17 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	ctx->a_ctx.mac_len = mac_len;
 	c_ctx->c_mode = c_mode;
 
+	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
+		ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
+		if (ret) {
+			dev_err(dev, "set sec aes ccm cipher key err!\n");
+			return ret;
+		}
+		memcpy(c_ctx->c_key, key, keylen);
+
+		return 0;
+	}
+
 	if (crypto_authenc_extractkeys(&keys, key, keylen))
 		goto bad_key;
 
@@ -1054,6 +1114,14 @@ GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
 			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
 GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
 			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
+			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
+			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
+			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
+GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
+			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
 
 static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
 {
@@ -1295,12 +1363,125 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 	sk_req->base.complete(&sk_req->base, err);
 }
 
-static void sec_aead_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
+static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
 {
 	struct aead_request *aead_req = req->aead_req.aead_req;
 	struct sec_cipher_req *c_req = &req->c_req;
+	struct sec_aead_req *a_req = &req->aead_req;
+	size_t authsize = ctx->a_ctx.mac_len;
+	u32 data_size = aead_req->cryptlen;
+	u8 flage = 0;
+	u8 cm, cl;
+
+	/* the specification has been checked in aead_iv_demension_check() */
+	cl = c_req->c_ivin[0] + 1;
+	c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
+	memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
+	c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
+
+	/* the low 3 bits are L' */
+	flage |= c_req->c_ivin[0] & IV_CL_MASK;
+
+	/* M' occupies bits 3~5, the Adata flag is bit 6 */
+	cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
+	flage |= cm << IV_CM_OFFSET;
+	if (aead_req->assoclen)
+		flage |= 0x01 << IV_FLAGS_OFFSET;
+
+	memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
+	a_req->a_ivin[0] = flage;
+
+	/*
+	 * the last 32 bits are the counter's initial number, but the
+	 * nonce uses the first 16 bits; the tail 16 bits are filled
+	 * with the cipher text length
+	 */
+	if (!c_req->encrypt)
+		data_size = aead_req->cryptlen - authsize;
+
+	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
+			data_size & IV_LAST_BYTE_MASK;
+	data_size >>= IV_BYTE_OFFSET;
+	a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
+			data_size & IV_LAST_BYTE_MASK;
+}
+
+static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
+{
+	struct aead_request *aead_req = req->aead_req.aead_req;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+	size_t authsize = crypto_aead_authsize(tfm);
+	struct sec_cipher_req *c_req = &req->c_req;
+	struct sec_aead_req *a_req = &req->aead_req;
 
 	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
+
+	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
+		/*
+		 * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
+		 * the  counter must set to 0x01
+		 */
+		ctx->a_ctx.mac_len = authsize;
+		/* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
+		set_aead_auth_iv(ctx, req);
+	}
+
+	/* GCM 12Byte Cipher_IV == Auth_IV */
+	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
+		ctx->a_ctx.mac_len = authsize;
+		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
+	}
+}
+
+static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
+				 struct sec_req *req, struct sec_sqe *sec_sqe)
+{
+	struct sec_aead_req *a_req = &req->aead_req;
+	struct aead_request *aq = a_req->aead_req;
+
+	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
+
+	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
+	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
+	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;
+
+	if (dir)
+		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
+	else
+		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
+
+	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
+	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
+	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+
+	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
+}
+
+static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
+				    struct sec_req *req, struct sec_sqe3 *sqe3)
+{
+	struct sec_aead_req *a_req = &req->aead_req;
+	struct aead_request *aq = a_req->aead_req;
+
+	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
+	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
+
+	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
+	sqe3->a_key_addr = sqe3->c_key_addr;
+	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
+	sqe3->auth_mac_key |= SEC_NO_AUTH;
+
+	if (dir)
+		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
+	else
+		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
+
+	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
+	sqe3->auth_src_offset = cpu_to_le16(0x0);
+	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
+	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
 }
 
 static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
@@ -1348,7 +1529,11 @@ static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
 		return ret;
 	}
 
-	sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
+	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
+		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
+	else
+		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);
 
 	return 0;
 }
@@ -1399,7 +1584,13 @@ static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
 		return ret;
 	}
 
-	sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt, req, sec_sqe3);
+	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
+	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
+		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
+					req, sec_sqe3);
+	else
+		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
+				       req, sec_sqe3);
 
 	return 0;
 }
@@ -1531,7 +1722,7 @@ static const struct sec_req_op sec_skcipher_req_ops = {
 static const struct sec_req_op sec_aead_req_ops = {
 	.buf_map	= sec_aead_sgl_map,
 	.buf_unmap	= sec_aead_sgl_unmap,
-	.do_transfer	= sec_aead_copy_iv,
+	.do_transfer	= sec_aead_set_iv,
 	.bd_fill	= sec_aead_bd_fill,
 	.bd_send	= sec_bd_send,
 	.callback	= sec_aead_callback,
@@ -1551,7 +1742,7 @@ static const struct sec_req_op sec_skcipher_req_ops_v3 = {
 static const struct sec_req_op sec_aead_req_ops_v3 = {
 	.buf_map	= sec_aead_sgl_map,
 	.buf_unmap	= sec_aead_sgl_unmap,
-	.do_transfer	= sec_aead_copy_iv,
+	.do_transfer	= sec_aead_set_iv,
 	.bd_fill	= sec_aead_bd_fill_v3,
 	.bd_send	= sec_bd_send,
 	.callback	= sec_aead_callback,
@@ -1591,8 +1782,9 @@ static int sec_aead_init(struct crypto_aead *tfm)
 	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
 	ctx->alg_type = SEC_AEAD;
 	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
-	if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
-		dev_err(ctx->dev, "get error aead iv size!\n");
+	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
+	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+		pr_err("get error aead iv size!\n");
 		return -EINVAL;
 	}
 
@@ -1663,6 +1855,25 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
 	sec_aead_exit(tfm);
 }
 
+static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
+{
+	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+	int ret;
+
+	ret = sec_aead_init(tfm);
+	if (ret) {
+		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
+{
+	sec_aead_exit(tfm);
+}
+
 static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
 {
 	return sec_aead_ctx_init(tfm, "sha1");
@@ -1903,41 +2114,100 @@ static struct skcipher_alg sec_skciphers_v3[] = {
 			 SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE)
 };
 
-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int aead_iv_demension_check(struct aead_request *aead_req)
+{
+	u8 cl;
+
+	cl = aead_req->iv[0] + 1;
+	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
+		return -EINVAL;
+
+	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
+		return -EOVERFLOW;
+
+	return 0;
+}
+
+static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
 {
 	struct aead_request *req = sreq->aead_req.aead_req;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	size_t authsize = crypto_aead_authsize(tfm);
+	u8 c_mode = ctx->c_ctx.c_mode;
 	struct device *dev = ctx->dev;
-	u8 c_alg = ctx->c_ctx.c_alg;
+	int ret;
 
-	if (unlikely(!req->src || !req->dst || !req->cryptlen ||
-		req->assoclen > SEC_MAX_AAD_LEN)) {
-		dev_err(dev, "aead input param error!\n");
+	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
+	    req->assoclen > SEC_MAX_AAD_LEN)) {
+		dev_err(dev, "aead input spec error!\n");
 		return -EINVAL;
 	}
 
-	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
-		SEC_PBUF_SZ)
-		sreq->use_pbuf = true;
-	else
-		sreq->use_pbuf = false;
-
-	/* Support AES only */
-	if (unlikely(c_alg != SEC_CALG_AES)) {
-		dev_err(dev, "aead crypto alg error!\n");
+	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
+	   (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
+		authsize & MAC_LEN_MASK)))) {
+		dev_err(dev, "aead input mac length error!\n");
 		return -EINVAL;
 	}
+
+	if (c_mode == SEC_CMODE_CCM) {
+		ret = aead_iv_demension_check(req);
+		if (ret) {
+			dev_err(dev, "aead input iv param error!\n");
+			return ret;
+		}
+	}
+
 	if (sreq->c_req.encrypt)
 		sreq->c_req.c_len = req->cryptlen;
 	else
 		sreq->c_req.c_len = req->cryptlen - authsize;
+	if (c_mode == SEC_CMODE_CBC) {
+		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+			dev_err(dev, "aead crypto length error!\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
 
-	if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
-		dev_err(dev, "aead crypto length error!\n");
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+	struct aead_request *req = sreq->aead_req.aead_req;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	size_t authsize = crypto_aead_authsize(tfm);
+	struct device *dev = ctx->dev;
+	u8 c_alg = ctx->c_ctx.c_alg;
+
+	if (unlikely(!req->src || !req->dst)) {
+		dev_err(dev, "aead input param error!\n");
 		return -EINVAL;
 	}
 
+	if (ctx->sec->qm.ver == QM_HW_V2) {
+		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
+		    req->cryptlen <= authsize))) {
+			dev_err(dev, "Kunpeng920 not support 0 length!\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Support AES or SM4 */
+	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
+		dev_err(dev, "aead crypto alg error!\n");
+		return -EINVAL;
+	}
+
+	if (unlikely(sec_aead_spec_check(ctx, sreq)))
+		return -EINVAL;
+
+	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
+		SEC_PBUF_SZ)
+		sreq->use_pbuf = true;
+	else
+		sreq->use_pbuf = false;
+
 	return 0;
 }
 
@@ -1970,7 +2240,7 @@ static int sec_aead_decrypt(struct aead_request *a_req)
 	return sec_aead_crypto(a_req, false);
 }
 
-#define SEC_AEAD_GEN_ALG(sec_cra_name, sec_set_key, ctx_init,\
+#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
 			 ctx_exit, blk_size, iv_size, max_authsize)\
 {\
 	.base = {\
@@ -1991,22 +2261,39 @@ static int sec_aead_decrypt(struct aead_request *a_req)
 	.maxauthsize = max_authsize,\
 }
 
-#define SEC_AEAD_ALG(algname, keyfunc, aead_init, blksize, ivsize, authsize)\
-	SEC_AEAD_GEN_ALG(algname, keyfunc, aead_init,\
-			sec_aead_ctx_exit, blksize, ivsize, authsize)
-
 static struct aead_alg sec_aeads[] = {
 	SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))",
 		     sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init,
-		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
+		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
+		     AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
 
 	SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))",
 		     sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init,
-		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
+		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
+		     AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
 
 	SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))",
 		     sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init,
-		     AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+		     sec_aead_ctx_exit, AES_BLOCK_SIZE,
+		     AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
+
+	SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
+		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+
+	SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
+		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
+};
+
+static struct aead_alg sec_aeads_v3[] = {
+	SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
+		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+		     AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+
+	SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
+		     sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ,
+		     SEC_AIV_SIZE, AES_BLOCK_SIZE)
 };
 
 int sec_register_to_crypto(struct hisi_qm *qm)
@@ -2025,11 +2312,19 @@ int sec_register_to_crypto(struct hisi_qm *qm)
 		if (ret)
 			goto reg_skcipher_fail;
 	}
+
 	ret = crypto_register_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
 	if (ret)
 		goto reg_aead_fail;
+	if (qm->ver > QM_HW_V2) {
+		ret = crypto_register_aeads(sec_aeads_v3, ARRAY_SIZE(sec_aeads_v3));
+		if (ret)
+			goto reg_aead_v3_fail;
+	}
 	return ret;
 
+reg_aead_v3_fail:
+	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
 reg_aead_fail:
 	if (qm->ver > QM_HW_V2)
 		crypto_unregister_skciphers(sec_skciphers_v3,
@@ -2043,9 +2338,13 @@ int sec_register_to_crypto(struct hisi_qm *qm)
 void sec_unregister_from_crypto(struct hisi_qm *qm)
 {
 	if (qm->ver > QM_HW_V2)
+		crypto_unregister_aeads(sec_aeads_v3,
+					ARRAY_SIZE(sec_aeads_v3));
+	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
+
+	if (qm->ver > QM_HW_V2)
 		crypto_unregister_skciphers(sec_skciphers_v3,
 					    ARRAY_SIZE(sec_skciphers_v3));
 	crypto_unregister_skciphers(sec_skciphers,
 				    ARRAY_SIZE(sec_skciphers));
-	crypto_unregister_aeads(sec_aeads, ARRAY_SIZE(sec_aeads));
 }
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index c9bfe75..a7bcd3e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -4,6 +4,7 @@
 #ifndef __HISI_SEC_V2_CRYPTO_H
 #define __HISI_SEC_V2_CRYPTO_H
 
+#define SEC_AIV_SIZE		12
 #define SEC_IV_SIZE		24
 #define SEC_MAX_KEY_SIZE	64
 #define SEC_COMM_SCENE		0
@@ -22,6 +23,11 @@ enum sec_hash_alg {
 };
 
 enum sec_mac_len {
+	SEC_HMAC_CCM_MAC   = 16,
+	SEC_HMAC_GCM_MAC   = 16,
+	SEC_SM3_MAC        = 32,
+	SEC_HMAC_SM3_MAC   = 32,
+	SEC_HMAC_MD5_MAC   = 16,
 	SEC_HMAC_SHA1_MAC   = 20,
 	SEC_HMAC_SHA256_MAC = 32,
 	SEC_HMAC_SHA512_MAC = 64,
@@ -33,6 +39,8 @@ enum sec_cmode {
 	SEC_CMODE_CFB    = 0x2,
 	SEC_CMODE_OFB    = 0x3,
 	SEC_CMODE_CTR    = 0x4,
+	SEC_CMODE_CCM    = 0x5,
+	SEC_CMODE_GCM    = 0x6,
 	SEC_CMODE_XTS    = 0x7,
 };
 
-- 
2.8.1
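
For reference, set_aead_auth_iv() above builds the RFC 3610 CCM blocks:
the cipher IV becomes CTR0 (flags byte reduced to L', counter
initialized to 0x01) and the auth IV becomes B0 (flags byte carrying
the Adata bit, M' and L', with the message length in the trailing
bytes). Below is a standalone C sketch of the same formatting; the
names are illustrative, not driver code, and like the driver it fills
only the two trailing length bytes, so message lengths are assumed to
fit in 16 bits:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Build CTR0 (cipher IV) and B0 (auth IV) from a ccm(aes) IV as the
 * kernel passes it: iv[0] = L' = q - 1, then the 15 - q byte nonce.
 */
static int ccm_build_blocks(const uint8_t iv[16], size_t authsize,
			    size_t assoclen, uint32_t msglen,
			    uint8_t ctr0[16], uint8_t b0[16])
{
	uint8_t q = iv[0] + 1;	/* number of length bytes, 2..8 */

	if (q < 2 || q > 8)
		return -1;
	/* mirrors aead_iv_demension_check(): msglen must fit in q bytes */
	if (q < 4 && (msglen >> (8 * q)))
		return -1;

	/* CTR0: flags byte keeps only L', counter starts at 0x01 */
	memcpy(ctr0, iv, 16);
	memset(&ctr0[16 - q], 0, q);
	ctr0[15] = 0x01;

	/* B0: flags = Adata << 6 | M' << 3 | L'; tail = message length */
	memcpy(b0, ctr0, 16);
	b0[0] = (iv[0] & 0x7) |
		(uint8_t)(((authsize - 2) / 2) << 3) |
		(assoclen ? 1 << 6 : 0);
	b0[15] = msglen & 0xFF;
	b0[14] = (msglen >> 8) & 0xFF;
	return 0;
}

int main(void)
{
	uint8_t iv[16] = { 0x03, 0xde, 0xad, 0xbe, 0xef };	/* q = 4 */
	uint8_t ctr0[16], b0[16];
	int i;

	if (ccm_build_blocks(iv, 16, 24, 512, ctr0, b0))
		return 1;
	for (i = 0; i < 16; i++)
		printf("%02x%c", b0[i], i == 15 ? '\n' : ' ');
	return 0;
}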



* [PATCH 2/4] crypto: hisilicon/sec - add fallback tfm supporting for aeads
  2021-06-04  1:31 [PATCH 0/4] crypto: hisilicon - supports new aeads for new hardware Kai Ye
  2021-06-04  1:31 ` [PATCH 1/4] crypto: hisilicon/sec - add new algorithm mode for AEAD Kai Ye
@ 2021-06-04  1:31 ` Kai Ye
  2021-06-04  1:31 ` [PATCH 3/4] crypto: hisilicon/sec - add hardware integrity check value process Kai Ye
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Kai Ye @ 2021-06-04  1:31 UTC
  To: herbert; +Cc: linux-crypto, linux-kernel, wangzhou1, yekai13

Add fallback tfm support to the hisi_sec driver. The Kunpeng920's
CCM/GCM hardware does not support a zero-byte src length, so the driver
needs to set up a soft fallback aead tfm for that case.

Signed-off-by: Kai Ye <yekai13@huawei.com>
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
---
 drivers/crypto/hisilicon/sec2/sec.h        |  2 +
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 97 ++++++++++++++++++++++++++++--
 2 files changed, 94 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 2960fae..3fe7875 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -88,7 +88,9 @@ struct sec_auth_ctx {
 	u8 a_key_len;
 	u8 mac_len;
 	u8 a_alg;
+	bool fallback;
 	struct crypto_shash *hash_tfm;
+	struct crypto_aead *fallback_aead_tfm;
 };
 
 /* SEC cipher context which cipher's relatives */
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index f2ab9ff..194a9bc 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -2,6 +2,7 @@
 /* Copyright (c) 2019 HiSilicon Limited. */
 
 #include <crypto/aes.h>
+#include <crypto/aead.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
 #include <crypto/des.h>
@@ -853,12 +854,16 @@ GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
 static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
 			struct scatterlist *src)
 {
-	struct aead_request *aead_req = req->aead_req.aead_req;
+	struct sec_aead_req *a_req = &req->aead_req;
+	struct aead_request *aead_req = a_req->aead_req;
 	struct sec_cipher_req *c_req = &req->c_req;
 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
 	struct device *dev = ctx->dev;
 	int copy_size, pbuf_length;
 	int req_id = req->req_id;
+	struct crypto_aead *tfm;
+	size_t authsize;
+	u8 *mac_offset;
 
 	if (ctx->alg_type == SEC_AEAD)
 		copy_size = aead_req->cryptlen + aead_req->assoclen;
@@ -866,12 +871,17 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
 		copy_size = c_req->c_len;
 
 	pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
-							qp_ctx->res[req_id].pbuf,
-							copy_size);
+			qp_ctx->res[req_id].pbuf, copy_size);
 	if (unlikely(pbuf_length != copy_size)) {
 		dev_err(dev, "copy src data to pbuf error!\n");
 		return -EINVAL;
 	}
+	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
+		tfm = crypto_aead_reqtfm(aead_req);
+		authsize = crypto_aead_authsize(tfm);
+		mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
+		memcpy(a_req->out_mac, mac_offset, authsize);
+	}
 
 	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
 	c_req->c_out_dma = c_req->c_in_dma;
@@ -1044,6 +1054,28 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
 	return 0;
 }
 
+static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+
+	if (unlikely(a_ctx->fallback_aead_tfm))
+		return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
+
+	return 0;
+}
+
+static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
+				    struct crypto_aead *tfm, const u8 *key,
+				    unsigned int keylen)
+{
+	crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
+			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
+	return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
+}
+
 static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 			   const u32 keylen, const enum sec_hash_alg a_alg,
 			   const enum sec_calg c_alg,
@@ -1052,6 +1084,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 {
 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
 	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
 	struct device *dev = ctx->dev;
 	struct crypto_authenc_keys keys;
 	int ret;
@@ -1069,6 +1102,12 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 		}
 		memcpy(c_ctx->c_key, key, keylen);
 
+		if (unlikely(a_ctx->fallback_aead_tfm)) {
+			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+			if (ret)
+				return ret;
+		}
+
 		return 0;
 	}
 
@@ -1857,7 +1896,10 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
 
 static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
 {
+	struct aead_alg *alg = crypto_aead_alg(tfm);
 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+	const char *aead_name = alg->base.cra_name;
 	int ret;
 
 	ret = sec_aead_init(tfm);
@@ -1866,11 +1908,24 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
 		return ret;
 	}
 
+	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
+						     CRYPTO_ALG_NEED_FALLBACK |
+						     CRYPTO_ALG_ASYNC);
+	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
+		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
+		sec_aead_exit(tfm);
+		return PTR_ERR(a_ctx->fallback_aead_tfm);
+	}
+	a_ctx->fallback = false;
+
 	return 0;
 }
 
 static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
 {
+	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
 	sec_aead_exit(tfm);
 }
 
@@ -2189,6 +2244,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
 		    req->cryptlen <= authsize))) {
 			dev_err(dev, "Kunpeng920 does not support 0 length!\n");
+			ctx->a_ctx.fallback = true;
 			return -EINVAL;
 		}
 	}
@@ -2211,6 +2267,31 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 	return 0;
 }
 
+static int sec_aead_soft_crypto(struct sec_ctx *ctx,
+				struct aead_request *aead_req,
+				bool encrypt)
+{
+	struct aead_request *subreq = aead_request_ctx(aead_req);
+	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
+	struct device *dev = ctx->dev;
+
+	/* Kunpeng920 aead mode does not support 0 input size */
+	if (!a_ctx->fallback_aead_tfm) {
+		dev_err(dev, "aead fallback tfm is NULL!\n");
+		return -EINVAL;
+	}
+
+	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
+	aead_request_set_callback(subreq, aead_req->base.flags,
+				  aead_req->base.complete, aead_req->base.data);
+	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
+			       aead_req->cryptlen, aead_req->iv);
+	aead_request_set_ad(subreq, aead_req->assoclen);
+
+	return encrypt ? crypto_aead_encrypt(subreq) :
+		   crypto_aead_decrypt(subreq);
+}
+
 static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
@@ -2224,8 +2305,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
 	req->ctx = ctx;
 
 	ret = sec_aead_param_check(ctx, req);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		if (ctx->a_ctx.fallback)
+			return sec_aead_soft_crypto(ctx, a_req, encrypt);
 		return -EINVAL;
+	}
 
 	return ctx->req_op->process(ctx, req);
 }
@@ -2247,7 +2331,9 @@ static int sec_aead_decrypt(struct aead_request *a_req)
 		.cra_name = sec_cra_name,\
 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
 		.cra_priority = SEC_PRIORITY,\
-		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,\
+		.cra_flags = CRYPTO_ALG_ASYNC |\
+		 CRYPTO_ALG_ALLOCATES_MEMORY |\
+		 CRYPTO_ALG_NEED_FALLBACK,\
 		.cra_blocksize = blk_size,\
 		.cra_ctxsize = sizeof(struct sec_ctx),\
 		.cra_module = THIS_MODULE,\
@@ -2255,6 +2341,7 @@ static int sec_aead_decrypt(struct aead_request *a_req)
 	.init = ctx_init,\
 	.exit = ctx_exit,\
 	.setkey = sec_set_key,\
+	.setauthsize = sec_aead_setauthsize,\
 	.decrypt = sec_aead_decrypt,\
 	.encrypt = sec_aead_encrypt,\
 	.ivsize = iv_size,\
-- 
2.8.1
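
The fallback wiring above follows the generic pattern for hardware
AEAD drivers: allocate a software instance of the same algorithm with
CRYPTO_ALG_NEED_FALLBACK in the mask at init time, mirror setkey and
setauthsize into it, and hand the request off unchanged when the
hardware cannot process it. A condensed kernel-style sketch of that
pattern (the my_* names are illustrative, not the driver's code; note
the request context must be sized to hold the fallback's subrequest):

#include <linux/err.h>
#include <linux/types.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>

struct my_ctx {
	struct crypto_aead *fbtfm;
};

static int my_aead_init(struct crypto_aead *tfm)
{
	struct my_ctx *ctx = crypto_aead_ctx(tfm);
	const char *name = crypto_aead_alg(tfm)->base.cra_name;

	/*
	 * The mask requests a synchronous implementation that does not
	 * itself need a fallback, i.e. not another hardware driver.
	 */
	ctx->fbtfm = crypto_alloc_aead(name, 0, CRYPTO_ALG_NEED_FALLBACK |
						CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->fbtfm))
		return PTR_ERR(ctx->fbtfm);

	/* reserve room for the fallback subrequest in the request ctx */
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
				     crypto_aead_reqsize(ctx->fbtfm));
	return 0;
}

static int my_aead_fallback(struct aead_request *req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct my_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq = aead_request_ctx(req);

	/* forward the request unchanged to the software tfm */
	aead_request_set_tfm(subreq, ctx->fbtfm);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	return encrypt ? crypto_aead_encrypt(subreq) :
			 crypto_aead_decrypt(subreq);
}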



* [PATCH 3/4] crypto: hisilicon/sec - add hardware integrity check value process
  2021-06-04  1:31 [PATCH 0/4] crypto: hisilicon - supports new aeads for new hardware Kai Ye
  2021-06-04  1:31 ` [PATCH 1/4] crypto: hisilicon/sec - add new algorithm mode for AEAD Kai Ye
  2021-06-04  1:31 ` [PATCH 2/4] crypto: hisilicon/sec - add fallback tfm supporting for aeads Kai Ye
@ 2021-06-04  1:31 ` Kai Ye
  2021-06-04  1:31 ` [PATCH v2 4/4] crypto: hisilicon/sec - modify the SEC request structure Kai Ye
  2021-06-11  7:23 ` [PATCH 0/4] crypto: hisilicon - supports new aeads for new hardware Herbert Xu
  4 siblings, 0 replies; 7+ messages in thread
From: Kai Ye @ 2021-06-04  1:31 UTC
  To: herbert; +Cc: linux-crypto, linux-kernel, wangzhou1, yekai13

Use the hardware integrity check value (ICV) verification instead of
the software verify process when doing AEAD decryption.

Signed-off-by: Kai Ye <yekai13@huawei.com>
Signed-off-by: Longfang Liu <liulongfang@huawei.com>
---
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 81 +++++++++++++++++-------------
 drivers/crypto/hisilicon/sec2/sec_crypto.h |  1 +
 2 files changed, 48 insertions(+), 34 deletions(-)

diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 194a9bc..75122f0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -54,6 +54,7 @@
 #define SEC_FLAG_MASK		0x0780
 #define SEC_TYPE_MASK		0x0F
 #define SEC_DONE_MASK		0x0001
+#define SEC_ICV_MASK		0x000E
 #define SEC_SQE_LEN_RATE_MASK	0x3
 
 #define SEC_TOTAL_IV_SZ		(SEC_IV_SIZE * QM_Q_DEPTH)
@@ -80,6 +81,7 @@
 #define SEC_SQE_CFLAG		2
 #define SEC_SQE_AEAD_FLAG	3
 #define SEC_SQE_DONE		0x1
+#define SEC_ICV_ERR		0x2
 #define MIN_MAC_LEN		4
 #define MAC_LEN_MASK		0x1U
 #define MAX_INPUT_DATA_LEN	0xFFFE00
@@ -156,32 +158,12 @@ static void sec_free_req_id(struct sec_req *req)
 	mutex_unlock(&qp_ctx->req_lock);
 }
 
-static int sec_aead_verify(struct sec_req *req)
-{
-	struct aead_request *aead_req = req->aead_req.aead_req;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
-	size_t authsize = crypto_aead_authsize(tfm);
-	u8 *mac_out = req->aead_req.out_mac;
-	u8 *mac = mac_out + SEC_MAX_MAC_LEN;
-	struct scatterlist *sgl = aead_req->src;
-	size_t sz;
-
-	sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac, authsize,
-				aead_req->cryptlen + aead_req->assoclen -
-				authsize);
-	if (unlikely(sz != authsize || memcmp(mac_out, mac, sz))) {
-		dev_err(req->ctx->dev, "aead verify failure!\n");
-		return -EBADMSG;
-	}
-
-	return 0;
-}
-
 static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
 {
 	struct sec_sqe *bd = resp;
 
 	status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+	status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
 	status->flag = (le16_to_cpu(bd->type2.done_flag) &
 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
 	status->tag = le16_to_cpu(bd->type2.tag);
@@ -195,6 +177,7 @@ static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
 	struct sec_sqe3 *bd3 = resp;
 
 	status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
+	status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
 	status->flag = (le16_to_cpu(bd3->done_flag) &
 					SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
 	status->tag = le64_to_cpu(bd3->tag);
@@ -220,6 +203,14 @@ static int sec_cb_status_check(struct sec_req *req,
 					    status->flag);
 			return -EIO;
 		}
+	} else if (unlikely(ctx->alg_type == SEC_AEAD)) {
+		if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
+			     status->icv == SEC_ICV_ERR)) {
+			dev_err_ratelimited(ctx->dev,
+					    "flag[%u], icv[%u]\n",
+					    status->flag, status->icv);
+			return -EBADMSG;
+		}
 	}
 
 	return 0;
@@ -262,9 +253,6 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp)
 	if (err)
 		atomic64_inc(&dfx->done_flag_cnt);
 
-	if (ctx->alg_type == SEC_AEAD && !req->c_req.encrypt)
-		err = sec_aead_verify(req);
-
 	atomic64_inc(&dfx->recv_cnt);
 
 	ctx->req_op->buf_unmap(ctx, req);
@@ -895,7 +883,6 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
 	struct aead_request *aead_req = req->aead_req.aead_req;
 	struct sec_cipher_req *c_req = &req->c_req;
 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-	struct device *dev = ctx->dev;
 	int copy_size, pbuf_length;
 	int req_id = req->req_id;
 
@@ -905,10 +892,29 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
 		copy_size = c_req->c_len;
 
 	pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
-				qp_ctx->res[req_id].pbuf,
-				copy_size);
+			qp_ctx->res[req_id].pbuf, copy_size);
 	if (unlikely(pbuf_length != copy_size))
-		dev_err(dev, "copy pbuf data to dst error!\n");
+		dev_err(ctx->dev, "copy pbuf data to dst error!\n");
+}
+
+static int sec_aead_mac_init(struct sec_aead_req *req)
+{
+	struct aead_request *aead_req = req->aead_req;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
+	size_t authsize = crypto_aead_authsize(tfm);
+	u8 *mac_out = req->out_mac;
+	struct scatterlist *sgl = aead_req->src;
+	size_t copy_size;
+	off_t skip_size;
+
+	/* Copy input mac */
+	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
+	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
+				       authsize, skip_size);
+	if (unlikely(copy_size != authsize))
+		return -EINVAL;
+
+	return 0;
 }
 
 static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
@@ -922,7 +928,6 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
 	int ret;
 
 	if (req->use_pbuf) {
-		ret = sec_cipher_pbuf_map(ctx, req, src);
 		c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
 		c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
 		if (ctx->alg_type == SEC_AEAD) {
@@ -932,6 +937,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
 			a_req->out_mac_dma = res->pbuf_dma +
 					SEC_PBUF_MAC_OFFSET;
 		}
+		ret = sec_cipher_pbuf_map(ctx, req, src);
 
 		return ret;
 	}
@@ -954,6 +960,13 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
 		return PTR_ERR(c_req->c_in);
 	}
 
+	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
+		ret = sec_aead_mac_init(a_req);
+		if (unlikely(ret)) {
+			dev_err(dev, "fail to init mac data for ICV!\n");
+			return ret;
+		}
+	}
 	if (dst == src) {
 		c_req->c_out = c_req->c_in;
 		c_req->c_out_dma = c_req->c_in_dma;
@@ -1542,13 +1555,13 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
 	sec_sqe->type2.mac_key_alg |=
 			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);
 
-	sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
-
-	if (dir)
+	if (dir) {
+		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
 		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
-	else
+	} else {
+		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
 		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
-
+	}
 	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);
 
 	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index a7bcd3e..9f71c35 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -80,6 +80,7 @@ struct bd_status {
 	u8 done;
 	u8 err_type;
 	u16 flag;
+	u16 icv;
 };
 
 enum {
-- 
2.8.1



* [PATCH v2 4/4] crypto: hisilicon/sec - modify the SEC request structure
  2021-06-04  1:31 [PATCH 0/4] crypto: hisilicon - supports new aeads for new hardware Kai Ye
                   ` (2 preceding siblings ...)
  2021-06-04  1:31 ` [PATCH 3/4] crypto: hisilicon/sec - add hardware integrity check value process Kai Ye
@ 2021-06-04  1:31 ` Kai Ye
  2021-06-07  2:49   ` yekai(A)
  2021-06-11  7:23 ` [PATCH 0/4] crypto: hisilicon - supports new aeads for new hardware Herbert Xu
  4 siblings, 1 reply; 7+ messages in thread
From: Kai Ye @ 2021-06-04  1:31 UTC
  To: herbert; +Cc: linux-crypto, linux-kernel, wangzhou1, yekai13

Modify the SEC request structure, combining two common parameters of
the SEC request into one.

Signed-off-by: Kai Ye <yekai13@huawei.com>
---
 drivers/crypto/hisilicon/sec2/sec.h        |  7 ++++--
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 34 +++++++++++++++---------------
 2 files changed, 22 insertions(+), 19 deletions(-)

diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 3fe7875..018415b 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -21,8 +21,6 @@ struct sec_alg_res {
 
 /* Cipher request of SEC private */
 struct sec_cipher_req {
-	struct hisi_acc_hw_sgl *c_in;
-	dma_addr_t c_in_dma;
 	struct hisi_acc_hw_sgl *c_out;
 	dma_addr_t c_out_dma;
 	u8 *c_ivin;
@@ -49,6 +47,11 @@ struct sec_req {
 	struct sec_ctx *ctx;
 	struct sec_qp_ctx *qp_ctx;
 
+	/**
+	 * Common parameters of the SEC request.
+	 */
+	struct hisi_acc_hw_sgl *in;
+	dma_addr_t in_dma;
 	struct sec_cipher_req c_req;
 	struct sec_aead_req aead_req;
 	struct list_head backlog_head;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 75122f0..f23af61 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -871,8 +871,8 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
 		memcpy(a_req->out_mac, mac_offset, authsize);
 	}
 
-	c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
-	c_req->c_out_dma = c_req->c_in_dma;
+	req->in_dma = qp_ctx->res[req_id].pbuf_dma;
+	c_req->c_out_dma = req->in_dma;
 
 	return 0;
 }
@@ -950,14 +950,13 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
 		a_req->out_mac_dma = res->out_mac_dma;
 	}
 
-	c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
-						    qp_ctx->c_in_pool,
-						    req->req_id,
-						    &c_req->c_in_dma);
-
-	if (IS_ERR(c_req->c_in)) {
+	req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+						qp_ctx->c_in_pool,
+						req->req_id,
+						&req->in_dma);
+	if (IS_ERR(req->in)) {
 		dev_err(dev, "fail to dma map input sgl buffers!\n");
-		return PTR_ERR(c_req->c_in);
+		return PTR_ERR(req->in);
 	}
 
 	if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
@@ -967,9 +966,10 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
 			return ret;
 		}
 	}
+
 	if (dst == src) {
-		c_req->c_out = c_req->c_in;
-		c_req->c_out_dma = c_req->c_in_dma;
+		c_req->c_out = req->in;
+		c_req->c_out_dma = req->in_dma;
 	} else {
 		c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
 							     qp_ctx->c_out_pool,
@@ -978,7 +978,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
 
 		if (IS_ERR(c_req->c_out)) {
 			dev_err(dev, "fail to dma map output sgl buffers!\n");
-			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+			hisi_acc_sg_buf_unmap(dev, src, req->in);
 			return PTR_ERR(c_req->c_out);
 		}
 	}
@@ -996,7 +996,7 @@ static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
 		sec_cipher_pbuf_unmap(ctx, req, dst);
 	} else {
 		if (dst != src)
-			hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+			hisi_acc_sg_buf_unmap(dev, src, req->in);
 
 		hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
 	}
@@ -1236,7 +1236,7 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
 
 	sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
 	sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
-	sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+	sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
 	sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
 
 	sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
@@ -1263,7 +1263,7 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
 
 	sec_sqe->sdm_addr_type |= da_type;
 	scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
-	if (c_req->c_in_dma != c_req->c_out_dma)
+	if (req->in_dma != c_req->c_out_dma)
 		de = 0x1 << SEC_DE_OFFSET;
 
 	sec_sqe->sds_sa_type = (de | scene | sa_type);
@@ -1286,7 +1286,7 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
 
 	sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
 	sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
-	sec_sqe3->data_src_addr = cpu_to_le64(c_req->c_in_dma);
+	sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
 	sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
 
 	sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
@@ -1309,7 +1309,7 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
 	}
 
 	bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
-	if (c_req->c_in_dma != c_req->c_out_dma)
+	if (req->in_dma != c_req->c_out_dma)
 		bd_param |= 0x1 << SEC_DE_OFFSET_V3;
 
 	bd_param |= SEC_BD_TYPE3;
-- 
2.8.1



* Re: [PATCH v2 4/4] crypto: hisilicon/sec - modify the SEC request structure
  2021-06-04  1:31 ` [PATCH v2 4/4] crypto: hisilicon/sec - modify the SEC request structure Kai Ye
@ 2021-06-07  2:49   ` yekai(A)
  0 siblings, 0 replies; 7+ messages in thread
From: yekai(A) @ 2021-06-07  2:49 UTC
  To: herbert; +Cc: linux-crypto, linux-kernel, wangzhou1



On 2021/6/4 9:31, Kai Ye wrote:
> Modify the SEC request structure, combining two common parameters of
> the SEC request into one.
>
> Signed-off-by: Kai Ye <yekai13@huawei.com>
> ---
> [...]

Hi Herbert,
Please ignore the v2 in the 4/4 title.

sincere thanks
Kai


* Re: [PATCH 0/4] crypto: hisilicon - supports new aeads for new hardware
  2021-06-04  1:31 [PATCH 0/4] crypto: hisilicon - supports new aeads for new hardware Kai Ye
                   ` (3 preceding siblings ...)
  2021-06-04  1:31 ` [PATCH v2 4/4] crypto: hisilicon/sec - modify the SEC request structure Kai Ye
@ 2021-06-11  7:23 ` Herbert Xu
  4 siblings, 0 replies; 7+ messages in thread
From: Herbert Xu @ 2021-06-11  7:23 UTC
  To: Kai Ye; +Cc: linux-crypto, linux-kernel, wangzhou1

On Fri, Jun 04, 2021 at 09:31:25AM +0800, Kai Ye wrote:
> This series adds new AEAD algorithms to the driver and adds fallback
> tfm support, modifying the driver as needed. The crypto fuzz tests pass.
> 
> Kai Ye (4):
>   crypto: hisilicon/sec - add new algorithm mode for AEAD
>   crypto: hisilicon/sec - add fallback tfm supporting for aeads
>   crypto: hisilicon/sec - add hardware integrity check value process
>   crypto: hisilicon/sec - modify the SEC request structure
> 
>  drivers/crypto/hisilicon/sec2/sec.h        |  13 +-
>  drivers/crypto/hisilicon/sec2/sec_crypto.c | 575 ++++++++++++++++++++++++-----
>  drivers/crypto/hisilicon/sec2/sec_crypto.h |   9 +
>  3 files changed, 507 insertions(+), 90 deletions(-)

All applied.  Thanks.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

