* [PATCH 0/9] Bug fixes and ctr mode of operation
@ 2017-06-15  7:13 Harsh Jain
  2017-06-15  7:13 ` [PATCH 1/9] crypto: chcr - Pass lcb bit setting to firmware Harsh Jain
                   ` (9 more replies)
  0 siblings, 10 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain

This series is based on the cryptodev-2.6 tree and includes bug fixes along with the ctr(aes) and rfc3686(ctr(aes)) algorithms.

Harsh Jain (7):
  crypto: chcr - Pass lcb bit setting to firmware
  crypto: chcr - Fix fallback key setting
  crypto: chcr - Return correct error code
  crypto: chcr - Avoid changing request structure
  crypto: chcr - Add ctr mode and process large sg entries for cipher
  MAINTAINERS: Add maintainer for chelsio crypto driver
  crypto: chcr - Ensure Destination sg entry size less than 2k
Atul Gupta (2):
  chcr - Add debug counters
  crypto: chcr - Select device in Round Robin fashion

 MAINTAINERS                                        |    7 +
 drivers/crypto/chelsio/chcr_algo.c                 | 1096 ++++++++++++++++----
 drivers/crypto/chelsio/chcr_algo.h                 |   30 +-
 drivers/crypto/chelsio/chcr_core.c                 |   56 +-
 drivers/crypto/chelsio/chcr_core.h                 |    5 +-
 drivers/crypto/chelsio/chcr_crypto.h               |   25 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h         |    1 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c |   35 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c     |    1 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h     |   10 +
 10 files changed, 1020 insertions(+), 246 deletions(-)

-- 
1.8.3.1

* [PATCH 1/9] crypto: chcr - Pass lcb bit setting to firmware
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
@ 2017-06-15  7:13 ` Harsh Jain
  2017-06-15  7:13 ` [PATCH 2/9] crypto: chcr - Fix fallback key setting Harsh Jain
                   ` (8 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain

The GCM and CBC modes of operation require the Last Cipher Block.
This patch sets the lcb bit in the WR header when required.
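
In outline, create_wreq() grows an lcb argument and the cipher path
derives it from the configured mode (a minimal sketch stitched together
from the hunks below; the GCM path passes reqctx->verify instead):

	unsigned int lcb =
		(ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);

	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
				is_iv ? iv_loc : IV_NOP, !!lcb,
				ctx->tx_qidx);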

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
 drivers/crypto/chelsio/chcr_algo.c | 18 +++++++++++-------
 drivers/crypto/chelsio/chcr_algo.h |  4 ++--
 2 files changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index f00e0d8..e8ff505 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -518,7 +518,8 @@ static inline void create_wreq(struct chcr_context *ctx,
 			       void *req, struct sk_buff *skb,
 			       int kctx_len, int hash_sz,
 			       int is_iv,
-			       unsigned int sc_len)
+			       unsigned int sc_len,
+			       unsigned int lcb)
 {
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	int iv_loc = IV_DSGL;
@@ -543,7 +544,8 @@ static inline void create_wreq(struct chcr_context *ctx,
 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 	chcr_req->wreq.rx_chid_to_rx_q_id =
 		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
-				is_iv ? iv_loc : IV_NOP, ctx->tx_qidx);
+				is_iv ? iv_loc : IV_NOP, !!lcb,
+				ctx->tx_qidx);
 
 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
 						       qid);
@@ -652,7 +654,8 @@ static inline void create_wreq(struct chcr_context *ctx,
 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
 	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
-			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
+			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 	reqctx->skb = skb;
 	skb_get(skb);
 	return skb;
@@ -923,7 +926,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
 
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
-			DUMMY_BYTES);
+			DUMMY_BYTES, 0);
 	req_ctx->skb = skb;
 	skb_get(skb);
 	return skb;
@@ -1508,7 +1511,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
-		   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+		   sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
 	reqctx->skb = skb;
 	skb_get(skb);
 
@@ -1804,7 +1807,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	skb_set_transport_header(skb, transhdr_len);
 	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
-		    sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
 	reqctx->skb = skb;
 	skb_get(skb);
 	return skb;
@@ -1950,7 +1953,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
-			sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+			sizeof(struct cpl_rx_phys_dsgl) + dst_size,
+			reqctx->verify);
 	reqctx->skb = skb;
 	skb_get(skb);
 	return skb;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 751d06a..9894c7b 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -185,11 +185,11 @@
 			FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \
 			FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len)))
 
-#define FILL_WR_RX_Q_ID(cid, qid, wr_iv, fid) \
+#define FILL_WR_RX_Q_ID(cid, qid, wr_iv, lcb, fid) \
 		htonl( \
 			FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \
 			FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \
-			FW_CRYPTO_LOOKASIDE_WR_LCB_V(0) | \
+			FW_CRYPTO_LOOKASIDE_WR_LCB_V((lcb)) | \
 			FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv)) | \
 			FW_CRYPTO_LOOKASIDE_WR_FQIDX_V(fid))
 
-- 
1.8.3.1

* [PATCH 2/9] crypto: chcr - Fix fallback key setting
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
  2017-06-15  7:13 ` [PATCH 1/9] crypto: chcr - Pass lcb bit setting to firmware Harsh Jain
@ 2017-06-15  7:13 ` Harsh Jain
  2017-06-15  7:13 ` [PATCH 3/9] crypto: chcr - Return correct error code Harsh Jain
                   ` (7 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain

Set the key of the fallback tfm for rfc4309.
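
The fix follows the standard fallback-setkey pattern: mirror the
request flags onto the software tfm, set its key, and copy any result
flags back before checking for errors (condensed from the hunk below):

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher,
			      crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;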

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
 drivers/crypto/chelsio/chcr_algo.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index e8ff505..14641c6 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -2210,7 +2210,8 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
 				    unsigned int keylen)
 {
 	struct chcr_context *ctx = crypto_aead_ctx(aead);
-	 struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	int error;
 
 	if (keylen < 3) {
 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
@@ -2218,6 +2219,15 @@ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
 		aeadctx->enckey_len = 0;
 		return	-EINVAL;
 	}
+	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
+			      CRYPTO_TFM_REQ_MASK);
+	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
+	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
+			      CRYPTO_TFM_RES_MASK);
+	if (error)
+		return error;
 	keylen -= 3;
 	memcpy(aeadctx->salt, key + keylen, 3);
 	return chcr_ccm_common_setkey(aead, key, keylen);
-- 
1.8.3.1

* [PATCH 3/9] crypto: chcr - Return correct error code
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
  2017-06-15  7:13 ` [PATCH 1/9] crypto: chcr - Pass lcb bit setting to firmware Harsh Jain
  2017-06-15  7:13 ` [PATCH 2/9] crypto: chcr - Fix fallback key setting Harsh Jain
@ 2017-06-15  7:13 ` Harsh Jain
  2017-06-15  7:13 ` [PATCH 4/9] crypto: chcr - Avoid changing request structure Harsh Jain
                   ` (6 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain

Return the correct error code instead of a blanket -EINVAL.
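
The WR-building paths are converted to the usual single-exit error
idiom: keep a local error code, set it at each failure site, and unwind
through one label (the shape of the change, condensed from the hunks
below):

	int error = -EINVAL;

	skb = alloc_skb(transhdr_len + sizeof(struct sge_opaque_hdr), flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	/* ... build the request, map the destination sg list ... */
err:
	return ERR_PTR(error);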

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
 drivers/crypto/chelsio/chcr_algo.c | 76 +++++++++++++++++++++-----------------
 1 file changed, 42 insertions(+), 34 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 14641c6..156065d 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1399,7 +1399,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	unsigned short stop_offset = 0;
 	unsigned int  assoclen = req->assoclen;
 	unsigned int  authsize = crypto_aead_authsize(tfm);
-	int err = -EINVAL, src_nent;
+	int error = -EINVAL, src_nent;
 	int null = 0;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
@@ -1416,9 +1416,9 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	reqctx->dst = src;
 
 	if (req->src != req->dst) {
-		err = chcr_copy_assoc(req, aeadctx);
-		if (err)
-			return ERR_PTR(err);
+		error = chcr_copy_assoc(req, aeadctx);
+		if (error)
+			return ERR_PTR(error);
 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
 					       req->assoclen);
 	}
@@ -1430,6 +1430,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents < 0) {
 		pr_err("AUTHENC:Invalid Destination sg entries\n");
+		error = -EINVAL;
 		goto err;
 	}
 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
@@ -1443,8 +1444,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
-	if (!skb)
+	if (!skb) {
+		error = -ENOMEM;
 		goto err;
+	}
 
 	/* LLD is going to write the sge hdr. */
 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
@@ -1496,9 +1499,9 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	sg_param.nents = reqctx->dst_nents;
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
-	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
-				  &sg_param))
+	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
+					reqctx->dst, &sg_param);
+	if (error)
 		goto dstmap_fail;
 
 	skb_set_transport_header(skb, transhdr_len);
@@ -1520,7 +1523,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	/* ivmap_fail: */
 	kfree_skb(skb);
 err:
-	return ERR_PTR(-EINVAL);
+	return ERR_PTR(error);
 }
 
 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1730,7 +1733,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	unsigned int dst_size = 0, kctx_len;
 	unsigned int sub_type;
 	unsigned int authsize = crypto_aead_authsize(tfm);
-	int err = -EINVAL, src_nent;
+	int error = -EINVAL, src_nent;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
 
@@ -1746,10 +1749,10 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	reqctx->dst = src;
 
 	if (req->src != req->dst) {
-		err = chcr_copy_assoc(req, aeadctx);
-		if (err) {
+		error = chcr_copy_assoc(req, aeadctx);
+		if (error) {
 			pr_err("AAD copy to destination buffer fails\n");
-			return ERR_PTR(err);
+			return ERR_PTR(error);
 		}
 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
 					       req->assoclen);
@@ -1758,11 +1761,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents < 0) {
 		pr_err("CCM:Invalid Destination sg entries\n");
+		error = -EINVAL;
 		goto err;
 	}
-
-
-	if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
+	if (error)
 		goto err;
 
 	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
@@ -1777,8 +1780,10 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 
 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
 
-	if (!skb)
+	if (!skb) {
+		error = -ENOMEM;
 		goto err;
+	}
 
 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
 
@@ -1793,15 +1798,16 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 					16), aeadctx->key, aeadctx->enckey_len);
 
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
-	if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
+	if (error)
 		goto dstmap_fail;
 
 	sg_param.nents = reqctx->dst_nents;
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
-	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
-				  &sg_param))
+	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
+				 reqctx->dst, &sg_param);
+	if (error)
 		goto dstmap_fail;
 
 	skb_set_transport_header(skb, transhdr_len);
@@ -1813,9 +1819,8 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	return skb;
 dstmap_fail:
 	kfree_skb(skb);
-	skb = NULL;
 err:
-	return ERR_PTR(-EINVAL);
+	return ERR_PTR(error);
 }
 
 static struct sk_buff *create_gcm_wr(struct aead_request *req,
@@ -1839,7 +1844,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	unsigned char tag_offset = 0;
 	unsigned int crypt_len = 0;
 	unsigned int authsize = crypto_aead_authsize(tfm);
-	int err = -EINVAL, src_nent;
+	int error = -EINVAL, src_nent;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
 
@@ -1856,9 +1861,9 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
 	reqctx->dst = src;
 	if (req->src != req->dst) {
-		err = chcr_copy_assoc(req, aeadctx);
-		if (err)
-			return	ERR_PTR(err);
+		error = chcr_copy_assoc(req, aeadctx);
+		if (error)
+			return	ERR_PTR(error);
 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
 					       req->assoclen);
 	}
@@ -1874,6 +1879,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents < 0) {
 		pr_err("GCM:Invalid Destination sg entries\n");
+		error = -EINVAL;
 		goto err;
 	}
 
@@ -1889,8 +1895,10 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
-	if (!skb)
+	if (!skb) {
+		error = -ENOMEM;
 		goto err;
+	}
 
 	/* NIC driver is going to write the sge hdr. */
 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
@@ -1941,9 +1949,9 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	sg_param.nents = reqctx->dst_nents;
 	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
 	sg_param.qid = qid;
-	sg_param.align = 0;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
-				  &sg_param))
+	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
+					  reqctx->dst, &sg_param);
+	if (error)
 		goto dstmap_fail;
 
 	skb_set_transport_header(skb, transhdr_len);
@@ -1962,9 +1970,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 dstmap_fail:
 	/* ivmap_fail: */
 	kfree_skb(skb);
-	skb = NULL;
 err:
-	return skb;
+	return ERR_PTR(error);
 }
 
 
@@ -1976,7 +1983,8 @@ static int chcr_aead_cra_init(struct crypto_aead *tfm)
 	struct aead_alg *alg = crypto_aead_alg(tfm);
 
 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
-					       CRYPTO_ALG_NEED_FALLBACK);
+					       CRYPTO_ALG_NEED_FALLBACK |
+					       CRYPTO_ALG_ASYNC);
 	if  (IS_ERR(aeadctx->sw_cipher))
 		return PTR_ERR(aeadctx->sw_cipher);
 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
-- 
1.8.3.1

* [PATCH 4/9] crypto: chcr - Avoid changing request structure
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
                   ` (2 preceding siblings ...)
  2017-06-15  7:13 ` [PATCH 3/9] crypto: chcr - Return correct error code Harsh Jain
@ 2017-06-15  7:13 ` Harsh Jain
  2017-06-15  7:13 ` [PATCH 5/9] crypto: chcr - Add ctr mode and process large sg entries for cipher Harsh Jain
                   ` (5 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain

Do not modify the assoclen received in the aead_request; keep the
adjusted value in a local variable instead.
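
Rather than writing the RFC4106 adjustment back into the request, the
GCM WR builder now keeps the adjusted length in a local variable
(condensed from the hunk below):

	unsigned int assoclen = req->assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;	/* was: req->assoclen -= 8 */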

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
 drivers/crypto/chelsio/chcr_algo.c | 37 ++++++++++++++-----------------------
 1 file changed, 14 insertions(+), 23 deletions(-)
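
The patch also switches tag verification from memcmp() to
crypto_memneq(), which compares the whole tag in constant time and so
avoids leaking timing information (as in the first hunk below):

	cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	if (cmp)
		*err = -EBADMSG;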

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 156065d..9c839c6 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -126,13 +126,13 @@ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 	fw6_pld = (struct cpl_fw6_pld *)input;
 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
-		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
 	} else {
 
 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
 				authsize, req->assoclen +
 				req->cryptlen - authsize);
-		cmp = memcmp(temp, (fw6_pld + 1), authsize);
+		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
 	}
 	if (cmp)
 		*err = -EBADMSG;
@@ -1840,9 +1840,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = AES_BLOCK_SIZE;
-	unsigned int dst_size = 0, kctx_len;
+	unsigned int dst_size = 0, kctx_len, assoclen = req->assoclen;
 	unsigned char tag_offset = 0;
-	unsigned int crypt_len = 0;
 	unsigned int authsize = crypto_aead_authsize(tfm);
 	int error = -EINVAL, src_nent;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
@@ -1854,27 +1853,21 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 
 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
 		goto err;
-	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
+	src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
 	if (src_nent < 0)
 		goto err;
 
-	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
 	reqctx->dst = src;
 	if (req->src != req->dst) {
 		error = chcr_copy_assoc(req, aeadctx);
 		if (error)
 			return	ERR_PTR(error);
 		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
-					       req->assoclen);
+					       assoclen);
 	}
 
-	if (!req->cryptlen)
-		/* null-payload is not supported in the hardware.
-		 * software is sending block size
-		 */
-		crypt_len = AES_BLOCK_SIZE;
-	else
-		crypt_len = req->cryptlen;
+
 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
 	if (reqctx->dst_nents < 0) {
@@ -1907,19 +1900,19 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	memset(chcr_req, 0, transhdr_len);
 
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
-		req->assoclen -= 8;
+		assoclen = req->assoclen - 8;
 
 	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
 					ctx->dev->rx_channel_id, 2, (ivsize ?
-					(req->assoclen + 1) : 0));
+					(assoclen + 1) : 0));
 	chcr_req->sec_cpl.pldlen =
-		htonl(req->assoclen + ivsize + req->cryptlen);
+		htonl(assoclen + ivsize + req->cryptlen);
 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
-					req->assoclen ? 1 : 0, req->assoclen,
-					req->assoclen + ivsize + 1, 0);
+					assoclen ? 1 : 0, assoclen,
+					assoclen + ivsize + 1, 0);
 		chcr_req->sec_cpl.cipherstop_lo_authinsert =
-			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+			FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
 						tag_offset, tag_offset);
 		chcr_req->sec_cpl.seqno_numivs =
 			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
@@ -1955,9 +1948,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		goto dstmap_fail;
 
 	skb_set_transport_header(skb, transhdr_len);
-
-	write_sg_to_skb(skb, &frags, req->src, req->assoclen);
-
+	write_sg_to_skb(skb, &frags, req->src, assoclen);
 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
-- 
1.8.3.1

* [PATCH 5/9] crypto: chcr - Add ctr mode and process large sg entries for cipher
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
                   ` (3 preceding siblings ...)
  2017-06-15  7:13 ` [PATCH 4/9] crypto: chcr - Avoid changing request structure Harsh Jain
@ 2017-06-15  7:13 ` Harsh Jain
  2017-06-15  7:13 ` [PATCH 6/9] chcr - Add debug counters Harsh Jain
                   ` (4 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain

Send multiple WRs to the hardware to handle large sg lists. Also add
the ctr(aes) and rfc3686(ctr(aes)) modes.
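
The splitting works roughly as follows: compute how many source and
destination sg entries (and bytes) fit within one WR's space budget,
round the byte count down to a 16-byte multiple, send that much, and
resubmit the remainder from the response handler with an updated IV
(condensed from process_cipher() below):

	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
				  SPACE_LEFT(ablkctx->enckey_len),
				  &wrparam.snent, &reqctx->dst_nents);
	if ((bytes + reqctx->processed) >= req->nbytes)
		bytes = req->nbytes - reqctx->processed;
	else
		bytes = ROUND_16(bytes);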

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
 drivers/crypto/chelsio/chcr_algo.c   | 786 ++++++++++++++++++++++++++++-------
 drivers/crypto/chelsio/chcr_algo.h   |  26 +-
 drivers/crypto/chelsio/chcr_core.c   |   1 -
 drivers/crypto/chelsio/chcr_core.h   |   3 +
 drivers/crypto/chelsio/chcr_crypto.h |  19 +-
 5 files changed, 690 insertions(+), 145 deletions(-)
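
Note one CTR-specific detail in this patch: adjust_ctr_overflow() clamps
a chunk so the 32-bit block counter at the end of the IV cannot wrap
within a single WR; the leftover bytes go out in a follow-up WR with a
recomputed IV:

	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u32 temp = be32_to_cpu(*--b);	/* low 32 bits of the counter */
	u64 c = (u64)~temp + 1;		/* blocks left before the counter wraps */

	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;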

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 9c839c6..03b817f 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -55,6 +55,8 @@
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <crypto/authenc.h>
+#include <crypto/ctr.h>
+#include <crypto/gf128mul.h>
 #include <crypto/internal/aead.h>
 #include <crypto/null.h>
 #include <crypto/internal/skcipher.h>
@@ -151,12 +153,11 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct chcr_req_ctx ctx_req;
-	struct cpl_fw6_pld *fw6_pld;
 	unsigned int digestsize, updated_digestsize;
 
 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AEAD:
-		ctx_req.req.aead_req = (struct aead_request *)req;
+		ctx_req.req.aead_req = aead_request_cast(req);
 		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
 			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
@@ -169,27 +170,16 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 					&err);
 			ctx_req.ctx.reqctx->verify = VERIFY_HW;
 		}
+		ctx_req.req.aead_req->base.complete(req, err);
 		break;
 
 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
-		ctx_req.ctx.ablk_ctx =
-			ablkcipher_request_ctx(ctx_req.req.ablk_req);
-		if (!err) {
-			fw6_pld = (struct cpl_fw6_pld *)input;
-			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
-			       AES_BLOCK_SIZE);
-		}
-		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
-			     ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
-		if (ctx_req.ctx.ablk_ctx->skb) {
-			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
-			ctx_req.ctx.ablk_ctx->skb = NULL;
-		}
+		 err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+					       input, err);
 		break;
 
 	case CRYPTO_ALG_TYPE_AHASH:
-		ctx_req.req.ahash_req = (struct ahash_request *)req;
+		ctx_req.req.ahash_req = ahash_request_cast(req);
 		ctx_req.ctx.ahash_ctx =
 			ahash_request_ctx(ctx_req.req.ahash_req);
 		digestsize =
@@ -214,6 +204,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 			       sizeof(struct cpl_fw6_pld),
 			       updated_digestsize);
 		}
+		ctx_req.req.ahash_req->base.complete(req, err);
 		break;
 	}
 	return err;
@@ -392,7 +383,7 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
 			   struct phys_sge_parm *sg_param)
 {
 	struct phys_sge_pairs *to;
-	int out_buf_size = sg_param->obsize;
+	unsigned int len = 0, left_size = sg_param->obsize;
 	unsigned int nents = sg_param->nents, i, j = 0;
 
 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
@@ -409,20 +400,15 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
 	phys_cpl->rss_hdr_int.hash_val = 0;
 	to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
 				       sizeof(struct cpl_rx_phys_dsgl));
-
-	for (i = 0; nents; to++) {
-		for (j = 0; j < 8 && nents; j++, nents--) {
-			out_buf_size -= sg_dma_len(sg);
-			to->len[j] = htons(sg_dma_len(sg));
+	for (i = 0; nents && left_size; to++) {
+		for (j = 0; j < 8 && nents && left_size; j++, nents--) {
+			len = min(left_size, sg_dma_len(sg));
+			to->len[j] = htons(len);
 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
+			left_size -= len;
 			sg = sg_next(sg);
 		}
 	}
-	if (out_buf_size) {
-		j--;
-		to--;
-		to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
-	}
 }
 
 static inline int map_writesg_phys_cpl(struct device *dev,
@@ -431,7 +417,7 @@ static inline int map_writesg_phys_cpl(struct device *dev,
 					struct phys_sge_parm *sg_param)
 {
 	if (!sg || !sg_param->nents)
-		return 0;
+		return -EINVAL;
 
 	sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE);
 	if (sg_param->nents == 0) {
@@ -498,6 +484,24 @@ static inline void write_buffer_to_skb(struct sk_buff *skb,
 	}
 }
 
+static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
+{
+	struct adapter *adap = netdev2adap(dev);
+	struct sge_uld_txq_info *txq_info =
+		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
+	struct sge_uld_txq *txq;
+	int ret = 0;
+
+	local_bh_disable();
+	txq = &txq_info->uldtxq[idx];
+	spin_lock(&txq->sendq.lock);
+	if (txq->full)
+		ret = -1;
+	spin_unlock(&txq->sendq.lock);
+	local_bh_enable();
+	return ret;
+}
+
 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 			       struct _key_ctx *key_ctx)
 {
@@ -512,7 +516,60 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 	}
 	return 0;
 }
+static int chcr_sg_ent_in_wr(struct scatterlist *src,
+			     struct scatterlist *dst,
+			     unsigned int minsg,
+			     unsigned int space,
+			     short int *sent,
+			     short int *dent)
+{
+	int srclen = 0, dstlen = 0;
+	int srcsg = minsg, dstsg = 0;
+
+	*sent = 0;
+	*dent = 0;
+	while (src && dst && ((srcsg + 1) <= MAX_SKB_FRAGS) &&
+	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
+		srclen += src->length;
+		srcsg++;
+		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
+		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
+			if (srclen <= dstlen)
+				break;
+			dstlen += dst->length;
+			dst = sg_next(dst);
+			dstsg++;
+		}
+		src = sg_next(src);
+	}
+	*sent = srcsg - minsg;
+	*dent = dstsg;
+	return min(srclen, dstlen);
+}
+
+static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+				u32 flags,
+				struct scatterlist *src,
+				struct scatterlist *dst,
+				unsigned int nbytes,
+				u8 *iv,
+				unsigned short op_type)
+{
+	int err;
+
+	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+	skcipher_request_set_tfm(subreq, cipher);
+	skcipher_request_set_callback(subreq, flags, NULL, NULL);
+	skcipher_request_set_crypt(subreq, src, dst,
+				   nbytes, iv);
+
+	err = op_type ? crypto_skcipher_decrypt(subreq) :
+		crypto_skcipher_encrypt(subreq);
+	skcipher_request_zero(subreq);
+
+	return err;
 
+}
 static inline void create_wreq(struct chcr_context *ctx,
 			       struct chcr_wr *chcr_req,
 			       void *req, struct sk_buff *skb,
@@ -565,69 +622,61 @@ static inline void create_wreq(struct chcr_context *ctx,
  *	@qid: ingress qid where response of this WR should be received.
  *	@op_type:	encryption or decryption
  */
-static struct sk_buff
-*create_cipher_wr(struct ablkcipher_request *req,
-		  unsigned short qid,
-		  unsigned short op_type)
+static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 {
-	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
-	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct chcr_blkcipher_req_ctx *reqctx =
+		ablkcipher_request_ctx(wrparam->req);
 	struct phys_sge_parm sg_param;
 	unsigned int frags = 0, transhdr_len, phys_dsgl;
-	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
-	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
-			GFP_ATOMIC;
-
-	if (!req->info)
-		return ERR_PTR(-EINVAL);
-	reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
-	if (reqctx->dst_nents <= 0) {
-		pr_err("AES:Invalid Destination sg lists\n");
-		return ERR_PTR(-EINVAL);
-	}
-	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
-	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
-		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
-		       ablkctx->enckey_len, req->nbytes, ivsize);
-		return ERR_PTR(-EINVAL);
-	}
+	int error;
+	unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
+	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+			GFP_KERNEL : GFP_ATOMIC;
 
 	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
 
 	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
-	if (!skb)
-		return ERR_PTR(-ENOMEM);
+	if (!skb) {
+		error = -ENOMEM;
+		goto err;
+	}
 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
 	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
 	memset(chcr_req, 0, transhdr_len);
 	chcr_req->sec_cpl.op_ivinsrtofst =
 		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2, 1);
 
-	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+	chcr_req->sec_cpl.pldlen = htonl(ivsize + wrparam->bytes);
 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
 
 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
-	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
 							 ablkctx->ciph_mode,
 							 0, 0, ivsize >> 1);
 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 							  0, 1, phys_dsgl);
 
 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
-	if (op_type == CHCR_DECRYPT_OP) {
+	if ((reqctx->op == CHCR_DECRYPT_OP) &&
+	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
+	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 	} else {
-		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
+		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
+		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
 			       ablkctx->enckey_len);
 		} else {
@@ -642,18 +691,17 @@ static inline void create_wreq(struct chcr_context *ctx,
 	}
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 	sg_param.nents = reqctx->dst_nents;
-	sg_param.obsize = req->nbytes;
-	sg_param.qid = qid;
-	sg_param.align = 1;
-	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
-				 &sg_param))
+	sg_param.obsize =  wrparam->bytes;
+	sg_param.qid = wrparam->qid;
+	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
+				       reqctx->dst, &sg_param);
+	if (error)
 		goto map_fail1;
 
 	skb_set_transport_header(skb, transhdr_len);
-	memcpy(reqctx->iv, req->info, ivsize);
 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
-	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
-	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+	write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
+	create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
 			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
 	reqctx->skb = skb;
@@ -661,27 +709,61 @@ static inline void create_wreq(struct chcr_context *ctx,
 	return skb;
 map_fail1:
 	kfree_skb(skb);
-	return ERR_PTR(-ENOMEM);
+err:
+	return ERR_PTR(error);
+}
+
+static inline int chcr_keyctx_ck_size(unsigned int keylen)
+{
+	int ck_size = 0;
+
+	if (keylen == AES_KEYSIZE_128)
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	else if (keylen == AES_KEYSIZE_192)
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	else if (keylen == AES_KEYSIZE_256)
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	else
+		ck_size = 0;
+
+	return ck_size;
+}
+static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
+				       const u8 *key,
+				       unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	int err = 0;
+
+	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
+				  CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+	tfm->crt_flags |=
+		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
+		CRYPTO_TFM_RES_MASK;
+	return err;
 }
 
-static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
+			       const u8 *key,
 			       unsigned int keylen)
 {
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	unsigned int ck_size, context_size;
 	u16 alignment = 0;
+	int err;
 
-	if (keylen == AES_KEYSIZE_128) {
-		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
-	} else if (keylen == AES_KEYSIZE_192) {
-		alignment = 8;
-		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
-	} else if (keylen == AES_KEYSIZE_256) {
-		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
-	} else {
+	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
+	if (err)
 		goto badkey_err;
-	}
+
+	ck_size = chcr_keyctx_ck_size(keylen);
+	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
 	memcpy(ablkctx->key, key, keylen);
 	ablkctx->enckey_len = keylen;
 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
@@ -693,35 +775,387 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
 	return 0;
 badkey_err:
-	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	ablkctx->enckey_len = 0;
-	return -EINVAL;
+
+	return err;
 }
 
-static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
+static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
+				   const u8 *key,
+				   unsigned int keylen)
 {
-	struct adapter *adap = netdev2adap(dev);
-	struct sge_uld_txq_info *txq_info =
-		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
-	struct sge_uld_txq *txq;
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	unsigned int ck_size, context_size;
+	u16 alignment = 0;
+	int err;
+
+	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
+	if (err)
+		goto badkey_err;
+	ck_size = chcr_keyctx_ck_size(keylen);
+	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
+	memcpy(ablkctx->key, key, keylen);
+	ablkctx->enckey_len = keylen;
+	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
+			keylen + alignment) >> 4;
+
+	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
+						0, 0, context_size);
+	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+
+	return 0;
+badkey_err:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	ablkctx->enckey_len = 0;
+
+	return err;
+}
+
+static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
+				   const u8 *key,
+				   unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	unsigned int ck_size, context_size;
+	u16 alignment = 0;
+	int err;
+
+	if (keylen < CTR_RFC3686_NONCE_SIZE)
+		return -EINVAL;
+	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
+	       CTR_RFC3686_NONCE_SIZE);
+
+	keylen -= CTR_RFC3686_NONCE_SIZE;
+	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
+	if (err)
+		goto badkey_err;
+
+	ck_size = chcr_keyctx_ck_size(keylen);
+	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
+	memcpy(ablkctx->key, key, keylen);
+	ablkctx->enckey_len = keylen;
+	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
+			keylen + alignment) >> 4;
+
+	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
+						0, 0, context_size);
+	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
+
+	return 0;
+badkey_err:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	ablkctx->enckey_len = 0;
+
+	return err;
+}
+static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
+{
+	unsigned int size = AES_BLOCK_SIZE;
+	__be32 *b = (__be32 *)(dstiv + size);
+	u32 c, prev;
+
+	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
+	for (; size >= 4; size -= 4) {
+		prev = be32_to_cpu(*--b);
+		c = prev + add;
+		*b = cpu_to_be32(c);
+		if (prev < c)
+			break;
+		add = 1;
+	}
+
+}
+
+static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
+{
+	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
+	u64 c;
+	u32 temp = be32_to_cpu(*--b);
+
+	temp = ~temp;
+	c = (u64)temp +  1; // Number of blocks that can be processed without overflow
+	if ((bytes / AES_BLOCK_SIZE) > c)
+		bytes = c * AES_BLOCK_SIZE;
+	return bytes;
+}
+
+static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct crypto_cipher *cipher;
+	int ret, i;
+	u8 *key;
+	unsigned int keylen;
+
+	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
+	memcpy(iv, req->info, AES_BLOCK_SIZE);
+
+	if (IS_ERR(cipher)) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	keylen = ablkctx->enckey_len / 2;
+	key = ablkctx->key + keylen;
+	ret = crypto_cipher_setkey(cipher, key, keylen);
+	if (ret)
+		goto out1;
+
+	crypto_cipher_encrypt_one(cipher, iv, iv);
+	for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++)
+		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
+
+	crypto_cipher_decrypt_one(cipher, iv, iv);
+out1:
+	crypto_free_cipher(cipher);
+out:
+	return ret;
+}
+
+static int chcr_update_cipher_iv(struct ablkcipher_request *req,
+				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
 	int ret = 0;
 
-	local_bh_disable();
-	txq = &txq_info->uldtxq[idx];
-	spin_lock(&txq->sendq.lock);
-	if (txq->full)
-		ret = -1;
-	spin_unlock(&txq->sendq.lock);
-	local_bh_enable();
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
+		ctr_add_iv(iv, req->info, (reqctx->processed /
+			   AES_BLOCK_SIZE));
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
+		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
+						AES_BLOCK_SIZE) + 1);
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+		ret = chcr_update_tweak(req, iv);
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
+		if (reqctx->op)
+			sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
+					   16,
+					   reqctx->processed - AES_BLOCK_SIZE);
+		else
+			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
+	}
+
 	return ret;
+
 }
 
-static int chcr_aes_encrypt(struct ablkcipher_request *req)
+/* We need a separate function for the final IV because in rfc3686 the
+ * initial counter starts from 1 and the IV buffer is only 8 bytes, which
+ * remains constant for subsequent update requests.
+ */
+
+static int chcr_final_cipher_iv(struct ablkcipher_request *req,
+				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
+	int ret = 0;
+
+	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
+		ctr_add_iv(iv, req->info, (reqctx->processed /
+			   AES_BLOCK_SIZE));
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+		ret = chcr_update_tweak(req, iv);
+	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
+		if (reqctx->op)
+			sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
+					   16,
+					   reqctx->processed - AES_BLOCK_SIZE);
+		else
+			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
+
+	}
+	return ret;
+
+}
+
+
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+				   unsigned char *input, int err)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	struct sk_buff *skb;
+	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct  cipher_wr_param wrparam;
+	int bytes;
+
+	dma_unmap_sg(&u_ctx->lldi.pdev->dev, reqctx->dst, reqctx->dst_nents,
+		     DMA_FROM_DEVICE);
+
+	if (reqctx->skb) {
+		kfree_skb(reqctx->skb);
+		reqctx->skb = NULL;
+	}
+	if (err)
+		goto complete;
+
+	if (req->nbytes == reqctx->processed) {
+		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
+		goto complete;
+	}
+
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    ctx->tx_qidx))) {
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			err = -EBUSY;
+			goto complete;
+		}
+
+	}
+	wrparam.srcsg = scatterwalk_ffwd(reqctx->srcffwd, req->src,
+				       reqctx->processed);
+	reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, reqctx->dstsg,
+					 reqctx->processed);
+	if (!wrparam.srcsg || !reqctx->dst) {
+		pr_err("Input sg list length less that nbytes\n");
+		err = -EINVAL;
+		goto complete;
+	}
+	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dst, 1,
+				 SPACE_LEFT(ablkctx->enckey_len),
+				 &wrparam.snent, &reqctx->dst_nents);
+	if ((bytes + reqctx->processed) >= req->nbytes)
+		bytes  = req->nbytes - reqctx->processed;
+	else
+		bytes = ROUND_16(bytes);
+	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
+	if (err)
+		goto complete;
+
+	if (unlikely(bytes == 0)) {
+		err = chcr_cipher_fallback(ablkctx->sw_cipher,
+				     req->base.flags,
+				     wrparam.srcsg,
+				     reqctx->dst,
+				     req->nbytes - reqctx->processed,
+				     reqctx->iv,
+				     reqctx->op);
+		goto complete;
+	}
+
+	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    CRYPTO_ALG_SUB_TYPE_CTR)
+		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
+	reqctx->processed += bytes;
+	wrparam.qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
+	wrparam.req = req;
+	wrparam.bytes = bytes;
+	skb = create_cipher_wr(&wrparam);
+	if (IS_ERR(skb)) {
+		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
+		err = PTR_ERR(skb);
+		goto complete;
+	}
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
+	chcr_send_wr(skb);
+	return 0;
+complete:
+	req->base.complete(&req->base, err);
+	return err;
+}
+
+static int process_cipher(struct ablkcipher_request *req,
+				  unsigned short qid,
+				  struct sk_buff **skb,
+				  unsigned short op_type)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+	struct	cipher_wr_param wrparam;
+	int bytes, err = -EINVAL;
+
+	reqctx->newdstsg = NULL;
+	reqctx->processed = 0;
+	if (!req->info)
+		goto error;
+	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
+	    (req->nbytes == 0) ||
+	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
+		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+		       ablkctx->enckey_len, req->nbytes, ivsize);
+		goto error;
+	}
+	wrparam.srcsg = req->src;
+	reqctx->dstsg = req->dst;
+	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
+				 SPACE_LEFT(ablkctx->enckey_len),
+				 &wrparam.snent,
+				 &reqctx->dst_nents);
+	if ((bytes + reqctx->processed) >= req->nbytes)
+		bytes  = req->nbytes - reqctx->processed;
+	else
+		bytes = ROUND_16(bytes);
+	if (unlikely(bytes > req->nbytes))
+		bytes = req->nbytes;
+	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+				  CRYPTO_ALG_SUB_TYPE_CTR) {
+		bytes = adjust_ctr_overflow(req->info, bytes);
+	}
+	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
+	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
+		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
+		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
+				CTR_RFC3686_IV_SIZE);
+
+		/* initialize counter portion of counter block */
+		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+
+	} else {
+
+		memcpy(reqctx->iv, req->info, ivsize);
+	}
+	if (unlikely(bytes == 0)) {
+		err = chcr_cipher_fallback(ablkctx->sw_cipher,
+					   req->base.flags,
+					   req->src,
+					   req->dst,
+					   req->nbytes,
+					   req->info,
+					   op_type);
+		goto error;
+	}
+	reqctx->processed = bytes;
+	reqctx->dst = reqctx->dstsg;
+	reqctx->op = op_type;
+	wrparam.qid = qid;
+	wrparam.req = req;
+	wrparam.bytes = bytes;
+	*skb = create_cipher_wr(&wrparam);
+	if (IS_ERR(*skb)) {
+		err = PTR_ERR(*skb);
+		goto error;
+	}
+
+	return 0;
+error:
+	return err;
+}
+
+static int chcr_aes_encrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
+	struct sk_buff *skb = NULL;
+	int err;
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
 					    ctx->tx_qidx))) {
@@ -729,12 +1163,10 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
 			return -EBUSY;
 	}
 
-	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
+	err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
 			       CHCR_ENCRYPT_OP);
-	if (IS_ERR(skb)) {
-		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
-		return  PTR_ERR(skb);
-	}
+	if (err || !skb)
+		return  err;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
 	chcr_send_wr(skb);
@@ -746,7 +1178,8 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
+	int err;
 
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
 					    ctx->tx_qidx))) {
@@ -754,12 +1187,10 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 			return -EBUSY;
 	}
 
-	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx],
+	 err = process_cipher(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], &skb,
 			       CHCR_DECRYPT_OP);
-	if (IS_ERR(skb)) {
-		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
-		return PTR_ERR(skb);
-	}
+	if (err || !skb)
+		return err;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
 	chcr_send_wr(skb);
@@ -804,10 +1235,48 @@ static int chcr_device_init(struct chcr_context *ctx)
 
 static int chcr_cra_init(struct crypto_tfm *tfm)
 {
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+
+	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ablkctx->sw_cipher)) {
+		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+		return PTR_ERR(ablkctx->sw_cipher);
+	}
 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
 	return chcr_device_init(crypto_tfm_ctx(tfm));
 }
 
+static int chcr_rfc3686_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+
+	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
+	 * cannot be used as the fallback in chcr_handle_cipher_response
+	 */
+	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
+				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ablkctx->sw_cipher)) {
+		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
+		return PTR_ERR(ablkctx->sw_cipher);
+	}
+	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
+	return chcr_device_init(crypto_tfm_ctx(tfm));
+}
+
+
+static void chcr_cra_exit(struct crypto_tfm *tfm)
+{
+	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
+	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
+
+	crypto_free_skcipher(ablkctx->sw_cipher);
+}
+
 static int get_alg_config(struct algo_param *params,
 			  unsigned int auth_size)
 {
@@ -925,8 +1394,8 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 	if (param->sg_len != 0)
 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
 
-	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
-			DUMMY_BYTES, 0);
+	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
+		    hash_size_in_response, 0, DUMMY_BYTES, 0);
 	req_ctx->skb = skb;
 	skb_get(skb);
 	return skb;
@@ -1229,21 +1698,17 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 	return err;
 }
 
-static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
 			       unsigned int key_len)
 {
-	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(cipher);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	unsigned short context_size = 0;
+	int err;
 
-	if ((key_len != (AES_KEYSIZE_128 << 1)) &&
-	    (key_len != (AES_KEYSIZE_256 << 1))) {
-		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
-				     CRYPTO_TFM_RES_BAD_KEY_LEN);
-		ablkctx->enckey_len = 0;
-		return -EINVAL;
-
-	}
+	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
+	if (err)
+		goto badkey_err;
 
 	memcpy(ablkctx->key, key, key_len);
 	ablkctx->enckey_len = key_len;
@@ -1257,6 +1722,11 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 				 0, context_size);
 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
 	return 0;
+badkey_err:
+	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	ablkctx->enckey_len = 0;
+
+	return err;
 }
 
 static int chcr_sha_init(struct ahash_request *areq)
@@ -1513,7 +1983,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	}
 	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
-	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
 		   sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
 	reqctx->skb = skb;
 	skb_get(skb);
@@ -1812,7 +2282,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 
 	skb_set_transport_header(skb, transhdr_len);
 	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
-	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
 		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
 	reqctx->skb = skb;
 	skb_get(skb);
@@ -1951,7 +2421,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	write_sg_to_skb(skb, &frags, req->src, assoclen);
 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
-	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
 			sizeof(struct cpl_rx_phys_dsgl) + dst_size,
 			reqctx->verify);
 	reqctx->skb = skb;
@@ -2565,22 +3035,14 @@ static int chcr_aead_op(struct aead_request *req,
 static struct chcr_alg_template driver_algs[] = {
 	/* AES-CBC */
 	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
 		.is_registered = 0,
 		.alg.crypto = {
 			.cra_name		= "cbc(aes)",
 			.cra_driver_name	= "cbc-aes-chcr",
-			.cra_priority		= CHCR_CRA_PRIORITY,
-			.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-				CRYPTO_ALG_ASYNC,
 			.cra_blocksize		= AES_BLOCK_SIZE,
-			.cra_ctxsize		= sizeof(struct chcr_context)
-				+ sizeof(struct ablk_ctx),
-			.cra_alignmask		= 0,
-			.cra_type		= &crypto_ablkcipher_type,
-			.cra_module		= THIS_MODULE,
 			.cra_init		= chcr_cra_init,
-			.cra_exit		= NULL,
+			.cra_exit		= chcr_cra_exit,
 			.cra_u.ablkcipher	= {
 				.min_keysize	= AES_MIN_KEY_SIZE,
 				.max_keysize	= AES_MAX_KEY_SIZE,
@@ -2592,24 +3054,15 @@ static int chcr_aead_op(struct aead_request *req,
 		}
 	},
 	{
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
 		.is_registered = 0,
 		.alg.crypto =   {
 			.cra_name		= "xts(aes)",
 			.cra_driver_name	= "xts-aes-chcr",
-			.cra_priority		= CHCR_CRA_PRIORITY,
-			.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
-				CRYPTO_ALG_ASYNC,
 			.cra_blocksize		= AES_BLOCK_SIZE,
-			.cra_ctxsize		= sizeof(struct chcr_context) +
-				sizeof(struct ablk_ctx),
-			.cra_alignmask		= 0,
-			.cra_type		= &crypto_ablkcipher_type,
-			.cra_module		= THIS_MODULE,
 			.cra_init		= chcr_cra_init,
 			.cra_exit		= NULL,
-			.cra_u = {
-				.ablkcipher = {
+			.cra_u .ablkcipher = {
 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
 					.ivsize		= AES_BLOCK_SIZE,
@@ -2618,6 +3071,47 @@ static int chcr_aead_op(struct aead_request *req,
 					.decrypt	= chcr_aes_decrypt,
 				}
 			}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
+		.is_registered = 0,
+		.alg.crypto = {
+			.cra_name		= "ctr(aes)",
+			.cra_driver_name	= "ctr-aes-chcr",
+			.cra_blocksize		= 1,
+			.cra_init		= chcr_cra_init,
+			.cra_exit		= chcr_cra_exit,
+			.cra_u.ablkcipher	= {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.ivsize		= AES_BLOCK_SIZE,
+				.setkey		= chcr_aes_ctr_setkey,
+				.encrypt	= chcr_aes_encrypt,
+				.decrypt	= chcr_aes_decrypt,
+			}
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
+			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
+		.is_registered = 0,
+		.alg.crypto = {
+			.cra_name		= "rfc3686(ctr(aes))",
+			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
+			.cra_blocksize		= 1,
+			.cra_init		= chcr_rfc3686_init,
+			.cra_exit		= chcr_cra_exit,
+			.cra_u.ablkcipher	= {
+				.min_keysize	= AES_MIN_KEY_SIZE +
+					CTR_RFC3686_NONCE_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE +
+					CTR_RFC3686_NONCE_SIZE,
+				.ivsize		= CTR_RFC3686_IV_SIZE,
+				.setkey		= chcr_aes_rfc3686_setkey,
+				.encrypt	= chcr_aes_encrypt,
+				.decrypt	= chcr_aes_decrypt,
+				.geniv          = "seqiv",
+			}
 		}
 	},
 	/* SHA */
@@ -2999,6 +3493,18 @@ static int chcr_register_alg(void)
 			continue;
 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			driver_algs[i].alg.crypto.cra_priority =
+				CHCR_CRA_PRIORITY;
+			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
+			driver_algs[i].alg.crypto.cra_flags =
+				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+				CRYPTO_ALG_NEED_FALLBACK;
+			driver_algs[i].alg.crypto.cra_ctxsize =
+				sizeof(struct chcr_context) +
+				sizeof(struct ablk_ctx);
+			driver_algs[i].alg.crypto.cra_alignmask = 0;
+			driver_algs[i].alg.crypto.cra_type =
+				&crypto_ablkcipher_type;
 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
 			name = driver_algs[i].alg.crypto.cra_driver_name;
 			break;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 9894c7b..583008d 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -219,9 +219,26 @@
 #define MAX_NK 8
 #define CRYPTO_MAX_IMM_TX_PKT_LEN 256
 #define MAX_WR_SIZE			512
+#define ROUND_16(bytes)		((bytes) & 0xFFFFFFF0)
+#define MAX_DSGL_ENT			32
+#define MAX_DIGEST_SKB_SGE	(MAX_SKB_FRAGS - 2)
+#define MIN_CIPHER_SG			1 /* IV */
 #define MIN_AUTH_SG			2 /*IV + AAD*/
 #define MIN_GCM_SG			2 /* IV + AAD*/
+#define MIN_DIGEST_SG			1 /*Partial Buffer*/
 #define MIN_CCM_SG			3 /*IV+AAD+B0*/
+#define SPACE_LEFT(len) \
+	((MAX_WR_SIZE - WR_MIN_LEN - (len)))
+
+unsigned int sgl_ent_len[] = {0, 0, 16, 24, 40,
+				48, 64, 72, 88,
+				96, 112, 120, 136,
+				144, 160, 168, 184,
+				192};
+unsigned int dsgl_ent_len[] = {0, 32, 32, 48, 48, 64, 64, 80, 80,
+				112, 112, 128, 128, 144, 144, 160, 160,
+				192, 192, 208, 208, 224, 224, 240, 240,
+				272, 272, 288, 288, 304, 304, 320, 320};
 
 struct algo_param {
 	unsigned int auth_mode;
@@ -239,6 +256,14 @@ struct hash_wr_param {
 	u64 scmd1;
 };
 
+struct cipher_wr_param {
+	struct ablkcipher_request *req;
+	struct scatterlist *srcsg;
+	char *iv;
+	int bytes;
+	short int snent;
+	unsigned short qid;
+};
 enum {
 	AES_KEYLENGTH_128BIT = 128,
 	AES_KEYLENGTH_192BIT = 192,
@@ -293,7 +318,6 @@ struct phys_sge_parm {
 	unsigned int nents;
 	unsigned int obsize;
 	unsigned short qid;
-	unsigned char align;
 };
 
 struct crypto_result {
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index c28e018..a6c938a 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -115,7 +115,6 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
 	/* call completion callback with failure status */
 	if (req) {
 		error_status = chcr_handle_resp(req, input, error_status);
-		req->complete(req, error_status);
 	} else {
 		pr_err("Incorrect request address from the firmware\n");
 		return -EFAULT;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index cd0c35a1..ddfb2c9 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -53,6 +53,9 @@
 #define MAC_ERROR_BIT		0
 #define CHK_MAC_ERR_BIT(x)	(((x) >> MAC_ERROR_BIT) & 1)
 #define MAX_SALT                4
+#define WR_MIN_LEN (sizeof(struct chcr_wr) + \
+		    sizeof(struct cpl_rx_phys_dsgl) + \
+		    sizeof(struct ulptx_sgl))
 
 #define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
 
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 5b2fabb..b1b7944 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -139,6 +139,9 @@
 #define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309    0x06000000
 #define CRYPTO_ALG_SUB_TYPE_AEAD_NULL       0x07000000
 #define CRYPTO_ALG_SUB_TYPE_CTR             0x08000000
+#define CRYPTO_ALG_SUB_TYPE_CTR_RFC3686     0x09000000
+#define CRYPTO_ALG_SUB_TYPE_XTS		    0x0a000000
+#define CRYPTO_ALG_SUB_TYPE_CBC		    0x0b000000
 #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
 			      CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
 
@@ -150,10 +153,12 @@
 /* Aligned to 128 bit boundary */
 
 struct ablk_ctx {
+	struct crypto_skcipher *sw_cipher;
 	__be32 key_ctx_hdr;
 	unsigned int enckey_len;
-	u8 key[CHCR_AES_MAX_KEY_LEN];
 	unsigned char ciph_mode;
+	u8 key[CHCR_AES_MAX_KEY_LEN];
+	u8 nonce[4];
 	u8 rrkey[AES_MAX_KEY_SIZE];
 };
 struct chcr_aead_reqctx {
@@ -233,7 +238,14 @@ struct chcr_ahash_req_ctx {
 
 struct chcr_blkcipher_req_ctx {
 	struct sk_buff *skb;
-	unsigned int dst_nents;
+	struct scatterlist srcffwd[2];
+	struct scatterlist dstffwd[2];
+	struct scatterlist *dstsg;
+	struct scatterlist *dst;
+	struct scatterlist *newdstsg;
+	unsigned int processed;
+	unsigned int op;
+	short int dst_nents;
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
 };
 
@@ -275,5 +287,6 @@ static int chcr_aead_op(struct aead_request *req_base,
 			  int size,
 			  create_wr_t create_wr_fn);
 static inline int get_aead_subtype(struct crypto_aead *aead);
-
+static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
+				   unsigned char *input, int err);
 #endif /* __CHCR_CRYPTO_H__ */
-- 
1.8.3.1


* [PATCH 6/9] chcr - Add debug counters
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
                   ` (4 preceding siblings ...)
  2017-06-15  7:13 ` [PATCH 5/9] crypto:chcr - Add ctr mode and process large sg entries for cipher Harsh Jain
@ 2017-06-15  7:13 ` Harsh Jain
  2017-06-15  7:13 ` [PATCH 7/9] MAINTAINERS:Add maintainer for chelsio crypto driver Harsh Jain
                   ` (3 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC (permalink / raw)
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain

Count the types of operations completed by the HW.
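
For reference, the counters follow the usual lock-free statistics
pattern: hot-path writers only ever atomic_inc() their counter, and the
debugfs reader samples each one with atomic_read() without taking a
lock, so a reader can at worst miss an in-flight update. A minimal
userspace sketch of the same idea (names are illustrative, not taken
from the driver):

    #include <stdatomic.h>
    #include <stdio.h>

    struct stats {
            atomic_uint cipher_rqst;        /* bumped on submit */
            atomic_uint complete;           /* bumped on completion */
    };

    static struct stats st;

    static void on_cipher_request(void)
    {
            /* hot path: a single relaxed increment, no locking */
            atomic_fetch_add_explicit(&st.cipher_rqst, 1,
                                      memory_order_relaxed);
    }

    static void show(void)
    {
            /* reader: lock-free snapshot, may lag by one update */
            printf("Cipher Ops: %10u\n",
                   atomic_load_explicit(&st.cipher_rqst,
                                        memory_order_relaxed));
    }

    int main(void)
    {
            on_cipher_request();
            show();
            return 0;
    }

Once applied, the counters are typically readable through the "crypto"
node registered in t4_setup_debugfs() below, e.g.
cat /sys/kernel/debug/cxgb4/<device>/crypto.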

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
 drivers/crypto/chelsio/chcr_algo.c                 | 16 +++++++++-
 drivers/crypto/chelsio/chcr_core.c                 |  2 ++
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h         |  1 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 35 ++++++++++++++++++++++
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h     |  9 ++++++
 5 files changed, 62 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 03b817f..2f388af 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -154,6 +154,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct chcr_req_ctx ctx_req;
 	unsigned int digestsize, updated_digestsize;
+	struct adapter *adap = padap(ctx->dev);
 
 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
 	case CRYPTO_ALG_TYPE_AEAD:
@@ -207,6 +208,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 		ctx_req.req.ahash_req->base.complete(req, err);
 		break;
 	}
+	atomic_inc(&adap->chcr_stats.complete);
 	return err;
 }
 
@@ -639,6 +641,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 	unsigned int ivsize = AES_BLOCK_SIZE, kctx_len;
 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 			GFP_KERNEL : GFP_ATOMIC;
+	struct adapter *adap = padap(ctx->dev);
 
 	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
 
@@ -701,6 +704,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
 	skb_set_transport_header(skb, transhdr_len);
 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
 	write_sg_to_skb(skb, &frags, wrparam->srcsg, wrparam->bytes);
+	atomic_inc(&adap->chcr_stats.cipher_rqst);
 	create_wreq(ctx, chcr_req, &(wrparam->req->base), skb, kctx_len, 0, 1,
 			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl,
 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
@@ -1337,6 +1341,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 	u8 hash_size_in_response = 0;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
+	struct adapter *adap = padap(ctx->dev);
 
 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
 	kctx_len = param->alg_prm.result_size + iopad_alignment;
@@ -1393,7 +1398,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 				    param->bfr_len);
 	if (param->sg_len != 0)
 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
-
+	atomic_inc(&adap->chcr_stats.digest_rqst);
 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len,
 		    hash_size_in_response, 0, DUMMY_BYTES, 0);
 	req_ctx->skb = skb;
@@ -1873,6 +1878,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	int null = 0;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
+	struct adapter *adap = padap(ctx->dev);
 
 	if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
 		goto err;
@@ -1911,6 +1917,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 			T6_MAX_AAD_SIZE,
 			transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
 				op_type)) {
+		atomic_inc(&adap->chcr_stats.fallback);
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
@@ -1983,6 +1990,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	}
 	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
+	atomic_inc(&adap->chcr_stats.cipher_rqst);
 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
 		   sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
 	reqctx->skb = skb;
@@ -2206,6 +2214,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	int error = -EINVAL, src_nent;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
+	struct adapter *adap = padap(ctx->dev);
 
 
 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
@@ -2245,6 +2254,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 			    T6_MAX_AAD_SIZE - 18,
 			    transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
 			    op_type)) {
+		atomic_inc(&adap->chcr_stats.fallback);
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
 
@@ -2282,6 +2292,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 
 	skb_set_transport_header(skb, transhdr_len);
 	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+	atomic_inc(&adap->chcr_stats.aead_rqst);
 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
 		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
 	reqctx->skb = skb;
@@ -2316,6 +2327,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	int error = -EINVAL, src_nent;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
+	struct adapter *adap = padap(ctx->dev);
 
 	/* validate key size */
 	if (aeadctx->enckey_len == 0)
@@ -2355,6 +2367,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 			    T6_MAX_AAD_SIZE,
 			    transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
 			    op_type)) {
+		atomic_inc(&adap->chcr_stats.fallback);
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
@@ -2421,6 +2434,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	write_sg_to_skb(skb, &frags, req->src, assoclen);
 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
 	write_sg_to_skb(skb, &frags, src, req->cryptlen);
+	atomic_inc(&adap->chcr_stats.aead_rqst);
 	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
 			sizeof(struct cpl_rx_phys_dsgl) + dst_size,
 			reqctx->verify);
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index a6c938a..5ae659a 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -100,6 +100,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
 	struct cpl_fw6_pld *fw6_pld;
 	u32 ack_err_status = 0;
 	int error_status = 0;
+	struct adapter *adap = padap(dev);
 
 	fw6_pld = (struct cpl_fw6_pld *)input;
 	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
@@ -111,6 +112,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
 		if (CHK_MAC_ERR_BIT(ack_err_status) ||
 		    CHK_PAD_ERR_BIT(ack_err_status))
 			error_status = -EBADMSG;
+		atomic_inc(&adap->chcr_stats.error);
 	}
 	/* call completion callback with failure status */
 	if (req) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index e88c180..b32eb8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -868,6 +868,7 @@ struct adapter {
 
 	/* TC u32 offload */
 	struct cxgb4_tc_u32_table *tc_u32;
+	struct chcr_stats_debug chcr_stats;
 };
 
 /* Support for "sched-class" command to allow a TX Scheduling Class to be
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 1fa34b0..77a59d7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3069,6 +3069,40 @@ static int meminfo_open(struct inode *inode, struct file *file)
 	.llseek  = seq_lseek,
 	.release = single_release,
 };
+
+static int chcr_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adap = seq->private;
+
+	seq_puts(seq, "Chelsio Crypto Accelerator Stats\n");
+	seq_printf(seq, "Cipher Ops: %10u\n",
+		   atomic_read(&adap->chcr_stats.cipher_rqst));
+	seq_printf(seq, "Digest Ops: %10u\n",
+		   atomic_read(&adap->chcr_stats.digest_rqst));
+	seq_printf(seq, "Aead Ops: %10u\n",
+		   atomic_read(&adap->chcr_stats.aead_rqst));
+	seq_printf(seq, "Completion: %10u\n",
+		   atomic_read(&adap->chcr_stats.complete));
+	seq_printf(seq, "Error: %10u\n",
+		   atomic_read(&adap->chcr_stats.error));
+	seq_printf(seq, "Fallback: %10u\n",
+		   atomic_read(&adap->chcr_stats.fallback));
+	return 0;
+}
+
+
+static int chcr_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, chcr_show, inode->i_private);
+}
+
+static const struct file_operations chcr_stats_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = chcr_stats_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
 /* Add an array of Debug FS files.
  */
 void add_debugfs_files(struct adapter *adap,
@@ -3143,6 +3177,7 @@ int t4_setup_debugfs(struct adapter *adap)
 		{ "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
 		{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
 		{ "meminfo", &meminfo_fops, S_IRUSR, 0 },
+		{ "crypto", &chcr_stats_debugfs_fops, S_IRUSR, 0 },
 	};
 
 	/* Debug FS nodes common to all T5 and later adapters.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6e74040..35f4d9f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -275,6 +275,15 @@ struct cxgb4_virt_res {                      /* virtualized HW resources */
 	unsigned int ncrypto_fc;
 };
 
+struct chcr_stats_debug {
+	atomic_t cipher_rqst;
+	atomic_t digest_rqst;
+	atomic_t aead_rqst;
+	atomic_t complete;
+	atomic_t error;
+	atomic_t fallback;
+};
+
 #define OCQ_WIN_OFFSET(pdev, vres) \
 	(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
 
-- 
1.8.3.1


* [PATCH 7/9] MAINTAINERS:Add maintainer for chelsio crypto driver
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
                   ` (5 preceding siblings ...)
  2017-06-15  7:13 ` [PATCH 6/9] chcr - Add debug counters Harsh Jain
@ 2017-06-15  7:13 ` Harsh Jain
  2017-06-15  7:13 ` [PATCH 8/9] crypto: chcr - Ensure Destination sg entry size less than 2k Harsh Jain
                   ` (2 subsequent siblings)
  9 siblings, 0 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC (permalink / raw)
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain

Add myself as maintainer for chcr.

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
 MAINTAINERS | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/MAINTAINERS b/MAINTAINERS
index 1f20176..504dc65 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3706,6 +3706,13 @@ S:	Supported
 F:	drivers/infiniband/hw/cxgb4/
 F:	include/uapi/rdma/cxgb4-abi.h
 
+CXGB4 CRYPTO DRIVER (chcr)
+M:	Harsh Jain <harsh@chelsio.com>
+L:	linux-crypto@vger.kernel.org
+W:	http://www.chelsio.com
+S:	Supported
+F:	drivers/crypto/chelsio
+
 CXGB4VF ETHERNET DRIVER (CXGB4VF)
 M:	Casey Leedom <leedom@chelsio.com>
 L:	netdev@vger.kernel.org
-- 
1.8.3.1


* [PATCH 8/9] crypto: chcr - Ensure Destination sg entry size less than 2k
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
                   ` (6 preceding siblings ...)
  2017-06-15  7:13 ` [PATCH 7/9] MAINTAINERS:Add maintainer for chelsio crypto driver Harsh Jain
@ 2017-06-15  7:13 ` Harsh Jain
  2017-06-15  7:13 ` [PATCH 9/9] crypto: chcr - Select device in Round Robin fashion Harsh Jain
  2017-06-20  3:41 ` [PATCH 0/9] Bug fixes and ctr mode of operation Herbert Xu
  9 siblings, 0 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC (permalink / raw)
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain

Allocate a new sg list when the received destination sg list contains
an entry larger than 2k.
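
The splitting mirrors is_newsg()/alloc_new_sg() below: any entry longer
than CHCR_SG_SIZE (2048 bytes) forces allocation of a new list in which
that entry is cut into consecutive chunks of at most 2k each. A
standalone sketch of just the entry-count computation, using plain
length arrays instead of scatterlists (hypothetical helper, not driver
code):

    #include <stdio.h>

    #define CHCR_SG_SIZE 2048
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Return 1 when any entry exceeds CHCR_SG_SIZE and store the
     * number of entries the split list needs, as is_newsg() does. */
    static int needs_split(const unsigned int *lens, int n,
                           unsigned int *newents)
    {
            unsigned int nents = 0;
            int split = 0;

            for (int i = 0; i < n; i++) {
                    if (lens[i] > CHCR_SG_SIZE)
                            split = 1;
                    nents += DIV_ROUND_UP(lens[i], CHCR_SG_SIZE);
            }
            *newents = nents;
            return split;
    }

    int main(void)
    {
            unsigned int lens[] = { 4096, 512, 3000 };
            unsigned int nents;

            if (needs_split(lens, 3, &nents))
                    printf("split into %u entries of <= 2k\n", nents);
            return 0;   /* prints: split into 5 entries of <= 2k */
    }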

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
 drivers/crypto/chelsio/chcr_algo.c   | 153 +++++++++++++++++++++++++++++++----
 drivers/crypto/chelsio/chcr_crypto.h |   6 ++
 2 files changed, 142 insertions(+), 17 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 2f388af..9a84ffa 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -166,6 +166,8 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 			kfree_skb(ctx_req.ctx.reqctx->skb);
 			ctx_req.ctx.reqctx->skb = NULL;
 		}
+		free_new_sg(ctx_req.ctx.reqctx->newdstsg);
+		ctx_req.ctx.reqctx->newdstsg = NULL;
 		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
 			chcr_verify_tag(ctx_req.req.aead_req, input,
 					&err);
@@ -1068,6 +1070,8 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 	chcr_send_wr(skb);
 	return 0;
 complete:
+	free_new_sg(reqctx->newdstsg);
+	reqctx->newdstsg = NULL;
 	req->base.complete(&req->base, err);
 	return err;
 }
@@ -1083,7 +1087,7 @@ static int process_cipher(struct ablkcipher_request *req,
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	struct	cipher_wr_param wrparam;
-	int bytes, err = -EINVAL;
+	int bytes, nents, err = -EINVAL;
 
 	reqctx->newdstsg = NULL;
 	reqctx->processed = 0;
@@ -1097,7 +1101,14 @@ static int process_cipher(struct ablkcipher_request *req,
 		goto error;
 	}
 	wrparam.srcsg = req->src;
-	reqctx->dstsg = req->dst;
+	if (is_newsg(req->dst, &nents)) {
+		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
+		if (IS_ERR(reqctx->newdstsg))
+			return PTR_ERR(reqctx->newdstsg);
+		reqctx->dstsg = reqctx->newdstsg;
+	} else {
+		reqctx->dstsg = req->dst;
+	}
 	bytes = chcr_sg_ent_in_wr(wrparam.srcsg, reqctx->dstsg, MIN_CIPHER_SG,
 				 SPACE_LEFT(ablkctx->enckey_len),
 				 &wrparam.snent,
@@ -1150,6 +1161,8 @@ static int process_cipher(struct ablkcipher_request *req,
 
 	return 0;
 error:
+	free_new_sg(reqctx->newdstsg);
+	reqctx->newdstsg = NULL;
 	return err;
 }
 
@@ -1808,6 +1821,63 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
 	}
 }
 
+static int is_newsg(struct scatterlist *sgl, unsigned int *newents)
+{
+	int nents = 0;
+	int ret = 0;
+
+	while (sgl) {
+		if (sgl->length > CHCR_SG_SIZE)
+			ret = 1;
+		nents += DIV_ROUND_UP(sgl->length, CHCR_SG_SIZE);
+		sgl = sg_next(sgl);
+	}
+	*newents = nents;
+	return ret;
+}
+
+static inline void free_new_sg(struct scatterlist *sgl)
+{
+	kfree(sgl);
+}
+
+static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
+				       unsigned int nents)
+{
+	struct scatterlist *newsg, *sg;
+	int i, len, processed = 0;
+	struct page *spage;
+	int offset;
+
+	newsg = kmalloc_array(nents, sizeof(struct scatterlist), GFP_KERNEL);
+	if (!newsg)
+		return ERR_PTR(-ENOMEM);
+	sg = newsg;
+	sg_init_table(sg, nents);
+	offset = sgl->offset;
+	spage = sg_page(sgl);
+	for (i = 0; i < nents; i++) {
+		len = min_t(u32, sgl->length - processed, CHCR_SG_SIZE);
+		sg_set_page(sg, spage, len, offset);
+		processed += len;
+		offset += len;
+		if (offset >= PAGE_SIZE) {
+			offset = offset % PAGE_SIZE;
+			spage++;
+		}
+		if (processed == sgl->length) {
+			processed = 0;
+			sgl = sg_next(sgl);
+			if (!sgl)
+				break;
+			spage = sg_page(sgl);
+			offset = sgl->offset;
+		}
+		sg = sg_next(sg);
+	}
+	return newsg;
+}
+
 static int chcr_copy_assoc(struct aead_request *req,
 				struct chcr_aead_ctx *ctx)
 {
@@ -1870,7 +1940,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
-	unsigned int   kctx_len = 0;
+	unsigned int   kctx_len = 0, nents;
 	unsigned short stop_offset = 0;
 	unsigned int  assoclen = req->assoclen;
 	unsigned int  authsize = crypto_aead_authsize(tfm);
@@ -1880,7 +1950,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 		GFP_ATOMIC;
 	struct adapter *adap = padap(ctx->dev);
 
-	if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
+	reqctx->newdstsg = NULL;
+	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
+						   authsize);
+	if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
 		goto err;
 
 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
@@ -1889,14 +1962,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	if (src_nent < 0)
 		goto err;
 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
-	reqctx->dst = src;
 
 	if (req->src != req->dst) {
 		error = chcr_copy_assoc(req, aeadctx);
 		if (error)
 			return ERR_PTR(error);
-		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
-					       req->assoclen);
+	}
+	if (dst_size && is_newsg(req->dst, &nents)) {
+		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
+		if (IS_ERR(reqctx->newdstsg))
+			return ERR_CAST(reqctx->newdstsg);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
+					       reqctx->newdstsg, req->assoclen);
+	} else {
+		if (req->src == req->dst)
+			reqctx->dst = src;
+		else
+			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
+						       req->dst, req->assoclen);
 	}
 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
 		null = 1;
@@ -1918,6 +2001,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 			transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
 				op_type)) {
 		atomic_inc(&adap->chcr_stats.fallback);
+		free_new_sg(reqctx->newdstsg);
+		reqctx->newdstsg = NULL;
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
@@ -2001,6 +2086,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 	/* ivmap_fail: */
 	kfree_skb(skb);
 err:
+	free_new_sg(reqctx->newdstsg);
+	reqctx->newdstsg = NULL;
 	return ERR_PTR(error);
 }
 
@@ -2208,7 +2295,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 	struct phys_sge_parm sg_param;
 	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
-	unsigned int dst_size = 0, kctx_len;
+	unsigned int dst_size = 0, kctx_len, nents;
 	unsigned int sub_type;
 	unsigned int authsize = crypto_aead_authsize(tfm);
 	int error = -EINVAL, src_nent;
@@ -2216,7 +2303,9 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 		GFP_ATOMIC;
 	struct adapter *adap = padap(ctx->dev);
 
-
+	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
+						   authsize);
+	reqctx->newdstsg = NULL;
 	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
 		goto err;
 	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
@@ -2225,16 +2314,25 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 
 	sub_type = get_aead_subtype(tfm);
 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
-	reqctx->dst = src;
-
 	if (req->src != req->dst) {
 		error = chcr_copy_assoc(req, aeadctx);
 		if (error) {
 			pr_err("AAD copy to destination buffer fails\n");
 			return ERR_PTR(error);
 		}
-		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
-					       req->assoclen);
+	}
+	if (dst_size && is_newsg(req->dst, &nents)) {
+		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
+		if (IS_ERR(reqctx->newdstsg))
+			return ERR_CAST(reqctx->newdstsg);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
+					       reqctx->newdstsg, req->assoclen);
+	} else {
+		if (req->src == req->dst)
+			reqctx->dst = src;
+		else
+			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
+						       req->dst, req->assoclen);
 	}
 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
@@ -2255,6 +2353,8 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 			    transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
 			    op_type)) {
 		atomic_inc(&adap->chcr_stats.fallback);
+		free_new_sg(reqctx->newdstsg);
+		reqctx->newdstsg = NULL;
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
 
@@ -2301,6 +2401,8 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
 dstmap_fail:
 	kfree_skb(skb);
 err:
+	free_new_sg(reqctx->newdstsg);
+	reqctx->newdstsg = NULL;
 	return ERR_PTR(error);
 }
 
@@ -2321,7 +2423,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	struct scatterlist *src;
 	unsigned int frags = 0, transhdr_len;
 	unsigned int ivsize = AES_BLOCK_SIZE;
-	unsigned int dst_size = 0, kctx_len, assoclen = req->assoclen;
+	unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
 	unsigned char tag_offset = 0;
 	unsigned int authsize = crypto_aead_authsize(tfm);
 	int error = -EINVAL, src_nent;
@@ -2329,6 +2431,9 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		GFP_ATOMIC;
 	struct adapter *adap = padap(ctx->dev);
 
+	reqctx->newdstsg = NULL;
+	dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
+						    authsize);
 	/* validate key size */
 	if (aeadctx->enckey_len == 0)
 		goto err;
@@ -2340,15 +2445,25 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 		goto err;
 
 	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
-	reqctx->dst = src;
 	if (req->src != req->dst) {
 		error = chcr_copy_assoc(req, aeadctx);
 		if (error)
 			return	ERR_PTR(error);
-		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
-					       assoclen);
 	}
 
+	if (dst_size && is_newsg(req->dst, &nents)) {
+		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
+		if (IS_ERR(reqctx->newdstsg))
+			return ERR_CAST(reqctx->newdstsg);
+		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
+					       reqctx->newdstsg, assoclen);
+	} else {
+		if (req->src == req->dst)
+			reqctx->dst = src;
+		else
+			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
+						       req->dst, assoclen);
+	}
 
 	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
 					     (op_type ? -authsize : authsize));
@@ -2368,6 +2483,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 			    transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
 			    op_type)) {
 		atomic_inc(&adap->chcr_stats.fallback);
+		free_new_sg(reqctx->newdstsg);
+		reqctx->newdstsg = NULL;
 		return ERR_PTR(chcr_aead_fallback(req, op_type));
 	}
 	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
@@ -2446,6 +2563,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
 	/* ivmap_fail: */
 	kfree_skb(skb);
 err:
+	free_new_sg(reqctx->newdstsg);
+	reqctx->newdstsg = NULL;
 	return ERR_PTR(error);
 }
 
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index b1b7944..a4f95b0 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -149,6 +149,7 @@
 
 #define CHCR_HASH_MAX_BLOCK_SIZE_64  64
 #define CHCR_HASH_MAX_BLOCK_SIZE_128 128
+#define CHCR_SG_SIZE 2048
 
 /* Aligned to 128 bit boundary */
 
@@ -164,6 +165,7 @@ struct ablk_ctx {
 struct chcr_aead_reqctx {
 	struct	sk_buff	*skb;
 	struct scatterlist *dst;
+	struct scatterlist *newdstsg;
 	struct scatterlist srcffwd[2];
 	struct scatterlist dstffwd[2];
 	short int dst_nents;
@@ -287,6 +289,10 @@ static int chcr_aead_op(struct aead_request *req_base,
 			  int size,
 			  create_wr_t create_wr_fn);
 static inline int get_aead_subtype(struct crypto_aead *aead);
+static int is_newsg(struct scatterlist *sgl, unsigned int *newents);
+static struct scatterlist *alloc_new_sg(struct scatterlist *sgl,
+					unsigned int nents);
+static inline void free_new_sg(struct scatterlist *sgl);
 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 				   unsigned char *input, int err);
 #endif /* __CHCR_CRYPTO_H__ */
-- 
1.8.3.1


* [PATCH 9/9] crypto: chcr - Select device in Round Robin fashion
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
                   ` (7 preceding siblings ...)
  2017-06-15  7:13 ` [PATCH 8/9] crypto: chcr - Ensure Destination sg entry size less than 2k Harsh Jain
@ 2017-06-15  7:13 ` Harsh Jain
  2017-06-20  3:41 ` [PATCH 0/9] Bug fixes and ctr mode of operation Herbert Xu
  9 siblings, 0 replies; 11+ messages in thread
From: Harsh Jain @ 2017-06-15  7:13 UTC (permalink / raw)
  To: herbert, linux-crypto, netdev, ganeshgr; +Cc: Harsh Jain, Atul Gupta

When multiple devices are present in the system, select the device
for crypto operations in a round-robin fashion.
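
The scheme is a plain round-robin cursor: a module-level pointer
remembers the last uld_ctx handed out and, under dev_mutex, advances to
the next list entry, wrapping back to the head at the tail. A minimal
sketch of such a cursor over an array (illustrative only; the driver
walks a linked list and serializes the update with a mutex):

    #include <stdio.h>

    /* Return the current device index and advance the cursor,
     * wrapping at ndev, the way assign_chcr_device() walks
     * uld_ctx_list. */
    static int next_dev(int *cursor, int ndev)
    {
            int picked = *cursor;

            *cursor = (*cursor + 1) % ndev;
            return picked;
    }

    int main(void)
    {
            int cursor = 0;

            for (int i = 0; i < 5; i++)
                    printf("request %d -> device %d\n",
                           i, next_dev(&cursor, 3));
            return 0;
    }

Since a tfm context keeps the device chosen at chcr_device_init() time,
ordering within a session is preserved while new sessions still spread
across adapters.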

Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
Reviewed-by: Ganesh Goudar <ganeshgr@chelsio.com>
---
 drivers/crypto/chelsio/chcr_algo.c             |  8 ++--
 drivers/crypto/chelsio/chcr_core.c             | 53 ++++++++++++++++++--------
 drivers/crypto/chelsio/chcr_core.h             |  2 +-
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c |  1 +
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h |  1 +
 5 files changed, 44 insertions(+), 21 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 9a84ffa..aa4e5b8 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1216,7 +1216,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 
 static int chcr_device_init(struct chcr_context *ctx)
 {
-	struct uld_ctx *u_ctx;
+	struct uld_ctx *u_ctx = NULL;
 	struct adapter *adap;
 	unsigned int id;
 	int txq_perchan, txq_idx, ntxq;
@@ -1224,12 +1224,12 @@ static int chcr_device_init(struct chcr_context *ctx)
 
 	id = smp_processor_id();
 	if (!ctx->dev) {
-		err = assign_chcr_device(&ctx->dev);
-		if (err) {
+		u_ctx = assign_chcr_device();
+		if (!u_ctx) {
 			pr_err("chcr device assignment fails\n");
 			goto out;
 		}
-		u_ctx = ULD_CTX(ctx);
+		ctx->dev = u_ctx->dev;
 		adap = padap(ctx->dev);
 		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
 				    adap->vres.ncrypto_fc);
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 5ae659a..b6dd9cb 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -29,6 +29,7 @@
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 static atomic_t dev_count;
+static struct uld_ctx *ctx_rr;
 
 typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
 static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
@@ -49,25 +50,28 @@
 	.rx_handler = chcr_uld_rx_handler,
 };
 
-int assign_chcr_device(struct chcr_dev **dev)
+struct uld_ctx *assign_chcr_device(void)
 {
-	struct uld_ctx *u_ctx;
-	int ret = -ENXIO;
+	struct uld_ctx *u_ctx = NULL;
 
 	/*
-	 * Which device to use if multiple devices are available TODO
-	 * May be select the device based on round robin. One session
-	 * must go to the same device to maintain the ordering.
+	 * When multiple devices are present in the system, select
+	 * the device for crypto operations in round-robin fashion.
+	 * Note that one session must keep using the same device to
+	 * maintain request-response ordering.
 	 */
-	mutex_lock(&dev_mutex); /* TODO ? */
-	list_for_each_entry(u_ctx, &uld_ctx_list, entry)
-		if (u_ctx->dev) {
-			*dev = u_ctx->dev;
-			ret = 0;
-			break;
+	mutex_lock(&dev_mutex);
+	if (!list_empty(&uld_ctx_list)) {
+		u_ctx = ctx_rr;
+		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+			ctx_rr = list_first_entry(&uld_ctx_list,
+						  struct uld_ctx,
+						  entry);
+		else
+			ctx_rr = list_next_entry(ctx_rr, entry);
 	}
 	mutex_unlock(&dev_mutex);
-	return ret;
+	return u_ctx;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -82,11 +86,27 @@ static int chcr_dev_add(struct uld_ctx *u_ctx)
 	u_ctx->dev = dev;
 	dev->u_ctx = u_ctx;
 	atomic_inc(&dev_count);
+	mutex_lock(&dev_mutex);
+	list_add_tail(&u_ctx->entry, &uld_ctx_list);
+	if (!ctx_rr)
+		ctx_rr = u_ctx;
+	mutex_unlock(&dev_mutex);
 	return 0;
 }
 
 static int chcr_dev_remove(struct uld_ctx *u_ctx)
 {
+	if (ctx_rr == u_ctx) {
+		if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+			ctx_rr = list_first_entry(&uld_ctx_list,
+						  struct uld_ctx,
+						  entry);
+		else
+			ctx_rr = list_next_entry(ctx_rr, entry);
+	}
+	list_del(&u_ctx->entry);
+	if (list_empty(&uld_ctx_list))
+		ctx_rr = NULL;
 	kfree(u_ctx->dev);
 	u_ctx->dev = NULL;
 	atomic_dec(&dev_count);
@@ -139,10 +159,11 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
 		u_ctx = ERR_PTR(-ENOMEM);
 		goto out;
 	}
+	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) {
+		u_ctx = ERR_PTR(-ENOMEM);
+		goto out;
+	}
 	u_ctx->lldi = *lld;
-	mutex_lock(&dev_mutex);
-	list_add_tail(&u_ctx->entry, &uld_ctx_list);
-	mutex_unlock(&dev_mutex);
 out:
 	return u_ctx;
 }
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index ddfb2c9..c9a19b2 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -89,7 +89,7 @@ struct uld_ctx {
 	struct chcr_dev *dev;
 };
 
-int assign_chcr_device(struct chcr_dev **dev);
+struct uld_ctx *assign_chcr_device(void);
 int chcr_send_wr(struct sk_buff *skb);
 int start_crypto(void);
 int stop_crypto(void);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index d0868c2..ec53fe9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -642,6 +642,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
 	lld->sge_ingpadboundary = adap->sge.fl_align;
 	lld->sge_egrstatuspagesize = adap->sge.stat_len;
 	lld->sge_pktshift = adap->sge.pktshift;
+	lld->ulp_crypto = adap->params.crypto;
 	lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
 	lld->max_ordird_qp = adap->params.max_ordird_qp;
 	lld->max_ird_adapter = adap->params.max_ird_adapter;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 35f4d9f..8f1c874 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -331,6 +331,7 @@ struct cxgb4_lld_info {
 	unsigned int iscsi_tagmask;	     /* iscsi ddp tag mask */
 	unsigned int iscsi_pgsz_order;	     /* iscsi ddp page size orders */
 	unsigned int iscsi_llimit;	     /* chip's iscsi region llimit */
+	unsigned int ulp_crypto;             /* crypto lookaside support */
 	void **iscsi_ppm;		     /* iscsi page pod manager */
 	int nodeid;			     /* device numa node id */
 	bool fr_nsmr_tpte_wr_support;	     /* FW supports FR_NSMR_TPTE_WR */
-- 
1.8.3.1


* Re: [PATCH 0/9] Bug fixes and ctr mode of operation
  2017-06-15  7:13 [PATCH 0/9] Bug fixes and ctr mode of operation Harsh Jain
                   ` (8 preceding siblings ...)
  2017-06-15  7:13 ` [PATCH 9/9] crypto: chcr - Select device in Round Robin fashion Harsh Jain
@ 2017-06-20  3:41 ` Herbert Xu
  9 siblings, 0 replies; 11+ messages in thread
From: Herbert Xu @ 2017-06-20  3:41 UTC (permalink / raw)
  To: Harsh Jain; +Cc: linux-crypto, netdev, ganeshgr

On Thu, Jun 15, 2017 at 12:43:38PM +0530, Harsh Jain wrote:
> This series is based on cryptodev2.6 tree and includes bug fix ,ctr(aes), rfc3686(ctr(aes)) algo.
> 
> Harsh Jain (7):
>   crypto: chcr - Pass lcb bit setting to firmware
>   crypto: chcr - Set fallback key
>   crypto: chcr - Return correct error code
>   crypto: chcr - Avoid changing request structure
>   crypto:chcr - Add ctr mode and process large sg entries for cipher
>   MAINTAINERS:Add maintainer for chelsio crypto driver
>   crypto: chcr - Ensure Destination sg entry size less than      2k
> Atul Gupta (2):
>   chcr - Add debug counters
>   crypto: chcr - Select device in Round Robin fashion

All applied.  Thanks.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
