* [PATCH 0/6] chcr: AEAD support and bug fixes
From: Harsh Jain @ 2016-10-13 11:09 UTC
  To: dan.carpenter, herbert, linux-crypto
  Cc: jlulla, atul.gupta, yeshaswi, hariprasad, Harsh Jain

This patch series includes bug fixes, a performance improvement, and
support for the following AEAD algos:
GCM, CCM, RFC4106, RFC4309, authenc(hmac(shaXXX),cbc(aes))

This patch series is based on the linux-next tree and depends on the
"crypto/chcr: Add support for Chelsio Crypto Driver" series:

https://www.mail-archive.com/linux-crypto@vger.kernel.org/msg20658.html
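
As a rough illustration (not part of this series), the algorithms land
behind the usual in-kernel AEAD API. The sketch below only uses the
generic template name "gcm(aes)" and assumes chcr registers it with a
high enough cra_priority to win the lookup:

#include <linux/kernel.h>
#include <linux/err.h>
#include <crypto/aead.h>

static int try_chcr_gcm(void)
{
	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* report which implementation the crypto core picked */
	pr_info("gcm(aes) driver: %s\n",
		crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)));
	crypto_free_aead(tfm);
	return 0;
}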

Jitendra Lulla (3):
  Fix memory corruption done by *((u32 *)dec_key + k) operation.
  Remove malloc/free in crypto operation and allocate memory in Init.
    Added new structure chcr_wr to populate Work Request Header.
  Destination buffer size passed to hardware should not be greater than
    crypto operation output.

Harsh Jain (3):
  Use SHASH_DESC_ON_STACK macro to allocate memory for ipad/opad
    calculation.
  Move tfm ctx variable to request context.
  Add support for AEAD algos
    GCM,CCM,RFC4106,RFC4309,authenc(hmac(shaXXX),cbc(aes))

 drivers/crypto/chelsio/Kconfig       |    1 +
 drivers/crypto/chelsio/chcr_algo.c   | 1998 +++++++++++++++++++++++++++++-----
 drivers/crypto/chelsio/chcr_algo.h   |  102 +-
 drivers/crypto/chelsio/chcr_core.c   |    8 +-
 drivers/crypto/chelsio/chcr_core.h   |   18 +-
 drivers/crypto/chelsio/chcr_crypto.h |  115 +-
 6 files changed, 1857 insertions(+), 385 deletions(-)

-- 
1.8.2.3


* [PATCH 1/6] chcr: Fix memory corruption done
From: Harsh Jain @ 2016-10-13 11:09 UTC
  To: dan.carpenter, herbert, linux-crypto
  Cc: jlulla, atul.gupta, yeshaswi, hariprasad, Harsh Jain, Jitendra Lulla

Fix memory corruption caused by the *((u32 *)dec_key + k) operation:
move get_aes_decrypt_key() out of chcr_algo.h into chcr_algo.c, and
decrement i before the final copy loop so that j starts at the ring
slot holding the last expanded key word.

Signed-off-by: Jitendra Lulla <JLULLA@chelsio.com>
---
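A standalone sketch (user space, made-up word labels) of why the new
i-- matters in the final copy loop: for AES-128 the expansion loop
exits with i == 40, and the last word generated (w43) sits in ring
slot (i - 1) % nk, so decrementing i first starts the reverse key
from w43 instead of w40:

#include <stdio.h>

int main(void)
{
	/* after expansion the ring slots hold words w40..w43 */
	int w_ring[4] = { 40, 41, 42, 43 };
	int nk = 4, i = 40, j, k;

	i--;			/* the fix: index the last word generated */
	for (k = 0, j = i % nk; k < nk; k++) {
		printf("dec_key[%d] = w%d\n", k, w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
	return 0;		/* prints w43, w42, w41, w40 */
}
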
 drivers/crypto/chelsio/chcr_algo.c | 52 ++++++++++++++++++++++++++++++++++
 drivers/crypto/chelsio/chcr_algo.h | 58 +-------------------------------------
 2 files changed, 53 insertions(+), 57 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index e4ddb92..944c11f 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -178,6 +178,58 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 	return flits + sgl_len(cnt);
 }
 
+static void get_aes_decrypt_key(unsigned char *dec_key,
+				const unsigned char *key,
+				unsigned int keylength)
+{
+	u32 temp;
+	u32 w_ring[MAX_NK];
+	int i, j, k;
+	u8  nr, nk;
+
+	switch (keylength) {
+	case AES_KEYLENGTH_128BIT:
+		nk = KEYLENGTH_4BYTES;
+		nr = NUMBER_OF_ROUNDS_10;
+		break;
+	case AES_KEYLENGTH_192BIT:
+		nk = KEYLENGTH_6BYTES;
+		nr = NUMBER_OF_ROUNDS_12;
+		break;
+	case AES_KEYLENGTH_256BIT:
+		nk = KEYLENGTH_8BYTES;
+		nr = NUMBER_OF_ROUNDS_14;
+		break;
+	default:
+		return;
+	}
+	for (i = 0; i < nk; i++)
+		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+
+	i = 0;
+	temp = w_ring[nk - 1];
+	while (i + nk < (nr + 1) * 4) {
+		if (!(i % nk)) {
+			/* RotWord(temp) */
+			temp = (temp << 8) | (temp >> 24);
+			temp = aes_ks_subword(temp);
+			temp ^= round_constant[i / nk];
+		} else if (nk == 8 && (i % 4 == 0)) {
+			temp = aes_ks_subword(temp);
+		}
+		w_ring[i % nk] ^= temp;
+		temp = w_ring[i % nk];
+		i++;
+	}
+	i--;
+	for (k = 0, j = i % nk; k < nk; k++) {
+		*((u32 *)dec_key + k) = htonl(w_ring[j]);
+		j--;
+		if (j < 0)
+			j += nk;
+	}
+}
+
 static struct shash_desc *chcr_alloc_shash(unsigned int ds)
 {
 	struct crypto_shash *base_hash = NULL;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index ec64fbc..f34bc91 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -394,7 +394,7 @@ static const u8 aes_sbox[256] = {
 	187, 22
 };
 
-static u32 aes_ks_subword(const u32 w)
+static inline u32 aes_ks_subword(const u32 w)
 {
 	u8 bytes[4];
 
@@ -412,60 +412,4 @@ static u32 round_constant[11] = {
 	0x1B000000, 0x36000000, 0x6C000000
 };
 
-/* dec_key - OUTPUT - Reverse round key
- * key - INPUT - key
- * keylength - INPUT - length of the key in number of bits
- */
-static inline void get_aes_decrypt_key(unsigned char *dec_key,
-				       const unsigned char *key,
-				       unsigned int keylength)
-{
-	u32 temp;
-	u32 w_ring[MAX_NK];
-	int i, j, k = 0;
-	u8  nr, nk;
-
-	switch (keylength) {
-	case AES_KEYLENGTH_128BIT:
-		nk = KEYLENGTH_4BYTES;
-		nr = NUMBER_OF_ROUNDS_10;
-		break;
-
-	case AES_KEYLENGTH_192BIT:
-		nk = KEYLENGTH_6BYTES;
-		nr = NUMBER_OF_ROUNDS_12;
-		break;
-	case AES_KEYLENGTH_256BIT:
-		nk = KEYLENGTH_8BYTES;
-		nr = NUMBER_OF_ROUNDS_14;
-		break;
-	default:
-		return;
-	}
-	for (i = 0; i < nk; i++ )
-		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
-
-	i = 0;
-	temp = w_ring[nk - 1];
-	while(i + nk < (nr + 1) * 4) {
-		if(!(i % nk)) {
-			/* RotWord(temp) */
-			temp = (temp << 8) | (temp >> 24);
-			temp = aes_ks_subword(temp);
-			temp ^= round_constant[i / nk];
-		}
-		else if (nk == 8 && (i % 4 == 0))
-			temp = aes_ks_subword(temp);
-		w_ring[i % nk] ^= temp;
-		temp = w_ring[i % nk];
-		i++;
-	}
-	for (k = 0, j = i % nk; k < nk; k++) {
-		*((u32 *)dec_key + k) = htonl(w_ring[j]);
-		j--;
-		if(j < 0)
-			j += nk;
-	}
-}
-
 #endif /* __CHCR_ALGO_H__ */
-- 
1.8.2.3


* [PATCH 2/6] chcr: Remove malloc/free
From: Harsh Jain @ 2016-10-13 11:09 UTC
  To: dan.carpenter, herbert, linux-crypto
  Cc: jlulla, atul.gupta, yeshaswi, hariprasad, Harsh Jain, Jitendra Lulla

Remove malloc/free in the crypto operation path and allocate memory
via cra_ctxsize. Add a new structure, chcr_wr, to populate the Work
Request header.

Fixes: 324429d74127 ("chcr: Support for Chelsio's Crypto Hardware")
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Jitendra Lulla <JLULLA@chelsio.com>
---
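For reference, the allocation model this moves to, sketched against the
driver's own headers (the cra_ fields are abbreviated): with the context
carved out of cra_ctxsize, the crypto core allocates it once at tfm
creation, so the request path no longer needs kmalloc()/kfree():

static struct crypto_alg example_alg = {
	.cra_name	= "cbc(aes)",
	/* core preallocates this much context per tfm */
	.cra_ctxsize	= sizeof(struct chcr_context) +
			  sizeof(struct ablk_ctx),
	.cra_init	= chcr_cra_init,
	/* ... */
};

static int handle_one_request(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);

	/* ctx is preallocated storage, valid for the tfm's lifetime;
	 * nothing to kmalloc() here, nothing to kfree() on completion */
	return ctx ? 0 : -EINVAL;
}
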
 drivers/crypto/chelsio/chcr_algo.c   | 361 +++++++++++++++++------------------
 drivers/crypto/chelsio/chcr_algo.h   |  28 ++-
 drivers/crypto/chelsio/chcr_core.h   |  16 ++
 drivers/crypto/chelsio/chcr_crypto.h |  16 +-
 4 files changed, 210 insertions(+), 211 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 944c11f..d5e0066 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -150,8 +150,6 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 			       sizeof(struct cpl_fw6_pld),
 			       updated_digestsize);
 		}
-		kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
-		ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
 		break;
 	}
 	return 0;
@@ -414,8 +412,23 @@ static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
 }
 
+static inline void write_buffer_to_skb(struct sk_buff *skb,
+					unsigned int *frags,
+					char *bfr,
+					u8 bfr_len)
+{
+	skb->len += bfr_len;
+	skb->data_len += bfr_len;
+	skb->truesize += bfr_len;
+	get_page(virt_to_page(bfr));
+	skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+			   offset_in_page(bfr), bfr_len);
+	(*frags)++;
+}
+
+
 static inline void
-write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
 			struct scatterlist *sg, unsigned int count)
 {
 	struct page *spage;
@@ -424,8 +437,9 @@ write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
 	skb->len += count;
 	skb->data_len += count;
 	skb->truesize += count;
+
 	while (count > 0) {
-		if (sg && (!(sg->length)))
+		if (!sg || (!(sg->length)))
 			break;
 		spage = sg_page(sg);
 		get_page(spage);
@@ -441,29 +455,24 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 			       struct _key_ctx *key_ctx)
 {
 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
-		get_aes_decrypt_key(key_ctx->key, ablkctx->key,
-				    ablkctx->enckey_len << 3);
-		memset(key_ctx->key + ablkctx->enckey_len, 0,
-		       CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
 	} else {
 		memcpy(key_ctx->key,
 		       ablkctx->key + (ablkctx->enckey_len >> 1),
 		       ablkctx->enckey_len >> 1);
-		get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
-				    ablkctx->key, ablkctx->enckey_len << 2);
+		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
 	}
 	return 0;
 }
 
 static inline void create_wreq(struct chcr_context *ctx,
-			       struct fw_crypto_lookaside_wr *wreq,
+			       struct chcr_wr *chcr_req,
 			       void *req, struct sk_buff *skb,
 			       int kctx_len, int hash_sz,
 			       unsigned int phys_dsgl)
 {
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
-	struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
-	struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
 	int iv_loc = IV_DSGL;
 	int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
 	unsigned int immdatalen = 0, nr_frags = 0;
@@ -475,24 +484,27 @@ static inline void create_wreq(struct chcr_context *ctx,
 		nr_frags = skb_shinfo(skb)->nr_frags;
 	}
 
-	wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
-						     (kctx_len >> 4));
-	wreq->pld_size_hash_size =
+	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+				((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+	chcr_req->wreq.pld_size_hash_size =
 		htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
 		      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
-	wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+	chcr_req->wreq.len16_pkd =
+		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
 				    (calc_tx_flits_ofld(skb) * 8), 16)));
-	wreq->cookie = cpu_to_be64((uintptr_t)req);
-	wreq->rx_chid_to_rx_q_id =
+	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+	chcr_req->wreq.rx_chid_to_rx_q_id =
 		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
 				(hash_sz) ? IV_NOP : iv_loc);
 
-	ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
-	ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
-					 16) - ((sizeof(*wreq)) >> 4)));
+	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+					16) - ((sizeof(chcr_req->wreq)) >> 4)));
 
-	sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
-	sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
+	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+				   sizeof(chcr_req->key_ctx) +
+				   kctx_len +
 				  ((hash_sz) ? DUMMY_BYTES :
 				  (sizeof(struct cpl_rx_phys_dsgl) +
 				   phys_dsgl)) + immdatalen);
@@ -506,23 +518,23 @@ static inline void create_wreq(struct chcr_context *ctx,
  *	@op_type:	encryption or decryption
  */
 static struct sk_buff
-*create_cipher_wr(struct crypto_async_request *req_base,
-		  struct chcr_context *ctx, unsigned short qid,
+*create_cipher_wr(struct ablkcipher_request *req,
+		  unsigned short qid,
 		  unsigned short op_type)
 {
-	struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 	struct sk_buff *skb = NULL;
-	struct _key_ctx *key_ctx;
-	struct fw_crypto_lookaside_wr *wreq;
-	struct cpl_tx_sec_pdu *sec_cpl;
+	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 	struct phys_sge_parm sg_param;
 	unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+			GFP_ATOMIC;
 
 	if (!req->info)
 		return ERR_PTR(-EINVAL);
@@ -530,62 +542,57 @@ static struct sk_buff
 	ablkctx->enc = op_type;
 
 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
-	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+		       ablkctx->enckey_len, req->nbytes, ivsize);
 		return ERR_PTR(-EINVAL);
+	}
 
 	phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
 
-	kctx_len = sizeof(*key_ctx) +
-		(DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
-	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
-			GFP_ATOMIC);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-	wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-
-	sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
-	sec_cpl->op_ivinsrtofst =
-		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
-
-	sec_cpl->pldlen = htonl(ivsize + req->nbytes);
-	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
-								ivsize + 1, 0);
-
-	sec_cpl->cipherstop_lo_authinsert =  FILL_SEC_CPL_AUTHINSERT(0, 0,
-								     0, 0);
-	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
+	chcr_req->sec_cpl.op_ivinsrtofst =
+		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
+
+	chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+	chcr_req->sec_cpl.aadstart_cipherstop_hi =
+			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+
+	chcr_req->sec_cpl.cipherstop_lo_authinsert =
+			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
 							 ablkctx->ciph_mode,
-							 0, 0, ivsize >> 1, 1);
-	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+							 0, 0, ivsize >> 1);
+	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
 							  0, 1, phys_dsgl);
 
-	key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
-	key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
 	if (op_type == CHCR_DECRYPT_OP) {
-		if (generate_copy_rrkey(ablkctx, key_ctx))
-			goto map_fail1;
+		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
 	} else {
 		if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
-			memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+			memcpy(chcr_req->key_ctx.key, ablkctx->key,
+			       ablkctx->enckey_len);
 		} else {
-			memcpy(key_ctx->key, ablkctx->key +
+			memcpy(chcr_req->key_ctx.key, ablkctx->key +
 			       (ablkctx->enckey_len >> 1),
 			       ablkctx->enckey_len >> 1);
-			memcpy(key_ctx->key +
+			memcpy(chcr_req->key_ctx.key +
 			       (ablkctx->enckey_len >> 1),
 			       ablkctx->key,
 			       ablkctx->enckey_len >> 1);
 		}
 	}
-	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-
-	memcpy(ablkctx->iv, req->info, ivsize);
-	sg_init_table(&ablkctx->iv_sg, 1);
-	sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
 	sg_param.nents = ablkctx->dst_nents;
-	sg_param.obsize = dst_bufsize;
+	sg_param.obsize = req->nbytes;
 	sg_param.qid = qid;
 	sg_param.align = 1;
 	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
@@ -593,9 +600,10 @@ static struct sk_buff
 		goto map_fail1;
 
 	skb_set_transport_header(skb, transhdr_len);
-	write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
-	write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
-	create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
+	memcpy(ablkctx->iv, req->info, ivsize);
+	write_buffer_to_skb(skb, &frags, ablkctx->iv, ivsize);
+	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, phys_dsgl);
 	req_ctx->skb = skb;
 	skb_get(skb);
 	return skb;
@@ -609,15 +617,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 {
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
-	struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
 	unsigned int ck_size, context_size;
 	u16 alignment = 0;
 
-	if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
-		goto badkey_err;
-
-	memcpy(ablkctx->key, key, keylen);
-	ablkctx->enckey_len = keylen;
 	if (keylen == AES_KEYSIZE_128) {
 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
 	} else if (keylen == AES_KEYSIZE_192) {
@@ -628,7 +630,9 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 	} else {
 		goto badkey_err;
 	}
-
+	memcpy(ablkctx->key, key, keylen);
+	ablkctx->enckey_len = keylen;
+	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
 			keylen + alignment) >> 4;
 
@@ -662,7 +666,6 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-	struct crypto_async_request *req_base = &req->base;
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct sk_buff *skb;
 
@@ -672,8 +675,7 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
 			return -EBUSY;
 	}
 
-	skb = create_cipher_wr(req_base, ctx,
-			       u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
 			       CHCR_ENCRYPT_OP);
 	if (IS_ERR(skb)) {
 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -689,7 +691,6 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-	struct crypto_async_request *req_base = &req->base;
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	struct sk_buff *skb;
 
@@ -699,7 +700,7 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
 			return -EBUSY;
 	}
 
-	skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+	skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
 			       CHCR_DECRYPT_OP);
 	if (IS_ERR(skb)) {
 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@ -779,33 +780,11 @@ static int get_alg_config(struct algo_param *params,
 	return 0;
 }
 
-static inline int
-write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
-			    struct sk_buff *skb, unsigned int *frags, char *bfr,
-			    u8 bfr_len)
-{
-	void *page_ptr = NULL;
-
-	skb->len += bfr_len;
-	skb->data_len += bfr_len;
-	skb->truesize += bfr_len;
-	page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
-	if (!page_ptr)
-		return -ENOMEM;
-	get_page(virt_to_page(page_ptr));
-	req_ctx->dummy_payload_ptr = page_ptr;
-	memcpy(page_ptr, bfr, bfr_len);
-	skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
-			   offset_in_page(page_ptr), bfr_len);
-	(*frags)++;
-	return 0;
-}
-
 /**
- *	create_final_hash_wr - Create hash work request
+ *	create_hash_wr - Create hash work request
  *	@req - Cipher req base
  */
-static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
+static struct sk_buff *create_hash_wr(struct ahash_request *req,
 					    struct hash_wr_param *param)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -813,16 +792,16 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
 	struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
 	struct sk_buff *skb = NULL;
-	struct _key_ctx *key_ctx;
-	struct fw_crypto_lookaside_wr *wreq;
-	struct cpl_tx_sec_pdu *sec_cpl;
+	struct chcr_wr *chcr_req;
 	unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
-	unsigned int kctx_len = sizeof(*key_ctx);
+	unsigned int kctx_len = 0;
 	u8 hash_size_in_response = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
 
 	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
-	kctx_len += param->alg_prm.result_size + iopad_alignment;
+	kctx_len = param->alg_prm.result_size + iopad_alignment;
 	if (param->opad_needed)
 		kctx_len += param->alg_prm.result_size + iopad_alignment;
 
@@ -831,53 +810,53 @@ static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
 	else
 		hash_size_in_response = param->alg_prm.result_size;
 	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
-	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
-			GFP_ATOMIC);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
 	if (!skb)
 		return skb;
 
 	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-	wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-	memset(wreq, 0, transhdr_len);
+	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
 
-	sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
-	sec_cpl->op_ivinsrtofst =
-		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
-	sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+	chcr_req->sec_cpl.op_ivinsrtofst =
+		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
 
-	sec_cpl->aadstart_cipherstop_hi =
+	chcr_req->sec_cpl.aadstart_cipherstop_hi =
 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
-	sec_cpl->cipherstop_lo_authinsert =
+	chcr_req->sec_cpl.cipherstop_lo_authinsert =
 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
-	sec_cpl->seqno_numivs =
+	chcr_req->sec_cpl.seqno_numivs =
 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
-					 param->opad_needed, 0, 0);
+					 param->opad_needed, 0);
 
-	sec_cpl->ivgen_hdrlen =
+	chcr_req->sec_cpl.ivgen_hdrlen =
 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
 
-	key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
-	memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+	       param->alg_prm.result_size);
 
 	if (param->opad_needed)
-		memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
-				       CHCR_HASH_MAX_DIGEST_SIZE),
+		memcpy(chcr_req->key_ctx.key +
+		       ((param->alg_prm.result_size <= 32) ? 32 :
+			CHCR_HASH_MAX_DIGEST_SIZE),
 		       hmacctx->opad, param->alg_prm.result_size);
 
-	key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
 					    param->alg_prm.mk_size, 0,
 					    param->opad_needed,
-					    (kctx_len >> 4));
-	sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+					    ((kctx_len +
+					     sizeof(chcr_req->key_ctx)) >> 4));
+	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
 
 	skb_set_transport_header(skb, transhdr_len);
 	if (param->bfr_len != 0)
-		write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
-					    param->bfr_len);
+		write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
+				    param->bfr_len);
 	if (param->sg_len != 0)
-		write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
 
-	create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response,
 		    0);
 	req_ctx->skb = skb;
 	skb_get(skb);
@@ -904,34 +883,41 @@ static int chcr_ahash_update(struct ahash_request *req)
 			return -EBUSY;
 	}
 
-	if (nbytes + req_ctx->bfr_len >= bs) {
-		remainder = (nbytes + req_ctx->bfr_len) % bs;
-		nbytes = nbytes + req_ctx->bfr_len - remainder;
+	if (nbytes + req_ctx->reqlen >= bs) {
+		remainder = (nbytes + req_ctx->reqlen) % bs;
+		nbytes = nbytes + req_ctx->reqlen - remainder;
 	} else {
-		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
-				   req_ctx->bfr_len, nbytes, 0);
-		req_ctx->bfr_len += nbytes;
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+				   + req_ctx->reqlen, nbytes, 0);
+		req_ctx->reqlen += nbytes;
 		return 0;
 	}
 
 	params.opad_needed = 0;
 	params.more = 1;
 	params.last = 0;
-	params.sg_len = nbytes - req_ctx->bfr_len;
-	params.bfr_len = req_ctx->bfr_len;
+	params.sg_len = nbytes - req_ctx->reqlen;
+	params.bfr_len = req_ctx->reqlen;
 	params.scmd1 = 0;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	req_ctx->result = 0;
 	req_ctx->data_len += params.sg_len + params.bfr_len;
-	skb = create_final_hash_wr(req, &params);
-	if (!skb)
-		return -ENOMEM;
+	skb = create_hash_wr(req, &params);
+
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
 
-	req_ctx->bfr_len = remainder;
-	if (remainder)
+	if (remainder) {
+		u8 *temp;
+		/* Swap buffers */
+		temp = req_ctx->reqbfr;
+		req_ctx->reqbfr = req_ctx->skbfr;
+		req_ctx->skbfr = temp;
 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
-				   req_ctx->bfr, remainder, req->nbytes -
+				   req_ctx->reqbfr, remainder, req->nbytes -
 				   remainder);
+	}
+	req_ctx->reqlen = remainder;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
 	chcr_send_wr(skb);
@@ -967,10 +953,10 @@ static int chcr_ahash_final(struct ahash_request *req)
 	params.sg_len = 0;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	req_ctx->result = 1;
-	params.bfr_len = req_ctx->bfr_len;
+	params.bfr_len = req_ctx->reqlen;
 	req_ctx->data_len += params.bfr_len + params.sg_len;
-	if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
-		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+	if (req_ctx->reqlen == 0) {
+		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.last = 0;
 		params.more = 1;
 		params.scmd1 = 0;
@@ -981,7 +967,10 @@ static int chcr_ahash_final(struct ahash_request *req)
 		params.last = 1;
 		params.more = 0;
 	}
-	skb = create_final_hash_wr(req, &params);
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
 	chcr_send_wr(skb);
@@ -1013,12 +1002,12 @@ static int chcr_ahash_finup(struct ahash_request *req)
 		params.opad_needed = 0;
 
 	params.sg_len = req->nbytes;
-	params.bfr_len = req_ctx->bfr_len;
+	params.bfr_len = req_ctx->reqlen;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	req_ctx->data_len += params.bfr_len + params.sg_len;
 	req_ctx->result = 1;
-	if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
-		create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+	if ((req_ctx->reqlen + req->nbytes) == 0) {
+		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.last = 0;
 		params.more = 1;
 		params.scmd1 = 0;
@@ -1029,9 +1018,10 @@ static int chcr_ahash_finup(struct ahash_request *req)
 		params.more = 0;
 	}
 
-	skb = create_final_hash_wr(req, &params);
-	if (!skb)
-		return -ENOMEM;
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
 	chcr_send_wr(skb);
@@ -1073,15 +1063,15 @@ static int chcr_ahash_digest(struct ahash_request *req)
 	req_ctx->result = 1;
 	req_ctx->data_len += params.bfr_len + params.sg_len;
 
-	if (req_ctx->bfr && req->nbytes == 0) {
-		create_last_hash_block(req_ctx->bfr, bs, 0);
+	if (req->nbytes == 0) {
+		create_last_hash_block(req_ctx->reqbfr, bs, 0);
 		params.more = 1;
 		params.bfr_len = bs;
 	}
 
-	skb = create_final_hash_wr(req, &params);
-	if (!skb)
-		return -ENOMEM;
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
 
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
@@ -1094,12 +1084,12 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 	struct chcr_ahash_req_ctx *state = out;
 
-	state->bfr_len = req_ctx->bfr_len;
+	state->reqlen = req_ctx->reqlen;
 	state->data_len = req_ctx->data_len;
-	memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
 	memcpy(state->partial_hash, req_ctx->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
-	return 0;
+	return 0;
 }
 
 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -1107,10 +1097,11 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
 
-	req_ctx->bfr_len = state->bfr_len;
+	req_ctx->reqlen = state->reqlen;
 	req_ctx->data_len = state->data_len;
-	req_ctx->dummy_payload_ptr = NULL;
-	memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+	req_ctx->reqbfr = req_ctx->bfr1;
+	req_ctx->skbfr = req_ctx->bfr2;
+	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
 	memcpy(req_ctx->partial_hash, state->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
 	return 0;
@@ -1174,28 +1165,29 @@ static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 {
 	struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
-	int status = 0;
 	unsigned short context_size = 0;
 
-	if ((key_len == (AES_KEYSIZE_128 << 1)) ||
-	    (key_len == (AES_KEYSIZE_256 << 1))) {
-		memcpy(ablkctx->key, key, key_len);
-		ablkctx->enckey_len = key_len;
-		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
-		ablkctx->key_ctx_hdr =
-			FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
-					 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
-					 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
-					 CHCR_KEYCTX_NO_KEY, 1,
-					 0, context_size);
-		ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
-	} else {
+	if ((key_len != (AES_KEYSIZE_128 << 1)) &&
+	    (key_len != (AES_KEYSIZE_256 << 1))) {
 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
 		ablkctx->enckey_len = 0;
-		status = -EINVAL;
+		return -EINVAL;
+
 	}
-	return status;
+
+	memcpy(ablkctx->key, key, key_len);
+	ablkctx->enckey_len = key_len;
+	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+	ablkctx->key_ctx_hdr =
+		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+				 CHCR_KEYCTX_NO_KEY, 1,
+				 0, context_size);
+	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+	return 0;
 }
 
 static int chcr_sha_init(struct ahash_request *areq)
@@ -1205,8 +1197,9 @@ static int chcr_sha_init(struct ahash_request *areq)
 	int digestsize =  crypto_ahash_digestsize(tfm);
 
 	req_ctx->data_len = 0;
-	req_ctx->dummy_payload_ptr = NULL;
-	req_ctx->bfr_len = 0;
+	req_ctx->reqlen = 0;
+	req_ctx->reqbfr = req_ctx->bfr1;
+	req_ctx->skbfr = req_ctx->bfr2;
 	req_ctx->skb = NULL;
 	req_ctx->result = 0;
 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index f34bc91..f2a5905 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -108,30 +108,24 @@
 #define IPAD_DATA 0x36363636
 #define OPAD_DATA 0x5c5c5c5c
 
-#define TRANSHDR_SIZE(alignedkctx_len)\
-	(sizeof(struct ulptx_idata) +\
-	 sizeof(struct ulp_txpkt) +\
-	 sizeof(struct fw_crypto_lookaside_wr) +\
-	 sizeof(struct cpl_tx_sec_pdu) +\
-	 (alignedkctx_len))
-#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \
-	(TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\
+#define TRANSHDR_SIZE(kctx_len)\
+	(sizeof(struct chcr_wr) +\
+	 kctx_len)
+#define CIPHER_TRANSHDR_SIZE(kctx_len, sge_pairs) \
+	(TRANSHDR_SIZE((kctx_len)) + (sge_pairs) +\
 	 sizeof(struct cpl_rx_phys_dsgl))
-#define HASH_TRANSHDR_SIZE(alignedkctx_len)\
-	(TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES)
+#define HASH_TRANSHDR_SIZE(kctx_len)\
+	(TRANSHDR_SIZE(kctx_len) + DUMMY_BYTES)
 
-#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \
-			sizeof(struct ulp_txpkt) + \
-			sizeof(struct ulptx_idata))
 
-#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst)      \
+#define FILL_SEC_CPL_OP_IVINSR(id, len, ofst)      \
 	htonl( \
 	       CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \
 	       CPL_TX_SEC_PDU_RXCHID_V((id)) | \
 	       CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \
 	       CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \
 	       CPL_TX_SEC_PDU_CPLLEN_V((len)) | \
-	       CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \
+	       CPL_TX_SEC_PDU_PLACEHOLDER_V(0) | \
 	       CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst)))
 
 #define  FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \
@@ -148,7 +142,7 @@
 		CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \
 		CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst)))
 
-#define  FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs)  \
+#define  FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size)  \
 		htonl( \
 		SCMD_SEQ_NO_CTRL_V(0) | \
 		SCMD_STATUS_PRESENT_V(0) | \
@@ -159,7 +153,7 @@
 		SCMD_AUTH_MODE_V((amode)) | \
 		SCMD_HMAC_CTRL_V((opad)) | \
 		SCMD_IV_SIZE_V((size)) | \
-		SCMD_NUM_IVS_V((nivs)))
+		SCMD_NUM_IVS_V(0))
 
 #define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \
 		SCMD_ENB_DBGID_V(0) | \
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 2a5c671..fc3cd77 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -52,9 +52,25 @@
 
 #define MAC_ERROR_BIT		0
 #define CHK_MAC_ERR_BIT(x)	(((x) >> MAC_ERROR_BIT) & 1)
+#define MAX_SALT                4
 
 struct uld_ctx;
 
+struct _key_ctx {
+	__be32 ctx_hdr;
+	u8 salt[MAX_SALT];
+	__be64 reserverd;
+	unsigned char key[0];
+};
+
+struct chcr_wr {
+	struct fw_crypto_lookaside_wr wreq;
+	struct ulp_txpkt ulptx;
+	struct ulptx_idata sc_imm;
+	struct cpl_tx_sec_pdu sec_cpl;
+	struct _key_ctx key_ctx;
+};
+
 struct chcr_dev {
 	/* Request submited to h/w and waiting for response. */
 	spinlock_t lock_chcr_dev;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d7d7560..7ed6d2b 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -118,12 +118,6 @@
 #define CHCR_HASH_MAX_BLOCK_SIZE_128 128
 
 /* Aligned to 128 bit boundary */
-struct _key_ctx {
-	__be32 ctx_hdr;
-	u8 salt[MAX_SALT];
-	__be64 reserverd;
-	unsigned char key[0];
-};
 
 struct ablk_ctx {
 	u8 enc;
@@ -131,8 +125,8 @@ struct ablk_ctx {
 	__be32 key_ctx_hdr;
 	unsigned int enckey_len;
 	unsigned int dst_nents;
-	struct scatterlist iv_sg;
 	u8 key[CHCR_AES_MAX_KEY_LEN];
+	u8 rrkey[AES_MAX_KEY_SIZE];
 	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
 	unsigned char ciph_mode;
 };
@@ -156,12 +150,14 @@ struct chcr_context {
 
 struct chcr_ahash_req_ctx {
 	u32 result;
-	char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
-	u8 bfr_len;
+	u8 bfr1[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 bfr2[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 *reqbfr;
+	u8 *skbfr;
+	u8 reqlen;
 	/* DMA the partial hash in it */
 	u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
 	u64 data_len;  /* Data len till time */
-	void *dummy_payload_ptr;
 	/* SKB which is being sent to the hardware for processing */
 	struct sk_buff *skb;
 };
-- 
1.8.2.3


* [PATCH 3/6] chcr: Adjust Dest. buffer size
From: Harsh Jain @ 2016-10-13 11:09 UTC
  To: dan.carpenter, herbert, linux-crypto
  Cc: jlulla, atul.gupta, yeshaswi, hariprasad, Harsh Jain, Jitendra Lulla

The destination buffer size passed to the hardware should not be
greater than the output size of the crypto operation.

Signed-off-by: Jitendra Lulla <JLULLA@chelsio.com>
---
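The trim in write_phys_cpl() is easiest to see with numbers (a made-up
example; note that obsize became a signed int precisely so the
remainder can go negative):

#include <stdio.h>

int main(void)
{
	/* dst scatterlist entries of 64 + 64 bytes, but the operation
	 * produces only 96 bytes (sg_param->obsize == 96) */
	int out_buf_size = 96;
	int len[2] = { 64, 64 };

	out_buf_size -= len[0];	/* 32 left after entry 0    */
	out_buf_size -= len[1];	/* -32: entry 1 over-claims */
	if (out_buf_size)	/* nonzero remainder        */
		len[1] += out_buf_size;	/* 64 + (-32) == 32 */
	printf("last DSGL entry trimmed to %d bytes\n", len[1]);
	return 0;
}
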
 drivers/crypto/chelsio/chcr_algo.c | 50 +++++++++++++++-----------------------
 1 file changed, 20 insertions(+), 30 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index d5e0066..17d0c1f 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -335,25 +335,13 @@ static inline int is_hmac(struct crypto_tfm *tfm)
 	return 0;
 }
 
-static inline unsigned int ch_nents(struct scatterlist *sg,
-				    unsigned int *total_size)
-{
-	unsigned int nents;
-
-	for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
-		nents++;
-		*total_size += sg->length;
-	}
-	return nents;
-}
-
 static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
 			   struct scatterlist *sg,
 			   struct phys_sge_parm *sg_param)
 {
 	struct phys_sge_pairs *to;
-	unsigned int out_buf_size = sg_param->obsize;
-	unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+	int out_buf_size = sg_param->obsize;
+	unsigned int nents = sg_param->nents, i, j = 0;
 
 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
@@ -371,25 +359,24 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
 				       sizeof(struct cpl_rx_phys_dsgl));
 
 	for (i = 0; nents; to++) {
-		for (j = i; (nents && (j < (8 + i))); j++, nents--) {
-			to->len[j] = htons(sg->length);
+		for (j = 0; j < 8 && nents; j++, nents--) {
+			out_buf_size -= sg_dma_len(sg);
+			to->len[j] = htons(sg_dma_len(sg));
 			to->addr[j] = cpu_to_be64(sg_dma_address(sg));
-			if (out_buf_size) {
-				if (tot_len + sg_dma_len(sg) >= out_buf_size) {
-					to->len[j] = htons(out_buf_size -
-							   tot_len);
-					return;
-				}
-				tot_len += sg_dma_len(sg);
-			}
 			sg = sg_next(sg);
 		}
 	}
+	if (out_buf_size) {
+		j--;
+		to--;
+		to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
+	}
 }
 
-static inline unsigned
-int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
-			 struct scatterlist *sg, struct phys_sge_parm *sg_param)
+static inline int map_writesg_phys_cpl(struct device *dev,
+					struct cpl_rx_phys_dsgl *phys_cpl,
+					struct scatterlist *sg,
+					struct phys_sge_parm *sg_param)
 {
 	if (!sg || !sg_param->nents)
 		return 0;
@@ -531,16 +518,19 @@ static struct sk_buff
 	struct cpl_rx_phys_dsgl *phys_cpl;
 	struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
 	struct phys_sge_parm sg_param;
-	unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+	unsigned int frags = 0, transhdr_len, phys_dsgl;
 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 			GFP_ATOMIC;
 
 	if (!req->info)
 		return ERR_PTR(-EINVAL);
-	ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
+	ablkctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+	if (ablkctx->dst_nents <= 0) {
+		pr_err("AES:Invalid Destination sg lists\n");
+		return ERR_PTR(-EINVAL);
+	}
 	ablkctx->enc = op_type;
-
 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
 	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
-- 
1.8.2.3


* [PATCH 4/6] chcr: Use SHASH_DESC_ON_STACK
From: Harsh Jain @ 2016-10-13 11:09 UTC
  To: dan.carpenter, herbert, linux-crypto
  Cc: jlulla, atul.gupta, yeshaswi, hariprasad, Harsh Jain

Use the SHASH_DESC_ON_STACK macro to allocate memory for the
ipad/opad calculation.

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
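In isolation, the pattern this patch adopts (a sketch that mirrors the
new chcr_ahash_setkey() code; only the tfm stays long-lived, the
descriptor lives on the stack per call):

#include <crypto/hash.h>

static int digest_once(struct crypto_shash *base_hash, const u8 *data,
		       unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, base_hash);	/* no kmalloc()/kfree() */

	shash->tfm = base_hash;
	shash->flags = crypto_shash_get_flags(base_hash);
	return crypto_shash_digest(shash, data, len, out);
}
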
 drivers/crypto/chelsio/chcr_algo.c   | 63 +++++++++++++++---------------------
 drivers/crypto/chelsio/chcr_crypto.h |  2 +-
 2 files changed, 27 insertions(+), 38 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 17d0c1f..7262bb3 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -228,40 +228,29 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
 	}
 }
 
-static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
 {
 	struct crypto_shash *base_hash = NULL;
-	struct shash_desc *desc;
 
 	switch (ds) {
 	case SHA1_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha1", 0, 0);
 		break;
 	case SHA224_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha224", 0, 0);
 		break;
 	case SHA256_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha256", 0, 0);
 		break;
 	case SHA384_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha384", 0, 0);
 		break;
 	case SHA512_DIGEST_SIZE:
-		base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+		base_hash = crypto_alloc_shash("sha512", 0, 0);
 		break;
 	}
-	if (IS_ERR(base_hash)) {
-		pr_err("Can not allocate sha-generic algo.\n");
-		return (void *)base_hash;
-	}
 
-	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
-		       GFP_KERNEL);
-	if (!desc)
-		return ERR_PTR(-ENOMEM);
-	desc->tfm = base_hash;
-	desc->flags = crypto_shash_get_flags(base_hash);
-	return desc;
+	return base_hash;
 }
 
 static int chcr_compute_partial_hash(struct shash_desc *desc,
@@ -770,6 +759,11 @@ static int get_alg_config(struct algo_param *params,
 	return 0;
 }
 
+static inline void chcr_free_shash(struct crypto_shash *base_hash)
+{
+		crypto_free_shash(base_hash);
+}
+
 /**
  *	create_hash_wr - Create hash work request
  *	@req - Cipher req base
@@ -1106,15 +1100,16 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 	unsigned int i, err = 0, updated_digestsize;
 
-	/*
-	 * use the key to calculate the ipad and opad. ipad will sent with the
+	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+
+	/* use the key to calculate the ipad and opad. ipad will sent with the
 	 * first request's data. opad will be sent with the final hash result
 	 * ipad in hmacctx->ipad and opad in hmacctx->opad location
 	 */
-	if (!hmacctx->desc)
-		return -EINVAL;
+	shash->tfm = hmacctx->base_hash;
+	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
 	if (keylen > bs) {
-		err = crypto_shash_digest(hmacctx->desc, key, keylen,
+		err = crypto_shash_digest(shash, key, keylen,
 					  hmacctx->ipad);
 		if (err)
 			goto out;
@@ -1135,13 +1130,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 		updated_digestsize = SHA256_DIGEST_SIZE;
 	else if (digestsize == SHA384_DIGEST_SIZE)
 		updated_digestsize = SHA512_DIGEST_SIZE;
-	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
 					hmacctx->ipad, digestsize);
 	if (err)
 		goto out;
 	chcr_change_order(hmacctx->ipad, updated_digestsize);
 
-	err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+	err = chcr_compute_partial_hash(shash, hmacctx->opad,
 					hmacctx->opad, digestsize);
 	if (err)
 		goto out;
@@ -1237,26 +1232,20 @@ static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct chcr_ahash_req_ctx));
-	hmacctx->desc = chcr_alloc_shash(digestsize);
-	if (IS_ERR(hmacctx->desc))
-		return PTR_ERR(hmacctx->desc);
+	hmacctx->base_hash = chcr_alloc_shash(digestsize);
+	if (IS_ERR(hmacctx->base_hash))
+		return PTR_ERR(hmacctx->base_hash);
 	return chcr_device_init(crypto_tfm_ctx(tfm));
 }
 
-static void chcr_free_shash(struct shash_desc *desc)
-{
-	crypto_free_shash(desc->tfm);
-	kfree(desc);
-}
-
 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
 {
 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
 
-	if (hmacctx->desc) {
-		chcr_free_shash(hmacctx->desc);
-		hmacctx->desc = NULL;
+	if (hmacctx->base_hash) {
+		chcr_free_shash(hmacctx->base_hash);
+		hmacctx->base_hash = NULL;
 	}
 }
 
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 7ed6d2b..977d205 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -132,7 +132,7 @@ struct ablk_ctx {
 };
 
 struct hmac_ctx {
-	struct shash_desc *desc;
+	struct crypto_shash *base_hash;
 	u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
 	u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
 };
-- 
1.8.2.3


* [PATCH 5/6] chcr: Move tfm ctx variable to request context
From: Harsh Jain @ 2016-10-13 11:09 UTC
  To: dan.carpenter, herbert, linux-crypto
  Cc: jlulla, atul.gupta, yeshaswi, hariprasad, Harsh Jain

Move per-request state (the IV and dst_nents) from the tfm context to
the request context, so one tfm can serve concurrent requests.

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
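A sketch of the resulting split, against the driver's own headers
(this assumes the driver's cra_init sets crt_ablkcipher.reqsize to
sizeof(struct chcr_blkcipher_req_ctx), so the core allocates that
state beside each request):

static void touch_request_state(struct ablkcipher_request *req)
{
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);

	/* per-request now; both fields used to live in the shared
	 * ablk_ctx, where concurrent requests could clobber them */
	reqctx->dst_nents = 0;
	reqctx->iv[0] = 0;
}
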
 drivers/crypto/chelsio/chcr_algo.c   | 26 +++++++++++++-------------
 drivers/crypto/chelsio/chcr_crypto.h |  9 ++++-----
 2 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 7262bb3..18385d6 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -119,7 +119,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 			       AES_BLOCK_SIZE);
 		}
 		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
-			     ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+			     ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
 		if (ctx_req.ctx.ablk_ctx->skb) {
 			kfree_skb(ctx_req.ctx.ablk_ctx->skb);
 			ctx_req.ctx.ablk_ctx->skb = NULL;
@@ -138,8 +138,10 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 			updated_digestsize = SHA256_DIGEST_SIZE;
 		else if (digestsize == SHA384_DIGEST_SIZE)
 			updated_digestsize = SHA512_DIGEST_SIZE;
-		if (ctx_req.ctx.ahash_ctx->skb)
+		if (ctx_req.ctx.ahash_ctx->skb) {
+			kfree_skb(ctx_req.ctx.ahash_ctx->skb);
 			ctx_req.ctx.ahash_ctx->skb = NULL;
+		}
 		if (ctx_req.ctx.ahash_ctx->result == 1) {
 			ctx_req.ctx.ahash_ctx->result = 0;
 			memcpy(ctx_req.req.ahash_req->result, input +
@@ -318,8 +320,7 @@ static inline int is_hmac(struct crypto_tfm *tfm)
 	struct chcr_alg_template *chcr_crypto_alg =
 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
 			     alg.hash);
-	if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
-	    CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
 		return 1;
 	return 0;
 }
@@ -505,7 +506,7 @@ static struct sk_buff
 	struct sk_buff *skb = NULL;
 	struct chcr_wr *chcr_req;
 	struct cpl_rx_phys_dsgl *phys_cpl;
-	struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
 	struct phys_sge_parm sg_param;
 	unsigned int frags = 0, transhdr_len, phys_dsgl;
 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
@@ -514,12 +515,11 @@ static struct sk_buff
 
 	if (!req->info)
 		return ERR_PTR(-EINVAL);
-	ablkctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
-	if (ablkctx->dst_nents <= 0) {
+	reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+	if (reqctx->dst_nents <= 0) {
 		pr_err("AES:Invalid Destination sg lists\n");
 		return ERR_PTR(-EINVAL);
 	}
-	ablkctx->enc = op_type;
 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
 	    (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
@@ -527,7 +527,7 @@ static struct sk_buff
 		return ERR_PTR(-EINVAL);
 	}
 
-	phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+	phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
 
 	kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
@@ -570,7 +570,7 @@ static struct sk_buff
 		}
 	}
 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
-	sg_param.nents = ablkctx->dst_nents;
+	sg_param.nents = reqctx->dst_nents;
 	sg_param.obsize = req->nbytes;
 	sg_param.qid = qid;
 	sg_param.align = 1;
@@ -579,11 +579,11 @@ static struct sk_buff
 		goto map_fail1;
 
 	skb_set_transport_header(skb, transhdr_len);
-	memcpy(ablkctx->iv, req->info, ivsize);
-	write_buffer_to_skb(skb, &frags, ablkctx->iv, ivsize);
+	memcpy(reqctx->iv, req->info, ivsize);
+	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
 	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
 	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, phys_dsgl);
-	req_ctx->skb = skb;
+	reqctx->skb = skb;
 	skb_get(skb);
 	return skb;
 map_fail1:
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 977d205..40a5182 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -120,17 +120,14 @@
 /* Aligned to 128 bit boundary */
 
 struct ablk_ctx {
-	u8 enc;
-	unsigned int processed_len;
 	__be32 key_ctx_hdr;
 	unsigned int enckey_len;
-	unsigned int dst_nents;
 	u8 key[CHCR_AES_MAX_KEY_LEN];
-	u8 rrkey[AES_MAX_KEY_SIZE];
-	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
 	unsigned char ciph_mode;
+	u8 rrkey[AES_MAX_KEY_SIZE];
 };
 
+
 struct hmac_ctx {
 	struct crypto_shash *base_hash;
 	u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
@@ -164,6 +161,8 @@ struct chcr_ahash_req_ctx {
 
 struct chcr_blkcipher_req_ctx {
 	struct sk_buff *skb;
+	unsigned int dst_nents;
+	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
 };
 
 struct chcr_alg_template {
-- 
1.8.2.3


* [PATCH 6/6] Add support for AEAD algos.
From: Harsh Jain @ 2016-10-13 11:09 UTC
  To: dan.carpenter, herbert, linux-crypto
  Cc: jlulla, atul.gupta, yeshaswi, hariprasad, Harsh Jain

Add support for the following AEAD algos:
 GCM, CCM, RFC4106, RFC4309, authenc(hmac(shaXXX),cbc(aes)).

Signed-off-by: Harsh Jain <harsh@chelsio.com>
---
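For reference, the request layout the new chcr_verify_tag() assumes on
the decrypt path, sketched with the generic AEAD API (the helper name
and parameters are illustrative):

#include <crypto/aead.h>

static int decrypt_one(struct crypto_aead *tfm, struct aead_request *req,
		       struct scatterlist *src, struct scatterlist *dst,
		       unsigned int assoclen, unsigned int cryptlen, u8 *iv)
{
	/* src = [ assoc data | ciphertext | tag ]; cryptlen includes
	 * the tag, so it sits at assoclen + cryptlen - authsize,
	 * exactly where chcr_verify_tag() copies it from for the
	 * software comparison (VERIFY_SW) */
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	return crypto_aead_decrypt(req);
}
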
 drivers/crypto/chelsio/Kconfig       |    1 +
 drivers/crypto/chelsio/chcr_algo.c   | 1466 +++++++++++++++++++++++++++++++++-
 drivers/crypto/chelsio/chcr_algo.h   |   16 +-
 drivers/crypto/chelsio/chcr_core.c   |    8 +-
 drivers/crypto/chelsio/chcr_core.h   |    2 -
 drivers/crypto/chelsio/chcr_crypto.h |   90 ++-
 6 files changed, 1541 insertions(+), 42 deletions(-)

diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 4ce67fb..3e104f5 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
+	select CRYPTO_AUTHENC
 	---help---
 	  The Chelsio Crypto Co-processor driver for T6 adapters.
 
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 18385d6..cffc38f 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -54,6 +54,12 @@
 #include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <crypto/internal/aead.h>
+#include <crypto/null.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/aead.h>
+#include <crypto/scatterwalk.h>
 #include <crypto/internal/hash.h>
 
 #include "t4fw_api.h"
@@ -62,6 +68,11 @@
 #include "chcr_algo.h"
 #include "chcr_crypto.h"
 
+static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+{
+	return ctx->crypto_ctx->aeadctx;
+}
+
 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
 {
 	return ctx->crypto_ctx->ablkctx;
@@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
 	return ctx->crypto_ctx->hmacctx;
 }
 
+static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+{
+	return gctx->ctx->gcm;
+}
+
+static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+{
+	return gctx->ctx->authenc;
+}
+
 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 {
 	return ctx->dev->u_ctx;
@@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
 	return (3 * n) / 2 + (n & 1) + 2;
 }
 
+static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+{
+	u8 temp[SHA512_DIGEST_SIZE];
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	int authsize = crypto_aead_authsize(tfm);
+	struct cpl_fw6_pld *fw6_pld;
+	int cmp = 0;
+
+	fw6_pld = (struct cpl_fw6_pld *)input;
+	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+	} else {
+
+		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+				authsize, req->assoclen +
+				req->cryptlen - authsize);
+		cmp = memcmp(temp, (fw6_pld + 1), authsize);
+	}
+	if (cmp)
+		*err = -EBADMSG;
+	else
+		*err = 0;
+}
+
 /*
  *	chcr_handle_resp - Unmap the DMA buffers associated with the request
  *	@req: crypto request
  */
 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
-		     int error_status)
+			 int err)
 {
 	struct crypto_tfm *tfm = req->tfm;
 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
@@ -109,11 +155,27 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 	unsigned int digestsize, updated_digestsize;
 
 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+	case CRYPTO_ALG_TYPE_AEAD:
+		ctx_req.req.aead_req = (struct aead_request *)req;
+		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
+		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
+		if (ctx_req.ctx.reqctx->skb) {
+			kfree_skb(ctx_req.ctx.reqctx->skb);
+			ctx_req.ctx.reqctx->skb = NULL;
+		}
+		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
+			chcr_verify_tag(ctx_req.req.aead_req, input,
+					&err);
+			ctx_req.ctx.reqctx->verify = VERIFY_HW;
+		}
+		break;
+
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
 		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
 		ctx_req.ctx.ablk_ctx =
 			ablkcipher_request_ctx(ctx_req.req.ablk_req);
-		if (!error_status) {
+		if (!err) {
 			fw6_pld = (struct cpl_fw6_pld *)input;
 			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
 			       AES_BLOCK_SIZE);
@@ -154,7 +216,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
 		}
 		break;
 	}
-	return 0;
+	return err;
 }
 
 /*
@@ -380,6 +442,14 @@ static inline int map_writesg_phys_cpl(struct device *dev,
 	return 0;
 }
 
+static inline int get_aead_subtype(struct crypto_aead *aead)
+{
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct chcr_alg_template *chcr_crypto_alg =
+		container_of(alg, struct chcr_alg_template, alg.aead);
+	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+}
+
 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
 {
 	struct crypto_alg *alg = tfm->__crt_alg;
@@ -447,7 +517,8 @@ static inline void create_wreq(struct chcr_context *ctx,
 			       struct chcr_wr *chcr_req,
 			       void *req, struct sk_buff *skb,
 			       int kctx_len, int hash_sz,
-			       unsigned int phys_dsgl)
+			       int is_iv,
+			       unsigned int sc_len)
 {
 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
 	int iv_loc = IV_DSGL;
@@ -472,7 +543,7 @@ static inline void create_wreq(struct chcr_context *ctx,
 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
 	chcr_req->wreq.rx_chid_to_rx_q_id =
 		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
-				(hash_sz) ? IV_NOP : iv_loc);
+				is_iv ? iv_loc : IV_NOP);
 
 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
@@ -481,10 +552,7 @@ static inline void create_wreq(struct chcr_context *ctx,
 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
 				   sizeof(chcr_req->key_ctx) +
-				   kctx_len +
-				  ((hash_sz) ? DUMMY_BYTES :
-				  (sizeof(struct cpl_rx_phys_dsgl) +
-				   phys_dsgl)) + immdatalen);
+				   kctx_len + sc_len + immdatalen);
 }
 
 /**
@@ -582,7 +650,8 @@ static struct sk_buff
 	memcpy(reqctx->iv, req->info, ivsize);
 	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
 	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
-	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, phys_dsgl);
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
 	reqctx->skb = skb;
 	skb_get(skb);
 	return skb;
@@ -706,11 +775,11 @@ static int chcr_device_init(struct chcr_context *ctx)
 		}
 		u_ctx = ULD_CTX(ctx);
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
-		ctx->dev->tx_channel_id = 0;
 		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
 		rxq_idx += id % rxq_perchan;
 		spin_lock(&ctx->dev->lock_chcr_dev);
 		ctx->tx_channel_id = rxq_idx;
+		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
 		spin_unlock(&ctx->dev->lock_chcr_dev);
 	}
 out:
@@ -769,7 +838,7 @@ static inline void chcr_free_shash(struct crypto_shash *base_hash)
  *	@req - Cipher req base
  */
 static struct sk_buff *create_hash_wr(struct ahash_request *req,
-					    struct hash_wr_param *param)
+				      struct hash_wr_param *param)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -840,8 +909,8 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 	if (param->sg_len != 0)
 		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
 
-	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response,
-		    0);
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
+			DUMMY_BYTES);
 	req_ctx->skb = skb;
 	skb_get(skb);
 	return skb;
@@ -1249,6 +1318,1149 @@ static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
 	}
 }
 
+static int chcr_copy_assoc(struct aead_request *req,
+				struct chcr_aead_ctx *ctx)
+{
+	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+
+	skcipher_request_set_tfm(skreq, ctx->null);
+	skcipher_request_set_callback(skreq, aead_request_flags(req),
+			NULL, NULL);
+	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+			NULL);
+
+	return crypto_skcipher_encrypt(skreq);
+}
+
+static unsigned char get_hmac(unsigned int authsize)
+{
+	switch (authsize) {
+	case ICV_8:
+		return CHCR_SCMD_HMAC_CTRL_PL1;
+	case ICV_10:
+		return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+	case ICV_12:
+		return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+	}
+	return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+}
+
+static struct sk_buff *create_authenc_wr(struct aead_request *req,
+					 unsigned short qid,
+					 int size,
+					 unsigned short op_type)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct phys_sge_parm sg_param;
+	struct scatterlist *src, *dst;
+	struct scatterlist src_sg[2], dst_sg[2];
+	unsigned int frags = 0, transhdr_len;
+	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
+	unsigned int   kctx_len = 0;
+	unsigned short stop_offset = 0;
+	unsigned int  assoclen = req->assoclen;
+	unsigned int  authsize = crypto_aead_authsize(tfm);
+	int err = 0;
+	int null = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+
+	if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
+		goto err;
+	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst) {
+		err = chcr_copy_assoc(req, aeadctx);
+		if (err)
+			return ERR_PTR(err);
+		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+	}
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+		null = 1;
+		assoclen = 0;
+	}
+	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+					     (op_type ? -authsize : authsize));
+	if (reqctx->dst_nents <= 0) {
+		pr_err("AUTHENC:Invalid Destination sg entries\n");
+		goto err;
+	}
+	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+		- sizeof(chcr_req->key_ctx);
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+	if (!skb)
+		goto err;
+
+	/* LLD is going to write the sge hdr. */
+	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+	/* Write WR */
+	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
+
+	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+
+	/*
+	 * Input order is AAD, IV and payload; the IV is included as
+	 * part of the authdata. All other fields are filled according
+	 * to the hardware spec.
+	 */
+	chcr_req->sec_cpl.op_ivinsrtofst =
+		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
+				       (ivsize ? (assoclen + 1) : 0));
+	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+					assoclen ? 1 : 0, assoclen,
+					assoclen + ivsize + 1,
+					(stop_offset & 0x1F0) >> 4);
+	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+					stop_offset & 0xF,
+					null ? 0 : assoclen + ivsize + 1,
+					stop_offset, stop_offset);
+	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+					CHCR_SCMD_CIPHER_MODE_AES_CBC,
+					actx->auth_mode, aeadctx->hmac_ctrl,
+					ivsize >> 1);
+	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+					 0, 1, dst_size);
+
+	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+	if (op_type == CHCR_ENCRYPT_OP)
+		memcpy(chcr_req->key_ctx.key, aeadctx->key,
+		       aeadctx->enckey_len);
+	else
+		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+		       aeadctx->enckey_len);
+
+	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
+					4), actx->h_iopad, kctx_len -
+				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	sg_param.nents = reqctx->dst_nents;
+	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+	sg_param.qid = qid;
+	sg_param.align = 0;
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+				  &sg_param))
+		goto dstmap_fail;
+
+	skb_set_transport_header(skb, transhdr_len);
+
+	if (assoclen) {
+		/* AAD buffer in */
+		write_sg_to_skb(skb, &frags, req->src, assoclen);
+	}
+	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
+	write_sg_to_skb(skb, &frags, src, req->cryptlen);
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+		   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+	reqctx->skb = skb;
+	skb_get(skb);
+
+	return skb;
+dstmap_fail:
+	/* ivmap_fail: */
+	kfree_skb(skb);
+err:
+	return ERR_PTR(-EINVAL);
+}
+
+static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
+				  unsigned short offset)
+{
+	struct page *spage;
+	unsigned char *addr;
+
+	spage = sg_page(sg);
+	get_page(spage); /* so that it is not freed by NIC */
+#ifdef KMAP_ATOMIC_ARGS
+	addr = kmap_atomic(spage, KM_SOFTIRQ0);
+#else
+	addr = kmap_atomic(spage);
+#endif
+	memset(addr + sg->offset, 0, offset + 1);
+
+	kunmap_atomic(addr);
+}
+
+static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (unsigned int)(1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
+static void generate_b0(struct aead_request *req,
+			struct chcr_aead_ctx *aeadctx,
+			unsigned short op_type)
+{
+	unsigned int l, lp, m;
+	int rc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	u8 *b0 = reqctx->scratch_pad;
+
+	m = crypto_aead_authsize(aead);
+
+	memcpy(b0, reqctx->iv, 16);
+
+	lp = b0[0];
+	l = lp + 1;
+
+	/* set m, bits 3-5 */
+	*b0 |= (8 * ((m - 2) / 2));
+
+	/* set adata, bit 6, if associated data is used */
+	if (req->assoclen)
+		*b0 |= 64;
+	rc = set_msg_len(b0 + 16 - l,
+			 (op_type == CHCR_DECRYPT_OP) ?
+			 req->cryptlen - m : req->cryptlen, l);
+}
+
+static inline int crypto_ccm_check_iv(const u8 *iv)
+{
+	/* 2 <= L <= 8, so 1 <= L' <= 7. */
+	if (iv[0] < 1 || iv[0] > 7)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int ccm_format_packet(struct aead_request *req,
+			     struct chcr_aead_ctx *aeadctx,
+			     unsigned int sub_type,
+			     unsigned short op_type)
+{
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	int rc = 0;
+
+	if (req->assoclen > T5_MAX_AAD_SIZE) {
+		pr_err("CCM: Unsupported AAD data. It should be < %d\n",
+		       T5_MAX_AAD_SIZE);
+		return -EINVAL;
+	}
+	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+		reqctx->iv[0] = 3;
+		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+		memcpy(reqctx->iv + 4, req->iv, 8);
+		memset(reqctx->iv + 12, 0, 4);
+		*((unsigned short *)(reqctx->scratch_pad + 16)) =
+			htons(req->assoclen - 8);
+	} else {
+		memcpy(reqctx->iv, req->iv, 16);
+		*((unsigned short *)(reqctx->scratch_pad + 16)) =
+			htons(req->assoclen);
+	}
+	generate_b0(req, aeadctx, op_type);
+	/* zero the ctr value */
+	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+	return rc;
+}
+
+static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+				  unsigned int dst_size,
+				  struct aead_request *req,
+				  unsigned short op_type,
+				  struct chcr_context *chcrctx)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned int ivsize = AES_BLOCK_SIZE;
+	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+	unsigned int c_id = chcrctx->dev->tx_channel_id;
+	unsigned int ccm_xtra;
+	unsigned char tag_offset = 0, auth_offset = 0;
+	unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
+	unsigned int assoclen;
+
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+		assoclen = req->assoclen - 8;
+	else
+		assoclen = req->assoclen;
+	ccm_xtra = CCM_B0_SIZE +
+		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+	auth_offset = req->cryptlen ?
+		(assoclen + ivsize + 1 + ccm_xtra) : 0;
+	if (op_type == CHCR_DECRYPT_OP) {
+		if (crypto_aead_authsize(tfm) != req->cryptlen)
+			tag_offset = crypto_aead_authsize(tfm);
+		else
+			auth_offset = 0;
+	}
+
+	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+					 2, (ivsize ?  (assoclen + 1) :  0) +
+					 ccm_xtra);
+	sec_cpl->pldlen =
+		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+	/* For CCM there will always be a b0, so the AAD start is always 1 */
+	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+					1, assoclen + ccm_xtra, assoclen
+					+ ivsize + 1 + ccm_xtra, 0);
+
+	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+					auth_offset, tag_offset,
+					(op_type == CHCR_ENCRYPT_OP) ? 0 :
+					crypto_aead_authsize(tfm));
+	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+					cipher_mode, mac_mode, hmac_ctrl,
+					ivsize >> 1);
+
+	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+					1, dst_size);
+}
+
+static int aead_ccm_validate_input(unsigned short op_type,
+				   struct aead_request *req,
+				   struct chcr_aead_ctx *aeadctx,
+				   unsigned int sub_type)
+{
+	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+		if (crypto_ccm_check_iv(req->iv)) {
+			pr_err("CCM: IV check fails\n");
+			return -EINVAL;
+		}
+	} else {
+		if (req->assoclen != 16 && req->assoclen != 20) {
+			pr_err("RFC4309: Invalid AAD length %d\n",
+			       req->assoclen);
+			return -EINVAL;
+		}
+	}
+	if (aeadctx->enckey_len == 0) {
+		pr_err("CCM: Encryption key not set\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static unsigned int fill_aead_req_fields(struct sk_buff *skb,
+					 struct aead_request *req,
+					 struct scatterlist *src,
+					 unsigned int ivsize,
+					 struct chcr_aead_ctx *aeadctx)
+{
+	unsigned int frags = 0;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+	/* b0 and AAD length (if available) */
+	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
+				(req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
+	if (req->assoclen) {
+		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+			write_sg_to_skb(skb, &frags, req->src,
+					req->assoclen - 8);
+		else
+			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+	}
+	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+	if (req->cryptlen)
+		write_sg_to_skb(skb, &frags, src, req->cryptlen);
+
+	return frags;
+}
+
+static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+					  unsigned short qid,
+					  int size,
+					  unsigned short op_type)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct phys_sge_parm sg_param;
+	struct scatterlist *src, *dst;
+	struct scatterlist src_sg[2], dst_sg[2];
+	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
+	unsigned int dst_size = 0, kctx_len;
+	unsigned int sub_type;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	int err = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+
+	sub_type = get_aead_subtype(tfm);
+	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst) {
+		err = chcr_copy_assoc(req, aeadctx);
+		if (err) {
+			pr_err("AAD copy to destination buffer fails\n");
+			return ERR_PTR(err);
+		}
+		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+	}
+	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+					     (op_type ? -authsize : authsize));
+	if (reqctx->dst_nents <= 0) {
+		pr_err("CCM:Invalid Destination sg entries\n");
+		goto err;
+	}
+
+	if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+		goto err;
+
+	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
+
+	if (!skb)
+		goto err;
+
+	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
+
+	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+
+	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+					16), aeadctx->key, aeadctx->enckey_len);
+
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+		goto dstmap_fail;
+
+	sg_param.nents = reqctx->dst_nents;
+	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+	sg_param.qid = qid;
+	sg_param.align = 0;
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+				  &sg_param))
+		goto dstmap_fail;
+
+	skb_set_transport_header(skb, transhdr_len);
+	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+		    sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+	reqctx->skb = skb;
+	skb_get(skb);
+	return skb;
+dstmap_fail:
+	kfree_skb(skb);
+	skb = NULL;
+err:
+	return ERR_PTR(-EINVAL);
+}
+
+static struct sk_buff *create_gcm_wr(struct aead_request *req,
+				     unsigned short qid,
+				     int size,
+				     unsigned short op_type)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+	struct sk_buff *skb = NULL;
+	struct chcr_wr *chcr_req;
+	struct cpl_rx_phys_dsgl *phys_cpl;
+	struct phys_sge_parm sg_param;
+	struct scatterlist *src, *dst;
+	struct scatterlist src_sg[2], dst_sg[2];
+	unsigned int frags = 0, transhdr_len;
+	unsigned int ivsize = AES_BLOCK_SIZE;
+	unsigned int dst_size = 0, kctx_len;
+	unsigned char tag_offset = 0;
+	unsigned int crypt_len = 0;
+	unsigned int authsize = crypto_aead_authsize(tfm);
+	unsigned char hmac_ctrl = get_hmac(authsize);
+	int err = 0;
+	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+		GFP_ATOMIC;
+
+	/* validate key size */
+	if (aeadctx->enckey_len == 0)
+		goto err;
+
+	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst) {
+		err = chcr_copy_assoc(req, aeadctx);
+		if (err)
+			return	ERR_PTR(err);
+		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+	}
+
+	if (!req->cryptlen)
+		/* A null payload is not supported by the hardware;
+		 * software sends one cipher block instead.
+		 */
+		crypt_len = AES_BLOCK_SIZE;
+	else
+		crypt_len = req->cryptlen;
+	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+					     (op_type ? -authsize : authsize));
+	if (reqctx->dst_nents <= 0) {
+		pr_err("GCM:Invalid Destination sg entries\n");
+		goto err;
+	}
+
+	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
+		AEAD_H_SIZE;
+	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+	if (!skb)
+		goto err;
+
+	/* NIC driver is going to write the sge hdr. */
+	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+
+	chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+	memset(chcr_req, 0, transhdr_len);
+
+	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+		req->assoclen -= 8;
+
+	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+					ctx->dev->tx_channel_id, 2, (ivsize ?
+					(req->assoclen + 1) : 0));
+	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+					req->assoclen ? 1 : 0, req->assoclen,
+					req->assoclen + ivsize + 1, 0);
+	if (req->cryptlen) {
+		chcr_req->sec_cpl.cipherstop_lo_authinsert =
+			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+						tag_offset, tag_offset);
+		chcr_req->sec_cpl.seqno_numivs =
+			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+					CHCR_ENCRYPT_OP) ? 1 : 0,
+					CHCR_SCMD_CIPHER_MODE_AES_GCM,
+					CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
+					ivsize >> 1);
+	} else {
+		chcr_req->sec_cpl.cipherstop_lo_authinsert =
+			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+		chcr_req->sec_cpl.seqno_numivs =
+			FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+					(op_type ==  CHCR_ENCRYPT_OP) ?
+					1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
+					0, 0, ivsize >> 1);
+	}
+	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+					0, 1, dst_size);
+	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
+	/* Prepare a 16-byte IV: SALT (4 bytes) | IV (8 bytes) | 0x00000001 */
+	if (get_aead_subtype(tfm) ==
+	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+		memcpy(reqctx->iv, aeadctx->salt, 4);
+		memcpy(reqctx->iv + 4, req->iv, 8);
+	} else {
+		memcpy(reqctx->iv, req->iv, 12);
+	}
+	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+
+	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+	sg_param.nents = reqctx->dst_nents;
+	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+	sg_param.qid = qid;
+	sg_param.align = 0;
+	if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+				  &sg_param))
+		goto dstmap_fail;
+
+	skb_set_transport_header(skb, transhdr_len);
+
+	write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+
+	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+
+	if (req->cryptlen) {
+		write_sg_to_skb(skb, &frags, src, req->cryptlen);
+	} else {
+		aes_gcm_empty_pld_pad(req->dst, authsize - 1);
+		write_sg_to_skb(skb, &frags, dst, crypt_len);
+	}
+
+	create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+			sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+	reqctx->skb = skb;
+	skb_get(skb);
+	return skb;
+
+dstmap_fail:
+	/* ivmap_fail: */
+	kfree_skb(skb);
+	skb = NULL;
+err:
+	return skb;
+}
+
+static int chcr_aead_cra_init(struct crypto_aead *tfm)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
+	aeadctx->null = crypto_get_default_null_skcipher();
+	if (IS_ERR(aeadctx->null))
+		return PTR_ERR(aeadctx->null);
+	return chcr_device_init(ctx);
+}
+
+static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+{
+	crypto_put_default_null_skcipher();
+}
+
+static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+					unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+	aeadctx->mayverify = VERIFY_HW;
+	return 0;
+}
+
+static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+				    unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	u32 maxauth = crypto_aead_maxauthsize(tfm);
+
+	/* The SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
+	 * does not hold for SHA1, so the authsize == 12 case must be checked
+	 * before authsize == (maxauth >> 1).
+	 */
+	if (authsize == ICV_4) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_6) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_10) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_12) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == ICV_14) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == (maxauth >> 1)) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+	} else if (authsize == maxauth) {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+	} else {
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_SW;
+	}
+	return 0;
+}
+
+static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+	switch (authsize) {
+	case ICV_4:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_8:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_12:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_14:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_16:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_13:
+	case ICV_15:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_SW;
+		break;
+	default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+					  unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+	switch (authsize) {
+	case ICV_8:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_12:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_16:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+				unsigned int authsize)
+{
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+
+	switch (authsize) {
+	case ICV_4:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_6:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_8:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_10:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_12:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_14:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	case ICV_16:
+		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+		aeadctx->mayverify = VERIFY_HW;
+		break;
+	default:
+		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+				const u8 *key,
+				unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(aead);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	unsigned char ck_size, mk_size;
+	int key_ctx_size = 0;
+
+	/* Validate the key length before copying so that an oversized
+	 * key cannot overrun aeadctx->key.
+	 */
+	if (keylen == AES_KEYSIZE_128) {
+		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+	} else if (keylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+	} else {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		aeadctx->enckey_len = 0;
+		return -EINVAL;
+	}
+	memcpy(aeadctx->key, key, keylen);
+	aeadctx->enckey_len = keylen;
+	key_ctx_size = sizeof(struct _key_ctx) +
+		((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+						key_ctx_size >> 4);
+	return 0;
+}
+
+static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+				    unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(aead);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+
+	if (keylen < 3) {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		aeadctx->enckey_len = 0;
+		return	-EINVAL;
+	}
+	keylen -= 3;
+	memcpy(aeadctx->salt, key + keylen, 3);
+	return chcr_aead_ccm_setkey(aead, key, keylen);
+}
+
+static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+			   unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(aead);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+	struct blkcipher_desc h_desc;
+	struct scatterlist src[1];
+	unsigned int ck_size;
+	int ret = 0, key_ctx_size = 0;
+
+	if (get_aead_subtype(aead) ==
+	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
+		memcpy(aeadctx->salt, key + keylen, 4);
+	}
+	if (keylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		crypto_tfm_set_flags((struct crypto_tfm *)aead,
+				     CRYPTO_TFM_RES_BAD_KEY_LEN);
+		aeadctx->enckey_len = 0;
+		pr_err("GCM: Invalid key length %d", keylen);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(aeadctx->key, key, keylen);
+	aeadctx->enckey_len = keylen;
+	key_ctx_size = sizeof(struct _key_ctx) +
+		((DIV_ROUND_UP(keylen, 16)) << 4) +
+		AEAD_H_SIZE;
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+						CHCR_KEYCTX_MAC_KEY_SIZE_128,
+						0, 0,
+						key_ctx_size >> 4);
+	/* Calculate H = CIPH(K, 0 repeated 16 times) with a synchronous AES
+	 * blkcipher; it goes into the key context.
+	 */
+	h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
+	if (IS_ERR(h_desc.tfm)) {
+		aeadctx->enckey_len = 0;
+		ret = -ENOMEM;
+		goto out;
+	}
+	h_desc.flags = 0;
+	ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
+	if (ret) {
+		aeadctx->enckey_len = 0;
+		goto out1;
+	}
+	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+	sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
+	ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
+
+out1:
+	crypto_free_blkcipher(h_desc.tfm);
+out:
+	return ret;
+}
+
+static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+				   unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(authenc);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	/* keys holds both the auth and cipher keys */
+	struct crypto_authenc_keys keys;
+	unsigned int bs;
+	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+	int err = 0, i, key_ctx_len = 0;
+	unsigned char ck_size = 0;
+	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+	struct crypto_shash *base_hash = NULL;
+	struct algo_param param;
+	int align;
+	u8 *o_ptr = NULL;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		goto out;
+	}
+
+	if (get_alg_config(&param, max_authsize)) {
+		pr_err("chcr : Unsupported digest size\n");
+		goto out;
+	}
+	if (keys.enckeylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keys.enckeylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keys.enckeylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		pr_err("chcr : Unsupported cipher key\n");
+		goto out;
+	}
+
+	/* Copy only the encryption key. The authkey is used to generate
+	 * h(ipad) and h(opad), so it is not needed again; authkeylen is
+	 * the size of the hash digest.
+	 */
+	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+	aeadctx->enckey_len = keys.enckeylen;
+	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+			    aeadctx->enckey_len << 3);
+
+	base_hash  = chcr_alloc_shash(max_authsize);
+	if (IS_ERR(base_hash)) {
+		pr_err("chcr : Base driver cannot be loaded\n");
+		goto out;
+	}
+	{
+		SHASH_DESC_ON_STACK(shash, base_hash);
+		shash->tfm = base_hash;
+		shash->flags = crypto_shash_get_flags(base_hash);
+		bs = crypto_shash_blocksize(base_hash);
+		align = KEYCTX_ALIGN_PAD(max_authsize);
+		o_ptr =  actx->h_iopad + param.result_size + align;
+
+		if (keys.authkeylen > bs) {
+			err = crypto_shash_digest(shash, keys.authkey,
+						  keys.authkeylen,
+						  o_ptr);
+			if (err) {
+				pr_err("chcr : Base driver cannot be loaded\n");
+				goto out;
+			}
+			keys.authkeylen = max_authsize;
+		} else
+			memcpy(o_ptr, keys.authkey, keys.authkeylen);
+
+		/* Compute the ipad-digest*/
+		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+		memcpy(pad, o_ptr, keys.authkeylen);
+		for (i = 0; i < bs >> 2; i++)
+			*((unsigned int *)pad + i) ^= IPAD_DATA;
+
+		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+					      max_authsize))
+			goto out;
+		/* Compute the opad-digest */
+		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+		memcpy(pad, o_ptr, keys.authkeylen);
+		for (i = 0; i < bs >> 2; i++)
+			*((unsigned int *)pad + i) ^= OPAD_DATA;
+
+		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+			goto out;
+
+		/* convert the ipad and opad digest to network order */
+		chcr_change_order(actx->h_iopad, param.result_size);
+		chcr_change_order(o_ptr, param.result_size);
+		key_ctx_len = sizeof(struct _key_ctx) +
+			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+			(param.result_size + align) * 2;
+		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+						0, 1, key_ctx_len >> 4);
+		actx->auth_mode = param.auth_mode;
+		chcr_free_shash(base_hash);
+
+		return 0;
+	}
+out:
+	aeadctx->enckey_len = 0;
+	if (base_hash)
+		chcr_free_shash(base_hash);
+	return -EINVAL;
+}
+
+static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+					const u8 *key, unsigned int keylen)
+{
+	struct chcr_context *ctx = crypto_aead_ctx(authenc);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+	struct crypto_authenc_keys keys;
+
+	/* key holds both the auth and cipher keys */
+	int key_ctx_len = 0;
+	unsigned char ck_size = 0;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		goto out;
+	}
+	if (keys.enckeylen == AES_KEYSIZE_128) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+	} else if (keys.enckeylen == AES_KEYSIZE_192) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+	} else if (keys.enckeylen == AES_KEYSIZE_256) {
+		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+	} else {
+		pr_err("chcr : Unsupported cipher key\n");
+		goto out;
+	}
+	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+	aeadctx->enckey_len = keys.enckeylen;
+	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+				    aeadctx->enckey_len << 3);
+	key_ctx_len =  sizeof(struct _key_ctx)
+		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+
+	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+						0, key_ctx_len >> 4);
+	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+	return 0;
+out:
+	aeadctx->enckey_len = 0;
+	return -EINVAL;
+}
+
+static int chcr_aead_encrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+
+	reqctx->verify = VERIFY_HW;
+
+	switch (get_aead_subtype(tfm)) {
+	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+				    create_authenc_wr);
+	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+				    create_aead_ccm_wr);
+	default:
+		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+				    create_gcm_wr);
+	}
+}
+
+static int chcr_aead_decrypt(struct aead_request *req)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	int size;
+
+	if (aeadctx->mayverify == VERIFY_SW) {
+		size = crypto_aead_maxauthsize(tfm);
+		reqctx->verify = VERIFY_SW;
+	} else {
+		size = 0;
+		reqctx->verify = VERIFY_HW;
+	}
+
+	switch (get_aead_subtype(tfm)) {
+	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+				    create_authenc_wr);
+	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+				    create_aead_ccm_wr);
+	default:
+		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+				    create_gcm_wr);
+	}
+}
+
+static int chcr_aead_op(struct aead_request *req,
+			  unsigned short op_type,
+			  int size,
+			  create_wr_t create_wr_fn)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_context *ctx = crypto_aead_ctx(tfm);
+	struct uld_ctx *u_ctx = ULD_CTX(ctx);
+	struct sk_buff *skb;
+
+	if (ctx && !ctx->dev) {
+		pr_err("chcr : %s : No crypto device.\n", __func__);
+		return -ENXIO;
+	}
+	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+				   ctx->tx_channel_id)) {
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -EBUSY;
+	}
+
+	/* Form a WR from req */
+	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+			   op_type);
+
+	if (IS_ERR_OR_NULL(skb)) {
+		pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+		return skb ? PTR_ERR(skb) : -ENOMEM;
+	}
+
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+	chcr_send_wr(skb);
+	return -EINPROGRESS;
+}
+
 static struct chcr_alg_template driver_algs[] = {
 	/* AES-CBC */
 	{
@@ -1256,7 +2468,7 @@ static struct chcr_alg_template driver_algs[] = {
 		.is_registered = 0,
 		.alg.crypto = {
 			.cra_name		= "cbc(aes)",
-			.cra_driver_name	= "cbc(aes-chcr)",
+			.cra_driver_name	= "cbc-aes-chcr",
 			.cra_priority		= CHCR_CRA_PRIORITY,
 			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
 				CRYPTO_ALG_ASYNC,
@@ -1283,7 +2495,7 @@ static struct chcr_alg_template driver_algs[] = {
 		.is_registered = 0,
 		.alg.crypto =   {
 			.cra_name		= "xts(aes)",
-			.cra_driver_name	= "xts(aes-chcr)",
+			.cra_driver_name	= "xts-aes-chcr",
 			.cra_priority		= CHCR_CRA_PRIORITY,
 			.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
 				CRYPTO_ALG_ASYNC,
@@ -1376,7 +2588,7 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA1_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha1)",
-				.cra_driver_name = "hmac(sha1-chcr)",
+				.cra_driver_name = "hmac-sha1-chcr",
 				.cra_blocksize = SHA1_BLOCK_SIZE,
 			}
 		}
@@ -1388,7 +2600,7 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA224_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha224)",
-				.cra_driver_name = "hmac(sha224-chcr)",
+				.cra_driver_name = "hmac-sha224-chcr",
 				.cra_blocksize = SHA224_BLOCK_SIZE,
 			}
 		}
@@ -1400,7 +2612,7 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA256_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha256)",
-				.cra_driver_name = "hmac(sha256-chcr)",
+				.cra_driver_name = "hmac-sha256-chcr",
 				.cra_blocksize = SHA256_BLOCK_SIZE,
 			}
 		}
@@ -1412,7 +2624,7 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA384_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha384)",
-				.cra_driver_name = "hmac(sha384-chcr)",
+				.cra_driver_name = "hmac-sha384-chcr",
 				.cra_blocksize = SHA384_BLOCK_SIZE,
 			}
 		}
@@ -1424,11 +2636,205 @@ static struct chcr_alg_template driver_algs[] = {
 			.halg.digestsize = SHA512_DIGEST_SIZE,
 			.halg.base = {
 				.cra_name = "hmac(sha512)",
-				.cra_driver_name = "hmac(sha512-chcr)",
+				.cra_driver_name = "hmac-sha512-chcr",
 				.cra_blocksize = SHA512_BLOCK_SIZE,
 			}
 		}
 	},
+	/* Add AEAD Algorithms */
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "gcm-aes-chcr",
+				.cra_blocksize	= 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_gcm_ctx),
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = GHASH_DIGEST_SIZE,
+			.setkey = chcr_gcm_setkey,
+			.setauthsize = chcr_gcm_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "rfc4106(gcm(aes))",
+				.cra_driver_name = "rfc4106-gcm-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_gcm_ctx),
+
+			},
+			.ivsize = 8,
+			.maxauthsize	= GHASH_DIGEST_SIZE,
+			.setkey = chcr_gcm_setkey,
+			.setauthsize	= chcr_4106_4309_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "ccm(aes)",
+				.cra_driver_name = "ccm-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize	= GHASH_DIGEST_SIZE,
+			.setkey = chcr_aead_ccm_setkey,
+			.setauthsize	= chcr_ccm_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "rfc4309(ccm(aes))",
+				.cra_driver_name = "rfc4309-ccm-aes-chcr",
+				.cra_blocksize	 = 1,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx),
+
+			},
+			.ivsize = 8,
+			.maxauthsize	= GHASH_DIGEST_SIZE,
+			.setkey = chcr_aead_rfc4309_setkey,
+			.setauthsize = chcr_4106_4309_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha1-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha256-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize	= SHA256_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha224-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha384-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name =
+					"authenc-hmac-sha512-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			.setkey = chcr_authenc_setkey,
+			.setauthsize = chcr_authenc_setauthsize,
+		}
+	},
+	{
+		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+		.is_registered = 0,
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(digest_null,cbc(aes))",
+				.cra_driver_name =
+					"authenc-digest_null-cbc-aes-chcr",
+				.cra_blocksize	 = AES_BLOCK_SIZE,
+				.cra_ctxsize =	sizeof(struct chcr_context) +
+						sizeof(struct chcr_aead_ctx) +
+						sizeof(struct chcr_authenc_ctx),
+
+			},
+			.ivsize  = AES_BLOCK_SIZE,
+			.maxauthsize = 0,
+			.setkey  = chcr_aead_digest_null_setkey,
+			.setauthsize = chcr_authenc_null_setauthsize,
+		}
+	},
 };
 
 /*
@@ -1446,6 +2852,11 @@ static int chcr_unregister_alg(void)
 				crypto_unregister_alg(
 						&driver_algs[i].alg.crypto);
 			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			if (driver_algs[i].is_registered)
+				crypto_unregister_aead(
+						&driver_algs[i].alg.aead);
+			break;
 		case CRYPTO_ALG_TYPE_AHASH:
 			if (driver_algs[i].is_registered)
 				crypto_unregister_ahash(
@@ -1480,6 +2891,19 @@ static int chcr_register_alg(void)
 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
 			name = driver_algs[i].alg.crypto.cra_driver_name;
 			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			driver_algs[i].alg.aead.base.cra_priority =
+				CHCR_CRA_PRIORITY;
+			driver_algs[i].alg.aead.base.cra_flags =
+				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+			err = crypto_register_aead(&driver_algs[i].alg.aead);
+			name = driver_algs[i].alg.aead.base.cra_driver_name;
+			break;
 		case CRYPTO_ALG_TYPE_AHASH:
 			a_hash = &driver_algs[i].alg.hash;
 			a_hash->update = chcr_ahash_update;
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index f2a5905..3c7c51f 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -258,13 +258,15 @@ enum {
  * where they indicate the size of the integrity check value (ICV)
  */
 enum {
-	AES_CCM_ICV_4   = 4,
-	AES_CCM_ICV_6   = 6,
-	AES_CCM_ICV_8   = 8,
-	AES_CCM_ICV_10  = 10,
-	AES_CCM_ICV_12  = 12,
-	AES_CCM_ICV_14  = 14,
-	AES_CCM_ICV_16 = 16
+	ICV_4  = 4,
+	ICV_6  = 6,
+	ICV_8  = 8,
+	ICV_10 = 10,
+	ICV_12 = 12,
+	ICV_13 = 13,
+	ICV_14 = 14,
+	ICV_15 = 15,
+	ICV_16 = 16
 };
 
 struct hash_op_params {
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 2f6156b..49e9975 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -111,14 +111,12 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
 	if (ack_err_status) {
 		if (CHK_MAC_ERR_BIT(ack_err_status) ||
 		    CHK_PAD_ERR_BIT(ack_err_status))
-			error_status = -EINVAL;
+			error_status = -EBADMSG;
 	}
 	/* call completion callback with failure status */
 	if (req) {
-		if (!chcr_handle_resp(req, input, error_status))
-			req->complete(req, error_status);
-		else
-			return -EINVAL;
+		error_status = chcr_handle_resp(req, input, error_status);
+		req->complete(req, error_status);
 	} else {
 		pr_err("Incorrect request address from the firmware\n");
 		return -EFAULT;
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index fc3cd77..c7088a4 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -72,9 +72,7 @@ struct chcr_wr {
 };
 
 struct chcr_dev {
-	/* Request submited to h/w and waiting for response. */
 	spinlock_t lock_chcr_dev;
-	struct crypto_queue pending_queue;
 	struct uld_ctx *u_ctx;
 	unsigned char tx_channel_id;
 };
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 40a5182..d5af7d6 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -36,6 +36,14 @@
 #ifndef __CHCR_CRYPTO_H__
 #define __CHCR_CRYPTO_H__
 
+#define GHASH_BLOCK_SIZE    16
+#define GHASH_DIGEST_SIZE   16
+
+#define CCM_B0_SIZE             16
+#define CCM_AAD_FIELD_SIZE      2
+#define T5_MAX_AAD_SIZE 512
+
+
 /* Define following if h/w is not dropping the AAD and IV data before
  * giving the processed data
  */
@@ -63,22 +71,36 @@
 #define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
 #define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
 
-#define CHCR_SCMD_CIPHER_MODE_NOP           0
-#define CHCR_SCMD_CIPHER_MODE_AES_CBC       1
-#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES   4
-#define CHCR_SCMD_CIPHER_MODE_AES_XTS       6
+#define CHCR_SCMD_CIPHER_MODE_NOP               0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC           1
+#define CHCR_SCMD_CIPHER_MODE_AES_GCM           2
+#define CHCR_SCMD_CIPHER_MODE_AES_CTR           3
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES       4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS           6
+#define CHCR_SCMD_CIPHER_MODE_AES_CCM           7
 
 #define CHCR_SCMD_AUTH_MODE_NOP             0
 #define CHCR_SCMD_AUTH_MODE_SHA1            1
 #define CHCR_SCMD_AUTH_MODE_SHA224          2
 #define CHCR_SCMD_AUTH_MODE_SHA256          3
+#define CHCR_SCMD_AUTH_MODE_GHASH           4
 #define CHCR_SCMD_AUTH_MODE_SHA512_224      5
 #define CHCR_SCMD_AUTH_MODE_SHA512_256      6
 #define CHCR_SCMD_AUTH_MODE_SHA512_384      7
 #define CHCR_SCMD_AUTH_MODE_SHA512_512      8
+#define CHCR_SCMD_AUTH_MODE_CBCMAC          9
+#define CHCR_SCMD_AUTH_MODE_CMAC            10
 
 #define CHCR_SCMD_HMAC_CTRL_NOP             0
 #define CHCR_SCMD_HMAC_CTRL_NO_TRUNC        1
+#define CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366   2
+#define CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT     3
+#define CHCR_SCMD_HMAC_CTRL_PL1		    4
+#define CHCR_SCMD_HMAC_CTRL_PL2		    5
+#define CHCR_SCMD_HMAC_CTRL_PL3		    6
+#define CHCR_SCMD_HMAC_CTRL_DIV2	    7
+#define VERIFY_HW 0
+#define VERIFY_SW 1
 
 #define CHCR_SCMD_IVGEN_CTRL_HW             0
 #define CHCR_SCMD_IVGEN_CTRL_SW             1
@@ -106,12 +128,20 @@
 #define IV_IMMEDIATE            1
 #define IV_DSGL			2
 
+#define AEAD_H_SIZE             16
+
 #define CRYPTO_ALG_SUB_TYPE_MASK            0x0f000000
 #define CRYPTO_ALG_SUB_TYPE_HASH_HMAC       0x01000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106    0x02000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_GCM	    0x03000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC    0x04000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_CCM        0x05000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309    0x06000000
+#define CRYPTO_ALG_SUB_TYPE_AEAD_NULL       0x07000000
+#define CRYPTO_ALG_SUB_TYPE_CTR             0x08000000
 #define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
 			      CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
 
-#define MAX_SALT                4
 #define MAX_SCRATCH_PAD_SIZE    32
 
 #define CHCR_HASH_MAX_BLOCK_SIZE_64  64
@@ -126,6 +156,42 @@ struct ablk_ctx {
 	unsigned char ciph_mode;
 	u8 rrkey[AES_MAX_KEY_SIZE];
 };
+struct chcr_aead_reqctx {
+	struct	sk_buff	*skb;
+	short int dst_nents;
+	u16 verify;
+	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+	unsigned char scratch_pad[MAX_SCRATCH_PAD_SIZE];
+};
+
+struct chcr_gcm_ctx {
+	u8 ghash_h[AEAD_H_SIZE];
+};
+
+struct chcr_authenc_ctx {
+	u8 dec_rrkey[AES_MAX_KEY_SIZE];
+	u8 h_iopad[2 * CHCR_HASH_MAX_DIGEST_SIZE];
+	unsigned char auth_mode;
+};
+
+struct __aead_ctx {
+	struct chcr_gcm_ctx gcm[0];
+	struct chcr_authenc_ctx authenc[0];
+};
+
+struct chcr_aead_ctx {
+	__be32 key_ctx_hdr;
+	unsigned int enckey_len;
+	struct crypto_skcipher *null;
+	u8 salt[MAX_SALT];
+	u8 key[CHCR_AES_MAX_KEY_LEN];
+	u16 hmac_ctrl;
+	u16 mayverify;
+	struct	__aead_ctx ctx[0];
+};
+
 
 
 struct hmac_ctx {
@@ -137,6 +203,7 @@ struct hmac_ctx {
 struct __crypto_ctx {
 	struct hmac_ctx hmacctx[0];
 	struct ablk_ctx ablkctx[0];
+	struct chcr_aead_ctx aeadctx[0];
 };
 
 struct chcr_context {
@@ -171,16 +238,19 @@ struct chcr_alg_template {
 	union {
 		struct crypto_alg crypto;
 		struct ahash_alg hash;
+		struct aead_alg aead;
 	} alg;
 };
 
 struct chcr_req_ctx {
 	union {
 		struct ahash_request *ahash_req;
+		struct aead_request *aead_req;
 		struct ablkcipher_request *ablk_req;
 	} req;
 	union {
 		struct chcr_ahash_req_ctx *ahash_ctx;
+		struct chcr_aead_reqctx *reqctx;
 		struct chcr_blkcipher_req_ctx *ablk_ctx;
 	} ctx;
 };
@@ -190,9 +260,15 @@ struct sge_opaque_hdr {
 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 };
 
-typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
-				       struct chcr_context *ctx,
+typedef struct sk_buff *(*create_wr_t)(struct aead_request *req,
 				       unsigned short qid,
+				       int size,
 				       unsigned short op_type);
 
+static int chcr_aead_op(struct aead_request *req_base,
+			  unsigned short op_type,
+			  int size,
+			  create_wr_t create_wr_fn);
+static inline int get_aead_subtype(struct crypto_aead *aead);
+
 #endif /* __CHCR_CRYPTO_H__ */
-- 
1.8.2.3

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH 6/6] Add support for AEAD algos.
  2016-10-13 11:09 ` [PATCH 6/6] Add support for AEAD algos Harsh Jain
@ 2016-10-14 14:24   ` Stephan Mueller
  2016-10-27 10:06     ` Harsh Jain
  0 siblings, 1 reply; 16+ messages in thread
From: Stephan Mueller @ 2016-10-14 14:24 UTC (permalink / raw)
  To: Harsh Jain
  Cc: dan.carpenter, herbert, linux-crypto, jlulla, atul.gupta,
	yeshaswi, hariprasad

On Thursday, 13 October 2016, 16:39:39 CEST, Harsh Jain wrote:

Hi Harsh,

> Add support for following AEAD algos.
>  GCM,CCM,RFC4106,RFC4309,authenc(hmac(shaXXX),cbc(aes)).
> 
> Signed-off-by: Harsh Jain <harsh@chelsio.com>
> ---
>  drivers/crypto/chelsio/Kconfig       |    1 +
>  drivers/crypto/chelsio/chcr_algo.c   | 1466
> +++++++++++++++++++++++++++++++++- drivers/crypto/chelsio/chcr_algo.h   |  
> 16 +-
>  drivers/crypto/chelsio/chcr_core.c   |    8 +-
>  drivers/crypto/chelsio/chcr_core.h   |    2 -
>  drivers/crypto/chelsio/chcr_crypto.h |   90 ++-
>  6 files changed, 1541 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
> index 4ce67fb..3e104f5 100644
> --- a/drivers/crypto/chelsio/Kconfig
> +++ b/drivers/crypto/chelsio/Kconfig
> @@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO
>  	select CRYPTO_SHA1
>  	select CRYPTO_SHA256
>  	select CRYPTO_SHA512
> +	select CRYPTO_AUTHENC
>  	---help---
>  	  The Chelsio Crypto Co-processor driver for T6 adapters.
> 
> diff --git a/drivers/crypto/chelsio/chcr_algo.c
> b/drivers/crypto/chelsio/chcr_algo.c index 18385d6..cffc38f 100644
> --- a/drivers/crypto/chelsio/chcr_algo.c
> +++ b/drivers/crypto/chelsio/chcr_algo.c
> @@ -54,6 +54,12 @@
>  #include <crypto/algapi.h>
>  #include <crypto/hash.h>
>  #include <crypto/sha.h>
> +#include <crypto/authenc.h>
> +#include <crypto/internal/aead.h>
> +#include <crypto/null.h>
> +#include <crypto/internal/skcipher.h>
> +#include <crypto/aead.h>
> +#include <crypto/scatterwalk.h>
>  #include <crypto/internal/hash.h>
> 
>  #include "t4fw_api.h"
> @@ -62,6 +68,11 @@
>  #include "chcr_algo.h"
>  #include "chcr_crypto.h"
> 
> +static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
> +{
> +	return ctx->crypto_ctx->aeadctx;
> +}
> +
>  static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
>  {
>  	return ctx->crypto_ctx->ablkctx;
> @@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct
> chcr_context *ctx) return ctx->crypto_ctx->hmacctx;
>  }
> 
> +static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
> +{
> +	return gctx->ctx->gcm;
> +}
> +
> +static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx
> *gctx) +{
> +	return gctx->ctx->authenc;
> +}
> +
>  static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
>  {
>  	return ctx->dev->u_ctx;
> @@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
>  	return (3 * n) / 2 + (n & 1) + 2;
>  }
> 
> +static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
> +{
> +	u8 temp[SHA512_DIGEST_SIZE];
> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> +	int authsize = crypto_aead_authsize(tfm);
> +	struct cpl_fw6_pld *fw6_pld;
> +	int cmp = 0;
> +
> +	fw6_pld = (struct cpl_fw6_pld *)input;
> +	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
> +	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
> +		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
> +	} else {
> +
> +		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
> +				authsize, req->assoclen +
> +				req->cryptlen - authsize);

I am wondering whether the math is correct here in any case. It is permissible 
that we have an AAD size of 0 and even a zero-sized ciphertext. How is such a
scenario covered here?

> +		cmp = memcmp(temp, (fw6_pld + 1), authsize);

I would guess in both cases memcmp should be replaced with crypto_memneq

> +	}
> +	if (cmp)
> +		*err = -EBADMSG;
> +	else
> +		*err = 0;

What do you think about memzero_explicit(tmp)?

> +}
> +
>  /*
>   *	chcr_handle_resp - Unmap the DMA buffers associated with the request
>   *	@req: crypto request
>   */
>  int chcr_handle_resp(struct crypto_async_request *req, unsigned char
> *input, -		     int error_status)
> +			 int err)
>  {
>  	struct crypto_tfm *tfm = req->tfm;
>  	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
> @@ -109,11 +155,27 @@ int chcr_handle_resp(struct crypto_async_request *req,
> unsigned char *input, unsigned int digestsize, updated_digestsize;
> 
>  	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
> +	case CRYPTO_ALG_TYPE_AEAD:
> +		ctx_req.req.aead_req = (struct aead_request *)req;
> +		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
> +		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
> +			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
> +		if (ctx_req.ctx.reqctx->skb) {
> +			kfree_skb(ctx_req.ctx.reqctx->skb);
> +			ctx_req.ctx.reqctx->skb = NULL;
> +		}
> +		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
> +			chcr_verify_tag(ctx_req.req.aead_req, input,
> +					&err);
> +			ctx_req.ctx.reqctx->verify = VERIFY_HW;
> +		}
> +		break;
> +
>  	case CRYPTO_ALG_TYPE_BLKCIPHER:
>  		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
>  		ctx_req.ctx.ablk_ctx =
>  			ablkcipher_request_ctx(ctx_req.req.ablk_req);
> -		if (!error_status) {
> +		if (!err) {
>  			fw6_pld = (struct cpl_fw6_pld *)input;
>  			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
>  			       AES_BLOCK_SIZE);
> @@ -154,7 +216,7 @@ int chcr_handle_resp(struct crypto_async_request *req,
> unsigned char *input, }
>  		break;
>  	}
> -	return 0;
> +	return err;
>  }
> 
>  /*
> @@ -380,6 +442,14 @@ static inline int map_writesg_phys_cpl(struct device
> *dev, return 0;
>  }
> 
> +static inline int get_aead_subtype(struct crypto_aead *aead)
> +{
> +	struct aead_alg *alg = crypto_aead_alg(aead);
> +	struct chcr_alg_template *chcr_crypto_alg =
> +		container_of(alg, struct chcr_alg_template, alg.aead);
> +	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
> +}
> +
>  static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
>  {
>  	struct crypto_alg *alg = tfm->__crt_alg;
> @@ -447,7 +517,8 @@ static inline void create_wreq(struct chcr_context *ctx,
> struct chcr_wr *chcr_req,
>  			       void *req, struct sk_buff *skb,
>  			       int kctx_len, int hash_sz,
> -			       unsigned int phys_dsgl)
> +			       int is_iv,
> +			       unsigned int sc_len)
>  {
>  	struct uld_ctx *u_ctx = ULD_CTX(ctx);
>  	int iv_loc = IV_DSGL;
> @@ -472,7 +543,7 @@ static inline void create_wreq(struct chcr_context *ctx,
> chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
>  	chcr_req->wreq.rx_chid_to_rx_q_id =
>  		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
> -				(hash_sz) ? IV_NOP : iv_loc);
> +				is_iv ? iv_loc : IV_NOP);
> 
>  	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
>  	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
> @@ -481,10 +552,7 @@ static inline void create_wreq(struct chcr_context
> *ctx, chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
>  	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
>  				   sizeof(chcr_req->key_ctx) +
> -				   kctx_len +
> -				  ((hash_sz) ? DUMMY_BYTES :
> -				  (sizeof(struct cpl_rx_phys_dsgl) +
> -				   phys_dsgl)) + immdatalen);
> +				   kctx_len + sc_len + immdatalen);
>  }
> 
>  /**
> @@ -582,7 +650,8 @@ static struct sk_buff
>  	memcpy(reqctx->iv, req->info, ivsize);
>  	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
>  	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
> -	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, phys_dsgl);
> +	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
> +			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
>  	reqctx->skb = skb;
>  	skb_get(skb);
>  	return skb;
> @@ -706,11 +775,11 @@ static int chcr_device_init(struct chcr_context *ctx)
>  		}
>  		u_ctx = ULD_CTX(ctx);
>  		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
> -		ctx->dev->tx_channel_id = 0;
>  		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
>  		rxq_idx += id % rxq_perchan;
>  		spin_lock(&ctx->dev->lock_chcr_dev);
>  		ctx->tx_channel_id = rxq_idx;
> +		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
>  		spin_unlock(&ctx->dev->lock_chcr_dev);
>  	}
>  out:
> @@ -769,7 +838,7 @@ static inline void chcr_free_shash(struct crypto_shash
> *base_hash) *	@req - Cipher req base
>   */
>  static struct sk_buff *create_hash_wr(struct ahash_request *req,
> -					    struct hash_wr_param *param)
> +				      struct hash_wr_param *param)
>  {
>  	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
>  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
> @@ -840,8 +909,8 @@ static struct sk_buff *create_hash_wr(struct
> ahash_request *req, if (param->sg_len != 0)
>  		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
> 
> -	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response,
> -		    0);
> +	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
> +			DUMMY_BYTES);
>  	req_ctx->skb = skb;
>  	skb_get(skb);
>  	return skb;
> @@ -1249,6 +1318,1149 @@ static void chcr_hmac_cra_exit(struct crypto_tfm
> *tfm) }
>  }
> 
> +static int chcr_copy_assoc(struct aead_request *req,
> +				struct chcr_aead_ctx *ctx)
> +{
> +	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
> +
> +	skcipher_request_set_tfm(skreq, ctx->null);
> +	skcipher_request_set_callback(skreq, aead_request_flags(req),
> +			NULL, NULL);
> +	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
> +			NULL);
> +
> +	return crypto_skcipher_encrypt(skreq);
> +}
> +
> +static unsigned char get_hmac(unsigned int authsize)
> +{
> +	switch (authsize) {
> +	case ICV_8:
> +		return CHCR_SCMD_HMAC_CTRL_PL1;
> +	case ICV_10:
> +		return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
> +	case ICV_12:
> +		return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
> +	}
> +	return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
> +}
> +
> +
> +static struct sk_buff *create_authenc_wr(struct aead_request *req,
> +					 unsigned short qid,
> +					 int size,
> +					 unsigned short op_type)
> +{
> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> +	struct chcr_context *ctx = crypto_aead_ctx(tfm);
> +	struct uld_ctx *u_ctx = ULD_CTX(ctx);
> +	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
> +	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
> +	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
> +	struct sk_buff *skb = NULL;
> +	struct chcr_wr *chcr_req;
> +	struct cpl_rx_phys_dsgl *phys_cpl;
> +	struct phys_sge_parm sg_param;
> +	struct scatterlist *src, *dst;
> +	struct scatterlist src_sg[2], dst_sg[2];
> +	unsigned int frags = 0, transhdr_len;
> +	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
> +	unsigned int   kctx_len = 0;
> +	unsigned short stop_offset = 0;
> +	unsigned int  assoclen = req->assoclen;
> +	unsigned int  authsize = crypto_aead_authsize(tfm);
> +	int err = 0;
> +	int null = 0;
> +	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
> +		GFP_ATOMIC;
> +
> +	if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
> +		goto err;
> +	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
> +	dst = src;
> +	if (req->src != req->dst) {
> +		err = chcr_copy_assoc(req, aeadctx);
> +		if (err)
> +			return ERR_PTR(err);
> +		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
> +	}
> +	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
> +		null = 1;
> +		assoclen = 0;
> +	}
> +	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
> +					     (op_type ? -authsize : authsize));
> +	if (reqctx->dst_nents <= 0) {
> +		pr_err("AUTHENC:Invalid Destination sg entries\n");
> +		goto err;
> +	}
> +	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
> +	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
> +		- sizeof(chcr_req->key_ctx);
> +	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
> +	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
> +	if (!skb)
> +		goto err;
> +
> +	/* LLD is going to write the sge hdr. */
> +	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
> +
> +	/* Write WR */
> +	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
> +	memset(chcr_req, 0, transhdr_len);
> +
> +	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
> +
> +	/*
> +	 * Input order	is AAD,IV and Payload. where IV should be included as
> +	 * the part of authdata. All other fields should be filled according
> +	 * to the hardware spec
> +	 */
> +	chcr_req->sec_cpl.op_ivinsrtofst =
> +		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
> +				       (ivsize ? (assoclen + 1) : 0));
> +	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
> +	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
> +					assoclen ? 1 : 0, assoclen,
> +					assoclen + ivsize + 1,
> +					(stop_offset & 0x1F0) >> 4);
> +	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
> +					stop_offset & 0xF,
> +					null ? 0 : assoclen + ivsize + 1,
> +					stop_offset, stop_offset);
> +	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
> +					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
> +					CHCR_SCMD_CIPHER_MODE_AES_CBC,
> +					actx->auth_mode, aeadctx->hmac_ctrl,
> +					ivsize >> 1);
> +	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
> +					 0, 1, dst_size);
> +
> +	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
> +	if (op_type == CHCR_ENCRYPT_OP)
> +		memcpy(chcr_req->key_ctx.key, aeadctx->key,
> +		       aeadctx->enckey_len);
> +	else
> +		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
> +		       aeadctx->enckey_len);
> +
> +	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
> +					4), actx->h_iopad, kctx_len -
> +				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
> +
> +	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
> +	sg_param.nents = reqctx->dst_nents;
> +	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);

Just like above: is it ensured that we cannot have negative results here in 
case cryptlen is less than authsize?


Ciao
Stephan

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/6] chcr: Remove malloc/free
  2016-10-13 11:09 ` [PATCH 2/6] chcr: Remove malloc/free Harsh Jain
@ 2016-10-21  2:20   ` Herbert Xu
  2016-10-26 11:29     ` Harsh Jain
  0 siblings, 1 reply; 16+ messages in thread
From: Herbert Xu @ 2016-10-21  2:20 UTC (permalink / raw)
  To: Harsh Jain
  Cc: dan.carpenter, linux-crypto, jlulla, atul.gupta, yeshaswi, hariprasad

On Thu, Oct 13, 2016 at 04:39:35PM +0530, Harsh Jain wrote:
> Remove malloc/free in crypto operation and allocate memory via cra_ctxsize.
> Added new structure chcr_wr to populate Work Request Header.
> Fixes: 324429d74127 (chcr: Support for Chelsio's Crypto Hardware)

Do you mean the reqsize as opposed to ctxsize since the latter is
shared by all tfm users?

In any case, your patch doesn't seem to change the size setting?

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 2/6] chcr: Remove malloc/free
  2016-10-21  2:20   ` Herbert Xu
@ 2016-10-26 11:29     ` Harsh Jain
  0 siblings, 0 replies; 16+ messages in thread
From: Harsh Jain @ 2016-10-26 11:29 UTC (permalink / raw)
  To: Herbert Xu
  Cc: dan.carpenter, linux-crypto, jlulla, atul.gupta, yeshaswi, hariprasad



On 21-10-2016 07:50, Herbert Xu wrote:
> On Thu, Oct 13, 2016 at 04:39:35PM +0530, Harsh Jain wrote:
>> Remove malloc/free in crypto operation and allocate memory via cra_ctxsize.
>> Added new structure chcr_wr to populate Work Request Header.
>> Fixes: 324429d74127 (chcr: Support for Chelsio's Crypto Hardware)
> Do you mean the reqsize as opposed to ctxsize since the latter is
> shared by all tfm users?
It's reqsize for hash algos (struct chcr_ahash_req_ctx) and ctxsize for cipher algos (struct ablk_ctx), since the rrkey (reverse round key) is the same for all tfm users.
>
> In any case, your patch doesn't seem to change the size setting?
The patch adds new variables to the existing structures; the size-setting code itself is unchanged.
In v2 I will break the patch into smaller ones.
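
For illustration, a minimal sketch of the reqsize/ctxsize distinction being discussed; the hook points and names below are illustrative, not necessarily the driver's actual code (crypto_ahash_set_reqsize() comes from <crypto/internal/hash.h>):

	/* Per-request state: one instance per in-flight request,
	 * sized via reqsize at tfm init time.
	 */
	static int chcr_ahash_cra_init(struct crypto_tfm *tfm)	/* hypothetical */
	{
		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
					 sizeof(struct chcr_ahash_req_ctx));
		return 0;
	}

	/* Per-tfm state: one instance shared by all users of the
	 * transform, sized via cra_ctxsize at registration time.
	 */
	.cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct ablk_ctx),
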
>
> Cheers,

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 6/6] Add support for AEAD algos.
  2016-10-14 14:24   ` Stephan Mueller
@ 2016-10-27 10:06     ` Harsh Jain
  2016-11-08 11:15       ` Stephan Mueller
  0 siblings, 1 reply; 16+ messages in thread
From: Harsh Jain @ 2016-10-27 10:06 UTC (permalink / raw)
  To: Stephan Mueller
  Cc: dan.carpenter, herbert, linux-crypto, jlulla, atul.gupta,
	yeshaswi, hariprasad



On 14-10-2016 19:54, Stephan Mueller wrote:
> On Thursday, 13 October 2016 at 16:39:39 CEST, Harsh Jain wrote:
>
> Hi Harsh,
>
>> Add support for following AEAD algos.
>>  GCM,CCM,RFC4106,RFC4309,authenc(hmac(shaXXX),cbc(aes)).
>>
>> Signed-off-by: Harsh Jain <harsh@chelsio.com>
>> ---
>>  drivers/crypto/chelsio/Kconfig       |    1 +
>>  drivers/crypto/chelsio/chcr_algo.c   | 1466
>> +++++++++++++++++++++++++++++++++- drivers/crypto/chelsio/chcr_algo.h   |  
>> 16 +-
>>  drivers/crypto/chelsio/chcr_core.c   |    8 +-
>>  drivers/crypto/chelsio/chcr_core.h   |    2 -
>>  drivers/crypto/chelsio/chcr_crypto.h |   90 ++-
>>  6 files changed, 1541 insertions(+), 42 deletions(-)
>>
>> diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
>> index 4ce67fb..3e104f5 100644
>> --- a/drivers/crypto/chelsio/Kconfig
>> +++ b/drivers/crypto/chelsio/Kconfig
>> @@ -4,6 +4,7 @@ config CRYPTO_DEV_CHELSIO
>>  	select CRYPTO_SHA1
>>  	select CRYPTO_SHA256
>>  	select CRYPTO_SHA512
>> +	select CRYPTO_AUTHENC
>>  	---help---
>>  	  The Chelsio Crypto Co-processor driver for T6 adapters.
>>
>> diff --git a/drivers/crypto/chelsio/chcr_algo.c
>> b/drivers/crypto/chelsio/chcr_algo.c index 18385d6..cffc38f 100644
>> --- a/drivers/crypto/chelsio/chcr_algo.c
>> +++ b/drivers/crypto/chelsio/chcr_algo.c
>> @@ -54,6 +54,12 @@
>>  #include <crypto/algapi.h>
>>  #include <crypto/hash.h>
>>  #include <crypto/sha.h>
>> +#include <crypto/authenc.h>
>> +#include <crypto/internal/aead.h>
>> +#include <crypto/null.h>
>> +#include <crypto/internal/skcipher.h>
>> +#include <crypto/aead.h>
>> +#include <crypto/scatterwalk.h>
>>  #include <crypto/internal/hash.h>
>>
>>  #include "t4fw_api.h"
>> @@ -62,6 +68,11 @@
>>  #include "chcr_algo.h"
>>  #include "chcr_crypto.h"
>>
>> +static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
>> +{
>> +	return ctx->crypto_ctx->aeadctx;
>> +}
>> +
>>  static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
>>  {
>>  	return ctx->crypto_ctx->ablkctx;
>> @@ -72,6 +83,16 @@ static inline struct hmac_ctx *HMAC_CTX(struct
>> chcr_context *ctx) return ctx->crypto_ctx->hmacctx;
>>  }
>>
>> +static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
>> +{
>> +	return gctx->ctx->gcm;
>> +}
>> +
>> +static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx
>> *gctx) +{
>> +	return gctx->ctx->authenc;
>> +}
>> +
>>  static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
>>  {
>>  	return ctx->dev->u_ctx;
>> @@ -94,12 +115,37 @@ static inline unsigned int sgl_len(unsigned int n)
>>  	return (3 * n) / 2 + (n & 1) + 2;
>>  }
>>
>> +static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
>> +{
>> +	u8 temp[SHA512_DIGEST_SIZE];
>> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
>> +	int authsize = crypto_aead_authsize(tfm);
>> +	struct cpl_fw6_pld *fw6_pld;
>> +	int cmp = 0;
>> +
>> +	fw6_pld = (struct cpl_fw6_pld *)input;
>> +	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
>> +	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
>> +		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
>> +	} else {
>> +
>> +		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
>> +				authsize, req->assoclen +
>> +				req->cryptlen - authsize);
> I am wondering whether the math is correct here in any case. It is permissible 
> that we have an AAD size of 0 and even a zero-sized ciphertext. How is such 
> scenario covered here?
Here we copy the user-supplied tag into a local buffer (temp) for the decrypt operation only. The relative index of the tag in the src sg list
does not change when the AAD is zero, and for a decrypt operation cryptlen > authsize.
>
>> +		cmp = memcmp(temp, (fw6_pld + 1), authsize);
> I would guess in both cases memcmp should be replaced with crypto_memneq
Yes, that can be done.
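
For reference, a sketch of the constant-time variant (crypto_memneq() is declared in <crypto/algapi.h>, which chcr_algo.c already includes, and returns non-zero on mismatch, so the error mapping is unchanged):

	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], fw6_pld + 1, authsize);
	} else {
		/* temp was filled from req->src by sg_pcopy_to_buffer() */
		cmp = crypto_memneq(temp, fw6_pld + 1, authsize);
	}
	*err = cmp ? -EBADMSG : 0;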

>
>> +	}
>> +	if (cmp)
>> +		*err = -EBADMSG;
>> +	else
>> +		*err = 0;
> What do you think about memzero_explicit(tmp)?
I have no idea why we need to explicitly zero a local variable. Please share some resources that explain this.

>
>> +}
>> +
>>  /*
>>   *	chcr_handle_resp - Unmap the DMA buffers associated with the request
>>   *	@req: crypto request
>>   */
>>  int chcr_handle_resp(struct crypto_async_request *req, unsigned char
>> *input, -		     int error_status)
>> +			 int err)
>>  {
>>  	struct crypto_tfm *tfm = req->tfm;
>>  	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
>> @@ -109,11 +155,27 @@ int chcr_handle_resp(struct crypto_async_request *req,
>> unsigned char *input, unsigned int digestsize, updated_digestsize;
>>
>>  	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
>> +	case CRYPTO_ALG_TYPE_AEAD:
>> +		ctx_req.req.aead_req = (struct aead_request *)req;
>> +		ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
>> +		dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
>> +			     ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
>> +		if (ctx_req.ctx.reqctx->skb) {
>> +			kfree_skb(ctx_req.ctx.reqctx->skb);
>> +			ctx_req.ctx.reqctx->skb = NULL;
>> +		}
>> +		if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
>> +			chcr_verify_tag(ctx_req.req.aead_req, input,
>> +					&err);
>> +			ctx_req.ctx.reqctx->verify = VERIFY_HW;
>> +		}
>> +		break;
>> +
>>  	case CRYPTO_ALG_TYPE_BLKCIPHER:
>>  		ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
>>  		ctx_req.ctx.ablk_ctx =
>>  			ablkcipher_request_ctx(ctx_req.req.ablk_req);
>> -		if (!error_status) {
>> +		if (!err) {
>>  			fw6_pld = (struct cpl_fw6_pld *)input;
>>  			memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
>>  			       AES_BLOCK_SIZE);
>> @@ -154,7 +216,7 @@ int chcr_handle_resp(struct crypto_async_request *req,
>> unsigned char *input, }
>>  		break;
>>  	}
>> -	return 0;
>> +	return err;
>>  }
>>
>>  /*
>> @@ -380,6 +442,14 @@ static inline int map_writesg_phys_cpl(struct device
>> *dev, return 0;
>>  }
>>
>> +static inline int get_aead_subtype(struct crypto_aead *aead)
>> +{
>> +	struct aead_alg *alg = crypto_aead_alg(aead);
>> +	struct chcr_alg_template *chcr_crypto_alg =
>> +		container_of(alg, struct chcr_alg_template, alg.aead);
>> +	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
>> +}
>> +
>>  static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
>>  {
>>  	struct crypto_alg *alg = tfm->__crt_alg;
>> @@ -447,7 +517,8 @@ static inline void create_wreq(struct chcr_context *ctx,
>> struct chcr_wr *chcr_req,
>>  			       void *req, struct sk_buff *skb,
>>  			       int kctx_len, int hash_sz,
>> -			       unsigned int phys_dsgl)
>> +			       int is_iv,
>> +			       unsigned int sc_len)
>>  {
>>  	struct uld_ctx *u_ctx = ULD_CTX(ctx);
>>  	int iv_loc = IV_DSGL;
>> @@ -472,7 +543,7 @@ static inline void create_wreq(struct chcr_context *ctx,
>> chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
>>  	chcr_req->wreq.rx_chid_to_rx_q_id =
>>  		FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
>> -				(hash_sz) ? IV_NOP : iv_loc);
>> +				is_iv ? iv_loc : IV_NOP);
>>
>>  	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
>>  	chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
>> @@ -481,10 +552,7 @@ static inline void create_wreq(struct chcr_context
>> *ctx, chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
>>  	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
>>  				   sizeof(chcr_req->key_ctx) +
>> -				   kctx_len +
>> -				  ((hash_sz) ? DUMMY_BYTES :
>> -				  (sizeof(struct cpl_rx_phys_dsgl) +
>> -				   phys_dsgl)) + immdatalen);
>> +				   kctx_len + sc_len + immdatalen);
>>  }
>>
>>  /**
>> @@ -582,7 +650,8 @@ static struct sk_buff
>>  	memcpy(reqctx->iv, req->info, ivsize);
>>  	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
>>  	write_sg_to_skb(skb, &frags, req->src, req->nbytes);
>> -	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, phys_dsgl);
>> +	create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
>> +			sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
>>  	reqctx->skb = skb;
>>  	skb_get(skb);
>>  	return skb;
>> @@ -706,11 +775,11 @@ static int chcr_device_init(struct chcr_context *ctx)
>>  		}
>>  		u_ctx = ULD_CTX(ctx);
>>  		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
>> -		ctx->dev->tx_channel_id = 0;
>>  		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
>>  		rxq_idx += id % rxq_perchan;
>>  		spin_lock(&ctx->dev->lock_chcr_dev);
>>  		ctx->tx_channel_id = rxq_idx;
>> +		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
>>  		spin_unlock(&ctx->dev->lock_chcr_dev);
>>  	}
>>  out:
>> @@ -769,7 +838,7 @@ static inline void chcr_free_shash(struct crypto_shash
>> *base_hash) *	@req - Cipher req base
>>   */
>>  static struct sk_buff *create_hash_wr(struct ahash_request *req,
>> -					    struct hash_wr_param *param)
>> +				      struct hash_wr_param *param)
>>  {
>>  	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
>>  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
>> @@ -840,8 +909,8 @@ static struct sk_buff *create_hash_wr(struct
>> ahash_request *req, if (param->sg_len != 0)
>>  		write_sg_to_skb(skb, &frags, req->src, param->sg_len);
>>
>> -	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response,
>> -		    0);
>> +	create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
>> +			DUMMY_BYTES);
>>  	req_ctx->skb = skb;
>>  	skb_get(skb);
>>  	return skb;
>> @@ -1249,6 +1318,1149 @@ static void chcr_hmac_cra_exit(struct crypto_tfm
>> *tfm) }
>>  }
>>
>> +static int chcr_copy_assoc(struct aead_request *req,
>> +				struct chcr_aead_ctx *ctx)
>> +{
>> +	SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
>> +
>> +	skcipher_request_set_tfm(skreq, ctx->null);
>> +	skcipher_request_set_callback(skreq, aead_request_flags(req),
>> +			NULL, NULL);
>> +	skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
>> +			NULL);
>> +
>> +	return crypto_skcipher_encrypt(skreq);
>> +}
>> +
>> +static unsigned char get_hmac(unsigned int authsize)
>> +{
>> +	switch (authsize) {
>> +	case ICV_8:
>> +		return CHCR_SCMD_HMAC_CTRL_PL1;
>> +	case ICV_10:
>> +		return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
>> +	case ICV_12:
>> +		return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
>> +	}
>> +	return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
>> +}
>> +
>> +
>> +static struct sk_buff *create_authenc_wr(struct aead_request *req,
>> +					 unsigned short qid,
>> +					 int size,
>> +					 unsigned short op_type)
>> +{
>> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
>> +	struct chcr_context *ctx = crypto_aead_ctx(tfm);
>> +	struct uld_ctx *u_ctx = ULD_CTX(ctx);
>> +	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
>> +	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
>> +	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
>> +	struct sk_buff *skb = NULL;
>> +	struct chcr_wr *chcr_req;
>> +	struct cpl_rx_phys_dsgl *phys_cpl;
>> +	struct phys_sge_parm sg_param;
>> +	struct scatterlist *src, *dst;
>> +	struct scatterlist src_sg[2], dst_sg[2];
>> +	unsigned int frags = 0, transhdr_len;
>> +	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
>> +	unsigned int   kctx_len = 0;
>> +	unsigned short stop_offset = 0;
>> +	unsigned int  assoclen = req->assoclen;
>> +	unsigned int  authsize = crypto_aead_authsize(tfm);
>> +	int err = 0;
>> +	int null = 0;
>> +	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
>> +		GFP_ATOMIC;
>> +
>> +	if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
>> +		goto err;
>> +	src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
>> +	dst = src;
>> +	if (req->src != req->dst) {
>> +		err = chcr_copy_assoc(req, aeadctx);
>> +		if (err)
>> +			return ERR_PTR(err);
>> +		dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
>> +	}
>> +	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
>> +		null = 1;
>> +		assoclen = 0;
>> +	}
>> +	reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
>> +					     (op_type ? -authsize : authsize));
>> +	if (reqctx->dst_nents <= 0) {
>> +		pr_err("AUTHENC:Invalid Destination sg entries\n");
>> +		goto err;
>> +	}
>> +	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
>> +	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
>> +		- sizeof(chcr_req->key_ctx);
>> +	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
>> +	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
>> +	if (!skb)
>> +		goto err;
>> +
>> +	/* LLD is going to write the sge hdr. */
>> +	skb_reserve(skb, sizeof(struct sge_opaque_hdr));
>> +
>> +	/* Write WR */
>> +	chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
>> +	memset(chcr_req, 0, transhdr_len);
>> +
>> +	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
>> +
>> +	/*
>> +	 * Input order	is AAD,IV and Payload. where IV should be included as
>> +	 * the part of authdata. All other fields should be filled according
>> +	 * to the hardware spec
>> +	 */
>> +	chcr_req->sec_cpl.op_ivinsrtofst =
>> +		FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
>> +				       (ivsize ? (assoclen + 1) : 0));
>> +	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
>> +	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
>> +					assoclen ? 1 : 0, assoclen,
>> +					assoclen + ivsize + 1,
>> +					(stop_offset & 0x1F0) >> 4);
>> +	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
>> +					stop_offset & 0xF,
>> +					null ? 0 : assoclen + ivsize + 1,
>> +					stop_offset, stop_offset);
>> +	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
>> +					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
>> +					CHCR_SCMD_CIPHER_MODE_AES_CBC,
>> +					actx->auth_mode, aeadctx->hmac_ctrl,
>> +					ivsize >> 1);
>> +	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
>> +					 0, 1, dst_size);
>> +
>> +	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
>> +	if (op_type == CHCR_ENCRYPT_OP)
>> +		memcpy(chcr_req->key_ctx.key, aeadctx->key,
>> +		       aeadctx->enckey_len);
>> +	else
>> +		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
>> +		       aeadctx->enckey_len);
>> +
>> +	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
>> +					4), actx->h_iopad, kctx_len -
>> +				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
>> +
>> +	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
>> +	sg_param.nents = reqctx->dst_nents;
>> +	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
> Just like above: is it ensured that we cannot have negative results here in 
> case cryptlen is less than authsize?
Not handled at the moment; will change accordingly.
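
A minimal sketch of the guard; placing it at the top of create_authenc_wr(), next to the existing enckey_len/cryptlen check, is an assumption:

	/* For decrypt (op_type != 0 in this path), the payload must at
	 * least cover the tag, or obsize and dst_nents go negative.
	 */
	if (op_type && req->cryptlen < authsize)
		goto err;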

>
>
> Ciao
> Stephan

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 6/6] Add support for AEAD algos.
  2016-10-27 10:06     ` Harsh Jain
@ 2016-11-08 11:15       ` Stephan Mueller
  2016-11-08 11:46         ` Harsh Jain
  0 siblings, 1 reply; 16+ messages in thread
From: Stephan Mueller @ 2016-11-08 11:15 UTC (permalink / raw)
  To: Harsh Jain
  Cc: dan.carpenter, herbert, linux-crypto, jlulla, atul.gupta,
	yeshaswi, hariprasad

On Thursday, 27 October 2016 at 15:36:08 CET, Harsh Jain wrote:

Hi Harsh,

> >> +static void chcr_verify_tag(struct aead_request *req, u8 *input, int
> >> *err)
> >> +{
> >> +	u8 temp[SHA512_DIGEST_SIZE];
> >> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> >> +	int authsize = crypto_aead_authsize(tfm);
> >> +	struct cpl_fw6_pld *fw6_pld;
> >> +	int cmp = 0;
> >> +
> >> +	fw6_pld = (struct cpl_fw6_pld *)input;
> >> +	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
> >> +	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
> >> +		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
> >> +	} else {
> >> +
> >> +		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
> >> +				authsize, req->assoclen +
> >> +				req->cryptlen - authsize);
> > 
> > I am wondering whether the math is correct here in any case. It is
> > permissible that we have an AAD size of 0 and even a zero-sized
> > ciphertext. How is such scenario covered here?
> 
> Here we are trying to copy user supplied tag to local buffer(temp) for
> decrypt operation only. relative index of tag in src sg list will not
> change when AAD is zero and in decrypt operation cryptlen > authsize.

I am just wondering where this is checked. Since all of these implementations 
are directly accessible from unprivileged user space, we should be careful.

> >> +		cmp = memcmp(temp, (fw6_pld + 1), authsize);
> > 
> > I would guess in both cases memcmp should be replaced with crypto_memneq
> 
> Yes can be done
> 
> >> +	}
> >> +	if (cmp)
> >> +		*err = -EBADMSG;
> >> +	else
> >> +		*err = 0;
> > 
> > What do you think about memzero_explicit(tmp)?
> 
> No Idea why we needs explicitly setting of zero for local variable.  Please
> share some online resources to understand this.

In dumps, the stack is also produced. Yet I see that stack memory is very 
volatile and thus will be overwritten soon. Thus my common approach for 
sensitive data is that heap variables must be zeroized. Stack variables are 
suggested to be zeroized. As far as I understand the code, temp will hold a 
copy of the tag value, i.e. a public piece of information. If this is correct, 
then I concur that a memset may not be needed after all.

Ciao
Stephan

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 6/6] Add support for AEAD algos.
  2016-11-08 11:15       ` Stephan Mueller
@ 2016-11-08 11:46         ` Harsh Jain
  2016-11-08 12:59           ` Stephan Mueller
  0 siblings, 1 reply; 16+ messages in thread
From: Harsh Jain @ 2016-11-08 11:46 UTC (permalink / raw)
  To: Stephan Mueller
  Cc: dan.carpenter, herbert, linux-crypto, jlulla, atul.gupta,
	yeshaswi, hariprasad



On 08-11-2016 16:45, Stephan Mueller wrote:
> On Thursday, 27 October 2016 at 15:36:08 CET, Harsh Jain wrote:
>
> Hi Harsh,
>
>>>> +static void chcr_verify_tag(struct aead_request *req, u8 *input, int
>>>> *err)
>>>> +{
>>>> +	u8 temp[SHA512_DIGEST_SIZE];
>>>> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
>>>> +	int authsize = crypto_aead_authsize(tfm);
>>>> +	struct cpl_fw6_pld *fw6_pld;
>>>> +	int cmp = 0;
>>>> +
>>>> +	fw6_pld = (struct cpl_fw6_pld *)input;
>>>> +	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
>>>> +	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
>>>> +		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
>>>> +	} else {
>>>> +
>>>> +		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
>>>> +				authsize, req->assoclen +
>>>> +				req->cryptlen - authsize);
>>> I am wondering whether the math is correct here in any case. It is
>>> permissible that we have an AAD size of 0 and even a zero-sized
>>> ciphertext. How is such scenario covered here?
>> Here we are trying to copy user supplied tag to local buffer(temp) for
>> decrypt operation only. relative index of tag in src sg list will not
>> change when AAD is zero and in decrypt operation cryptlen > authsize.
> I am just wondering where this is checked. Since all of these implementations 
> are directly accessible from unprivileged user space, we should be careful.
chcr_verify_tag() is called only when reqctx->verify is set to VERIFY_SW, and that flag is set only in the decrypt callback of the algo (e.g. chcr_aead_decrypt). This ensures chcr_verify_tag() runs for decrypt operations only.
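
Roughly, the flow described above looks like the following sketch; the actual decrypt callback body is not part of the quoted hunks, and CHCR_DECRYPT_OP is assumed to be the decrypt counterpart of CHCR_ENCRYPT_OP:

	static int chcr_aead_decrypt(struct aead_request *req)
	{
		struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

		/* tag is verified in software on completion */
		reqctx->verify = VERIFY_SW;
		return chcr_aead_op(req, CHCR_DECRYPT_OP, 0,
				    create_authenc_wr);
	}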


>
>>>> +		cmp = memcmp(temp, (fw6_pld + 1), authsize);
>>> I would guess in both cases memcmp should be replaced with crypto_memneq
>> Yes can be done
>>
>>>> +	}
>>>> +	if (cmp)
>>>> +		*err = -EBADMSG;
>>>> +	else
>>>> +		*err = 0;
>>> What do you think about memzero_explicit(tmp)?
>> No Idea why we needs explicitly setting of zero for local variable.  Please
>> share some online resources to understand this.
> In dumps, the stack is also produced. Yet I see that stack memory is very 
> volatile and thus will be overwritten soon. Thus my common approach for 
> sensitive data is that heap variables must be zeroized. Stack variables are 
> suggested to be zeroized. As far as I understand the code, temp will hold a 
> copy of the tag value, i.e. a public piece of information. If this is correct, 
> that I concur that a memset may not be needed after all.
Yes, temp contains the user-supplied tag, so we can skip the memset here. I will review the other functions to see whether they need a similar memset.
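
If any of them do turn out to hold secret data, the zeroization itself is a one-liner at the end of the function (memzero_explicit() is declared in <linux/string.h> and is guaranteed not to be optimized away):

	memzero_explicit(temp, sizeof(temp));
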
>
> Ciao
> Stephan

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 6/6] Add support for AEAD algos.
  2016-11-08 11:46         ` Harsh Jain
@ 2016-11-08 12:59           ` Stephan Mueller
  2016-11-08 14:21             ` Harsh Jain
  0 siblings, 1 reply; 16+ messages in thread
From: Stephan Mueller @ 2016-11-08 12:59 UTC (permalink / raw)
  To: Harsh Jain
  Cc: dan.carpenter, herbert, linux-crypto, jlulla, atul.gupta,
	yeshaswi, hariprasad

On Tuesday, 8 November 2016 at 17:16:38 CET, Harsh Jain wrote:

Hi Harsh,

> On 08-11-2016 16:45, Stephan Mueller wrote:
> > On Thursday, 27 October 2016 at 15:36:08 CET, Harsh Jain wrote:
> > 
> > Hi Harsh,
> > 
> >>>> +static void chcr_verify_tag(struct aead_request *req, u8 *input, int
> >>>> *err)
> >>>> +{
> >>>> +	u8 temp[SHA512_DIGEST_SIZE];
> >>>> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> >>>> +	int authsize = crypto_aead_authsize(tfm);
> >>>> +	struct cpl_fw6_pld *fw6_pld;
> >>>> +	int cmp = 0;
> >>>> +
> >>>> +	fw6_pld = (struct cpl_fw6_pld *)input;
> >>>> +	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
> >>>> +	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
> >>>> +		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
> >>>> +	} else {
> >>>> +
> >>>> +		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
> >>>> +				authsize, req->assoclen +
> >>>> +				req->cryptlen - authsize);
> >>> 
> >>> I am wondering whether the math is correct here in any case. It is
> >>> permissible that we have an AAD size of 0 and even a zero-sized
> >>> ciphertext. How is such scenario covered here?
> >> 
> >> Here we are trying to copy user supplied tag to local buffer(temp) for
> >> decrypt operation only. relative index of tag in src sg list will not
> >> change when AAD is zero and in decrypt operation cryptlen > authsize.
> > 
> > I am just wondering where this is checked. Since all of these
> > implementations are directly accessible from unprivileged user space, we
> > should be careful.
> chcr_verify_tag() will be called when req->verify is set to "VERIFY_SW", 
> same will set in decrypt callback function of Algo(like chcr_aead_decrypt)
> only. It will ensure calling of chcr_verify_tag() in de-crypt operation
> only.

I think that limiting to the decryption path may not be enough. What happens 
if a caller sets some assoclen, but when invoking the decryption operation it 
provides input data that is smaller than the assoclen? The API allows this 
scenario.

Ciao
Stephan

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 6/6] Add support for AEAD algos.
  2016-11-08 12:59           ` Stephan Mueller
@ 2016-11-08 14:21             ` Harsh Jain
  2016-11-10  5:00               ` Harsh Jain
  0 siblings, 1 reply; 16+ messages in thread
From: Harsh Jain @ 2016-11-08 14:21 UTC (permalink / raw)
  To: Stephan Mueller
  Cc: dan.carpenter, herbert, linux-crypto, jlulla, atul.gupta,
	yeshaswi, hariprasad



On 08-11-2016 18:29, Stephan Mueller wrote:
> On Tuesday, 8 November 2016 at 17:16:38 CET, Harsh Jain wrote:
>
> Hi Harsh,
>
>> On 08-11-2016 16:45, Stephan Mueller wrote:
>>> On Thursday, 27 October 2016 at 15:36:08 CET, Harsh Jain wrote:
>>>
>>> Hi Harsh,
>>>
>>>>>> +static void chcr_verify_tag(struct aead_request *req, u8 *input, int
>>>>>> *err)
>>>>>> +{
>>>>>> +	u8 temp[SHA512_DIGEST_SIZE];
>>>>>> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
>>>>>> +	int authsize = crypto_aead_authsize(tfm);
>>>>>> +	struct cpl_fw6_pld *fw6_pld;
>>>>>> +	int cmp = 0;
>>>>>> +
>>>>>> +	fw6_pld = (struct cpl_fw6_pld *)input;
>>>>>> +	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
>>>>>> +	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
>>>>>> +		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
>>>>>> +	} else {
>>>>>> +
>>>>>> +		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
>>>>>> +				authsize, req->assoclen +
>>>>>> +				req->cryptlen - authsize);
>>>>> I am wondering whether the math is correct here in any case. It is
>>>>> permissible that we have an AAD size of 0 and even a zero-sized
>>>>> ciphertext. How is such scenario covered here?
>>>> Here we are trying to copy user supplied tag to local buffer(temp) for
>>>> decrypt operation only. relative index of tag in src sg list will not
>>>> change when AAD is zero and in decrypt operation cryptlen > authsize.
>>> I am just wondering where this is checked. Since all of these
>>> implementations are directly accessible from unprivileged user space, we
>>> should be careful.
>> chcr_verify_tag() will be called when req->verify is set to "VERIFY_SW", 
>> same will set in decrypt callback function of Algo(like chcr_aead_decrypt)
>> only. It will ensure calling of chcr_verify_tag() in de-crypt operation
>> only.
> I think that limiting to the decryption path may not be enough. What happens 
> if a caller sets some assoclen, but when invoking the decryption operation it 
> provides input data that is smaller than the assoclen? The API allows this 
> scenario.
If I understand correctly, in this case the passed sg list will be smaller. We should return -EINVAL at the entry point itself (e.g. in create_gcm_wr); control should never reach chcr_verify_tag().

>
> Ciao
> Stephan

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 6/6] Add support for AEAD algos.
  2016-11-08 14:21             ` Harsh Jain
@ 2016-11-10  5:00               ` Harsh Jain
  0 siblings, 0 replies; 16+ messages in thread
From: Harsh Jain @ 2016-11-10  5:00 UTC (permalink / raw)
  To: Stephan Mueller
  Cc: dan.carpenter, herbert, linux-crypto, jlulla, atul.gupta,
	yeshaswi, hariprasad



On 08-11-2016 19:51, Harsh Jain wrote:
>
> On 08-11-2016 18:29, Stephan Mueller wrote:
>> On Tuesday, 8 November 2016 at 17:16:38 CET, Harsh Jain wrote:
>>
>> Hi Harsh,
>>
>>> On 08-11-2016 16:45, Stephan Mueller wrote:
>>>> On Thursday, 27 October 2016 at 15:36:08 CET, Harsh Jain wrote:
>>>>
>>>> Hi Harsh,
>>>>
>>>>>>> +static void chcr_verify_tag(struct aead_request *req, u8 *input, int
>>>>>>> *err)
>>>>>>> +{
>>>>>>> +	u8 temp[SHA512_DIGEST_SIZE];
>>>>>>> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
>>>>>>> +	int authsize = crypto_aead_authsize(tfm);
>>>>>>> +	struct cpl_fw6_pld *fw6_pld;
>>>>>>> +	int cmp = 0;
>>>>>>> +
>>>>>>> +	fw6_pld = (struct cpl_fw6_pld *)input;
>>>>>>> +	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
>>>>>>> +	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
>>>>>>> +		cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
>>>>>>> +	} else {
>>>>>>> +
>>>>>>> +		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
>>>>>>> +				authsize, req->assoclen +
>>>>>>> +				req->cryptlen - authsize);
>>>>>> I am wondering whether the math is correct here in any case. It is
>>>>>> permissible that we have an AAD size of 0 and even a zero-sized
>>>>>> ciphertext. How is such scenario covered here?
>>>>> Here we are trying to copy user supplied tag to local buffer(temp) for
>>>>> decrypt operation only. relative index of tag in src sg list will not
>>>>> change when AAD is zero and in decrypt operation cryptlen > authsize.
>>>> I am just wondering where this is checked. Since all of these
>>>> implementations are directly accessible from unprivileged user space, we
>>>> should be careful.
>>> chcr_verify_tag() will be called when req->verify is set to "VERIFY_SW", 
>>> same will set in decrypt callback function of Algo(like chcr_aead_decrypt)
>>> only. It will ensure calling of chcr_verify_tag() in de-crypt operation
>>> only.
>> I think that limiting to the decryption path may not be enough. What happens 
>> if a caller sets some assoclen, but when invoking the decryption operation it 
>> provides input data that is smaller than the assoclen? The API allows this 
>> scenario.
> If I understand correctly, in this case passed sg list will be smaller. We should return with error -EINVAL at entry point only (like create_gcm_wr), control should not reach to chcr_verify_tag().
I had a look at the software implementation for a check on assoclen exceeding the src sg list, and I suspect it is not handled there either. See below:
                In crypto_authenc_encrypt(), if the assoclen passed to scatterwalk_ffwd() is greater than what src covers, it may panic with a NULL pointer dereference.

I will add this check in v2 of the chcr driver.
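
A sketch of such an entry-point check; the exact placement is an assumption, and it relies on sg_nents_for_len() returning a negative errno when the list cannot cover the requested length:

	/* src must cover AAD + payload before scatterwalk_ffwd() is safe */
	if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
		return ERR_PTR(-EINVAL);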

>
>> Ciao
>> Stephan

^ permalink raw reply	[flat|nested] 16+ messages in thread
