linux-crypto.vger.kernel.org archive mirror
* [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues
@ 2019-07-02 14:39 Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH] crypto: inside-secure - fix scatter/gather list handling issues Pascal van Leeuwen
                   ` (12 more replies)
  0 siblings, 13 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto; +Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen

This patch set fixes all remaining issues with the cryptomgr extra tests
when run on a Marvell A7K or A8K device (e.g. Macchiatobin), resulting in
a clean boot with the extra tests enabled.
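
For reference, the extra tests are the cryptomgr fuzz tests gated
behind CONFIG_CRYPTO_MANAGER_EXTRA_TESTS. Assuming a kernel recent
enough to provide that option (v5.1 or later), a minimal configuration
for reproducing these runs:

  CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y
  # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set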

Pascal van Leeuwen (9):
  crypto: inside-secure - keep ivsize for DES ECB modes at 0
  crypto: inside-secure - silently return -EINVAL for input error cases
  crypto: inside-secure - fix incorrect skcipher output IV
  crypto: inside-secure - fix scatter/gather list to descriptor
    conversion
  crypto: inside-secure - fix EINVAL error (buf overflow) for AEAD
    decrypt
  crypto: inside-secure: back out parts of earlier HMAC update
    workaround
  crypto: inside-secure - let HW deal with initial hash digest
  crypto: inside-secure - add support for arbitrary size hash/HMAC
    updates
  crypto: inside-secure - add support for 0 length HMAC messages

 drivers/crypto/inside-secure/safexcel.c        |  25 +-
 drivers/crypto/inside-secure/safexcel.h        |   6 +-
 drivers/crypto/inside-secure/safexcel_cipher.c | 265 ++++++++----
 drivers/crypto/inside-secure/safexcel_hash.c   | 553 ++++++++++++++-----------
 4 files changed, 520 insertions(+), 329 deletions(-)

-- 
1.8.3.1



* [PATCH] crypto: inside-secure - fix scatter/gather list handling issues
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH] crypto: inside-secure - fix scatter/gather list to descriptor conversion Pascal van Leeuwen
                   ` (11 subsequent siblings)
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto; +Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

---
 drivers/crypto/inside-secure/safexcel_cipher.c | 178 +++++++++++++++++++------
 1 file changed, 136 insertions(+), 42 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 7977e4c..9ba2c21 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -54,6 +54,7 @@ struct safexcel_cipher_req {
 	/* Number of result descriptors associated to the request */
 	unsigned int rdescs;
 	bool needs_inv;
+	int  nr_src, nr_dst;
 };
 
 static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
@@ -374,10 +375,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	safexcel_complete(priv, ring);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
 	}
 
 	*should_complete = true;
@@ -395,50 +396,90 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct safexcel_command_desc *cdesc;
+	struct safexcel_command_desc *first_cdesc = NULL;
 	struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
 	struct scatterlist *sg;
-	unsigned int totlen = cryptlen + assoclen;
-	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
-	int i, ret = 0;
+	unsigned int totlen;
+	unsigned int totlen_src = cryptlen + assoclen;
+	unsigned int totlen_dst = totlen_src;
+	int n_cdesc = 0, n_rdesc = 0;
+	int queued, i, ret = 0;
+	bool first = true;
+
+	if (ctx->aead) {
+		/*
+		 * AEAD has auth tag appended to output for encrypt and
+		 * removed from the output for decrypt!
+		 */
+		if (sreq->direction == SAFEXCEL_DECRYPT)
+			totlen_dst -= digestsize;
+		else
+			totlen_dst += digestsize;
+
+		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
+		       ctx->ipad, ctx->state_sz);
+		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) /
+		       sizeof(u32),
+		       ctx->opad, ctx->state_sz);
+	}
+
+	/*
+	 * Remember actual input length, source buffer length may be
+	 * updated in case of inline operation below.
+	 */
+	totlen = totlen_src;
+	queued = totlen_src;
 
 	if (src == dst) {
-		nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
-				    DMA_BIDIRECTIONAL);
-		nr_dst = nr_src;
-		if (!nr_src)
+		totlen_src = max(totlen_src, totlen_dst);
+		sreq->nr_src = sg_nents_for_len(src, totlen_src);
+		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
+			dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!",
+				totlen_src);
 			return -EINVAL;
+		}
+		sreq->nr_src = dma_map_sg(priv->dev, src, sreq->nr_src,
+					  DMA_BIDIRECTIONAL);
+		sreq->nr_dst = sreq->nr_src;
 	} else {
-		nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
-				    DMA_TO_DEVICE);
-		if (!nr_src)
+		sreq->nr_src = sg_nents_for_len(src, totlen_src);
+		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
+			dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
+				totlen_src);
 			return -EINVAL;
-
-		nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
-				    DMA_FROM_DEVICE);
-		if (!nr_dst) {
-			dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
+		}
+		sreq->nr_src = dma_map_sg(priv->dev, src, sreq->nr_src,
+					  DMA_TO_DEVICE);
+
+		sreq->nr_dst = sg_nents_for_len(dst, totlen_dst);
+		if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
+			dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
+				totlen_dst);
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_TO_DEVICE);
 			return -EINVAL;
 		}
+
+		sreq->nr_dst = dma_map_sg(priv->dev, dst, sreq->nr_dst,
+					  DMA_FROM_DEVICE);
 	}
 
 	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
 
-	if (ctx->aead) {
-		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
-		       ctx->ipad, ctx->state_sz);
-		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
-		       ctx->opad, ctx->state_sz);
-	}
+	/* The EIP cannot deal with zero length input packets! */
+	if (totlen == 0)
+		totlen = 1;
 
 	/* command descriptors */
-	for_each_sg(src, sg, nr_src, i) {
+	for_each_sg(src, sg, sreq->nr_src, i) {
 		int len = sg_dma_len(sg);
 
 		/* Do not overflow the request */
 		if (queued - len < 0)
 			len = queued;
 
-		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
+		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+					   !(queued - len),
 					   sg_dma_address(sg), len, totlen,
 					   ctx->base.ctxr_dma);
 		if (IS_ERR(cdesc)) {
@@ -449,14 +490,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 		n_cdesc++;
 
 		if (n_cdesc == 1) {
-			safexcel_context_control(ctx, base, sreq, cdesc);
-			if (ctx->aead)
-				safexcel_aead_token(ctx, iv, cdesc,
-						    sreq->direction, cryptlen,
-						    assoclen, digestsize);
-			else
-				safexcel_skcipher_token(ctx, iv, cdesc,
-							cryptlen);
+			first_cdesc = cdesc;
 		}
 
 		queued -= len;
@@ -464,23 +498,83 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 			break;
 	}
 
+	if (unlikely(!n_cdesc)) {
+		/*
+		 * Special case: zero length input buffer.
+		 * The engine always needs the 1st command descriptor, however!
+		 */
+		first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
+						 ctx->base.ctxr_dma);
+		n_cdesc = 1;
+	}
+
+	/* Add context control words and token to first command descriptor */
+	safexcel_context_control(ctx, base, sreq, first_cdesc);
+	if (ctx->aead)
+		safexcel_aead_token(ctx, iv, first_cdesc,
+				    sreq->direction, cryptlen,
+				    assoclen, digestsize);
+	else
+		safexcel_skcipher_token(ctx, iv, first_cdesc,
+					cryptlen);
+
 	/* result descriptors */
-	for_each_sg(dst, sg, nr_dst, i) {
-		bool first = !i, last = sg_is_last(sg);
+	for_each_sg(dst, sg, sreq->nr_dst, i) {
+		bool last = (i == sreq->nr_dst - 1);
 		u32 len = sg_dma_len(sg);
 
-		rdesc = safexcel_add_rdesc(priv, ring, first, last,
-					   sg_dma_address(sg), len);
+		/* only allow the part of the buffer we know we need */
+		if (len > totlen_dst)
+			len = totlen_dst;
+		if (unlikely(!len))
+			break;
+		totlen_dst -= len;
+
+		/* skip over AAD space in buffer - not written */
+		if (assoclen) {
+			if (assoclen >= len) {
+				assoclen -= len;
+				continue;
+			}
+			rdesc = safexcel_add_rdesc(priv, ring, first, last,
+						   sg_dma_address(sg) +
+						   assoclen,
+						   len - assoclen);
+			assoclen = 0;
+		} else {
+			rdesc = safexcel_add_rdesc(priv, ring, first, last,
+						   sg_dma_address(sg),
+						   len);
+		}
 		if (IS_ERR(rdesc)) {
 			/* No space left in the result descriptor ring */
 			ret = PTR_ERR(rdesc);
 			goto rdesc_rollback;
 		}
-		if (first)
+		if (first) {
 			first_rdesc = rdesc;
+			first = false;
+		}
 		n_rdesc++;
 	}
 
+	if (unlikely(first)) {
+		/*
+		 * Special case: AEAD decrypt with only AAD data.
+		 * In this case there is NO output data from the engine,
+		 * but the engine still needs a result descriptor!
+		 * Create a dummy one just for catching the result token.
+		 */
+		rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
+		if (IS_ERR(rdesc)) {
+			/* No space left in the result descriptor ring */
+			ret = PTR_ERR(rdesc);
+			goto rdesc_rollback;
+		}
+		first_rdesc = rdesc;
+		n_rdesc = 1;
+	}
+
 	safexcel_rdr_req_set(priv, ring, first_rdesc, base);
 
 	*commands = n_cdesc;
@@ -495,10 +589,10 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
 	}
 
 	return ret;
-- 
1.8.3.1



* [PATCH] crypto: inside-secure - fix scatter/gather list to descriptor conversion
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH] crypto: inside-secure - fix scatter/gather list handling issues Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH 1/9] crypto: inside-secure - keep ivsize for DES ECB modes at 0 Pascal van Leeuwen
                   ` (10 subsequent siblings)
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto; +Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

Fixed issues in the skcipher and AEAD scatter/gather list to engine
descriptor conversion code which caused either too much or too little
buffer space to be provided to the hardware. This caused errors with the
testmgr extra tests: either kernel panics (on x86-EIP197-FPGA) or engine
descriptor errors 0x1, 0x8 or 0x9 (on Macchiatobin, i.e. Marvell A8K).
With this patch in place, all skcipher and AEAD (extra) tests pass.
---
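
The crux of the fix is to derive the number of scatterlist entries to
map from the length the engine will actually process, using
sg_nents_for_len(), rather than blindly mapping sg_nents() entries.
A minimal sketch of that validation pattern (the helper name
map_for_engine is made up for illustration, and the driver's
zero-length special case is omitted):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Validate that the scatterlist covers 'len' bytes before DMA-mapping
 * it. sg_nents_for_len() returns the number of entries needed to cover
 * 'len' bytes, or a negative error if the list is too short.
 * Returns the mapped entry count, or a negative error.
 */
static int map_for_engine(struct device *dev, struct scatterlist *sg,
			  unsigned int len, enum dma_data_direction dir)
{
	int nents = sg_nents_for_len(sg, len);

	if (len && nents <= 0)
		return -EINVAL;	/* buffer too small for this request */

	return dma_map_sg(dev, sg, nents, dir) ?: -ENOMEM;
}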
 drivers/crypto/inside-secure/safexcel_cipher.c | 182 ++++++++++++++++++-------
 1 file changed, 136 insertions(+), 46 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 9aebc0a..c839514 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -54,6 +54,7 @@ struct safexcel_cipher_req {
 	/* Number of result descriptors associated to the request */
 	unsigned int rdescs;
 	bool needs_inv;
+	int  nr_src, nr_dst;
 };
 
 static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
@@ -358,10 +359,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	safexcel_complete(priv, ring);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
 	}
 
 	/*
@@ -370,7 +371,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
 	    (sreq->direction == SAFEXCEL_ENCRYPT)) {
 		/* For encrypt take the last output word */
-		sg_pcopy_to_buffer(dst, sg_nents(dst), areq->iv,
+		sg_pcopy_to_buffer(dst, sreq->nr_dst, areq->iv,
 				   crypto_skcipher_ivsize(skcipher),
 				   (cryptlen -
 				    crypto_skcipher_ivsize(skcipher)));
@@ -393,63 +394,99 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct safexcel_command_desc *cdesc;
+	struct safexcel_command_desc *first_cdesc = NULL;
 	struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
 	struct scatterlist *sg;
-	unsigned int totlen = cryptlen + assoclen;
-	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
-	int i, ret = 0;
+	unsigned int totlen;
+	unsigned int totlen_src = cryptlen + assoclen;
+	unsigned int totlen_dst = totlen_src;
+	int n_cdesc = 0, n_rdesc = 0;
+	int queued, i, ret = 0;
+	bool first = true;
 
-	if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
-	    (sreq->direction == SAFEXCEL_DECRYPT)) {
+	sreq->nr_src = sg_nents_for_len(src, totlen_src);
+
+	if (ctx->aead) {
+		/*
+		 * AEAD has auth tag appended to output for encrypt and
+		 * removed from the output for decrypt!
+		 */
+		if (sreq->direction == SAFEXCEL_DECRYPT)
+			totlen_dst -= digestsize;
+		else
+			totlen_dst += digestsize;
+
+		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
+		       ctx->ipad, ctx->state_sz);
+		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) /
+		       sizeof(u32),
+		       ctx->opad, ctx->state_sz);
+	} else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
+		   (sreq->direction == SAFEXCEL_DECRYPT)) {
 		/*
 		 * Save IV from last crypto input word for CBC modes in decrypt
 		 * direction. Need to do this first in case of inplace operation
 		 * as it will be overwritten.
 		 */
-		sg_pcopy_to_buffer(src, sg_nents(src), areq->iv,
+		sg_pcopy_to_buffer(src, sreq->nr_src, areq->iv,
 				   crypto_skcipher_ivsize(skcipher),
-				   (totlen -
+				   (totlen_src -
 				    crypto_skcipher_ivsize(skcipher)));
 	}
 
+	sreq->nr_dst = sg_nents_for_len(dst, totlen_dst);
+
+	/*
+	 * Remember actual input length, source buffer length may be
+	 * updated in case of inline operation below.
+	 */
+	totlen = totlen_src;
+	queued = totlen_src;
+
 	if (src == dst) {
-		nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
-				    DMA_BIDIRECTIONAL);
-		nr_dst = nr_src;
-		if (!nr_src)
+		sreq->nr_src = max(sreq->nr_src, sreq->nr_dst);
+		sreq->nr_dst = sreq->nr_src;
+		if (unlikely((totlen_src || totlen_dst) &&
+		    (sreq->nr_src <= 0))) {
+			dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!",
+				max(totlen_src, totlen_dst));
 			return -EINVAL;
+		}
+		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
 	} else {
-		nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
-				    DMA_TO_DEVICE);
-		if (!nr_src)
+		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
+			dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
+				totlen_src);
 			return -EINVAL;
+		}
+		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
 
-		nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
-				    DMA_FROM_DEVICE);
-		if (!nr_dst) {
-			dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
+		if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
+			dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
+				totlen_dst);
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_TO_DEVICE);
 			return -EINVAL;
 		}
+		dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
 	}
 
 	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
 
-	if (ctx->aead) {
-		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
-		       ctx->ipad, ctx->state_sz);
-		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
-		       ctx->opad, ctx->state_sz);
-	}
+	/* The EIP cannot deal with zero length input packets! */
+	if (totlen == 0)
+		totlen = 1;
 
 	/* command descriptors */
-	for_each_sg(src, sg, nr_src, i) {
+	for_each_sg(src, sg, sreq->nr_src, i) {
 		int len = sg_dma_len(sg);
 
 		/* Do not overflow the request */
 		if (queued - len < 0)
 			len = queued;
 
-		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
+		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+					   !(queued - len),
 					   sg_dma_address(sg), len, totlen,
 					   ctx->base.ctxr_dma);
 		if (IS_ERR(cdesc)) {
@@ -460,14 +497,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 		n_cdesc++;
 
 		if (n_cdesc == 1) {
-			safexcel_context_control(ctx, base, sreq, cdesc);
-			if (ctx->aead)
-				safexcel_aead_token(ctx, iv, cdesc,
-						    sreq->direction, cryptlen,
-						    assoclen, digestsize);
-			else
-				safexcel_skcipher_token(ctx, iv, cdesc,
-							cryptlen);
+			first_cdesc = cdesc;
 		}
 
 		queued -= len;
@@ -475,23 +505,83 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 			break;
 	}
 
+	if (unlikely(!n_cdesc)) {
+		/*
+		 * Special case: zero length input buffer.
+		 * The engine always needs the 1st command descriptor, however!
+		 */
+		first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
+						 ctx->base.ctxr_dma);
+		n_cdesc = 1;
+	}
+
+	/* Add context control words and token to first command descriptor */
+	safexcel_context_control(ctx, base, sreq, first_cdesc);
+	if (ctx->aead)
+		safexcel_aead_token(ctx, iv, first_cdesc,
+				    sreq->direction, cryptlen,
+				    assoclen, digestsize);
+	else
+		safexcel_skcipher_token(ctx, iv, first_cdesc,
+					cryptlen);
+
 	/* result descriptors */
-	for_each_sg(dst, sg, nr_dst, i) {
-		bool first = !i, last = sg_is_last(sg);
+	for_each_sg(dst, sg, sreq->nr_dst, i) {
+		bool last = (i == sreq->nr_dst - 1);
 		u32 len = sg_dma_len(sg);
 
-		rdesc = safexcel_add_rdesc(priv, ring, first, last,
-					   sg_dma_address(sg), len);
+		/* only allow the part of the buffer we know we need */
+		if (len > totlen_dst)
+			len = totlen_dst;
+		if (unlikely(!len))
+			break;
+		totlen_dst -= len;
+
+		/* skip over AAD space in buffer - not written */
+		if (assoclen) {
+			if (assoclen >= len) {
+				assoclen -= len;
+				continue;
+			}
+			rdesc = safexcel_add_rdesc(priv, ring, first, last,
+						   sg_dma_address(sg) +
+						   assoclen,
+						   len - assoclen);
+			assoclen = 0;
+		} else {
+			rdesc = safexcel_add_rdesc(priv, ring, first, last,
+						   sg_dma_address(sg),
+						   len);
+		}
 		if (IS_ERR(rdesc)) {
 			/* No space left in the result descriptor ring */
 			ret = PTR_ERR(rdesc);
 			goto rdesc_rollback;
 		}
-		if (first)
+		if (first) {
 			first_rdesc = rdesc;
+			first = false;
+		}
 		n_rdesc++;
 	}
 
+	if (unlikely(first)) {
+		/*
+		 * Special case: AEAD decrypt with only AAD data.
+		 * In this case there is NO output data from the engine,
+		 * but the engine still needs a result descriptor!
+		 * Create a dummy one just for catching the result token.
+		 */
+		rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
+		if (IS_ERR(rdesc)) {
+			/* No space left in the result descriptor ring */
+			ret = PTR_ERR(rdesc);
+			goto rdesc_rollback;
+		}
+		first_rdesc = rdesc;
+		n_rdesc = 1;
+	}
+
 	safexcel_rdr_req_set(priv, ring, first_rdesc, base);
 
 	*commands = n_cdesc;
@@ -506,10 +596,10 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
 	}
 
 	return ret;
-- 
1.8.3.1



* [PATCH 1/9] crypto: inside-secure - keep ivsize for DES ECB modes at 0
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH] crypto: inside-secure - fix scatter/gather list handling issues Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH] crypto: inside-secure - fix scatter/gather list to descriptor conversion Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-05 14:40   ` Antoine Tenart
  2019-07-02 14:39 ` [PATCH 2/9] crypto: inside-secure - silently return -EINVAL for input error cases Pascal van Leeuwen
                   ` (9 subsequent siblings)
  12 siblings, 1 reply; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto
  Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

The driver advertised the IV size for the DES and 3DES ECB modes as
the DES block size of 8 bytes. This is incorrect, as ECB mode does not
use an IV at all.

Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
---
 drivers/crypto/inside-secure/safexcel_cipher.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index ee8a0c3..7977e4c 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1033,7 +1033,6 @@ struct safexcel_alg_template safexcel_alg_ecb_des = {
 		.decrypt = safexcel_ecb_des_decrypt,
 		.min_keysize = DES_KEY_SIZE,
 		.max_keysize = DES_KEY_SIZE,
-		.ivsize = DES_BLOCK_SIZE,
 		.base = {
 			.cra_name = "ecb(des)",
 			.cra_driver_name = "safexcel-ecb-des",
@@ -1134,7 +1133,6 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
 		.decrypt = safexcel_ecb_des3_ede_decrypt,
 		.min_keysize = DES3_EDE_KEY_SIZE,
 		.max_keysize = DES3_EDE_KEY_SIZE,
-		.ivsize = DES3_EDE_BLOCK_SIZE,
 		.base = {
 			.cra_name = "ecb(des3_ede)",
 			.cra_driver_name = "safexcel-ecb-des3_ede",
-- 
1.8.3.1



* [PATCH 2/9] crypto: inside-secure - silently return -EINVAL for input error cases
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (2 preceding siblings ...)
  2019-07-02 14:39 ` [PATCH 1/9] crypto: inside-secure - keep ivsize for DES ECB modes at 0 Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-05 14:36   ` Antoine Tenart
  2019-07-02 14:39 ` [PATCH 3/9] crypto: inside-secure - fix incorrect skcipher output IV Pascal van Leeuwen
                   ` (8 subsequent siblings)
  12 siblings, 1 reply; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto
  Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

The driver was printing an error message for certain input error cases
that should just return -EINVAL, which caused the related testmgr extra
tests to flood the kernel message log. This patch ensures those cases
remain silent, while making some other, device-specific errors a bit
more verbose.

Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
---
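
The resulting triage of the engine's result descriptor error bits, with
bit meanings as documented in the patch itself (the helper name
classify_rdesc_error is made up for illustration):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Fatal engine errors -> -EIO (logged); input-related errors such as
 * bad block size or overflow -> -EINVAL (kept silent); authentication
 * failure -> -EBADMSG (kept silent).
 */
static int classify_rdesc_error(u32 error_code)
{
	if (error_code & 0x4067)	/* fatal: bits 0,1,2,5,6 & 14 */
		return -EIO;
	if (error_code & (BIT(7) | BIT(4) | BIT(3)))
		return -EINVAL;		/* input problem, stay silent */
	if (error_code & BIT(9))
		return -EBADMSG;	/* authentication failed */
	return 0;
}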
 drivers/crypto/inside-secure/safexcel.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 503fef0..8e8c01d 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -694,16 +694,31 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
 				       struct safexcel_result_desc *rdesc)
 {
-	if (likely(!rdesc->result_data.error_code))
+	if (likely((!rdesc->descriptor_overflow) &&
+		   (!rdesc->buffer_overflow) &&
+		   (!rdesc->result_data.error_code)))
 		return 0;
 
-	if (rdesc->result_data.error_code & 0x407f) {
-		/* Fatal error (bits 0-7, 14) */
+	if (rdesc->descriptor_overflow)
+		dev_err(priv->dev, "Descriptor overflow detected");
+
+	if (rdesc->buffer_overflow)
+		dev_err(priv->dev, "Buffer overflow detected");
+
+	if (rdesc->result_data.error_code & 0x4067) {
+		/* Fatal error (bits 0,1,2,5,6 & 14) */
 		dev_err(priv->dev,
-			"cipher: result: result descriptor error (0x%x)\n",
+			"result descriptor error (%x)",
 			rdesc->result_data.error_code);
+		return -EIO;
+	} else if (rdesc->result_data.error_code &
+		   (BIT(7) | BIT(4) | BIT(3))) {
+		/*
+		 * Give priority over authentication fails:
+		 * Blocksize & overflow errors, something wrong with the input!
+		 */
 		return -EINVAL;
-	} else if (rdesc->result_data.error_code == BIT(9)) {
+	} else if (rdesc->result_data.error_code & BIT(9)) {
 		/* Authentication failed */
 		return -EBADMSG;
 	}
-- 
1.8.3.1



* [PATCH 3/9] crypto: inside-secure - fix incorrect skcipher output IV
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (3 preceding siblings ...)
  2019-07-02 14:39 ` [PATCH 2/9] crypto: inside-secure - silently return -EINVAL for input error cases Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH 4/9] crypto: inside-secure - fix scatter/gather list to descriptor conversion Pascal van Leeuwen
                   ` (7 subsequent siblings)
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto
  Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

This patch fixes corruption issues with the skcipher output IV
witnessed on x86+EIP197-FPGA (devboard). The original fix, commit
57660b11d5ad ("crypto: inside-secure - implement IV retrieval"),
attempted to write out the result IV through the context record.
However, this is not a reliable mechanism, as there is no way of
knowing whether the hardware context update actually arrived in
memory, so it is possible to read the old contents instead of the
updated IV (and indeed, this is what happened in the x86/FPGA case).

The alternative approach used here exploits the fact that the result
IV for CBC is actually the last cipher block: the last input block in
case of decryption and the last output block in case of encryption.
So the result IV is now taken from the input data buffer or the output
data buffer, respectively, which *is* reliable.

Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
---
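
The key observation: for CBC, the output IV is simply the last cipher
block, so it can be lifted straight out of the request's data buffers.
A condensed sketch of just the copy (the surrounding direction and mode
checks are in the patch below; the helper name is made up):

#include <linux/scatterlist.h>

/*
 * Copy the final cipher block out of a data buffer to serve as the
 * output IV. For decrypt this is the last *input* block, which must be
 * saved up front in case an in-place operation overwrites it; for
 * encrypt it is the last *output* block, read after completion.
 */
static void cbc_copy_output_iv(struct scatterlist *sg, int nents, u8 *iv,
			       unsigned int ivsize, unsigned int totlen)
{
	sg_pcopy_to_buffer(sg, nents, iv, ivsize, totlen - ivsize);
}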
 drivers/crypto/inside-secure/safexcel_cipher.c | 84 +++++++++++++-------------
 1 file changed, 43 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 7977e4c..9aebc0a 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -92,25 +92,6 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
 	token[0].instructions = EIP197_TOKEN_INS_LAST |
 				EIP197_TOKEN_INS_TYPE_CRYTO |
 				EIP197_TOKEN_INS_TYPE_OUTPUT;
-
-	if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
-		u32 last = (EIP197_MAX_TOKENS - 1) - offset;
-
-		token[last].opcode = EIP197_TOKEN_OPCODE_CTX_ACCESS;
-		token[last].packet_length = EIP197_TOKEN_DIRECTION_EXTERNAL |
-					    EIP197_TOKEN_EXEC_IF_SUCCESSFUL|
-					    EIP197_TOKEN_CTX_OFFSET(0x2);
-		token[last].stat = EIP197_TOKEN_STAT_LAST_HASH |
-			EIP197_TOKEN_STAT_LAST_PACKET;
-		token[last].instructions =
-			EIP197_TOKEN_INS_ORIGIN_LEN(block_sz / sizeof(u32)) |
-			EIP197_TOKEN_INS_ORIGIN_IV0;
-
-		/* Store the updated IV values back in the internal context
-		 * registers.
-		 */
-		cdesc->control_data.control1 |= CONTEXT_CONTROL_CRYPTO_STORE;
-	}
 }
 
 static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
@@ -348,6 +329,9 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 				      struct safexcel_cipher_req *sreq,
 				      bool *should_complete, int *ret)
 {
+	struct skcipher_request *areq = skcipher_request_cast(async);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
+	struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct safexcel_result_desc *rdesc;
 	int ndesc = 0;
 
@@ -380,6 +364,18 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
 	}
 
+	/*
+	 * Update IV in req from last crypto output word for CBC modes
+	 */
+	if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
+	    (sreq->direction == SAFEXCEL_ENCRYPT)) {
+		/* For encrypt take the last output word */
+		sg_pcopy_to_buffer(dst, sg_nents(dst), areq->iv,
+				   crypto_skcipher_ivsize(skcipher),
+				   (cryptlen -
+				    crypto_skcipher_ivsize(skcipher)));
+	}
+
 	*should_complete = true;
 
 	return ndesc;
@@ -392,6 +388,8 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 			     unsigned int digestsize, u8 *iv, int *commands,
 			     int *results)
 {
+	struct skcipher_request *areq = skcipher_request_cast(base);
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(areq);
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct safexcel_command_desc *cdesc;
@@ -401,6 +399,19 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
 	int i, ret = 0;
 
+	if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
+	    (sreq->direction == SAFEXCEL_DECRYPT)) {
+		/*
+		 * Save IV from last crypto input word for CBC modes in decrypt
+		 * direction. Need to do this first in case of inplace operation
+		 * as it will be overwritten.
+		 */
+		sg_pcopy_to_buffer(src, sg_nents(src), areq->iv,
+				   crypto_skcipher_ivsize(skcipher),
+				   (totlen -
+				    crypto_skcipher_ivsize(skcipher)));
+	}
+
 	if (src == dst) {
 		nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
 				    DMA_BIDIRECTIONAL);
@@ -570,7 +581,6 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
 {
 	struct skcipher_request *req = skcipher_request_cast(async);
 	struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
-	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(async->tfm);
 	int err;
 
 	if (sreq->needs_inv) {
@@ -581,24 +591,6 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
 		err = safexcel_handle_req_result(priv, ring, async, req->src,
 						 req->dst, req->cryptlen, sreq,
 						 should_complete, ret);
-
-		if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
-			u32 block_sz = 0;
-
-			switch (ctx->alg) {
-			case SAFEXCEL_DES:
-				block_sz = DES_BLOCK_SIZE;
-				break;
-			case SAFEXCEL_3DES:
-				block_sz = DES3_EDE_BLOCK_SIZE;
-				break;
-			case SAFEXCEL_AES:
-				block_sz = AES_BLOCK_SIZE;
-				break;
-			}
-
-			memcpy(req->iv, ctx->base.ctxr->data, block_sz);
-		}
 	}
 
 	return err;
@@ -656,12 +648,22 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
 
 	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);
 
-	if (sreq->needs_inv)
+	if (sreq->needs_inv) {
 		ret = safexcel_cipher_send_inv(async, ring, commands, results);
-	else
+	} else {
+		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+		u8 input_iv[AES_BLOCK_SIZE];
+
+		/*
+		 * Save input IV in case of CBC decrypt mode
+		 * Will be overwritten with output IV prior to use!
+		 */
+		memcpy(input_iv, req->iv, crypto_skcipher_ivsize(skcipher));
+
 		ret = safexcel_send_req(async, ring, sreq, req->src,
-					req->dst, req->cryptlen, 0, 0, req->iv,
+					req->dst, req->cryptlen, 0, 0, input_iv,
 					commands, results);
+	}
 
 	sreq->rdescs = *results;
 	return ret;
-- 
1.8.3.1



* [PATCH 4/9] crypto: inside-secure - fix scatter/gather list to descriptor conversion
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (4 preceding siblings ...)
  2019-07-02 14:39 ` [PATCH 3/9] crypto: inside-secure - fix incorrect skcipher output IV Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH 5/9] crypto: inside-secure - fix EINVAL error (buf overflow) for AEAD decrypt Pascal van Leeuwen
                   ` (6 subsequent siblings)
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto
  Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

Fixed issues in the skcipher and AEAD scatter/gather list to engine
descriptor conversion code which caused either too much or too little
buffer space to be provided to the hardware. This caused errors with the
testmgr extra tests: either kernel panics (on x86-EIP197-FPGA) or engine
descriptor errors 0x1, 0x8 or 0x9 (on Macchiatobin, i.e. Marvell A8K).
With this patch in place, all skcipher and AEAD (extra) tests pass.

Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
---
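
One subtlety handled below: for AEAD, the engine's output length is not
equal to its input length, since encryption appends the authentication
tag while decryption verifies and strips it. As a standalone
illustration of that accounting (hypothetical helper mirroring the
totlen_dst computation in the patch):

#include <linux/types.h>

/* in_len is assoclen + cryptlen, as presented to the engine */
static unsigned int aead_dst_len(unsigned int in_len,
				 unsigned int digestsize, bool decrypt)
{
	return decrypt ? in_len - digestsize : in_len + digestsize;
}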
 drivers/crypto/inside-secure/safexcel_cipher.c | 182 ++++++++++++++++++-------
 1 file changed, 136 insertions(+), 46 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 9aebc0a..c839514 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -54,6 +54,7 @@ struct safexcel_cipher_req {
 	/* Number of result descriptors associated to the request */
 	unsigned int rdescs;
 	bool needs_inv;
+	int  nr_src, nr_dst;
 };
 
 static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
@@ -358,10 +359,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	safexcel_complete(priv, ring);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_BIDIRECTIONAL);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, sg_nents(src), DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, sg_nents(dst), DMA_FROM_DEVICE);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
 	}
 
 	/*
@@ -370,7 +371,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
 	    (sreq->direction == SAFEXCEL_ENCRYPT)) {
 		/* For encrypt take the last output word */
-		sg_pcopy_to_buffer(dst, sg_nents(dst), areq->iv,
+		sg_pcopy_to_buffer(dst, sreq->nr_dst, areq->iv,
 				   crypto_skcipher_ivsize(skcipher),
 				   (cryptlen -
 				    crypto_skcipher_ivsize(skcipher)));
@@ -393,63 +394,99 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct safexcel_command_desc *cdesc;
+	struct safexcel_command_desc *first_cdesc = NULL;
 	struct safexcel_result_desc *rdesc, *first_rdesc = NULL;
 	struct scatterlist *sg;
-	unsigned int totlen = cryptlen + assoclen;
-	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
-	int i, ret = 0;
+	unsigned int totlen;
+	unsigned int totlen_src = cryptlen + assoclen;
+	unsigned int totlen_dst = totlen_src;
+	int n_cdesc = 0, n_rdesc = 0;
+	int queued, i, ret = 0;
+	bool first = true;
 
-	if ((!ctx->aead) && (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
-	    (sreq->direction == SAFEXCEL_DECRYPT)) {
+	sreq->nr_src = sg_nents_for_len(src, totlen_src);
+
+	if (ctx->aead) {
+		/*
+		 * AEAD has auth tag appended to output for encrypt and
+		 * removed from the output for decrypt!
+		 */
+		if (sreq->direction == SAFEXCEL_DECRYPT)
+			totlen_dst -= digestsize;
+		else
+			totlen_dst += digestsize;
+
+		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
+		       ctx->ipad, ctx->state_sz);
+		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) /
+		       sizeof(u32),
+		       ctx->opad, ctx->state_sz);
+	} else if ((ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) &&
+		   (sreq->direction == SAFEXCEL_DECRYPT)) {
 		/*
 		 * Save IV from last crypto input word for CBC modes in decrypt
 		 * direction. Need to do this first in case of inplace operation
 		 * as it will be overwritten.
 		 */
-		sg_pcopy_to_buffer(src, sg_nents(src), areq->iv,
+		sg_pcopy_to_buffer(src, sreq->nr_src, areq->iv,
 				   crypto_skcipher_ivsize(skcipher),
-				   (totlen -
+				   (totlen_src -
 				    crypto_skcipher_ivsize(skcipher)));
 	}
 
+	sreq->nr_dst = sg_nents_for_len(dst, totlen_dst);
+
+	/*
+	 * Remember actual input length, source buffer length may be
+	 * updated in case of inline operation below.
+	 */
+	totlen = totlen_src;
+	queued = totlen_src;
+
 	if (src == dst) {
-		nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
-				    DMA_BIDIRECTIONAL);
-		nr_dst = nr_src;
-		if (!nr_src)
+		sreq->nr_src = max(sreq->nr_src, sreq->nr_dst);
+		sreq->nr_dst = sreq->nr_src;
+		if (unlikely((totlen_src || totlen_dst) &&
+		    (sreq->nr_src <= 0))) {
+			dev_err(priv->dev, "In-place buffer not large enough (need %d bytes)!",
+				max(totlen_src, totlen_dst));
 			return -EINVAL;
+		}
+		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
 	} else {
-		nr_src = dma_map_sg(priv->dev, src, sg_nents(src),
-				    DMA_TO_DEVICE);
-		if (!nr_src)
+		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
+			dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
+				totlen_src);
 			return -EINVAL;
+		}
+		dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
 
-		nr_dst = dma_map_sg(priv->dev, dst, sg_nents(dst),
-				    DMA_FROM_DEVICE);
-		if (!nr_dst) {
-			dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
+		if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
+			dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
+				totlen_dst);
+			dma_unmap_sg(priv->dev, src, sreq->nr_src,
+				     DMA_TO_DEVICE);
 			return -EINVAL;
 		}
+		dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
 	}
 
 	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
 
-	if (ctx->aead) {
-		memcpy(ctx->base.ctxr->data + ctx->key_len / sizeof(u32),
-		       ctx->ipad, ctx->state_sz);
-		memcpy(ctx->base.ctxr->data + (ctx->key_len + ctx->state_sz) / sizeof(u32),
-		       ctx->opad, ctx->state_sz);
-	}
+	/* The EIP cannot deal with zero length input packets! */
+	if (totlen == 0)
+		totlen = 1;
 
 	/* command descriptors */
-	for_each_sg(src, sg, nr_src, i) {
+	for_each_sg(src, sg, sreq->nr_src, i) {
 		int len = sg_dma_len(sg);
 
 		/* Do not overflow the request */
 		if (queued - len < 0)
 			len = queued;
 
-		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
+		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+					   !(queued - len),
 					   sg_dma_address(sg), len, totlen,
 					   ctx->base.ctxr_dma);
 		if (IS_ERR(cdesc)) {
@@ -460,14 +497,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 		n_cdesc++;
 
 		if (n_cdesc == 1) {
-			safexcel_context_control(ctx, base, sreq, cdesc);
-			if (ctx->aead)
-				safexcel_aead_token(ctx, iv, cdesc,
-						    sreq->direction, cryptlen,
-						    assoclen, digestsize);
-			else
-				safexcel_skcipher_token(ctx, iv, cdesc,
-							cryptlen);
+			first_cdesc = cdesc;
 		}
 
 		queued -= len;
@@ -475,23 +505,83 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 			break;
 	}
 
+	if (unlikely(!n_cdesc)) {
+		/*
+		 * Special case: zero length input buffer.
+		 * The engine always needs the 1st command descriptor, however!
+		 */
+		first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
+						 ctx->base.ctxr_dma);
+		n_cdesc = 1;
+	}
+
+	/* Add context control words and token to first command descriptor */
+	safexcel_context_control(ctx, base, sreq, first_cdesc);
+	if (ctx->aead)
+		safexcel_aead_token(ctx, iv, first_cdesc,
+				    sreq->direction, cryptlen,
+				    assoclen, digestsize);
+	else
+		safexcel_skcipher_token(ctx, iv, first_cdesc,
+					cryptlen);
+
 	/* result descriptors */
-	for_each_sg(dst, sg, nr_dst, i) {
-		bool first = !i, last = sg_is_last(sg);
+	for_each_sg(dst, sg, sreq->nr_dst, i) {
+		bool last = (i == sreq->nr_dst - 1);
 		u32 len = sg_dma_len(sg);
 
-		rdesc = safexcel_add_rdesc(priv, ring, first, last,
-					   sg_dma_address(sg), len);
+		/* only allow the part of the buffer we know we need */
+		if (len > totlen_dst)
+			len = totlen_dst;
+		if (unlikely(!len))
+			break;
+		totlen_dst -= len;
+
+		/* skip over AAD space in buffer - not written */
+		if (assoclen) {
+			if (assoclen >= len) {
+				assoclen -= len;
+				continue;
+			}
+			rdesc = safexcel_add_rdesc(priv, ring, first, last,
+						   sg_dma_address(sg) +
+						   assoclen,
+						   len - assoclen);
+			assoclen = 0;
+		} else {
+			rdesc = safexcel_add_rdesc(priv, ring, first, last,
+						   sg_dma_address(sg),
+						   len);
+		}
 		if (IS_ERR(rdesc)) {
 			/* No space left in the result descriptor ring */
 			ret = PTR_ERR(rdesc);
 			goto rdesc_rollback;
 		}
-		if (first)
+		if (first) {
 			first_rdesc = rdesc;
+			first = false;
+		}
 		n_rdesc++;
 	}
 
+	if (unlikely(first)) {
+		/*
+		 * Special case: AEAD decrypt with only AAD data.
+		 * In this case there is NO output data from the engine,
+		 * but the engine still needs a result descriptor!
+		 * Create a dummy one just for catching the result token.
+		 */
+		rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
+		if (IS_ERR(rdesc)) {
+			/* No space left in the result descriptor ring */
+			ret = PTR_ERR(rdesc);
+			goto rdesc_rollback;
+		}
+		first_rdesc = rdesc;
+		n_rdesc = 1;
+	}
+
 	safexcel_rdr_req_set(priv, ring, first_rdesc, base);
 
 	*commands = n_cdesc;
@@ -506,10 +596,10 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
 	if (src == dst) {
-		dma_unmap_sg(priv->dev, src, nr_src, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(priv->dev, src, nr_src, DMA_TO_DEVICE);
-		dma_unmap_sg(priv->dev, dst, nr_dst, DMA_FROM_DEVICE);
+		dma_unmap_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
+		dma_unmap_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE);
 	}
 
 	return ret;
-- 
1.8.3.1



* [PATCH 5/9] crypto: inside-secure - fix EINVAL error (buf overflow) for AEAD decrypt
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (5 preceding siblings ...)
  2019-07-02 14:39 ` [PATCH 4/9] crypto: inside-secure - fix scatter/gather list to descriptor conversion Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH 6/9] crypto: inside-secure: back out parts of earlier HMAC update workaround Pascal van Leeuwen
                   ` (5 subsequent siblings)
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto
  Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

This patch fixes a buffer overflow error (reported as -EINVAL) on AEAD
decrypt operations, by no longer appending the (already verified) ICV
to the output packet, which the API does not expect anyway.
With this fix, all testmgr AEAD (extra) tests now pass.

Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
---
 drivers/crypto/inside-secure/safexcel.h        | 2 +-
 drivers/crypto/inside-secure/safexcel_cipher.c | 7 +++----
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index ed47df0..75c6126 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -481,7 +481,7 @@ static inline void eip197_noop_token(struct safexcel_token *token)
 #define EIP197_TOKEN_INS_ORIGIN_LEN(x)		((x) << 5)
 #define EIP197_TOKEN_INS_TYPE_OUTPUT		BIT(5)
 #define EIP197_TOKEN_INS_TYPE_HASH		BIT(6)
-#define EIP197_TOKEN_INS_TYPE_CRYTO		BIT(7)
+#define EIP197_TOKEN_INS_TYPE_CRYPTO		BIT(7)
 #define EIP197_TOKEN_INS_LAST			BIT(8)
 
 /* Processing Engine Control Data  */
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index c839514..ea122dd 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -91,7 +91,7 @@ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
 	token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
 			EIP197_TOKEN_STAT_LAST_HASH;
 	token[0].instructions = EIP197_TOKEN_INS_LAST |
-				EIP197_TOKEN_INS_TYPE_CRYTO |
+				EIP197_TOKEN_INS_TYPE_CRYPTO |
 				EIP197_TOKEN_INS_TYPE_OUTPUT;
 }
 
@@ -117,14 +117,13 @@ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
 
 	token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
 	token[0].packet_length = assoclen;
-	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH |
-				EIP197_TOKEN_INS_TYPE_OUTPUT;
+	token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
 
 	token[1].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
 	token[1].packet_length = cryptlen;
 	token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
 	token[1].instructions = EIP197_TOKEN_INS_LAST |
-				EIP197_TOKEN_INS_TYPE_CRYTO |
+				EIP197_TOKEN_INS_TYPE_CRYPTO |
 				EIP197_TOKEN_INS_TYPE_HASH |
 				EIP197_TOKEN_INS_TYPE_OUTPUT;
 
-- 
1.8.3.1



* [PATCH 6/9] crypto: inside-secure: back out parts of earlier HMAC update workaround
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (6 preceding siblings ...)
  2019-07-02 14:39 ` [PATCH 5/9] crypto: inside-secure - fix EINVAL error (buf overflow) for AEAD decrypt Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH 7/9] crypto: inside-secure - let HW deal with initial hash digest Pascal van Leeuwen
                   ` (4 subsequent siblings)
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto
  Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

This patch backs out some of the changes done by commit 082ec2d48467
("crypto: inside-secure - add support for HMAC updates"), as that
change merely works around the issue for the basic tests by providing
twice the amount of buffering; it does not cover the much larger
updates performed by the extra tests.
This is in preparation for an actual solution in the next patch(es),
which does not require any extra buffering at all.

Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
---
 drivers/crypto/inside-secure/safexcel_hash.c | 32 +++++++++++-----------------
 1 file changed, 13 insertions(+), 19 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 2c31536..19ac73c 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -41,11 +41,11 @@ struct safexcel_ahash_req {
 	u64 len[2];
 	u64 processed[2];
 
-	u8 cache[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
+	u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 	dma_addr_t cache_dma;
 	unsigned int cache_sz;
 
-	u8 cache_next[SHA512_BLOCK_SIZE << 1] __aligned(sizeof(u32));
+	u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
 };
 
 static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
@@ -89,9 +89,6 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 	cdesc->control_data.control0 |= ctx->alg;
 	cdesc->control_data.control0 |= req->digest;
 
-	if (!req->finish)
-		cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
-
 	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
 		if (req->processed[0] || req->processed[1]) {
 			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
@@ -110,6 +107,9 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
 		}
 
+		if (!req->finish)
+			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
 		/*
 		 * Copy the input digest if needed, and setup the context
 		 * fields. Do this now as we need it to setup the first command
@@ -216,8 +216,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	u64 queued, len, cache_len, cache_max;
 
 	cache_max = crypto_ahash_blocksize(ahash);
-	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
-		cache_max <<= 1;
 
 	queued = len = safexcel_queued_len(req);
 	if (queued <= cache_max)
@@ -229,17 +227,13 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 		/* If this is not the last request and the queued data does not
 		 * fit into full blocks, cache it for the next send() call.
 		 */
-		extra = queued & (crypto_ahash_blocksize(ahash) - 1);
-
-		if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC &&
-		    extra < crypto_ahash_blocksize(ahash))
-			extra += crypto_ahash_blocksize(ahash);
+		extra = queued & (cache_max - 1);
 
 		/* If this is not the last request and the queued data
 		 * is a multiple of a block, cache the last one for now.
 		 */
 		if (!extra)
-			extra = crypto_ahash_blocksize(ahash);
+			extra = cache_max;
 
 		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
 				   req->cache_next, extra,
@@ -247,6 +241,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 
 		queued -= extra;
 		len -= extra;
+
+		if (!queued) {
+			*commands = 0;
+			*results = 0;
+			return 0;
+		}
 	}
 
 	/* Add a command descriptor for the cached data, if any */
@@ -613,8 +613,6 @@ static int safexcel_ahash_update(struct ahash_request *areq)
 		req->len[1]++;
 
 	cache_max = crypto_ahash_blocksize(ahash);
-	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
-		cache_max <<= 1;
 
 	safexcel_ahash_cache(areq, cache_max);
 
@@ -689,8 +687,6 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
 	u32 cache_sz;
 
 	cache_sz = crypto_ahash_blocksize(ahash);
-	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
-		cache_sz <<= 1;
 
 	export->len[0] = req->len[0];
 	export->len[1] = req->len[1];
@@ -718,8 +714,6 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
 		return ret;
 
 	cache_sz = crypto_ahash_blocksize(ahash);
-	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
-		cache_sz <<= 1;
 
 	req->len[0] = export->len[0];
 	req->len[1] = export->len[1];
-- 
1.8.3.1



* [PATCH 7/9] crypto: inside-secure - let HW deal with initial hash digest
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (7 preceding siblings ...)
  2019-07-02 14:39 ` [PATCH 6/9] crypto: inside-secure: back out parts of earlier HMAC update workaround Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-02 14:39 ` [PATCH 8/9] crypto: inside-secure - add support for arbitrary size hash/HMAC updates Pascal van Leeuwen
                   ` (3 subsequent siblings)
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto
  Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

The driver was explicitly loading the initial digest for hash
operations into the hardware. This is not needed, as the hardware can
handle that by itself; letting it do so is more efficient and avoids
any context record coherence issues.

Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
---
 drivers/crypto/inside-secure/safexcel_hash.c | 71 +++-------------------------
 1 file changed, 6 insertions(+), 65 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 19ac73c..1b62608 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -641,8 +641,12 @@ static int safexcel_ahash_final(struct ahash_request *areq)
 	req->last_req = true;
 	req->finish = true;
 
-	/* If we have an overall 0 length request */
-	if (!req->len[0] && !req->len[1] && !areq->nbytes) {
+	if (unlikely(!req->len[0] && !req->len[1] && !areq->nbytes)) {
+		/*
+		 * If we have an overall 0 length *hash* request:
+		 * The HW cannot do 0 length hash, so we provide the correct
+		 * result directly here.
+		 */
 		if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
 			memcpy(areq->result, md5_zero_message_hash,
 			       MD5_DIGEST_SIZE);
@@ -751,12 +755,6 @@ static int safexcel_sha1_init(struct ahash_request *areq)
 
 	memset(req, 0, sizeof(*req));
 
-	req->state[0] = SHA1_H0;
-	req->state[1] = SHA1_H1;
-	req->state[2] = SHA1_H2;
-	req->state[3] = SHA1_H3;
-	req->state[4] = SHA1_H4;
-
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA1_DIGEST_SIZE;
@@ -1063,15 +1061,6 @@ static int safexcel_sha256_init(struct ahash_request *areq)
 
 	memset(req, 0, sizeof(*req));
 
-	req->state[0] = SHA256_H0;
-	req->state[1] = SHA256_H1;
-	req->state[2] = SHA256_H2;
-	req->state[3] = SHA256_H3;
-	req->state[4] = SHA256_H4;
-	req->state[5] = SHA256_H5;
-	req->state[6] = SHA256_H6;
-	req->state[7] = SHA256_H7;
-
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA256_DIGEST_SIZE;
@@ -1125,15 +1114,6 @@ static int safexcel_sha224_init(struct ahash_request *areq)
 
 	memset(req, 0, sizeof(*req));
 
-	req->state[0] = SHA224_H0;
-	req->state[1] = SHA224_H1;
-	req->state[2] = SHA224_H2;
-	req->state[3] = SHA224_H3;
-	req->state[4] = SHA224_H4;
-	req->state[5] = SHA224_H5;
-	req->state[6] = SHA224_H6;
-	req->state[7] = SHA224_H7;
-
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA256_DIGEST_SIZE;
@@ -1299,23 +1279,6 @@ static int safexcel_sha512_init(struct ahash_request *areq)
 
 	memset(req, 0, sizeof(*req));
 
-	req->state[0] = lower_32_bits(SHA512_H0);
-	req->state[1] = upper_32_bits(SHA512_H0);
-	req->state[2] = lower_32_bits(SHA512_H1);
-	req->state[3] = upper_32_bits(SHA512_H1);
-	req->state[4] = lower_32_bits(SHA512_H2);
-	req->state[5] = upper_32_bits(SHA512_H2);
-	req->state[6] = lower_32_bits(SHA512_H3);
-	req->state[7] = upper_32_bits(SHA512_H3);
-	req->state[8] = lower_32_bits(SHA512_H4);
-	req->state[9] = upper_32_bits(SHA512_H4);
-	req->state[10] = lower_32_bits(SHA512_H5);
-	req->state[11] = upper_32_bits(SHA512_H5);
-	req->state[12] = lower_32_bits(SHA512_H6);
-	req->state[13] = upper_32_bits(SHA512_H6);
-	req->state[14] = lower_32_bits(SHA512_H7);
-	req->state[15] = upper_32_bits(SHA512_H7);
-
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA512_DIGEST_SIZE;
@@ -1369,23 +1332,6 @@ static int safexcel_sha384_init(struct ahash_request *areq)
 
 	memset(req, 0, sizeof(*req));
 
-	req->state[0] = lower_32_bits(SHA384_H0);
-	req->state[1] = upper_32_bits(SHA384_H0);
-	req->state[2] = lower_32_bits(SHA384_H1);
-	req->state[3] = upper_32_bits(SHA384_H1);
-	req->state[4] = lower_32_bits(SHA384_H2);
-	req->state[5] = upper_32_bits(SHA384_H2);
-	req->state[6] = lower_32_bits(SHA384_H3);
-	req->state[7] = upper_32_bits(SHA384_H3);
-	req->state[8] = lower_32_bits(SHA384_H4);
-	req->state[9] = upper_32_bits(SHA384_H4);
-	req->state[10] = lower_32_bits(SHA384_H5);
-	req->state[11] = upper_32_bits(SHA384_H5);
-	req->state[12] = lower_32_bits(SHA384_H6);
-	req->state[13] = upper_32_bits(SHA384_H6);
-	req->state[14] = lower_32_bits(SHA384_H7);
-	req->state[15] = upper_32_bits(SHA384_H7);
-
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA512_DIGEST_SIZE;
@@ -1551,11 +1497,6 @@ static int safexcel_md5_init(struct ahash_request *areq)
 
 	memset(req, 0, sizeof(*req));
 
-	req->state[0] = MD5_H0;
-	req->state[1] = MD5_H1;
-	req->state[2] = MD5_H2;
-	req->state[3] = MD5_H3;
-
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = MD5_DIGEST_SIZE;
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 8/9] crypto: inside-secure - add support for arbitrary size hash/HMAC updates
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (8 preceding siblings ...)
  2019-07-02 14:39 ` [PATCH 7/9] crypto: inside-secure - let HW deal with initial hash digest Pascal van Leeuwen
@ 2019-07-02 14:39 ` Pascal van Leeuwen
  2019-07-02 14:40 ` [PATCH 9/9] crypto: inside-secure - add support for 0 length HMAC messages Pascal van Leeuwen
                   ` (2 subsequent siblings)
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:39 UTC (permalink / raw)
  To: linux-crypto
  Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

This patch fixes an issue with hash and HMAC operations that perform
"large" intermediate updates (i.e. combined size > 2 hash blocks) by
actually making use of the hardware's hash continue capabilities.
The original implementation cached these updates in a buffer that was
only 2 hash blocks in size and failed once all update calls combined
overflowed that buffer, which caused the cryptomgr extra tests to fail.
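
As a rough sketch of the caching rule this rework implements (names
taken from the driver, simplified rather than a literal excerpt):

	/* Keep the trailing partial cache block back for the next
	 * update; if the data ends exactly on a cache block boundary,
	 * keep one full block so final() always has data to finish.
	 */
	extra = queued & (HASH_CACHE_SIZE - 1);
	if (!extra)
		extra = HASH_CACHE_SIZE;

Everything ahead of that cached tail is handed to the engine as a hash
continue operation, with the intermediate digest flowing back through
req->state and being reloaded into the context record on the next
send, so updates of arbitrary combined size can now be chained.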

Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
---
 drivers/crypto/inside-secure/safexcel.h      |   4 +-
 drivers/crypto/inside-secure/safexcel_hash.c | 423 +++++++++++++++++----------
 2 files changed, 269 insertions(+), 158 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 75c6126..b68fec3 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -667,6 +667,8 @@ struct safexcel_context {
 	bool exit_inv;
 };
 
+#define HASH_CACHE_SIZE			SHA512_BLOCK_SIZE
+
 struct safexcel_ahash_export_state {
 	u64 len[2];
 	u64 processed[2];
@@ -674,7 +676,7 @@ struct safexcel_ahash_export_state {
 	u32 digest;
 
 	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)];
-	u8 cache[SHA512_BLOCK_SIZE << 1];
+	u8 cache[HASH_CACHE_SIZE];
 };
 
 /*
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 1b62608..59ec7dc 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -35,17 +35,18 @@ struct safexcel_ahash_req {
 
 	u32 digest;
 
-	u8 state_sz;    /* expected sate size, only set once */
+	u8 state_sz;    /* expected state size, only set once */
+	u8 block_sz;    /* block size, only set once */
 	u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
 
 	u64 len[2];
 	u64 processed[2];
 
-	u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
 	dma_addr_t cache_dma;
 	unsigned int cache_sz;
 
-	u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
+	u8 cache_next[HASH_CACHE_SIZE] __aligned(sizeof(u32));
 };
 
 static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
@@ -79,75 +80,99 @@ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
 
 static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 				     struct safexcel_ahash_req *req,
-				     struct safexcel_command_desc *cdesc,
-				     unsigned int digestsize)
+				     struct safexcel_command_desc *cdesc)
 {
 	struct safexcel_crypto_priv *priv = ctx->priv;
-	int i;
+	u64 count = 0;
 
-	cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
 	cdesc->control_data.control0 |= ctx->alg;
-	cdesc->control_data.control0 |= req->digest;
-
-	if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
-		if (req->processed[0] || req->processed[1]) {
-			if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
-				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
-			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
-				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
-			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
-				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
-				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
-			else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
-				 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
-				cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
-
-			cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
+
+	/*
+	 * Copy the input digest if needed, and setup the context
+	 * fields. Do this now as we need it to setup the first command
+	 * descriptor.
+	 */
+	if ((!req->processed[0]) && (!req->processed[1])) {
+		/* First - and possibly only - block of basic hash only */
+		if (req->finish) {
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_TYPE_HASH_OUT |
+				CONTEXT_CONTROL_RESTART_HASH  |
+				/* ensure it's not 0! */
+				CONTEXT_CONTROL_SIZE(1);
 		} else {
-			cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_TYPE_HASH_OUT  |
+				CONTEXT_CONTROL_RESTART_HASH   |
+				CONTEXT_CONTROL_NO_FINISH_HASH |
+				/* ensure it's not 0! */
+				CONTEXT_CONTROL_SIZE(1);
 		}
+		return;
+	}
 
-		if (!req->finish)
-			cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
-
-		/*
-		 * Copy the input digest if needed, and setup the context
-		 * fields. Do this now as we need it to setup the first command
-		 * descriptor.
-		 */
-		if (req->processed[0] || req->processed[1]) {
-			for (i = 0; i < digestsize / sizeof(u32); i++)
-				ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
-
-			if (req->finish) {
-				u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
-				count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
-					  req->processed[1]);
-
-				/* This is a haredware limitation, as the
-				 * counter must fit into an u32. This represents
-				 * a farily big amount of input data, so we
-				 * shouldn't see this.
-				 */
-				if (unlikely(count & 0xffff0000)) {
-					dev_warn(priv->dev,
-						 "Input data is too big\n");
-					return;
-				}
-
-				ctx->base.ctxr->data[i] = cpu_to_le32(count);
+	/* Hash continuation or HMAC, setup (inner) digest from state */
+	memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
+
+	if (req->finish) {
+		/* Compute digest count for hash/HMAC finish operations */
+		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
+		    req->processed[1] ||
+		    (req->processed[0] != req->block_sz)) {
+			count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
+			count += ((0x100000000ULL / EIP197_COUNTER_BLOCK_SIZE) *
+				  req->processed[1]);
+
+			/* This is a hardware limitation, as the
+			 * counter must fit into an u32. This represents
+			 * a fairly big amount of input data, so we
+			 * shouldn't see this.
+			 */
+			if (unlikely(count & 0xffffffff00000000ULL)) {
+				dev_warn(priv->dev,
+					 "Input data is too big\n");
+				return;
 			}
 		}
-	} else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
-		cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
 
-		memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
-		memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
-		       ctx->opad, req->state_sz);
+		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
+		    /* PE HW < 4.4 cannot do HMAC continue, fake using hash */
+		    ((req->processed[1] ||
+		      (req->processed[0] != req->block_sz)))) {
+			/* Basic hash continue operation, need digest + cnt */
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
+				CONTEXT_CONTROL_TYPE_HASH_OUT |
+				CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+			cdesc->control_data.control1 |=
+				CONTEXT_CONTROL_DIGEST_CNT;
+			ctx->base.ctxr->data[req->state_sz >> 2] =
+				cpu_to_le32(count);
+			req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+		} else { /* HMAC */
+			/* Need outer digest for HMAC finalization */
+			memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
+			       ctx->opad, req->state_sz);
+
+			/* Single pass HMAC - no digest count */
+			cdesc->control_data.control0 |=
+				CONTEXT_CONTROL_SIZE(req->state_sz >> 1) |
+				CONTEXT_CONTROL_TYPE_HASH_OUT |
+				CONTEXT_CONTROL_DIGEST_HMAC;
+		}
+	} else { /* Hash continuation, do not finish yet */
+		cdesc->control_data.control0 |=
+			CONTEXT_CONTROL_SIZE(req->state_sz >> 2) |
+			CONTEXT_CONTROL_DIGEST_PRECOMPUTED |
+			CONTEXT_CONTROL_TYPE_HASH_OUT |
+			CONTEXT_CONTROL_NO_FINISH_HASH;
 	}
 }
 
-static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
+static int safexcel_ahash_enqueue(struct ahash_request *areq);
+
+static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
+				      int ring,
 				      struct crypto_async_request *async,
 				      bool *should_complete, int *ret)
 {
@@ -155,6 +180,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
 	u64 cache_len;
 
 	*ret = 0;
@@ -188,9 +214,33 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
 		sreq->cache_sz = 0;
 	}
 
-	if (sreq->finish)
+	if (sreq->finish) {
+		if (sreq->hmac &&
+		    (sreq->digest != CONTEXT_CONTROL_DIGEST_HMAC)) {
+			/* Faking HMAC using hash - need to do outer hash */
+			memcpy(sreq->cache, sreq->state,
+			       crypto_ahash_digestsize(ahash));
+
+			memcpy(sreq->state, ctx->opad, sreq->state_sz);
+
+			sreq->len[0] = sreq->block_sz +
+				       crypto_ahash_digestsize(ahash);
+			sreq->len[1] = 0;
+			sreq->processed[0] = sreq->block_sz;
+			sreq->processed[1] = 0;
+			sreq->hmac = 0;
+
+			ctx->base.needs_inv = true;
+			areq->nbytes = 0;
+			safexcel_ahash_enqueue(areq);
+
+			*should_complete = false; /* Not done yet */
+			return 1;
+		}
+
 		memcpy(areq->result, sreq->state,
 		       crypto_ahash_digestsize(ahash));
+	}
 
 	cache_len = safexcel_queued_len(sreq);
 	if (cache_len)
@@ -205,7 +255,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 				   int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	struct safexcel_crypto_priv *priv = ctx->priv;
@@ -213,27 +262,25 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	struct safexcel_result_desc *rdesc;
 	struct scatterlist *sg;
 	int i, extra = 0, n_cdesc = 0, ret = 0;
-	u64 queued, len, cache_len, cache_max;
-
-	cache_max = crypto_ahash_blocksize(ahash);
+	u64 queued, len, cache_len;
 
 	queued = len = safexcel_queued_len(req);
-	if (queued <= cache_max)
+	if (queued <= HASH_CACHE_SIZE)
 		cache_len = queued;
 	else
 		cache_len = queued - areq->nbytes;
 
-	if (!req->last_req) {
+	if (!req->finish && !req->last_req) {
 		/* If this is not the last request and the queued data does not
-		 * fit into full blocks, cache it for the next send() call.
+		 * fit into full cache blocks, cache it for the next send call.
 		 */
-		extra = queued & (cache_max - 1);
+		extra = queued & (HASH_CACHE_SIZE - 1);
 
 		/* If this is not the last request and the queued data
 		 * is a multiple of a block, cache the last one for now.
 		 */
 		if (!extra)
-			extra = cache_max;
+			extra = HASH_CACHE_SIZE;
 
 		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
 				   req->cache_next, extra,
@@ -272,8 +319,14 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 			goto send_command;
 	}
 
+	/* Skip descriptor generation for zero-length requests */
+	if (!areq->nbytes)
+		goto send_command;
+
 	/* Now handle the current ahash request buffer(s) */
-	req->nents = dma_map_sg(priv->dev, areq->src, sg_nents(areq->src),
+	req->nents = dma_map_sg(priv->dev, areq->src,
+				sg_nents_for_len(areq->src,
+						 areq->nbytes),
 				DMA_TO_DEVICE);
 	if (!req->nents) {
 		ret = -ENOMEM;
@@ -288,7 +341,8 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 			sglen = queued;
 
 		cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
-					   !(queued - sglen), sg_dma_address(sg),
+					   !(queued - sglen),
+					   sg_dma_address(sg),
 					   sglen, len, ctx->base.ctxr_dma);
 		if (IS_ERR(cdesc)) {
 			ret = PTR_ERR(cdesc);
@@ -306,7 +360,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 
 send_command:
 	/* Setup the context options */
-	safexcel_context_control(ctx, req, first_cdesc, req->state_sz);
+	safexcel_context_control(ctx, req, first_cdesc);
 
 	/* Add the token */
 	safexcel_hash_token(first_cdesc, len, req->state_sz);
@@ -355,27 +409,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	return ret;
 }
 
-static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
-{
-	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
-	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
-	unsigned int state_w_sz = req->state_sz / sizeof(u32);
-	u64 processed;
-	int i;
-
-	processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
-	processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
-
-	for (i = 0; i < state_w_sz; i++)
-		if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
-			return true;
-
-	if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
-		return true;
-
-	return false;
-}
-
 static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 				      int ring,
 				      struct crypto_async_request *async,
@@ -523,30 +556,25 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
 /* safexcel_ahash_cache: cache data until at least one request can be sent to
  * the engine, aka. when there is at least 1 block size in the pipe.
  */
-static int safexcel_ahash_cache(struct ahash_request *areq, u32 cache_max)
+static int safexcel_ahash_cache(struct ahash_request *areq)
 {
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
-	u64 queued, cache_len;
+	u64 cache_len;
 
-	/* queued: everything accepted by the driver which will be handled by
-	 * the next send() calls.
-	 * tot sz handled by update() - tot sz handled by send()
-	 */
-	queued = safexcel_queued_len(req);
 	/* cache_len: everything accepted by the driver but not sent yet,
 	 * tot sz handled by update() - last req sz - tot sz handled by send()
 	 */
-	cache_len = queued - areq->nbytes;
+	cache_len = safexcel_queued_len(req);
 
 	/*
 	 * In case there isn't enough bytes to proceed (less than a
 	 * block size), cache the data until we have enough.
 	 */
-	if (cache_len + areq->nbytes <= cache_max) {
+	if (cache_len + areq->nbytes <= HASH_CACHE_SIZE) {
 		sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
 				   req->cache + cache_len,
 				   areq->nbytes, 0);
-		return areq->nbytes;
+		return 0;
 	}
 
 	/* We couldn't cache all the data */
@@ -565,13 +593,25 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
 	if (ctx->base.ctxr) {
 		if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
 		    (req->processed[0] || req->processed[1]) &&
-		    req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
-			/* We're still setting needs_inv here, even though it is
+		    (/* invalidate for basic hash continuation finish */
+		     (req->finish &&
+		      (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
+		     /* invalidate if (i)digest changed */
+		     memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
+		     /* invalidate for HMAC continuation finish */
+		     (req->finish && (req->processed[1] ||
+		      (req->processed[0] != req->block_sz))) ||
+		     /* invalidate for HMAC finish with odigest changed */
+		     (req->finish &&
+		      memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
+			     ctx->opad, req->state_sz))))
+			/*
+			 * We're still setting needs_inv here, even though it is
 			 * cleared right away, because the needs_inv flag can be
 			 * set in other functions and we want to keep the same
 			 * logic.
 			 */
-			ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+			ctx->base.needs_inv = true;
 
 		if (ctx->base.needs_inv) {
 			ctx->base.needs_inv = false;
@@ -601,33 +641,25 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
 static int safexcel_ahash_update(struct ahash_request *areq)
 {
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
-	u32 cache_max;
+	int ret;
 
 	/* If the request is 0 length, do nothing */
 	if (!areq->nbytes)
 		return 0;
 
+	/* Add request to the cache if it fits */
+	ret = safexcel_ahash_cache(areq);
+
+	/* Update total request length */
 	req->len[0] += areq->nbytes;
 	if (req->len[0] < areq->nbytes)
 		req->len[1]++;
 
-	cache_max = crypto_ahash_blocksize(ahash);
-
-	safexcel_ahash_cache(areq, cache_max);
-
-	/*
-	 * We're not doing partial updates when performing an hmac request.
-	 * Everything will be handled by the final() call.
+	/* If not all data could fit into the cache, go process the excess.
+	 * Also go process immediately for an HMAC IV precompute, which
+	 * will never be finished at all, but needs to be processed anyway.
 	 */
-	if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
-		return 0;
-
-	if (req->hmac)
-		return safexcel_ahash_enqueue(areq);
-
-	if (!req->last_req &&
-	    safexcel_queued_len(req) > cache_max)
+	if ((ret && !req->finish) || req->last_req)
 		return safexcel_ahash_enqueue(areq);
 
 	return 0;
@@ -638,7 +670,6 @@ static int safexcel_ahash_final(struct ahash_request *areq)
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 
-	req->last_req = true;
 	req->finish = true;
 
 	if (unlikely(!req->len[0] && !req->len[1] && !areq->nbytes)) {
@@ -667,6 +698,14 @@ static int safexcel_ahash_final(struct ahash_request *areq)
 			       SHA512_DIGEST_SIZE);
 
 		return 0;
+	} else if (unlikely(req->hmac && !req->len[1] &&
+			    (req->len[0] == req->block_sz) &&
+			    !areq->nbytes)) {
+		/* TODO: add support for zero length HMAC */
+		return 0;
+	} else if (req->hmac) {
+		/* Finalize HMAC */
+		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
 	}
 
 	return safexcel_ahash_enqueue(areq);
@@ -676,7 +715,6 @@ static int safexcel_ahash_finup(struct ahash_request *areq)
 {
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 
-	req->last_req = true;
 	req->finish = true;
 
 	safexcel_ahash_update(areq);
@@ -685,12 +723,8 @@ static int safexcel_ahash_finup(struct ahash_request *areq)
 
 static int safexcel_ahash_export(struct ahash_request *areq, void *out)
 {
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	struct safexcel_ahash_export_state *export = out;
-	u32 cache_sz;
-
-	cache_sz = crypto_ahash_blocksize(ahash);
 
 	export->len[0] = req->len[0];
 	export->len[1] = req->len[1];
@@ -700,25 +734,21 @@ static int safexcel_ahash_export(struct ahash_request *areq, void *out)
 	export->digest = req->digest;
 
 	memcpy(export->state, req->state, req->state_sz);
-	memcpy(export->cache, req->cache, cache_sz);
+	memcpy(export->cache, req->cache, HASH_CACHE_SIZE);
 
 	return 0;
 }
 
 static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
 {
-	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	const struct safexcel_ahash_export_state *export = in;
-	u32 cache_sz;
 	int ret;
 
 	ret = crypto_ahash_init(areq);
 	if (ret)
 		return ret;
 
-	cache_sz = crypto_ahash_blocksize(ahash);
-
 	req->len[0] = export->len[0];
 	req->len[1] = export->len[1];
 	req->processed[0] = export->processed[0];
@@ -726,7 +756,7 @@ static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
 
 	req->digest = export->digest;
 
-	memcpy(req->cache, export->cache, cache_sz);
+	memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
 	memcpy(req->state, export->state, req->state_sz);
 
 	return 0;
@@ -758,6 +788,7 @@ static int safexcel_sha1_init(struct ahash_request *areq)
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA1_DIGEST_SIZE;
+	req->block_sz = SHA1_BLOCK_SIZE;
 
 	return 0;
 }
@@ -823,10 +854,23 @@ struct safexcel_alg_template safexcel_alg_sha1 = {
 
 static int safexcel_hmac_sha1_init(struct ahash_request *areq)
 {
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 
-	safexcel_sha1_init(areq);
-	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA1_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len[0]	  = SHA1_BLOCK_SIZE;
+	req->processed[0] = SHA1_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA1_DIGEST_SIZE;
+	req->block_sz = SHA1_BLOCK_SIZE;
+	req->hmac = true;
+
 	return 0;
 }
 
@@ -995,21 +1039,16 @@ static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
 	struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct safexcel_ahash_export_state istate, ostate;
-	int ret, i;
+	int ret;
 
 	ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
 	if (ret)
 		return ret;
 
-	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
-		for (i = 0; i < state_sz / sizeof(u32); i++) {
-			if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
-			    ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
-				ctx->base.needs_inv = true;
-				break;
-			}
-		}
-	}
+	if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr &&
+	    (memcmp(ctx->ipad, istate.state, state_sz) ||
+	     memcmp(ctx->opad, ostate.state, state_sz)))
+		ctx->base.needs_inv = true;
 
 	memcpy(ctx->ipad, &istate.state, state_sz);
 	memcpy(ctx->opad, &ostate.state, state_sz);
@@ -1064,6 +1103,7 @@ static int safexcel_sha256_init(struct ahash_request *areq)
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA256_DIGEST_SIZE;
+	req->block_sz = SHA256_BLOCK_SIZE;
 
 	return 0;
 }
@@ -1117,6 +1157,7 @@ static int safexcel_sha224_init(struct ahash_request *areq)
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA256_DIGEST_SIZE;
+	req->block_sz = SHA256_BLOCK_SIZE;
 
 	return 0;
 }
@@ -1169,10 +1210,23 @@ static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
 
 static int safexcel_hmac_sha224_init(struct ahash_request *areq)
 {
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 
-	safexcel_sha224_init(areq);
-	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len[0]	  = SHA256_BLOCK_SIZE;
+	req->processed[0] = SHA256_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+	req->block_sz = SHA256_BLOCK_SIZE;
+	req->hmac = true;
+
 	return 0;
 }
 
@@ -1225,10 +1279,23 @@ static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
 
 static int safexcel_hmac_sha256_init(struct ahash_request *areq)
 {
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 
-	safexcel_sha256_init(areq);
-	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA256_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len[0]	  = SHA256_BLOCK_SIZE;
+	req->processed[0] = SHA256_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA256_DIGEST_SIZE;
+	req->block_sz = SHA256_BLOCK_SIZE;
+	req->hmac = true;
+
 	return 0;
 }
 
@@ -1282,6 +1349,7 @@ static int safexcel_sha512_init(struct ahash_request *areq)
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA512_DIGEST_SIZE;
+	req->block_sz = SHA512_BLOCK_SIZE;
 
 	return 0;
 }
@@ -1335,6 +1403,7 @@ static int safexcel_sha384_init(struct ahash_request *areq)
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = SHA512_DIGEST_SIZE;
+	req->block_sz = SHA512_BLOCK_SIZE;
 
 	return 0;
 }
@@ -1387,10 +1456,23 @@ static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
 
 static int safexcel_hmac_sha512_init(struct ahash_request *areq)
 {
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 
-	safexcel_sha512_init(areq);
-	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len[0]	  = SHA512_BLOCK_SIZE;
+	req->processed[0] = SHA512_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+	req->block_sz = SHA512_BLOCK_SIZE;
+	req->hmac = true;
+
 	return 0;
 }
 
@@ -1443,10 +1525,23 @@ static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
 
 static int safexcel_hmac_sha384_init(struct ahash_request *areq)
 {
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 
-	safexcel_sha384_init(areq);
-	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, SHA512_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len[0]	  = SHA512_BLOCK_SIZE;
+	req->processed[0] = SHA512_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = SHA512_DIGEST_SIZE;
+	req->block_sz = SHA512_BLOCK_SIZE;
+	req->hmac = true;
+
 	return 0;
 }
 
@@ -1500,6 +1595,7 @@ static int safexcel_md5_init(struct ahash_request *areq)
 	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = MD5_DIGEST_SIZE;
+	req->block_sz = MD5_HMAC_BLOCK_SIZE;
 
 	return 0;
 }
@@ -1545,10 +1641,23 @@ struct safexcel_alg_template safexcel_alg_md5 = {
 
 static int safexcel_hmac_md5_init(struct ahash_request *areq)
 {
+	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 
-	safexcel_md5_init(areq);
-	req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+	memset(req, 0, sizeof(*req));
+
+	/* Start from ipad precompute */
+	memcpy(req->state, ctx->ipad, MD5_DIGEST_SIZE);
+	/* Already processed the key^ipad part now! */
+	req->len[0]	  = MD5_HMAC_BLOCK_SIZE;
+	req->processed[0] = MD5_HMAC_BLOCK_SIZE;
+
+	ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+	req->state_sz = MD5_DIGEST_SIZE;
+	req->block_sz = MD5_HMAC_BLOCK_SIZE;
+	req->hmac = true;
+
 	return 0;
 }
 
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* [PATCH 9/9] crypto: inside-secure - add support for 0 length HMAC messages
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (9 preceding siblings ...)
  2019-07-02 14:39 ` [PATCH 8/9] crypto: inside-secure - add support for arbitrary size hash/HMAC updates Pascal van Leeuwen
@ 2019-07-02 14:40 ` Pascal van Leeuwen
  2019-07-02 21:21 ` [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal Van Leeuwen
  2019-07-26 12:30 ` Herbert Xu
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal van Leeuwen @ 2019-07-02 14:40 UTC (permalink / raw)
  To: linux-crypto
  Cc: antoine.tenart, herbert, davem, Pascal van Leeuwen, Pascal van Leeuwen

From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>

This patch adds support for the specific corner case of performing HMAC
on an empty string (i.e. a payload length of zero). This solves the
last failing cryptomgr extra tests for HMAC.
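
Recall the HMAC construction:

  HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))

For a zero length m the inner hash covers exactly one block, K ^ ipad,
which the precomputed ipad state already accounts for. Since the
engine cannot finalize a zero length continuation, the driver builds
the Merkle-Damgård padding block itself. As an illustrative worked
example (not the literal driver code; HMAC-SHA256, so req->block_sz
is 64 and the length field is big endian):

	u8 cache[64] = { 0 };		/* all-zero pad block */
	cache[0]  = 0x80;		/* append a single 1 bit */
	cache[62] = 64 >> 5;		/* 64 * 8 = 512 bits, high byte 0x02 */
	cache[63] = (64 << 3) & 255;	/* low byte 0x00 */

which matches what the code below writes into req->cache before
finalizing via the outer hash.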

Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>
---
 drivers/crypto/inside-secure/safexcel_hash.c | 47 ++++++++++++++++++++++++++--
 1 file changed, 44 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 59ec7dc..bdbaea9 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -29,6 +29,8 @@ struct safexcel_ahash_req {
 	bool finish;
 	bool hmac;
 	bool needs_inv;
+	bool hmac_zlen;
+	bool len_is_le;
 
 	int nents;
 	dma_addr_t result_dma;
@@ -117,7 +119,7 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 	if (req->finish) {
 		/* Compute digest count for hash/HMAC finish operations */
 		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
-		    req->processed[1] ||
+		    req->hmac_zlen || req->processed[1] ||
 		    (req->processed[0] != req->block_sz)) {
 			count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
 			count += ((0x100000000ULL / EIP197_COUNTER_BLOCK_SIZE) *
@@ -136,6 +138,8 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 		}
 
 		if ((req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ||
+		    /* Special case: zero length HMAC */
+		    req->hmac_zlen ||
 		    /* PE HW < 4.4 cannot do HMAC continue, fake using hash */
 		    ((req->processed[1] ||
 		      (req->processed[0] != req->block_sz)))) {
@@ -144,11 +148,18 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
 				CONTEXT_CONTROL_SIZE((req->state_sz >> 2) + 1) |
 				CONTEXT_CONTROL_TYPE_HASH_OUT |
 				CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+			/* For zero-len HMAC, don't finalize, already padded! */
+			if (req->hmac_zlen)
+				cdesc->control_data.control0 |=
+					CONTEXT_CONTROL_NO_FINISH_HASH;
 			cdesc->control_data.control1 |=
 				CONTEXT_CONTROL_DIGEST_CNT;
 			ctx->base.ctxr->data[req->state_sz >> 2] =
 				cpu_to_le32(count);
 			req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+
+			/* Clear zero-length HMAC flag for next operation! */
+			req->hmac_zlen = false;
 		} else { /* HMAC */
 			/* Need outer digest for HMAC finalization */
 			memcpy(ctx->base.ctxr->data + (req->state_sz >> 2),
@@ -701,8 +712,37 @@ static int safexcel_ahash_final(struct ahash_request *areq)
 	} else if (unlikely(req->hmac && !req->len[1] &&
 			    (req->len[0] == req->block_sz) &&
 			    !areq->nbytes)) {
-		/* TODO: add support for zero length HMAC */
-		return 0;
+		/*
+		 * If we have an overall 0 length *HMAC* request:
+		 * For HMAC, we need to finalize the inner digest
+		 * and then perform the outer hash.
+		 */
+
+		/* generate pad block in the cache */
+		/* start with a hash block of all zeroes */
+		memset(req->cache, 0, req->block_sz);
+		/* set the first byte to 0x80 to 'append a 1 bit' */
+		req->cache[0] = 0x80;
+		/* add the length in bits in the length field */
+		if (req->len_is_le) {
+			/* Little endian length word (e.g. MD5) */
+			req->cache[req->block_sz-8] = (req->block_sz << 3) &
+						      255;
+			req->cache[req->block_sz-7] = (req->block_sz >> 5);
+		} else {
+			/* Big endian length word (e.g. any SHA) */
+			req->cache[req->block_sz-2] = (req->block_sz >> 5);
+			req->cache[req->block_sz-1] = (req->block_sz << 3) &
+						      255;
+		}
+
+		req->len[0] += req->block_sz; /* plus 1 hash block */
+
+		/* Set special zero-length HMAC flag */
+		req->hmac_zlen = true;
+
+		/* Finalize HMAC */
+		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
 	} else if (req->hmac) {
 		/* Finalize HMAC */
 		req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
@@ -1656,6 +1696,7 @@ static int safexcel_hmac_md5_init(struct ahash_request *areq)
 	req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
 	req->state_sz = MD5_DIGEST_SIZE;
 	req->block_sz = MD5_HMAC_BLOCK_SIZE;
+	req->len_is_le = true; /* MD5 is little endian! ... */
 	req->hmac = true;
 
 	return 0;
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 18+ messages in thread

* RE: [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (10 preceding siblings ...)
  2019-07-02 14:40 ` [PATCH 9/9] crypto: inside-secure - add support for 0 length HMAC messages Pascal van Leeuwen
@ 2019-07-02 21:21 ` Pascal Van Leeuwen
  2019-07-26 12:30 ` Herbert Xu
  12 siblings, 0 replies; 18+ messages in thread
From: Pascal Van Leeuwen @ 2019-07-02 21:21 UTC (permalink / raw)
  To: Pascal van Leeuwen, linux-crypto; +Cc: antoine.tenart, herbert, davem

Hi,

I just noticed that two bogus unnumbered patch files (i.e. designated
just [PATCH]) were accidentally included in my patch submission. Please
ignore those.

Sorry about that,

Pascal van Leeuwen
Silicon IP Architect, Multi-Protocol Engines @ Verimatrix
www.insidesecure.com


^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 2/9] crypto: inside-secure - silently return -EINVAL for input error cases
  2019-07-02 14:39 ` [PATCH 2/9] crypto: inside-secure - silently return -EINVAL for input error cases Pascal van Leeuwen
@ 2019-07-05 14:36   ` Antoine Tenart
  2019-07-05 14:43     ` Pascal Van Leeuwen
  0 siblings, 1 reply; 18+ messages in thread
From: Antoine Tenart @ 2019-07-05 14:36 UTC (permalink / raw)
  To: Pascal van Leeuwen
  Cc: linux-crypto, antoine.tenart, herbert, davem, Pascal van Leeuwen,
	Pascal van Leeuwen

Hi Pascal,

On Tue, Jul 02, 2019 at 04:39:53PM +0200, Pascal van Leeuwen wrote:
> From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>
> 
> diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
> index 503fef0..8e8c01d 100644
> --- a/drivers/crypto/inside-secure/safexcel.c
> +++ b/drivers/crypto/inside-secure/safexcel.c
> @@ -694,16 +694,31 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
>  inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
>  				       struct safexcel_result_desc *rdesc)
>  {
> -	if (likely(!rdesc->result_data.error_code))
> +	if (likely((!rdesc->descriptor_overflow) &&
> +		   (!rdesc->buffer_overflow) &&
> +		   (!rdesc->result_data.error_code)))

You don't need the extra () here.

> +	if (rdesc->descriptor_overflow)
> +		dev_err(priv->dev, "Descriptor overflow detected");
> +
> +	if (rdesc->buffer_overflow)
> +		dev_err(priv->dev, "Buffer overflow detected");

You're not returning an error here, is there a reason for that?

I also remember having issues when adding those checks a while ago. Did
you see either of those two error messages when using the crypto engine?

Thanks!
Antoine

-- 
Antoine Ténart, Bootlin
Embedded Linux and Kernel engineering
https://bootlin.com

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 1/9] crypto: inside-secure - keep ivsize for DES ECB modes at 0
  2019-07-02 14:39 ` [PATCH 1/9] crypto: inside-secure - keep ivsize for DES ECB modes at 0 Pascal van Leeuwen
@ 2019-07-05 14:40   ` Antoine Tenart
  0 siblings, 0 replies; 18+ messages in thread
From: Antoine Tenart @ 2019-07-05 14:40 UTC (permalink / raw)
  To: Pascal van Leeuwen
  Cc: linux-crypto, antoine.tenart, herbert, davem, Pascal van Leeuwen,
	Pascal van Leeuwen

Hi Pascal,

On Tue, Jul 02, 2019 at 04:39:52PM +0200, Pascal van Leeuwen wrote:
> From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>
> 
> The driver incorrectly advertised the IV size for DES and 3DES ECB
> mode as being the DES blocksize of 8. This is incorrect as ECB mode
> does not need any IV.
> 
> Signed-off-by: Pascal van Leeuwen <pvanleeuwen@verimatrix.com>

Acked-by: Antoine Tenart <antoine.tenart@bootlin.com>

Thanks!
Antoine

> ---
>  drivers/crypto/inside-secure/safexcel_cipher.c | 2 --
>  1 file changed, 2 deletions(-)
> 
> diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
> index ee8a0c3..7977e4c 100644
> --- a/drivers/crypto/inside-secure/safexcel_cipher.c
> +++ b/drivers/crypto/inside-secure/safexcel_cipher.c
> @@ -1033,7 +1033,6 @@ struct safexcel_alg_template safexcel_alg_ecb_des = {
>  		.decrypt = safexcel_ecb_des_decrypt,
>  		.min_keysize = DES_KEY_SIZE,
>  		.max_keysize = DES_KEY_SIZE,
> -		.ivsize = DES_BLOCK_SIZE,
>  		.base = {
>  			.cra_name = "ecb(des)",
>  			.cra_driver_name = "safexcel-ecb-des",
> @@ -1134,7 +1133,6 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
>  		.decrypt = safexcel_ecb_des3_ede_decrypt,
>  		.min_keysize = DES3_EDE_KEY_SIZE,
>  		.max_keysize = DES3_EDE_KEY_SIZE,
> -		.ivsize = DES3_EDE_BLOCK_SIZE,
>  		.base = {
>  			.cra_name = "ecb(des3_ede)",
>  			.cra_driver_name = "safexcel-ecb-des3_ede",
> -- 
> 1.8.3.1
> 

-- 
Antoine Ténart, Bootlin
Embedded Linux and Kernel engineering
https://bootlin.com

^ permalink raw reply	[flat|nested] 18+ messages in thread

* RE: [PATCH 2/9] crypto: inside-secure - silently return -EINVAL for input error cases
  2019-07-05 14:36   ` Antoine Tenart
@ 2019-07-05 14:43     ` Pascal Van Leeuwen
  2019-07-05 15:50       ` Antoine Tenart
  0 siblings, 1 reply; 18+ messages in thread
From: Pascal Van Leeuwen @ 2019-07-05 14:43 UTC (permalink / raw)
  To: Antoine Tenart, Pascal van Leeuwen
  Cc: linux-crypto, herbert, davem, Pascal Van Leeuwen


> -----Original Message-----
> From: Antoine Tenart <antoine.tenart@bootlin.com>
> Sent: Friday, July 5, 2019 4:36 PM
> To: Pascal van Leeuwen <pascalvanl@gmail.com>
> Cc: linux-crypto@vger.kernel.org; antoine.tenart@bootlin.com; herbert@gondor.apana.org.au; davem@davemloft.net; Pascal Van
> Leeuwen <pvanleeuwen@insidesecure.com>; Pascal Van Leeuwen <pvanleeuwen@verimatrix.com>
> Subject: Re: [PATCH 2/9] crypto: inside-secure - silently return -EINVAL for input error cases
> 
> Hi Pascal,
> 
> On Tue, Jul 02, 2019 at 04:39:53PM +0200, Pascal van Leeuwen wrote:
> > From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>
> >
> > diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
> > index 503fef0..8e8c01d 100644
> > --- a/drivers/crypto/inside-secure/safexcel.c
> > +++ b/drivers/crypto/inside-secure/safexcel.c
> > @@ -694,16 +694,31 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
> >  inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
> >  				       struct safexcel_result_desc *rdesc)
> >  {
> > -	if (likely(!rdesc->result_data.error_code))
> > +	if (likely((!rdesc->descriptor_overflow) &&
> > +		   (!rdesc->buffer_overflow) &&
> > +		   (!rdesc->result_data.error_code)))
> 
> You don't need the extra () here.
> 
> > +	if (rdesc->descriptor_overflow)
> > +		dev_err(priv->dev, "Descriptor overflow detected");
> > +
> > +	if (rdesc->buffer_overflow)
> > +		dev_err(priv->dev, "Buffer overflow detected");
> 
> You're not returning an error here, is there a reason for that?
> 
I guess the reason for that would be that it's a driver-internal error,
but the result may still be just fine ... so I do want testmgr to
continue its checks. These should really only fire during driver
development; see the answer below.

> I also remember having issues when adding those checks a while ago, Did
> you see any of those two error messages when using the crypto engine?
> 
Only during development when I implemented things not fully correctly.

> Thanks!
> Antoine
> 
> --
> Antoine Ténart, Bootlin
> Embedded Linux and Kernel engineering
> https://bootlin.com

Regards,
Pascal van Leeuwen
Silicon IP Architect, Multi-Protocol Engines @ Verimatrix
www.insidesecure.com

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 2/9] crypto: inside-secure - silently return -EINVAL for input error cases
  2019-07-05 14:43     ` Pascal Van Leeuwen
@ 2019-07-05 15:50       ` Antoine Tenart
  0 siblings, 0 replies; 18+ messages in thread
From: Antoine Tenart @ 2019-07-05 15:50 UTC (permalink / raw)
  To: Pascal Van Leeuwen
  Cc: Antoine Tenart, Pascal van Leeuwen, linux-crypto, herbert, davem,
	Pascal Van Leeuwen

On Fri, Jul 05, 2019 at 02:43:16PM +0000, Pascal Van Leeuwen wrote:
> > From: Antoine Tenart <antoine.tenart@bootlin.com>
> > On Tue, Jul 02, 2019 at 04:39:53PM +0200, Pascal van Leeuwen wrote:
> > > From: Pascal van Leeuwen <pvanleeuwen@insidesecure.com>
> > 
> > > +	if (rdesc->descriptor_overflow)
> > > +		dev_err(priv->dev, "Descriptor overflow detected");
> > > +
> > > +	if (rdesc->buffer_overflow)
> > > +		dev_err(priv->dev, "Buffer overflow detected");
> > 
> > You're not returning an error here, is there a reason for that?
> > 
> I guess the reason for that would be that it's a driver internal error, but the
> result may still be just fine ... so I do want testmgr to continue its checks.
> These should really only fire during driver development, see answer below.
> 
> > I also remember having issues when adding those checks a while ago, Did
> > you see any of those two error messages when using the crypto engine?
> > 
> Only during development when I implemented things not fully correctly.

OK, that makes sense.

Thanks!
Antoine

-- 
Antoine Ténart, Bootlin
Embedded Linux and Kernel engineering
https://bootlin.com

^ permalink raw reply	[flat|nested] 18+ messages in thread

* Re: [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues
  2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
                   ` (11 preceding siblings ...)
  2019-07-02 21:21 ` [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal Van Leeuwen
@ 2019-07-26 12:30 ` Herbert Xu
  12 siblings, 0 replies; 18+ messages in thread
From: Herbert Xu @ 2019-07-26 12:30 UTC (permalink / raw)
  To: Pascal van Leeuwen; +Cc: linux-crypto, antoine.tenart, davem, pvanleeuwen

Pascal van Leeuwen <pascalvanl@gmail.com> wrote:
> This patch set fixes all remaining issues with the cryptomgr extra tests
> when run on a Marvell A7K or A8K device (i.e Macchiatobin), resulting in
> a clean boot with the extra tests enabled.
> 
> Pascal van Leeuwen (9):
>  crypto: inside-secure - keep ivsize for DES ECB modes at 0
>  crypto: inside-secure - silently return -EINVAL for input error cases
>  crypto: inside-secure - fix incorrect skcipher output IV
>  crypto: inside-secure - fix scatter/gather list to descriptor
>    conversion
>  crypto: inside-secure - fix EINVAL error (buf overflow) for AEAD
>    decrypt
>  crypto: inside-secure: back out parts of earlier HMAC update
>    workaround
>  crypto: inside-secure - let HW deal with initial hash digest
>  crypto: inside-secure - add support for arbitrary size hash/HMAC
>    updates
>  crypto: inside-secure - add support for 0 length HMAC messages
> 
> drivers/crypto/inside-secure/safexcel.c        |  25 +-
> drivers/crypto/inside-secure/safexcel.h        |   6 +-
> drivers/crypto/inside-secure/safexcel_cipher.c | 265 ++++++++----
> drivers/crypto/inside-secure/safexcel_hash.c   | 553 ++++++++++++++-----------
> 4 files changed, 520 insertions(+), 329 deletions(-)

All applied.  Thanks.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 18+ messages in thread

end of thread (newest message: 2019-07-26 12:31 UTC)

Thread overview: 18+ messages
2019-07-02 14:39 [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal van Leeuwen
2019-07-02 14:39 ` [PATCH] crypto: inside-secure - fix scatter/gather list handling issues Pascal van Leeuwen
2019-07-02 14:39 ` [PATCH] crypto: inside-secure - fix scatter/gather list to descriptor conversion Pascal van Leeuwen
2019-07-02 14:39 ` [PATCH 1/9] crypto: inside-secure - keep ivsize for DES ECB modes at 0 Pascal van Leeuwen
2019-07-05 14:40   ` Antoine Tenart
2019-07-02 14:39 ` [PATCH 2/9] crypto: inside-secure - silently return -EINVAL for input error cases Pascal van Leeuwen
2019-07-05 14:36   ` Antoine Tenart
2019-07-05 14:43     ` Pascal Van Leeuwen
2019-07-05 15:50       ` Antoine Tenart
2019-07-02 14:39 ` [PATCH 3/9] crypto: inside-secure - fix incorrect skcipher output IV Pascal van Leeuwen
2019-07-02 14:39 ` [PATCH 4/9] crypto: inside-secure - fix scatter/gather list to descriptor conversion Pascal van Leeuwen
2019-07-02 14:39 ` [PATCH 5/9] crypto: inside-secure - fix EINVAL error (buf overflow) for AEAD decrypt Pascal van Leeuwen
2019-07-02 14:39 ` [PATCH 6/9] crypto: inside-secure: back out parts of earlier HMAC update workaround Pascal van Leeuwen
2019-07-02 14:39 ` [PATCH 7/9] crypto: inside-secure - let HW deal with initial hash digest Pascal van Leeuwen
2019-07-02 14:39 ` [PATCH 8/9] crypto: inside-secure - add support for arbitrary size hash/HMAC updates Pascal van Leeuwen
2019-07-02 14:40 ` [PATCH 9/9] crypto: inside-secure - add support for 0 length HMAC messages Pascal van Leeuwen
2019-07-02 21:21 ` [PATCH 0/9] crypto: inside-secure - fix cryptomgr extratests issues Pascal Van Leeuwen
2019-07-26 12:30 ` Herbert Xu
