* [PATCH 0/6] Enable hashing and ciphers for v5 CCP
@ 2016-10-13 14:52 Gary R Hook
  2016-10-13 14:52 ` [PATCH 1/6] crypto: ccp - Add SHA-2 support Gary R Hook
                   ` (5 more replies)
  0 siblings, 6 replies; 16+ messages in thread
From: Gary R Hook @ 2016-10-13 14:52 UTC (permalink / raw)
  To: linux-crypto; +Cc: thomas.lendacky, herbert, davem

The following series implements new functions for a version 5
CCP: support for SHA-2, wiring of RSA using the updated
framework, AES GCM mode, and Triple-DES in ECB mode.
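
For a quick sanity check after loading the updated module, the new
algorithms should appear under their driver names in /proc/crypto on a
capable device (assuming registration succeeds), e.g.:

    grep -B1 -A2 -E 'sha384-ccp|sha512-ccp|rsa-ccp|gcm-aes-ccp' /proc/crypto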

---

Gary R Hook (6):
      crypto: ccp - Add SHA-2 support
      crypto: ccp - Remove unneeded sign-extension support
      crypto: ccp - Add support for RSA on the CCP
      crypto: ccp - Add RSA support for a v5 ccp
      crypto: ccp - Enable support for AES GCM on v5 CCPs
      crypto: ccp - Enable 3DES function on v5 CCPs


 drivers/crypto/ccp/Makefile                |    3 
 drivers/crypto/ccp/ccp-crypto-aes-galois.c |  252 +++++++++
 drivers/crypto/ccp/ccp-crypto-des3.c       |  254 +++++++++
 drivers/crypto/ccp/ccp-crypto-main.c       |   37 +
 drivers/crypto/ccp/ccp-crypto-rsa.c        |  258 +++++++++
 drivers/crypto/ccp/ccp-crypto-sha.c        |   22 +
 drivers/crypto/ccp/ccp-crypto.h            |   69 ++-
 drivers/crypto/ccp/ccp-dev-v3.c            |   39 +
 drivers/crypto/ccp/ccp-dev-v5.c            |   67 ++
 drivers/crypto/ccp/ccp-dev.h               |   21 +
 drivers/crypto/ccp/ccp-ops.c               |  772 ++++++++++++++++++++++++----
 drivers/crypto/ccp/ccp-pci.c               |    2 
 include/linux/ccp.h                        |  103 ++++
 13 files changed, 1768 insertions(+), 131 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-galois.c
 create mode 100644 drivers/crypto/ccp/ccp-crypto-des3.c
 create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c

* [PATCH 1/6] crypto: ccp - Add SHA-2 support
  2016-10-13 14:52 [PATCH 0/6] Enable hashing and ciphers for v5 CCP Gary R Hook
@ 2016-10-13 14:52 ` Gary R Hook
  2016-10-13 19:35   ` Tom Lendacky
  2016-10-13 14:53 ` [PATCH 2/6] crypto: ccp - Remove unneeded sign-extension support Gary R Hook
                   ` (4 subsequent siblings)
  5 siblings, 1 reply; 16+ messages in thread
From: Gary R Hook @ 2016-10-13 14:52 UTC (permalink / raw)
  To: linux-crypto; +Cc: thomas.lendacky, herbert, davem

Incorporate 384-bit and 512-bit hashing for a version 5 CCP
device.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-sha.c |   22 +++++++++++
 drivers/crypto/ccp/ccp-crypto.h     |    9 +++--
 drivers/crypto/ccp/ccp-ops.c        |   70 +++++++++++++++++++++++++++++++++++
 include/linux/ccp.h                 |    3 ++
 4 files changed, 101 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 84a652b..6b46eea 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -146,6 +146,12 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
 	case CCP_SHA_TYPE_256:
 		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
 		break;
+	case CCP_SHA_TYPE_384:
+		rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_512:
+		rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
+		break;
 	default:
 		/* Should never get here */
 		break;
@@ -393,6 +399,22 @@ static struct ccp_sha_def sha_algs[] = {
 		.digest_size	= SHA256_DIGEST_SIZE,
 		.block_size	= SHA256_BLOCK_SIZE,
 	},
+	{
+		.version	= CCP_VERSION(5, 0),
+		.name		= "sha384",
+		.drv_name	= "sha384-ccp",
+		.type		= CCP_SHA_TYPE_384,
+		.digest_size	= SHA384_DIGEST_SIZE,
+		.block_size	= SHA384_BLOCK_SIZE,
+	},
+	{
+		.version	= CCP_VERSION(5, 0),
+		.name		= "sha512",
+		.drv_name	= "sha512-ccp",
+		.type		= CCP_SHA_TYPE_512,
+		.digest_size	= SHA512_DIGEST_SIZE,
+		.block_size	= SHA512_BLOCK_SIZE,
+	},
 };
 
 static int ccp_register_hmac_alg(struct list_head *head,
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 8335b32..ae442ac 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -137,9 +137,12 @@ struct ccp_aes_cmac_exp_ctx {
 	u8 buf[AES_BLOCK_SIZE];
 };
 
-/***** SHA related defines *****/
-#define MAX_SHA_CONTEXT_SIZE	SHA256_DIGEST_SIZE
-#define MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
+/*
+ * SHA-related defines
+ * These values must be large enough to accommodate any variant
+ */
+#define MAX_SHA_CONTEXT_SIZE	SHA512_DIGEST_SIZE
+#define MAX_SHA_BLOCK_SIZE	SHA512_BLOCK_SIZE
 
 struct ccp_sha_ctx {
 	struct scatterlist opad_sg;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 50fae44..8fedb14 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -41,6 +41,20 @@ static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
 	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
 };
 
+static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
+	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
+	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
+	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
+	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
+};
+
+static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
+	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
+	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
+	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
+	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
+};
+
 #define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
 					ccp_gen_jobid(ccp) : 0)
 
@@ -963,6 +977,16 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			return -EINVAL;
 		block_size = SHA256_BLOCK_SIZE;
 		break;
+	case CCP_SHA_TYPE_384:
+		if (sha->ctx_len < SHA384_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA384_BLOCK_SIZE;
+		break;
+	case CCP_SHA_TYPE_512:
+		if (sha->ctx_len < SHA512_DIGEST_SIZE)
+			return -EINVAL;
+		block_size = SHA512_BLOCK_SIZE;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -1050,6 +1074,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		sb_count = 1;
 		ooffset = ioffset = 0;
 		break;
+	case CCP_SHA_TYPE_384:
+		digest_size = SHA384_DIGEST_SIZE;
+		init = (void *) ccp_sha384_init;
+		ctx_size = SHA512_DIGEST_SIZE;
+		sb_count = 2;
+		ioffset = 0;
+		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
+		break;
+	case CCP_SHA_TYPE_512:
+		digest_size = SHA512_DIGEST_SIZE;
+		init = (void *) ccp_sha512_init;
+		ctx_size = SHA512_DIGEST_SIZE;
+		sb_count = 2;
+		ooffset = ioffset = 0;
+		break;
 	default:
 		ret = -EINVAL;
 		goto e_data;
@@ -1068,6 +1107,11 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	op.u.sha.type = sha->type;
 	op.u.sha.msg_bits = sha->msg_bits;
 
+	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
+	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
+	 * first slot, and the left half in the second. Each portion must then
+	 * be in little endian format: use the 256-bit byte swap option.
+	 */
 	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
@@ -1079,6 +1123,13 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		case CCP_SHA_TYPE_256:
 			memcpy(ctx.address + ioffset, init, ctx_size);
 			break;
+		case CCP_SHA_TYPE_384:
+		case CCP_SHA_TYPE_512:
+			memcpy(ctx.address + ctx_size / 2, init,
+			       ctx_size / 2);
+			memcpy(ctx.address, init + ctx_size / 2,
+			       ctx_size / 2);
+			break;
 		default:
 			ret = -EINVAL;
 			goto e_ctx;
@@ -1145,6 +1196,15 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 					sha->ctx, 0,
 					digest_size);
 			break;
+		case CCP_SHA_TYPE_384:
+		case CCP_SHA_TYPE_512:
+			ccp_get_dm_area(&ctx, 0,
+					sha->ctx, LSB_ITEM_SIZE - ooffset,
+					LSB_ITEM_SIZE);
+			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
+					sha->ctx, 0,
+					LSB_ITEM_SIZE - ooffset);
+			break;
 		default:
 			ret = -EINVAL;
 			goto e_ctx;
@@ -1182,6 +1242,16 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			       ctx.address + ooffset,
 			       digest_size);
 			break;
+		case CCP_SHA_TYPE_384:
+		case CCP_SHA_TYPE_512:
+			memcpy(hmac_buf + block_size,
+			       ctx.address + LSB_ITEM_SIZE + ooffset,
+			       LSB_ITEM_SIZE);
+			memcpy(hmac_buf + block_size +
+			       (LSB_ITEM_SIZE - ooffset),
+			       ctx.address,
+			       LSB_ITEM_SIZE);
+			break;
 		default:
 			ret = -EINVAL;
 			goto e_ctx;
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index a765333..1a3e0b5 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -249,8 +249,11 @@ enum ccp_sha_type {
 	CCP_SHA_TYPE_1 = 1,
 	CCP_SHA_TYPE_224,
 	CCP_SHA_TYPE_256,
+	CCP_SHA_TYPE_384,
+	CCP_SHA_TYPE_512,
 	CCP_SHA_TYPE__LAST,
 };
+#define	CCP_SHA_CTXSIZE		SHA512_DIGEST_SIZE
 
 /**
  * struct ccp_sha_engine - CCP SHA operation

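A note on the split-context handling in ccp_run_sha_cmd() above: as the
two memcpy() calls read, the 64-byte SHA-384/512 state is staged across
two adjacent 32-byte LSB slots with its halves exchanged. Illustratively
(a reading of the code, not separate documentation):

	/* LSB staging of a SHA-384/512 context, two 32-byte slots:
	 *
	 *   ctx.address + 0  : H4 H5 H6 H7   (right half of the state)
	 *   ctx.address + 32 : H0 H1 H2 H3   (left half of the state)
	 *
	 * where each Hn is a 64-bit word; the 256-bit byte-swap option
	 * then gives the engine each slot in the little-endian form it
	 * expects.
	 */

The retrieval cases for SHA-384/512 undo the same exchange, with ooffset
(16 bytes for SHA-384, 0 for SHA-512) accounting for the shorter digest.
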
* [PATCH 2/6] crypto: ccp - Remove unneeded sign-extension support
  2016-10-13 14:52 [PATCH 0/6] Enable hashing and ciphers for v5 CCP Gary R Hook
  2016-10-13 14:52 ` [PATCH 1/6] crypto: ccp - Add SHA-2 support Gary R Hook
@ 2016-10-13 14:53 ` Gary R Hook
  2016-10-13 19:57   ` Tom Lendacky
  2016-10-13 14:53 ` [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP Gary R Hook
                   ` (3 subsequent siblings)
  5 siblings, 1 reply; 16+ messages in thread
From: Gary R Hook @ 2016-10-13 14:53 UTC (permalink / raw)
  To: linux-crypto; +Cc: thomas.lendacky, herbert, davem

The reverse-get/set functions can be simplified by
eliminating unused code.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
---
 drivers/crypto/ccp/ccp-ops.c |  145 +++++++++++++++++-------------------------
 1 file changed, 59 insertions(+), 86 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 8fedb14..82cc637 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -198,62 +198,46 @@ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
 }
 
 static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
+				   unsigned int wa_offset,
 				   struct scatterlist *sg,
-				   unsigned int len, unsigned int se_len,
-				   bool sign_extend)
+				   unsigned int sg_offset,
+				   unsigned int len)
 {
-	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
-	u8 buffer[CCP_REVERSE_BUF_SIZE];
-
-	if (WARN_ON(se_len > sizeof(buffer)))
-		return -EINVAL;
-
-	sg_offset = len;
-	dm_offset = 0;
-	nbytes = len;
-	while (nbytes) {
-		sb_len = min_t(unsigned int, nbytes, se_len);
-		sg_offset -= sb_len;
-
-		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
-		for (i = 0; i < sb_len; i++)
-			wa->address[dm_offset + i] = buffer[sb_len - i - 1];
-
-		dm_offset += sb_len;
-		nbytes -= sb_len;
-
-		if ((sb_len != se_len) && sign_extend) {
-			/* Must sign-extend to nearest sign-extend length */
-			if (wa->address[dm_offset - 1] & 0x80)
-				memset(wa->address + dm_offset, 0xff,
-				       se_len - sb_len);
-		}
+	u8 *p, *q;
+
+	ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
+
+	p = wa->address + wa_offset;
+	q = p + len - 1;
+	while (p < q) {
+		*p = *p ^ *q;
+		*q = *p ^ *q;
+		*p = *p ^ *q;
+		p++;
+		q--;
 	}
-
 	return 0;
 }
 
 static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
+				    unsigned int wa_offset,
 				    struct scatterlist *sg,
+				    unsigned int sg_offset,
 				    unsigned int len)
 {
-	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
-	u8 buffer[CCP_REVERSE_BUF_SIZE];
-
-	sg_offset = 0;
-	dm_offset = len;
-	nbytes = len;
-	while (nbytes) {
-		sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
-		dm_offset -= sb_len;
-
-		for (i = 0; i < sb_len; i++)
-			buffer[sb_len - i - 1] = wa->address[dm_offset + i];
-		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);
-
-		sg_offset += sb_len;
-		nbytes -= sb_len;
+	u8 *p, *q;
+
+	p = wa->address + wa_offset;
+	q = p + len - 1;
+	while (p < q) {
+		*p = *p ^ *q;
+		*q = *p ^ *q;
+		*p = *p ^ *q;
+		p++;
+		q--;
 	}
+
+	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
 }
 
 static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
@@ -1294,7 +1278,9 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	struct ccp_data dst;
 	struct ccp_op op;
 	unsigned int sb_count, i_len, o_len;
-	int ret;
+	unsigned int dm_offset;
+	int i = 0;
+	int ret = 0;
 
 	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
 		return -EINVAL;
@@ -1331,8 +1317,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	if (ret)
 		goto e_sb;
 
-	ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
-				      CCP_SB_BYTES, false);
+	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
 	if (ret)
 		goto e_exp;
 	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
@@ -1350,13 +1335,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	if (ret)
 		goto e_exp;
 
-	ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
-				      CCP_SB_BYTES, false);
+	ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len);
 	if (ret)
 		goto e_src;
-	src.address += o_len;	/* Adjust the address for the copy operation */
-	ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
-				      CCP_SB_BYTES, false);
+	ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len);
 	if (ret)
 		goto e_src;
-	src.address -= o_len;	/* Reset the address to original value */
@@ -1384,7 +1366,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		goto e_dst;
 	}
 
-	ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);
+	ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);
 
 e_dst:
 	ccp_free_data(&dst, cmd_q);
@@ -1636,25 +1618,22 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	save = src.address;
 
 	/* Copy the ECC modulus */
-	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-				      CCP_ECC_OPERAND_SIZE, false);
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
 	if (ret)
 		goto e_src;
 	src.address += CCP_ECC_OPERAND_SIZE;
 
 	/* Copy the first operand */
-	ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
-				      ecc->u.mm.operand_1_len,
-				      CCP_ECC_OPERAND_SIZE, false);
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0,
+				      ecc->u.mm.operand_1_len);
 	if (ret)
 		goto e_src;
 	src.address += CCP_ECC_OPERAND_SIZE;
 
 	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
 		/* Copy the second operand */
-		ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
-					      ecc->u.mm.operand_2_len,
-					      CCP_ECC_OPERAND_SIZE, false);
+		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0,
+					      ecc->u.mm.operand_2_len);
 		if (ret)
 			goto e_src;
 		src.address += CCP_ECC_OPERAND_SIZE;
@@ -1693,7 +1672,8 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	}
 
 	/* Save the ECC result */
-	ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);
+	ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0,
+				CCP_ECC_MODULUS_BYTES);
 
 e_dst:
 	ccp_dm_free(&dst);
@@ -1761,22 +1741,19 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	save = src.address;
 
 	/* Copy the ECC modulus */
-	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-				      CCP_ECC_OPERAND_SIZE, false);
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len);
 	if (ret)
 		goto e_src;
 	src.address += CCP_ECC_OPERAND_SIZE;
 
 	/* Copy the first point X and Y coordinate */
-	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
-				      ecc->u.pm.point_1.x_len,
-				      CCP_ECC_OPERAND_SIZE, false);
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0,
+				      ecc->u.pm.point_1.x_len);
 	if (ret)
 		goto e_src;
 	src.address += CCP_ECC_OPERAND_SIZE;
-	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
-				      ecc->u.pm.point_1.y_len,
-				      CCP_ECC_OPERAND_SIZE, false);
+	ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0,
+				      ecc->u.pm.point_1.y_len);
 	if (ret)
 		goto e_src;
 	src.address += CCP_ECC_OPERAND_SIZE;
@@ -1787,15 +1764,13 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
 		/* Copy the second point X and Y coordinate */
-		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
-					      ecc->u.pm.point_2.x_len,
-					      CCP_ECC_OPERAND_SIZE, false);
+		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0,
+					      ecc->u.pm.point_2.x_len);
 		if (ret)
 			goto e_src;
 		src.address += CCP_ECC_OPERAND_SIZE;
-		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
-					      ecc->u.pm.point_2.y_len,
-					      CCP_ECC_OPERAND_SIZE, false);
+		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0,
+					      ecc->u.pm.point_2.y_len);
 		if (ret)
 			goto e_src;
 		src.address += CCP_ECC_OPERAND_SIZE;
@@ -1805,19 +1780,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		src.address += CCP_ECC_OPERAND_SIZE;
 	} else {
 		/* Copy the Domain "a" parameter */
-		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
-					      ecc->u.pm.domain_a_len,
-					      CCP_ECC_OPERAND_SIZE, false);
+		ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0,
+					      ecc->u.pm.domain_a_len);
 		if (ret)
 			goto e_src;
 		src.address += CCP_ECC_OPERAND_SIZE;
 
 		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
 			/* Copy the scalar value */
-			ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
-						      ecc->u.pm.scalar_len,
-						      CCP_ECC_OPERAND_SIZE,
-						      false);
+			ret = ccp_reverse_set_dm_area(&src, 0,
+						      ecc->u.pm.scalar, 0,
+						      ecc->u.pm.scalar_len);
 			if (ret)
 				goto e_src;
 			src.address += CCP_ECC_OPERAND_SIZE;
@@ -1862,10 +1835,10 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	save = dst.address;
 
 	/* Save the ECC result X and Y coordinates */
-	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
+	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0,
 				CCP_ECC_MODULUS_BYTES);
 	dst.address += CCP_ECC_OUTPUT_SIZE;
-	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
+	ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0,
 				CCP_ECC_MODULUS_BYTES);
 	dst.address += CCP_ECC_OUTPUT_SIZE;
 

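One remark on the new reversal helpers: the three-XOR sequence is an
in-place byte swap, and it is only safe because the p < q test keeps
the two pointers distinct (XOR-swapping a byte with itself would zero
it). An equivalent sketch of the same loop using the kernel's swap()
macro from linux/kernel.h, with a plain temporary instead of XOR:

	p = wa->address + wa_offset;
	q = p + len - 1;
	while (p < q) {
		swap(*p, *q);	/* temporary-based exchange */
		p++;
		q--;
	}

Note the increments stay outside the macro arguments, since swap()
evaluates its arguments more than once.
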
* [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP
  2016-10-13 14:52 [PATCH 0/6] Enable hashing and ciphers for v5 CCP Gary R Hook
  2016-10-13 14:52 ` [PATCH 1/6] crypto: ccp - Add SHA-2 support Gary R Hook
  2016-10-13 14:53 ` [PATCH 2/6] crypto: ccp - Remove unneeded sign-extension support Gary R Hook
@ 2016-10-13 14:53 ` Gary R Hook
  2016-10-13 18:25   ` Stephan Mueller
  2016-10-13 21:06   ` Tom Lendacky
  2016-10-13 14:53 ` [PATCH 4/6] crypto: ccp - Add RSA support for a v5 ccp Gary R Hook
                   ` (2 subsequent siblings)
  5 siblings, 2 replies; 16+ messages in thread
From: Gary R Hook @ 2016-10-13 14:53 UTC (permalink / raw)
  To: linux-crypto; +Cc: thomas.lendacky, herbert, davem

Wire up the v3 CCP as an RSA (akcipher) provider.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
---
 drivers/crypto/ccp/Makefile          |    1 
 drivers/crypto/ccp/ccp-crypto-main.c |   15 ++
 drivers/crypto/ccp/ccp-crypto-rsa.c  |  258 ++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp-crypto.h      |   24 +++
 drivers/crypto/ccp/ccp-dev-v3.c      |   38 +++++
 drivers/crypto/ccp/ccp-ops.c         |    1 
 include/linux/ccp.h                  |   34 ++++
 7 files changed, 370 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 346ceb8..23f89b7 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -12,4 +12,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
 		   ccp-crypto-aes.o \
 		   ccp-crypto-aes-cmac.o \
 		   ccp-crypto-aes-xts.o \
+		   ccp-crypto-rsa.o \
 		   ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index e0380e5..f3c4c25 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -33,6 +33,10 @@ static unsigned int sha_disable;
 module_param(sha_disable, uint, 0444);
 MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
 
+static unsigned int rsa_disable;
+module_param(rsa_disable, uint, 0444);
+MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
+
 /* List heads for the supported algorithms */
 static LIST_HEAD(hash_algs);
 static LIST_HEAD(cipher_algs);
@@ -343,6 +347,14 @@ static int ccp_register_algs(void)
 			return ret;
 	}
 
+	if (!rsa_disable) {
+		ret = ccp_register_rsa_algs();
+		if (ret) {
+			rsa_disable = 1;
+			return ret;
+		}
+	}
+
 	return 0;
 }
 
@@ -362,6 +374,9 @@ static void ccp_unregister_algs(void)
 		list_del(&ablk_alg->entry);
 		kfree(ablk_alg);
 	}
+
+	if (!rsa_disable)
+		ccp_unregister_rsa_algs();
 }
 
 static int ccp_crypto_init(void)
diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
new file mode 100644
index 0000000..7dab43b
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -0,0 +1,258 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <gary.hook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mpi.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+static inline struct akcipher_request *akcipher_request_cast(
+	struct crypto_async_request *req)
+{
+	return container_of(req, struct akcipher_request, base);
+}
+
+static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
+{
+	struct akcipher_request *req = akcipher_request_cast(async_req);
+	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+
+	if (!ret)
+		req->dst_len = rctx->cmd.u.rsa.d_len;
+
+	ret = 0;
+
+	return ret;
+}
+
+static int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
+{
+	return CCP_RSA_MAXMOD;
+}
+
+static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
+	int ret = 0;
+
+	if (!ctx->u.rsa.pkey.d && !ctx->u.rsa.pkey.e)
+		return -EINVAL;
+
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_RSA;
+	rctx->cmd.u.rsa.mode = encrypt ? CCP_RSA_ENCRYPT : CCP_RSA_DECRYPT;
+
+	rctx->cmd.u.rsa.pkey = ctx->u.rsa.pkey;
+	rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len;
+	rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
+	rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
+	rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
+	rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
+	if (ctx->u.rsa.pkey.d) {
+		rctx->cmd.u.rsa.d_sg = &ctx->u.rsa.d_sg;
+		rctx->cmd.u.rsa.d_len = ctx->u.rsa.d_len;
+	}
+
+	rctx->cmd.u.rsa.src = req->src;
+	rctx->cmd.u.rsa.src_len = req->src_len;
+	rctx->cmd.u.rsa.dst = req->dst;
+	rctx->cmd.u.rsa.dst_len = req->dst_len;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_rsa_encrypt(struct akcipher_request *req)
+{
+	return ccp_rsa_crypt(req, true);
+}
+
+static int ccp_rsa_decrypt(struct akcipher_request *req)
+{
+	return ccp_rsa_crypt(req, false);
+}
+
+static void ccp_free_mpi_key(struct ccp_rsa_key *key)
+{
+	mpi_free(key->d);
+	key->d = NULL;
+	mpi_free(key->e);
+	key->e = NULL;
+	mpi_free(key->n);
+	key->n = NULL;
+}
+
+static int ccp_check_key_length(unsigned int len)
+{
+	/* In bits */
+	if (len < 8 || len > 16384)
+		return -EINVAL;
+	return 0;
+}
+
+static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
+{
+	/* Clean up old key data */
+	kfree(ctx->u.rsa.e_buf);
+	ctx->u.rsa.e_buf = NULL;
+	ctx->u.rsa.e_len = 0;
+	kfree(ctx->u.rsa.n_buf);
+	ctx->u.rsa.n_buf = NULL;
+	ctx->u.rsa.n_len = 0;
+	kfree(ctx->u.rsa.d_buf);
+	ctx->u.rsa.d_buf = NULL;
+	ctx->u.rsa.d_len = 0;
+}
+
+static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+			  unsigned int keylen, bool public)
+{
+	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct rsa_key raw_key;
+	unsigned int n_size;
+	int ret;
+
+	if (!ctx)
+		return -EINVAL;
+
+	ccp_rsa_free_key_bufs(ctx);
+	memset(&raw_key, 0, sizeof(raw_key));
+
+	/* Code borrowed from crypto/rsa.c */
+	if (public)
+		ret = rsa_parse_pub_key(&raw_key, key, keylen);
+	else
+		ret = rsa_parse_priv_key(&raw_key, key, keylen);
+	if (ret)
+		goto e_ret;
+
+	ret = -EINVAL;
+
+	ctx->u.rsa.pkey.e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
+	if (!ctx->u.rsa.pkey.e)
+		goto e_ret;
+	ctx->u.rsa.e_buf = mpi_get_buffer(ctx->u.rsa.pkey.e,
+					  &ctx->u.rsa.e_len, NULL);
+	if (!ctx->u.rsa.e_buf)
+		goto e_key;
+	sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len);
+
+	ctx->u.rsa.pkey.n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
+	if (!ctx->u.rsa.pkey.n)
+		goto e_nkey;
+	n_size = mpi_get_size(ctx->u.rsa.pkey.n);
+	if (ccp_check_key_length(n_size << 3))
+		goto e_nkey;
+	ctx->u.rsa.key_len = n_size;
+	ctx->u.rsa.n_buf = mpi_get_buffer(ctx->u.rsa.pkey.n,
+					  &ctx->u.rsa.n_len, NULL);
+	if (!ctx->u.rsa.n_buf)
+		goto e_nkey;
+	sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);
+
+	if (!public) {
+		ctx->u.rsa.pkey.d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
+		if (!ctx->u.rsa.pkey.d)
+			goto e_nkey;
+		ctx->u.rsa.d_buf = mpi_get_buffer(ctx->u.rsa.pkey.d,
+						  &ctx->u.rsa.d_len, NULL);
+		if (!ctx->u.rsa.d_buf)
+			goto e_dkey;
+		sg_init_one(&ctx->u.rsa.d_sg, ctx->u.rsa.d_buf,
+			    ctx->u.rsa.d_len);
+	}
+
+	return 0;
+
+e_dkey:
+	kfree(ctx->u.rsa.n_buf);
+e_nkey:
+	kfree(ctx->u.rsa.e_buf);
+e_key:
+	ccp_free_mpi_key(&ctx->u.rsa.pkey);
+e_ret:
+	return ret;
+}
+
+static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
+			      unsigned int keylen)
+{
+	return ccp_rsa_setkey(tfm, key, keylen, false);
+}
+
+static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
+			     unsigned int keylen)
+{
+	return ccp_rsa_setkey(tfm, key, keylen, true);
+}
+
+static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+	ctx->complete = ccp_rsa_complete;
+
+	return 0;
+}
+
+static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+
+	ccp_rsa_free_key_bufs(ctx);
+}
+
+static struct akcipher_alg rsa = {
+	.encrypt = ccp_rsa_encrypt,
+	.decrypt = ccp_rsa_decrypt,
+	.sign = NULL,
+	.verify = NULL,
+	.set_pub_key = ccp_rsa_setpubkey,
+	.set_priv_key = ccp_rsa_setprivkey,
+	.max_size = ccp_rsa_maxsize,
+	.init = ccp_rsa_init_tfm,
+	.exit = ccp_rsa_exit_tfm,
+	.reqsize = sizeof(struct ccp_rsa_req_ctx),
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "rsa-ccp",
+		.cra_priority = 100,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct ccp_ctx),
+	},
+};
+
+int ccp_register_rsa_algs(void)
+{
+	int ret;
+
+	/* Register the RSA algorithm in standard mode
+	 * This works for CCP v3 and later
+	 */
+	ret = crypto_register_akcipher(&rsa);
+	return ret;
+}
+
+void ccp_unregister_rsa_algs(void)
+{
+	crypto_unregister_akcipher(&rsa);
+}
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index ae442ac..4a1d206 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -22,6 +22,7 @@
 #include <crypto/ctr.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
+#include <crypto/internal/rsa.h>
 
 #define CCP_CRA_PRIORITY	300
 
@@ -155,6 +156,26 @@ struct ccp_sha_ctx {
 	struct crypto_shash *hmac_tfm;
 };
 
+/***** RSA related defines *****/
+
+struct ccp_rsa_ctx {
+	unsigned int key_len; /* in bytes */
+	struct ccp_rsa_key pkey;
+	struct scatterlist e_sg;
+	u8 *e_buf;
+	unsigned int e_len;
+	struct scatterlist n_sg;
+	u8 *n_buf;
+	unsigned int n_len;
+	struct scatterlist d_sg;
+	u8 *d_buf;
+	unsigned int d_len;
+};
+
+struct ccp_rsa_req_ctx {
+	struct ccp_cmd cmd;
+};
+
 struct ccp_sha_req_ctx {
 	enum ccp_sha_type type;
 
@@ -201,6 +222,7 @@ struct ccp_ctx {
 
 	union {
 		struct ccp_aes_ctx aes;
+		struct ccp_rsa_ctx rsa;
 		struct ccp_sha_ctx sha;
 	} u;
 };
@@ -214,5 +236,7 @@ int ccp_register_aes_algs(struct list_head *head);
 int ccp_register_aes_cmac_algs(struct list_head *head);
 int ccp_register_aes_xts_algs(struct list_head *head);
 int ccp_register_sha_algs(struct list_head *head);
+int ccp_register_rsa_algs(void);
+void ccp_unregister_rsa_algs(void);
 
 #endif
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 8d2dbac..75a0978 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -20,6 +20,43 @@
 
 #include "ccp-dev.h"
 
+/* CCP version 3: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+	struct {
+		u16 size:7;
+		u16 encrypt:1;
+		u16 mode:3;
+		u16 type:2;
+	} aes;
+	struct {
+		u16 size:7;
+		u16 encrypt:1;
+		u16 rsvd:5;
+	} aes_xts;
+	struct {
+		u16 rsvd1:11;
+		u16 type:2;
+	} sha;
+	struct {
+		u16 size:13;
+	} rsa;
+	struct {
+		u16 byteswap:2;
+		u16 bitwise:3;
+		u16 rsvd:8;
+	} pt;
+	struct  {
+		u16 rsvd:13;
+	} zlib;
+	struct {
+		u16 size:8;
+		u16 mode:3;
+		u16 rsvd1:1;
+		u16 rsvd2:1;
+	} ecc;
+	u16 raw;
+};
+
 static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
 {
 	int start;
@@ -88,6 +125,7 @@ static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
 	 * are actually available, but reading that register resets it
 	 * and you could lose some error information.
 	 */
+
 	cmd_q->free_slots--;
 
 	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 82cc637..826782d 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <crypto/scatterwalk.h>
 #include <linux/ccp.h>
+#include <linux/delay.h>
 
 #include "ccp-dev.h"
 
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 1a3e0b5..d634565 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -19,7 +19,8 @@
 #include <linux/list.h>
 #include <crypto/aes.h>
 #include <crypto/sha.h>
-
+#include <linux/mpi.h>
+#include <crypto/internal/rsa.h>
 
 struct ccp_device;
 struct ccp_cmd;
@@ -293,6 +294,27 @@ struct ccp_sha_engine {
 				 * final sha cmd */
 };
 
+/**
+ * ccp_rsa_type - mode of RSA operation
+ *
+ * @CCP_RSA_MODE_STD: standard mode
+ */
+enum ccp_rsa_mode {
+	CCP_RSA_ENCRYPT = 0,
+	CCP_RSA_DECRYPT,
+	CCP_RSA__LAST,
+};
+
+struct ccp_rsa_key {
+	MPI e;
+	MPI n;
+	MPI d;
+};
+
+#define	CCP_RSA_MAXMOD	(4 * 1024 / 8)
+#define	CCP5_RSA_MAXMOD	(16 * 1024 / 8)
+#define	CCP5_RSA_MINMOD	(512 / 8)
+
 /***** RSA engine *****/
 /**
  * struct ccp_rsa_engine - CCP RSA operation
@@ -309,16 +331,26 @@ struct ccp_sha_engine {
  *   - key_size, exp, exp_len, mod, mod_len, src, dst, src_len
  */
 struct ccp_rsa_engine {
+	enum ccp_rsa_mode mode;
 	u32 key_size;		/* In bits */
 
+	struct ccp_rsa_key pkey;
+
+/* e */
 	struct scatterlist *exp;
 	u32 exp_len;		/* In bytes */
 
+/* n */
 	struct scatterlist *mod;
 	u32 mod_len;		/* In bytes */
 
+/* d */
+	struct scatterlist *d_sg;
+	unsigned int d_len;
+
 	struct scatterlist *src, *dst;
 	u32 src_len;		/* In bytes */
+	u32 dst_len;		/* In bytes */
 };
 
 /***** Passthru engine *****/

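For reference, a minimal sketch of how a kernel caller might exercise
the new akcipher once registered. This is illustrative only: the
function and callback names (try_ccp_rsa_encrypt, rsa_op_done) are
hypothetical, the key must be in the BER form rsa_parse_pub_key()
accepts, and in/out must be DMA-able (e.g. kmalloc'd) buffers:

	#include <linux/err.h>
	#include <linux/completion.h>
	#include <linux/scatterlist.h>
	#include <crypto/akcipher.h>

	struct rsa_op_wait {
		struct completion done;
		int err;
	};

	static void rsa_op_done(struct crypto_async_request *req, int err)
	{
		struct rsa_op_wait *w = req->data;

		if (err == -EINPROGRESS)
			return;		/* request was backlogged */
		w->err = err;
		complete(&w->done);
	}

	static int try_ccp_rsa_encrypt(const void *key, unsigned int keylen,
				       u8 *in, unsigned int in_len,
				       u8 *out, unsigned int out_len)
	{
		struct crypto_akcipher *tfm;
		struct akcipher_request *req;
		struct scatterlist src, dst;
		struct rsa_op_wait w = { .err = 0 };
		int ret;

		tfm = crypto_alloc_akcipher("rsa", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_akcipher_set_pub_key(tfm, key, keylen);
		if (ret)
			goto free_tfm;

		req = akcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto free_tfm;
		}

		init_completion(&w.done);
		sg_init_one(&src, in, in_len);
		sg_init_one(&dst, out, out_len);
		akcipher_request_set_crypt(req, &src, &dst, in_len, out_len);
		akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					      rsa_op_done, &w);

		ret = crypto_akcipher_encrypt(req);
		if (ret == -EINPROGRESS || ret == -EBUSY) {
			wait_for_completion(&w.done);
			ret = w.err;
		}

		akcipher_request_free(req);
	free_tfm:
		crypto_free_akcipher(tfm);
		return ret;
	}

Whether "rsa" resolves to rsa-ccp depends on relative cra_priority;
crypto_alloc_akcipher("rsa-ccp", 0, 0) would pin this driver explicitly.
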
* [PATCH 4/6] crypto: ccp - Add RSA support for a v5 ccp
  2016-10-13 14:52 [PATCH 0/6] Enable hashing and ciphers for v5 CCP Gary R Hook
                   ` (2 preceding siblings ...)
  2016-10-13 14:53 ` [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP Gary R Hook
@ 2016-10-13 14:53 ` Gary R Hook
  2016-10-13 21:23   ` Tom Lendacky
  2016-10-13 14:53 ` [PATCH 5/6] crypto: ccp - Enable support for AES GCM on v5 CCPs Gary R Hook
  2016-10-13 14:53 ` [PATCH 6/6] crypto: ccp - Enable 3DES function " Gary R Hook
  5 siblings, 1 reply; 16+ messages in thread
From: Gary R Hook @ 2016-10-13 14:53 UTC (permalink / raw)
  To: linux-crypto; +Cc: thomas.lendacky, herbert, davem

Take into account device implementation differences for
RSA.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
---
 drivers/crypto/ccp/ccp-crypto-rsa.c |   14 +++--
 drivers/crypto/ccp/ccp-crypto.h     |    3 -
 drivers/crypto/ccp/ccp-dev.h        |    2 -
 drivers/crypto/ccp/ccp-ops.c        |   97 +++++++++++++++++++++++------------
 4 files changed, 73 insertions(+), 43 deletions(-)

diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
index 7dab43b..94411de 100644
--- a/drivers/crypto/ccp/ccp-crypto-rsa.c
+++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
@@ -125,7 +125,7 @@ static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
 }
 
 static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
-			  unsigned int keylen, bool public)
+			  unsigned int keylen, bool private)
 {
 	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
 	struct rsa_key raw_key;
@@ -139,10 +139,10 @@ static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
 	memset(&raw_key, 0, sizeof(raw_key));
 
 	/* Code borrowed from crypto/rsa.c */
-	if (public)
-		ret = rsa_parse_pub_key(&raw_key, key, keylen);
-	else
+	if (private)
 		ret = rsa_parse_priv_key(&raw_key, key, keylen);
+	else
+		ret = rsa_parse_pub_key(&raw_key, key, keylen);
 	if (ret)
 		goto e_ret;
 
@@ -169,7 +169,7 @@ static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
 		goto e_nkey;
 	sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);
 
-	if (!public) {
+	if (private) {
 		ctx->u.rsa.pkey.d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
 		if (!ctx->u.rsa.pkey.d)
 			goto e_nkey;
@@ -196,13 +196,13 @@ e_ret:
 static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
 			      unsigned int keylen)
 {
-	return ccp_rsa_setkey(tfm, key, keylen, false);
+	return ccp_rsa_setkey(tfm, key, keylen, true);
 }
 
 static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
 			     unsigned int keylen)
 {
-	return ccp_rsa_setkey(tfm, key, keylen, true);
+	return ccp_rsa_setkey(tfm, key, keylen, false);
 }
 
 static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index 4a1d206..c6cf318 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -138,8 +138,7 @@ struct ccp_aes_cmac_exp_ctx {
 	u8 buf[AES_BLOCK_SIZE];
 };
 
-/*
- * SHA-related defines
+/* SHA-related defines
  * These values must be large enough to accommodate any variant
  */
 #define MAX_SHA_CONTEXT_SIZE	SHA512_DIGEST_SIZE
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 0d996fe..143f00f 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -193,6 +193,7 @@
 #define CCP_SHA_SB_COUNT		1
 
 #define CCP_RSA_MAX_WIDTH		4096
+#define CCP5_RSA_MAX_WIDTH		16384
 
 #define CCP_PASSTHRU_BLOCKSIZE		256
 #define CCP_PASSTHRU_MASKSIZE		32
@@ -515,7 +516,6 @@ struct ccp_op {
 		struct ccp_passthru_op passthru;
 		struct ccp_ecc_op ecc;
 	} u;
-	struct ccp_mem key;
 };
 
 static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 826782d..07b8dfb 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -1283,49 +1283,72 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	int i = 0;
 	int ret = 0;
 
-	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
-		return -EINVAL;
+	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {
+		if (rsa->key_size > CCP_RSA_MAX_WIDTH)
+			return -EINVAL;
+	} else {
+		if (rsa->key_size > CCP5_RSA_MAX_WIDTH)
+			return -EINVAL;
+	}
 
 	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
 		return -EINVAL;
 
-	/* The RSA modulus must precede the message being acted upon, so
-	 * it must be copied to a DMA area where the message and the
-	 * modulus can be concatenated.  Therefore the input buffer
-	 * length required is twice the output buffer length (which
-	 * must be a multiple of 256-bits).
-	 */
-	o_len = ((rsa->key_size + 255) / 256) * 32;
-	i_len = o_len * 2;
-
-	sb_count = o_len / CCP_SB_BYTES;
-
 	memset(&op, 0, sizeof(op));
 	op.cmd_q = cmd_q;
-	op.jobid = ccp_gen_jobid(cmd_q->ccp);
-	op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
-	if (!op.sb_key)
-		return -EIO;
+	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {
+		/* The RSA modulus must precede the message being acted upon, so
+		 * it must be copied to a DMA area where the message and the
+		 * modulus can be concatenated.  Therefore the input buffer
+		 * length required is twice the output buffer length (which
+		 * must be a multiple of 256-bits).
+		 */
+		sb_count = (rsa->key_size + CCP_SB_BYTES - 1) / CCP_SB_BYTES;
+		o_len = sb_count * 32; /* bytes */
+		i_len = o_len * 2; /* bytes */
+
+		op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
+								sb_count);
+		if (!op.sb_key)
+			return -EIO;
+	} else {
+		/* A version 5 device allows the key to be in memory */
+		o_len = rsa->mod_len;
+		i_len = o_len * 2; /* bytes */
+		op.sb_key = cmd_q->sb_key;
+	}
 
-	/* The RSA exponent may span multiple (32-byte) SB entries and must
-	 * be in little endian format. Reverse copy each 32-byte chunk
-	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
-	 * and each byte within that chunk and do not perform any byte swap
-	 * operations on the passthru operation.
-	 */
 	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
 	if (ret)
 		goto e_sb;
 
-	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
+	if (rsa->mode == CCP_RSA_ENCRYPT)
+		ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0,
+					      rsa->exp_len);
+	else
+		ret = ccp_reverse_set_dm_area(&exp, 0, rsa->d_sg, 0,
+					      rsa->d_len);
 	if (ret)
 		goto e_exp;
-	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
-			     CCP_PASSTHRU_BYTESWAP_NOOP);
-	if (ret) {
-		cmd->engine_error = cmd_q->cmd_error;
-		goto e_exp;
+
+	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {
+		/* The RSA exponent may span multiple (32-byte) KSB entries and
+		 * must be in little endian format. Reverse copy each 32-byte
+		 * chunk of the exponent (En chunk to E0 chunk, E(n-1) chunk to
+		 * E1 chunk) and each byte within that chunk and do not perform
+		 * any byte swap operations on the passthru operation.
+		 */
+		ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
+				     CCP_PASSTHRU_BYTESWAP_NOOP);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_exp;
+		}
+	} else {
+		op.exp.u.dma.address = exp.dma.address;
+		op.exp.u.dma.offset = 0;
 	}
 
 	/* Concatenate the modulus and the message. Both the modulus and
@@ -1345,7 +1368,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	src.address -= o_len;	/* Reset the address to original value */
 
 	/* Prepare the output area for the operation */
-	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
+	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->dst_len,
 			    o_len, DMA_FROM_DEVICE);
 	if (ret)
 		goto e_src;
@@ -1358,7 +1381,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	op.dst.u.dma.offset = 0;
 	op.dst.u.dma.length = o_len;
 
-	op.u.rsa.mod_size = rsa->key_size;
+	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0))
+		op.u.rsa.mod_size = rsa->key_size * 8; /* In bits */
+	else
+		op.u.rsa.mod_size = rsa->key_size;
 	op.u.rsa.input_len = i_len;
 
 	ret = cmd_q->ccp->vdata->perform->rsa(&op);
@@ -1366,8 +1392,12 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
 	}
+	/* Return the length of the result, ignoring leading zeroes */
+	i = o_len - 1;
+	while (i > 0 && !dst.dm_wa.address[i])
+		i--;
+	rsa->d_len = i + 1;
 
-	ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);
+	ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->d_len);
 
 e_dst:
 	ccp_free_data(&dst, cmd_q);
@@ -1379,7 +1409,8 @@ e_exp:
 	ccp_dm_free(&exp);
 
 e_sb:
-	cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
+	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0))
+		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
 
 	return ret;
 }

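To make the sizing above concrete (a worked reading of the code, with
key_size now carried in bytes by the akcipher glue), take a 2048-bit
modulus on a v3 device:

	key_size = 256 bytes (2048 bits)
	sb_count = (256 + 31) / 32 = 8 KSB slots
	o_len    = 8 * 32  = 256 bytes   (modulus/output width)
	i_len    = 2 * 256 = 512 bytes   (modulus || message)
	op.u.rsa.mod_size = 256 * 8 = 2048 bits

On the v5 path the key stays in system memory, so o_len is simply
mod_len and no KSB slots are allocated; hence the version check around
sbfree() in the exit path.
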
* [PATCH 5/6] crypto: ccp - Enable support for AES GCM on v5 CCPs
  2016-10-13 14:52 [PATCH 0/6] Enable hashing and ciphers for v5 CCP Gary R Hook
                   ` (3 preceding siblings ...)
  2016-10-13 14:53 ` [PATCH 4/6] crypto: ccp - Add RSA support for a v5 ccp Gary R Hook
@ 2016-10-13 14:53 ` Gary R Hook
  2016-10-13 21:54   ` Tom Lendacky
  2016-10-13 14:53 ` [PATCH 6/6] crypto: ccp - Enable 3DES function " Gary R Hook
  5 siblings, 1 reply; 16+ messages in thread
From: Gary R Hook @ 2016-10-13 14:53 UTC (permalink / raw)
  To: linux-crypto; +Cc: thomas.lendacky, herbert, davem

A version 5 device provides the primitive commands
required for AES GCM. This patch adds support for
GCM encryption and decryption.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
---
 drivers/crypto/ccp/Makefile                |    1 
 drivers/crypto/ccp/ccp-crypto-aes-galois.c |  252 +++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp-crypto-main.c       |   12 +
 drivers/crypto/ccp/ccp-crypto.h            |   14 +
 drivers/crypto/ccp/ccp-dev-v5.c            |    2 
 drivers/crypto/ccp/ccp-dev.h               |    1 
 drivers/crypto/ccp/ccp-ops.c               |  262 ++++++++++++++++++++++++++++
 include/linux/ccp.h                        |    9 +
 8 files changed, 553 insertions(+)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-galois.c

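As context for ccp_run_aes_gcm_cmd() in this patch, the command
sequence follows the standard GCM construction (NIST SP 800-38D):
GHASH over the AAD, GCTR over the plaintext/ciphertext with the counter
seeded from IV || 0^31 || 1, then a final GHASH over the bit lengths of
the AAD and the text, which the GHASHFINAL step folds into the tag.
The 16-byte length block is, in effect, the following (aad_len and
text_len mirror aes->aad_len and ilen; types tightened to __be64, where
the posted code uses unsigned long long for the same layout):

	__be64 lenblock[2];

	lenblock[0] = cpu_to_be64(aad_len * 8ULL);	/* len(A) in bits */
	lenblock[1] = cpu_to_be64(text_len * 8ULL);	/* len(C) in bits */

On decryption the input length is reduced by AES_BLOCK_SIZE so that the
trailing tag can be compared against the engine's computed value.
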
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 23f89b7..fd77225 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -13,4 +13,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
 		   ccp-crypto-aes-cmac.o \
 		   ccp-crypto-aes-xts.o \
 		   ccp-crypto-rsa.o \
+		   ccp-crypto-aes-galois.o \
 		   ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
new file mode 100644
index 0000000..5da324f
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -0,0 +1,252 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) AES crypto API support
+ *
+ * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/internal/aead.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/ctr.h>
+#include <crypto/scatterwalk.h>
+
+#include "ccp-crypto.h"
+
+#define	AES_GCM_IVSIZE	12
+
+static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
+{
+	return ret;
+}
+
+static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+			      unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+
+	switch (key_len) {
+	case AES_KEYSIZE_128:
+		ctx->u.aes.type = CCP_AES_TYPE_128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->u.aes.type = CCP_AES_TYPE_192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->u.aes.type = CCP_AES_TYPE_256;
+		break;
+	default:
+		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->u.aes.mode = CCP_AES_MODE_GCM;
+	ctx->u.aes.key_len = key_len;
+
+	memcpy(ctx->u.aes.key, key, key_len);
+	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
+
+	return 0;
+}
+
+static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+				   unsigned int authsize)
+{
+	return 0;
+}
+
+static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
+{
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+	struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
+	struct scatterlist *iv_sg = NULL;
+	unsigned int iv_len = 0;
+	int i;
+	int ret = 0;
+
+	if (!ctx->u.aes.key_len)
+		return -EINVAL;
+
+	if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
+		return -EINVAL;
+
+	if (!req->iv)
+		return -EINVAL;
+
+	/*
+	 * 5 parts:
+	 *   plaintext/ciphertext input
+	 *   AAD
+	 *   key
+	 *   IV
+	 *   Destination+tag buffer
+	 */
+
+	/* Copy the IV and initialize a scatterlist */
+	memset(rctx->iv, 0, AES_BLOCK_SIZE);
+	memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
+	for (i = 0; i < 3; i++)
+		rctx->iv[i + AES_GCM_IVSIZE] = 0;
+	rctx->iv[AES_BLOCK_SIZE - 1] = 1;
+	iv_sg = &rctx->iv_sg;
+	iv_len = AES_BLOCK_SIZE;
+	sg_init_one(iv_sg, rctx->iv, iv_len);
+
+	/* The AAD + plaintext are concatenated in the src buffer */
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_AES;
+	rctx->cmd.u.aes.type = ctx->u.aes.type;
+	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+	rctx->cmd.u.aes.action =
+		(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
+	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
+	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
+	rctx->cmd.u.aes.iv = iv_sg;
+	rctx->cmd.u.aes.iv_len = iv_len;
+	rctx->cmd.u.aes.src = req->src;
+	rctx->cmd.u.aes.src_len = req->cryptlen;
+	rctx->cmd.u.aes.aad_len = req->assoclen;
+
+	/* The cipher text + the tag are in the dst buffer */
+	rctx->cmd.u.aes.dst = req->dst;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_aes_gcm_encrypt(struct aead_request *req)
+{
+	return ccp_aes_gcm_crypt(req, true);
+}
+
+static int ccp_aes_gcm_decrypt(struct aead_request *req)
+{
+	return ccp_aes_gcm_crypt(req, false);
+}
+
+static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
+{
+	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
+
+	ctx->complete = ccp_aes_gcm_complete;
+	ctx->u.aes.key_len = 0;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
+
+	return 0;
+}
+
+static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct aead_alg ccp_aes_gcm_defaults = {
+	.setkey = ccp_aes_gcm_setkey,
+	.setauthsize = ccp_aes_gcm_setauthsize,
+	.encrypt = ccp_aes_gcm_encrypt,
+	.decrypt = ccp_aes_gcm_decrypt,
+	.init = ccp_aes_gcm_cra_init,
+	.ivsize = AES_GCM_IVSIZE,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.base = {
+		.cra_flags	= CRYPTO_ALG_ASYNC |
+				  CRYPTO_ALG_KERN_DRIVER_ONLY |
+				  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct ccp_ctx),
+		.cra_priority	= CCP_CRA_PRIORITY,
+		.cra_exit	= ccp_aes_gcm_cra_exit,
+		.cra_module	= THIS_MODULE,
+	},
+};
+
+struct ccp_aes_aead_def {
+	enum ccp_aes_mode mode;
+	unsigned int version;
+	const char *name;
+	const char *driver_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	struct aead_alg *alg_defaults;
+};
+
+static struct ccp_aes_aead_def aes_aead_algs[] = {
+	{
+		.mode		= CCP_AES_MODE_GHASH,
+		.version	= CCP_VERSION(5, 0),
+		.name		= "gcm(aes)",
+		.driver_name	= "gcm-aes-ccp",
+		.blocksize	= 1,
+		.ivsize		= AES_BLOCK_SIZE,
+		.alg_defaults	= &ccp_aes_gcm_defaults,
+	},
+};
+
+static int ccp_register_aes_aead(struct list_head *head,
+				 const struct ccp_aes_aead_def *def)
+{
+	struct ccp_crypto_aead *ccp_aead;
+	struct aead_alg *alg;
+	int ret;
+
+	ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL);
+	if (!ccp_aead)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_aead->entry);
+
+	ccp_aead->mode = def->mode;
+
+	/* Copy the defaults and override as necessary */
+	alg = &ccp_aead->alg;
+	*alg = *def->alg_defaults;
+	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 def->driver_name);
+	alg->base.cra_blocksize = def->blocksize;
+	alg->base.cra_ablkcipher.ivsize = def->ivsize;
+
+	ret = crypto_register_aead(alg);
+	if (ret) {
+		pr_err("%s ablkcipher algorithm registration error (%d)\n",
+		       alg->base.cra_name, ret);
+		kfree(ccp_aead);
+		return ret;
+	}
+
+	list_add(&ccp_aead->entry, head);
+
+	return 0;
+}
+
+int ccp_register_aes_aeads(struct list_head *head)
+{
+	int i, ret;
+	unsigned int ccpversion = ccp_version();
+
+	for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) {
+		if (aes_aead_algs[i].version > ccpversion)
+			continue;
+		ret = ccp_register_aes_aead(head, &aes_aead_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index f3c4c25..103a7b3 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -40,6 +40,7 @@ MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
 /* List heads for the supported algorithms */
 static LIST_HEAD(hash_algs);
 static LIST_HEAD(cipher_algs);
+static LIST_HEAD(aead_algs);
 
 /* For any tfm, requests for that tfm must be returned on the order
  * received.  With multiple queues available, the CCP can process more
@@ -339,6 +340,10 @@ static int ccp_register_algs(void)
 		ret = ccp_register_aes_xts_algs(&cipher_algs);
 		if (ret)
 			return ret;
+
+		ret = ccp_register_aes_aeads(&aead_algs);
+		if (ret)
+			return ret;
 	}
 
 	if (!sha_disable) {
@@ -362,6 +367,7 @@ static void ccp_unregister_algs(void)
 {
 	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
 	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
+	struct ccp_crypto_aead *aead_alg, *aead_tmp;
 
 	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
 		crypto_unregister_ahash(&ahash_alg->alg);
@@ -377,6 +383,12 @@ static void ccp_unregister_algs(void)
 
 	if (!rsa_disable)
 		ccp_unregister_rsa_algs();
+
+	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
+		crypto_unregister_aead(&aead_alg->alg);
+		list_del(&aead_alg->entry);
+		kfree(aead_alg);
+	}
 }
 
 static int ccp_crypto_init(void)
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index c6cf318..b2918f6 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -19,6 +19,8 @@
 #include <linux/ccp.h>
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/internal/aead.h>
+#include <crypto/aead.h>
 #include <crypto/ctr.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
@@ -34,6 +36,14 @@ struct ccp_crypto_ablkcipher_alg {
 	struct crypto_alg alg;
 };
 
+struct ccp_crypto_aead {
+	struct list_head entry;
+
+	u32 mode;
+
+	struct aead_alg alg;
+};
+
 struct ccp_crypto_ahash_alg {
 	struct list_head entry;
 
@@ -96,6 +106,9 @@ struct ccp_aes_req_ctx {
 	struct scatterlist iv_sg;
 	u8 iv[AES_BLOCK_SIZE];
 
+	struct scatterlist tag_sg;
+	u8 tag[AES_BLOCK_SIZE];
+
 	/* Fields used for RFC3686 requests */
 	u8 *rfc3686_info;
 	u8 rfc3686_iv[AES_BLOCK_SIZE];
@@ -234,6 +247,7 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
 int ccp_register_aes_algs(struct list_head *head);
 int ccp_register_aes_cmac_algs(struct list_head *head);
 int ccp_register_aes_xts_algs(struct list_head *head);
+int ccp_register_aes_aeads(struct list_head *head);
 int ccp_register_sha_algs(struct list_head *head);
 int ccp_register_rsa_algs(void);
 void ccp_unregister_rsa_algs(void);
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index faf3cb3..dcae391 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -279,6 +279,8 @@ static int ccp5_perform_aes(struct ccp_op *op)
 	CCP_AES_TYPE(&function) = op->u.aes.type;
 	if (op->u.aes.mode == CCP_AES_MODE_CFB)
 		CCP_AES_SIZE(&function) = 0x7f;
+	if ((op->u.aes.mode == CCP_AES_MODE_GCTR) && op->eom)
+		CCP_AES_SIZE(&function) = op->u.aes.size;
 
 	CCP5_CMD_FUNCTION(&desc) = function.raw;
 
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index 143f00f..a2214ac 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -467,6 +467,7 @@ struct ccp_aes_op {
 	enum ccp_aes_type type;
 	enum ccp_aes_mode mode;
 	enum ccp_aes_action action;
+	unsigned int size;
 };
 
 struct ccp_xts_aes_op {
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 07b8dfb..de28867 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -601,6 +601,265 @@ e_key:
 	return ret;
 }
 
+static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+			       struct ccp_cmd *cmd)
+{
+	struct ccp_aes_engine *aes = &cmd->u.aes;
+	struct ccp_dm_workarea key, ctx, final_wa, tag;
+	struct ccp_data src, dst;
+	struct ccp_data aad;
+	struct ccp_op op;
+
+	unsigned long long *final;
+	unsigned int dm_offset;
+	unsigned int ilen;
+	bool in_place = true; /* Default value */
+	int ret;
+
+	struct scatterlist *p_inp, sg_inp[2];
+	struct scatterlist *p_tag, sg_tag[2];
+	struct scatterlist *p_outp, sg_outp[2];
+	struct scatterlist *p_aad;
+
+	if (!aes->iv)
+		return -EINVAL;
+
+	if (!((aes->key_len == AES_KEYSIZE_128) ||
+		(aes->key_len == AES_KEYSIZE_192) ||
+		(aes->key_len == AES_KEYSIZE_256)))
+		return -EINVAL;
+
+	if (!aes->key) /* Gotta have a key SGL */
+		return -EINVAL;
+
+	/* First, decompose the source buffer into AAD & PT,
+	 * and the destination buffer into AAD, CT & tag, or
+	 * the input into CT & tag.
+	 * It is expected that the input and output SGs will
+	 * be valid, even if the AAD and input lengths are 0.
+	 */
+	p_aad = aes->src;
+	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
+	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
+	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+		ilen = aes->src_len;
+		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
+	} else {
+		/* Input length for decryption includes tag */
+		ilen = aes->src_len - AES_BLOCK_SIZE;
+		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+	}
+
+	ret = -EIO;
+	memset(&op, 0, sizeof(op));
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	op.init = 1;
+	op.u.aes.type = aes->type;
+
+	/* Copy the key to the LSB */
+	ret = ccp_init_dm_workarea(&key, cmd_q,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	dm_offset = CCP_SB_BYTES - aes->key_len;
+	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_key;
+	}
+
+	/* Copy the context (IV) to the LSB.
+	 * There is an assumption here that the IV is 96 bits in length, plus
+	 * a 32-bit block counter. If no IV is present, use a zeroed buffer.
+	 */
+	ret = ccp_init_dm_workarea(&ctx, cmd_q,
+				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		goto e_key;
+
+	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
+	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_ctx;
+	}
+
+	op.init = 1;
+	if (aes->aad_len > 0) {
+		/* Step 1: Run a GHASH over the Additional Authenticated Data */
+		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+				    AES_BLOCK_SIZE,
+				    DMA_TO_DEVICE);
+		if (ret)
+			goto e_ctx;
+
+		op.u.aes.mode = CCP_AES_MODE_GHASH;
+		op.u.aes.action = CCP_AES_GHASHAAD;
+
+		while (aad.sg_wa.bytes_left) {
+			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+
+			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_aad;
+			}
+
+			ccp_process_data(&aad, NULL, &op);
+			op.init = 0;
+		}
+	}
+
+	op.u.aes.mode = CCP_AES_MODE_GCTR;
+	if (aes->action == CCP_AES_ACTION_ENCRYPT)
+		op.u.aes.action = CCP_AES_ACTION_ENCRYPT;
+	else
+		op.u.aes.action = CCP_AES_ACTION_DECRYPT;
+
+	if (ilen > 0) {
+		/* Step 2: Run a GCTR over the plaintext */
+		in_place = (sg_virt(p_inp) == sg_virt(p_outp));
+
+		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+				    AES_BLOCK_SIZE,
+				    in_place ? DMA_BIDIRECTIONAL
+					     : DMA_TO_DEVICE);
+		if (ret)
+			goto e_ctx;
+
+		if (in_place) {
+			dst = src;
+		} else {
+			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
+			if (ret)
+				goto e_src;
+		}
+
+		op.soc = 0;
+		op.eom = 0;
+		op.init = 1;
+		while (src.sg_wa.bytes_left) {
+			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+			if (!src.sg_wa.bytes_left) {
+				unsigned int nbytes = aes->src_len
+						      % AES_BLOCK_SIZE;
+
+				if (nbytes) {
+					op.eom = 1;
+					op.u.aes.size = (nbytes * 8) - 1;
+				}
+			}
+
+			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			if (ret) {
+				cmd->engine_error = cmd_q->cmd_error;
+				goto e_dst;
+			}
+
+			ccp_process_data(&src, &dst, &op);
+			op.init = 0;
+		}
+	}
+
+	/* Step 3: Update the IV portion of the context with the original IV */
+	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			       CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+
+	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_dst;
+	}
+
+	/* Step 4: Concatenate the lengths of the AAD and source, and
+	 * hash that 16 byte buffer.
+	 */
+	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+				   DMA_BIDIRECTIONAL);
+	if (ret)
+		goto e_dst;
+	final = (__be64 *)final_wa.address;
+	final[0] = cpu_to_be64(aes->aad_len * 8);
+	final[1] = cpu_to_be64(ilen * 8);
+
+	op.u.aes.mode = CCP_AES_MODE_GHASH;
+	op.u.aes.action = CCP_AES_GHASHFINAL;
+	op.src.type = CCP_MEMTYPE_SYSTEM;
+	op.src.u.dma.address = final_wa.dma.address;
+	op.src.u.dma.length = AES_BLOCK_SIZE;
+	op.dst.type = CCP_MEMTYPE_SYSTEM;
+	op.dst.u.dma.address = final_wa.dma.address;
+	op.dst.u.dma.length = AES_BLOCK_SIZE;
+	op.eom = 1;
+	op.u.aes.size = 0;
+	ret = cmd_q->ccp->vdata->perform->aes(&op);
+	if (ret)
+		goto e_dst;
+
+	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+		/* Put the ciphered tag after the ciphertext. */
+		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
+	} else {
+		/* Does this ciphered tag match the input? */
+		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
+					   DMA_BIDIRECTIONAL);
+		if (ret)
+			goto e_tag;
+		ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+
+		ret = memcmp(tag.address, final_wa.address,
+			     AES_BLOCK_SIZE) ? -EBADMSG : 0;
+		ccp_dm_free(&tag);
+	}
+
+e_tag:
+	ccp_dm_free(&final_wa);
+
+e_dst:
+	if (ilen > 0 && !in_place)
+		ccp_free_data(&dst, cmd_q);
+
+e_src:
+	if (ilen > 0)
+		ccp_free_data(&src, cmd_q);
+
+e_aad:
+	if (aes->aad_len)
+		ccp_free_data(&aad, cmd_q);
+
+e_ctx:
+	ccp_dm_free(&ctx);
+
+e_key:
+	ccp_dm_free(&key);
+
+	return ret;
+}
+
 static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
@@ -614,6 +873,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	if (aes->mode == CCP_AES_MODE_CMAC)
 		return ccp_run_aes_cmac_cmd(cmd_q, cmd);
 
+	if (aes->mode == CCP_AES_MODE_GCM)
+		return ccp_run_aes_gcm_cmd(cmd_q, cmd);
+
 	if (!((aes->key_len == AES_KEYSIZE_128) ||
 	      (aes->key_len == AES_KEYSIZE_192) ||
 	      (aes->key_len == AES_KEYSIZE_256)))
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index d634565..f90f8ba 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -124,6 +124,10 @@ enum ccp_aes_mode {
 	CCP_AES_MODE_CFB,
 	CCP_AES_MODE_CTR,
 	CCP_AES_MODE_CMAC,
+	CCP_AES_MODE_GHASH,
+	CCP_AES_MODE_GCTR,
+	CCP_AES_MODE_GCM,
+	CCP_AES_MODE_GMAC,
 	CCP_AES_MODE__LAST,
 };
 
@@ -138,6 +142,9 @@ enum ccp_aes_action {
 	CCP_AES_ACTION_ENCRYPT,
 	CCP_AES_ACTION__LAST,
 };
+/* Overloaded field */
+#define	CCP_AES_GHASHAAD	CCP_AES_ACTION_DECRYPT
+#define	CCP_AES_GHASHFINAL	CCP_AES_ACTION_ENCRYPT
 
 /**
  * struct ccp_aes_engine - CCP AES operation
@@ -182,6 +189,8 @@ struct ccp_aes_engine {
 	struct scatterlist *cmac_key;	/* K1/K2 cmac key required for
 					 * final cmac cmd */
 	u32 cmac_key_len;	/* In bytes */
+
+	u32 aad_len;		/* In bytes */
 };
 
 /***** XTS-AES engine *****/

^ permalink raw reply related	[flat|nested] 16+ messages in thread
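
The buffer handling in ccp_run_aes_gcm_cmd() above is easier to follow with
concrete numbers. Below is a minimal userspace model of the split performed
by the scatterwalk_ffwd() calls: sizes and offsets only, with none of the
driver's scatterlist or DMA machinery, and with names invented here for
illustration.

#include <stdbool.h>
#include <stdio.h>

#define AES_BLOCK_SIZE 16

struct gcm_layout {
	unsigned int ilen;	/* bytes that pass through GCTR */
	unsigned int tag_off;	/* tag offset within the payload */
};

/* Mirrors the encrypt/decrypt split at the top of the function: on
 * encryption the whole payload is plaintext and the tag is appended
 * to the ciphertext; on decryption the trailing block of the input
 * is the tag, so it is excluded from ilen.
 */
static struct gcm_layout gcm_split(unsigned int src_len, bool encrypt)
{
	struct gcm_layout l;

	l.ilen = encrypt ? src_len : src_len - AES_BLOCK_SIZE;
	l.tag_off = l.ilen;
	return l;
}

int main(void)
{
	struct gcm_layout e = gcm_split(64, true);
	struct gcm_layout d = gcm_split(64 + AES_BLOCK_SIZE, false);

	printf("encrypt: ilen=%u, tag at offset %u\n", e.ilen, e.tag_off);
	printf("decrypt: ilen=%u, tag at offset %u\n", d.ilen, d.tag_off);
	return 0;
}

Both cases print ilen=64 with the tag at offset 64: the AAD prefix has
already been skipped by scatterwalk_ffwd(), so the payload layout is the
same in both directions.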

* [PATCH 6/6] crypto: ccp - Enable 3DES function on v5 CCPs
  2016-10-13 14:52 [PATCH 0/6] Enable hashing and ciphers for v5 CCP Gary R Hook
                   ` (4 preceding siblings ...)
  2016-10-13 14:53 ` [PATCH 5/6] crypto: ccp - Enable support for AES GCM on v5 CCPs Gary R Hook
@ 2016-10-13 14:53 ` Gary R Hook
  2016-10-13 22:13   ` Tom Lendacky
  5 siblings, 1 reply; 16+ messages in thread
From: Gary R Hook @ 2016-10-13 14:53 UTC (permalink / raw)
  To: linux-crypto; +Cc: thomas.lendacky, herbert, davem

Wire up support for Triple DES in ECB and CBC modes.

Signed-off-by: Gary R Hook <gary.hook@amd.com>
---
 drivers/crypto/ccp/Makefile          |    1 
 drivers/crypto/ccp/ccp-crypto-des3.c |  254 ++++++++++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp-crypto-main.c |   10 +
 drivers/crypto/ccp/ccp-crypto.h      |   25 +++
 drivers/crypto/ccp/ccp-dev-v3.c      |    1 
 drivers/crypto/ccp/ccp-dev-v5.c      |   65 ++++++++-
 drivers/crypto/ccp/ccp-dev.h         |   18 ++
 drivers/crypto/ccp/ccp-ops.c         |  201 +++++++++++++++++++++++++++
 drivers/crypto/ccp/ccp-pci.c         |    2 
 include/linux/ccp.h                  |   57 +++++++-
 10 files changed, 624 insertions(+), 10 deletions(-)
 create mode 100644 drivers/crypto/ccp/ccp-crypto-des3.c

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index fd77225..563594a 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -14,4 +14,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
 		   ccp-crypto-aes-xts.o \
 		   ccp-crypto-rsa.o \
 		   ccp-crypto-aes-galois.o \
+		   ccp-crypto-des3.o \
 		   ccp-crypto-sha.o
diff --git a/drivers/crypto/ccp/ccp-crypto-des3.c b/drivers/crypto/ccp/ccp-crypto-des3.c
new file mode 100644
index 0000000..5af7347
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-des3.c
@@ -0,0 +1,254 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) DES3 crypto API support
+ *
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ *
+ * Author: Gary R Hook <ghook@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/des.h>
+
+#include "ccp-crypto.h"
+
+static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
+{
+	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+
+	if (ret)
+		return ret;
+
+	if (ctx->u.des3.mode != CCP_DES3_MODE_ECB)
+		memcpy(req->info, rctx->iv, DES3_EDE_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int ccp_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+		unsigned int key_len)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
+	struct ccp_crypto_ablkcipher_alg *alg =
+		ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));
+	u32 *flags = &tfm->base.crt_flags;
+
+
+	/* From des_generic.c:
+	 *
+	 * RFC2451:
+	 *   If the first two or last two independent 64-bit keys are
+	 *   equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
+	 *   same as DES.  Implementers MUST reject keys that exhibit this
+	 *   property.
+	 */
+	const u32 *K = (const u32 *)key;
+
+	if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+		     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+		     (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		*flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	/* It's not clear that there is any support for a keysize of 112.
+	 * If needed, the caller should make K1 == K3
+	 */
+	ctx->u.des3.type = CCP_DES3_TYPE_168;
+	ctx->u.des3.mode = alg->mode;
+	ctx->u.des3.key_len = key_len;
+
+	memcpy(ctx->u.des3.key, key, key_len);
+	sg_init_one(&ctx->u.des3.key_sg, ctx->u.des3.key, key_len);
+
+	return 0;
+}
+
+static int ccp_des3_crypt(struct ablkcipher_request *req, bool encrypt)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ccp_des3_req_ctx *rctx = ablkcipher_request_ctx(req);
+	struct scatterlist *iv_sg = NULL;
+	unsigned int iv_len = 0;
+	int ret;
+
+	if (!ctx->u.des3.key_len)
+		return -EINVAL;
+
+	if (((ctx->u.des3.mode == CCP_DES3_MODE_ECB) ||
+	     (ctx->u.des3.mode == CCP_DES3_MODE_CBC)) &&
+	    (req->nbytes & (DES3_EDE_BLOCK_SIZE - 1)))
+		return -EINVAL;
+
+	if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) {
+		if (!req->info)
+			return -EINVAL;
+
+		memcpy(rctx->iv, req->info, DES3_EDE_BLOCK_SIZE);
+		iv_sg = &rctx->iv_sg;
+		iv_len = DES3_EDE_BLOCK_SIZE;
+		sg_init_one(iv_sg, rctx->iv, iv_len);
+	}
+
+	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+	INIT_LIST_HEAD(&rctx->cmd.entry);
+	rctx->cmd.engine = CCP_ENGINE_DES3;
+	rctx->cmd.u.des3.type = ctx->u.des3.type;
+	rctx->cmd.u.des3.mode = ctx->u.des3.mode;
+	rctx->cmd.u.des3.action = (encrypt)
+				  ? CCP_DES3_ACTION_ENCRYPT
+				  : CCP_DES3_ACTION_DECRYPT;
+	rctx->cmd.u.des3.key = &ctx->u.des3.key_sg;
+	rctx->cmd.u.des3.key_len = ctx->u.des3.key_len;
+	rctx->cmd.u.des3.iv = iv_sg;
+	rctx->cmd.u.des3.iv_len = iv_len;
+	rctx->cmd.u.des3.src = req->src;
+	rctx->cmd.u.des3.src_len = req->nbytes;
+	rctx->cmd.u.des3.dst = req->dst;
+
+	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+	return ret;
+}
+
+static int ccp_des3_encrypt(struct ablkcipher_request *req)
+{
+	return ccp_des3_crypt(req, true);
+}
+
+static int ccp_des3_decrypt(struct ablkcipher_request *req)
+{
+	return ccp_des3_crypt(req, false);
+}
+
+static int ccp_des3_cra_init(struct crypto_tfm *tfm)
+{
+	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->complete = ccp_des3_complete;
+	ctx->u.des3.key_len = 0;
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_des3_req_ctx);
+
+	return 0;
+}
+
+static void ccp_des3_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
+static struct crypto_alg ccp_des3_defaults = {
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+		CRYPTO_ALG_ASYNC |
+		CRYPTO_ALG_KERN_DRIVER_ONLY |
+		CRYPTO_ALG_NEED_FALLBACK,
+	.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct ccp_ctx),
+	.cra_priority	= CCP_CRA_PRIORITY,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_init	= ccp_des3_cra_init,
+	.cra_exit	= ccp_des3_cra_exit,
+	.cra_module	= THIS_MODULE,
+	.cra_ablkcipher	= {
+		.setkey		= ccp_des3_setkey,
+		.encrypt	= ccp_des3_encrypt,
+		.decrypt	= ccp_des3_decrypt,
+		.min_keysize	= DES3_EDE_KEY_SIZE,
+		.max_keysize	= DES3_EDE_KEY_SIZE,
+	},
+};
+
+struct ccp_des3_def {
+	enum ccp_des3_mode mode;
+	unsigned int version;
+	const char *name;
+	const char *driver_name;
+	unsigned int blocksize;
+	unsigned int ivsize;
+	struct crypto_alg *alg_defaults;
+};
+
+static struct ccp_des3_def des3_algs[] = {
+	{
+		.mode		= CCP_DES3_MODE_ECB,
+		.version	= CCP_VERSION(5, 0),
+		.name		= "ecb(des3_ede)",
+		.driver_name	= "ecb-des3-ccp",
+		.blocksize	= DES3_EDE_BLOCK_SIZE,
+		.ivsize		= 0,
+		.alg_defaults	= &ccp_des3_defaults,
+	},
+	{
+		.mode		= CCP_DES3_MODE_CBC,
+		.version	= CCP_VERSION(5, 0),
+		.name		= "cbc(des3_ede)",
+		.driver_name	= "cbc-des3-ccp",
+		.blocksize	= DES3_EDE_BLOCK_SIZE,
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.alg_defaults	= &ccp_des3_defaults,
+	},
+};
+
+static int ccp_register_des3_alg(struct list_head *head,
+				 const struct ccp_des3_def *def)
+{
+	struct ccp_crypto_ablkcipher_alg *ccp_alg;
+	struct crypto_alg *alg;
+	int ret;
+
+	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
+	if (!ccp_alg)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ccp_alg->entry);
+
+	ccp_alg->mode = def->mode;
+
+	/* Copy the defaults and override as necessary */
+	alg = &ccp_alg->alg;
+	*alg = *def->alg_defaults;
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+			def->driver_name);
+	alg->cra_blocksize = def->blocksize;
+	alg->cra_ablkcipher.ivsize = def->ivsize;
+
+	ret = crypto_register_alg(alg);
+	if (ret) {
+		pr_err("%s ablkcipher algorithm registration error (%d)\n",
+				alg->cra_name, ret);
+		kfree(ccp_alg);
+		return ret;
+	}
+
+	list_add(&ccp_alg->entry, head);
+
+	return 0;
+}
+
+int ccp_register_des3_algs(struct list_head *head)
+{
+	int i, ret;
+	unsigned int ccpversion = ccp_version();
+
+	for (i = 0; i < ARRAY_SIZE(des3_algs); i++) {
+		if (des3_algs[i].version > ccpversion)
+			continue;
+		ret = ccp_register_des3_alg(head, &des3_algs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 103a7b3..4b35329 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -37,6 +37,10 @@ static unsigned int rsa_disable;
 module_param(rsa_disable, uint, 0444);
 MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
 
+static unsigned int des3_disable;
+module_param(des3_disable, uint, 0444);
+MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");
+
 /* List heads for the supported algorithms */
 static LIST_HEAD(hash_algs);
 static LIST_HEAD(cipher_algs);
@@ -346,6 +350,12 @@ static int ccp_register_algs(void)
 			return ret;
 	}
 
+	if (!des3_disable) {
+		ret = ccp_register_des3_algs(&cipher_algs);
+		if (ret)
+			return ret;
+	}
+
 	if (!sha_disable) {
 		ret = ccp_register_sha_algs(&hash_algs);
 		if (ret)
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index b2918f6..7b7f3b2 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -26,6 +26,8 @@
 #include <crypto/sha.h>
 #include <crypto/internal/rsa.h>
 
+#define	CCP_LOG_LEVEL	KERN_INFO
+
 #define CCP_CRA_PRIORITY	300
 
 struct ccp_crypto_ablkcipher_alg {
@@ -151,7 +153,26 @@ struct ccp_aes_cmac_exp_ctx {
 	u8 buf[AES_BLOCK_SIZE];
 };
 
-/* SHA-related defines
+/***** 3DES related defines *****/
+struct ccp_des3_ctx {
+	enum ccp_engine engine;
+	enum ccp_des3_type type;
+	enum ccp_des3_mode mode;
+
+	struct scatterlist key_sg;
+	unsigned int key_len;
+	u8 key[AES_MAX_KEY_SIZE];
+};
+
+struct ccp_des3_req_ctx {
+	struct scatterlist iv_sg;
+	u8 iv[AES_BLOCK_SIZE];
+
+	struct ccp_cmd cmd;
+};
+
+/*
+ * SHA-related defines
  * These values must be large enough to accommodate any variant
  */
 #define MAX_SHA_CONTEXT_SIZE	SHA512_DIGEST_SIZE
@@ -236,6 +257,7 @@ struct ccp_ctx {
 		struct ccp_aes_ctx aes;
 		struct ccp_rsa_ctx rsa;
 		struct ccp_sha_ctx sha;
+		struct ccp_des3_ctx des3;
 	} u;
 };
 
@@ -251,5 +273,6 @@ int ccp_register_aes_aeads(struct list_head *head);
 int ccp_register_sha_algs(struct list_head *head);
 int ccp_register_rsa_algs(void);
 void ccp_unregister_rsa_algs(void);
+int ccp_register_des3_algs(struct list_head *head);
 
 #endif
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 75a0978..fccca16 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -595,6 +595,7 @@ static irqreturn_t ccp_irq_handler(int irq, void *data)
 static const struct ccp_actions ccp3_actions = {
 	.aes = ccp_perform_aes,
 	.xts_aes = ccp_perform_xts_aes,
+	.des3 = NULL,
 	.sha = ccp_perform_sha,
 	.rsa = ccp_perform_rsa,
 	.passthru = ccp_perform_passthru,
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index dcae391..85387dc 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -101,6 +101,12 @@ union ccp_function {
 		u16 type:2;
 	} aes_xts;
 	struct {
+		u16 size:7;
+		u16 encrypt:1;
+		u16 mode:5;
+		u16 type:2;
+	} des3;
+	struct {
 		u16 rsvd1:10;
 		u16 type:4;
 		u16 rsvd2:1;
@@ -132,6 +138,10 @@ union ccp_function {
 #define	CCP_AES_TYPE(p)		((p)->aes.type)
 #define	CCP_XTS_SIZE(p)		((p)->aes_xts.size)
 #define	CCP_XTS_ENCRYPT(p)	((p)->aes_xts.encrypt)
+#define	CCP_DES3_SIZE(p)	((p)->des3.size)
+#define	CCP_DES3_ENCRYPT(p)	((p)->des3.encrypt)
+#define	CCP_DES3_MODE(p)	((p)->des3.mode)
+#define	CCP_DES3_TYPE(p)	((p)->des3.type)
 #define	CCP_SHA_TYPE(p)		((p)->sha.type)
 #define	CCP_RSA_SIZE(p)		((p)->rsa.size)
 #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
@@ -242,13 +252,16 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
 		/* Wait for the job to complete */
 		ret = wait_event_interruptible(cmd_q->int_queue,
 					       cmd_q->int_rcvd);
-		if (ret || cmd_q->cmd_error) {
+		if (cmd_q->cmd_error) {
+			/*
+			 * Log the error and flush the queue by
+			 * moving the head pointer
+			 */
 			if (cmd_q->cmd_error)
 				ccp_log_error(cmd_q->ccp,
 					      cmd_q->cmd_error);
-			/* A version 5 device doesn't use Job IDs... */
-			if (!ret)
-				ret = -EIO;
+			iowrite32(tail, cmd_q->reg_head_lo);
+			ret = -EIO;
 		}
 		cmd_q->int_rcvd = 0;
 	}
@@ -381,6 +394,47 @@ static int ccp5_perform_sha(struct ccp_op *op)
 	return ccp5_do_cmd(&desc, op->cmd_q);
 }
 
+static int ccp5_perform_des3(struct ccp_op *op)
+{
+	struct ccp5_desc desc;
+	union ccp_function function;
+	u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
+
+	/* Zero out all the fields of the command desc */
+	memset(&desc, 0, sizeof(struct ccp5_desc));
+
+	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;
+
+	CCP5_CMD_SOC(&desc) = op->soc;
+	CCP5_CMD_IOC(&desc) = 1;
+	CCP5_CMD_INIT(&desc) = op->init;
+	CCP5_CMD_EOM(&desc) = op->eom;
+	CCP5_CMD_PROT(&desc) = 0;
+
+	function.raw = 0;
+	CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
+	CCP_DES3_MODE(&function) = op->u.des3.mode;
+	CCP_DES3_TYPE(&function) = op->u.des3.type;
+	CCP5_CMD_FUNCTION(&desc) = cpu_to_le32(function.raw);
+
+	CCP5_CMD_LEN(&desc) = cpu_to_le32(op->src.u.dma.length);
+
+	CCP5_CMD_SRC_LO(&desc) = cpu_to_le32(ccp_addr_lo(&op->src.u.dma));
+	CCP5_CMD_SRC_HI(&desc) = cpu_to_le32(ccp_addr_hi(&op->src.u.dma));
+	CCP5_CMD_SRC_MEM(&desc) = cpu_to_le32(CCP_MEMTYPE_SYSTEM);
+
+	CCP5_CMD_DST_LO(&desc) = cpu_to_le32(ccp_addr_lo(&op->dst.u.dma));
+	CCP5_CMD_DST_HI(&desc) = cpu_to_le32(ccp_addr_hi(&op->dst.u.dma));
+	CCP5_CMD_DST_MEM(&desc) = cpu_to_le32(CCP_MEMTYPE_SYSTEM);
+
+	CCP5_CMD_KEY_LO(&desc) = cpu_to_le32(lower_32_bits(key_addr));
+	CCP5_CMD_KEY_HI(&desc) = 0;
+	CCP5_CMD_KEY_MEM(&desc) = cpu_to_le32(CCP_MEMTYPE_SB);
+	CCP5_CMD_LSB_ID(&desc) = cpu_to_le32(op->sb_ctx);
+
+	return ccp5_do_cmd(&desc, op->cmd_q);
+}
+
 static int ccp5_perform_rsa(struct ccp_op *op)
 {
 	struct ccp5_desc desc;
@@ -428,6 +482,7 @@ static int ccp5_perform_passthru(struct ccp_op *op)
 	struct ccp_dma_info *saddr = &op->src.u.dma;
 	struct ccp_dma_info *daddr = &op->dst.u.dma;
 
+
 	memset(&desc, 0, Q_DESC_SIZE);
 
 	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
@@ -722,6 +777,7 @@ static int ccp5_init(struct ccp_device *ccp)
 
 		dev_dbg(dev, "queue #%u available\n", i);
 	}
+
 	if (ccp->cmd_q_count == 0) {
 		dev_notice(dev, "no command queues available\n");
 		ret = -EIO;
@@ -991,6 +1047,7 @@ static const struct ccp_actions ccp5_actions = {
 	.aes = ccp5_perform_aes,
 	.xts_aes = ccp5_perform_xts_aes,
 	.sha = ccp5_perform_sha,
+	.des3 = ccp5_perform_des3,
 	.rsa = ccp5_perform_rsa,
 	.passthru = ccp5_perform_passthru,
 	.ecc = ccp5_perform_ecc,
diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
index a2214ac..12a92d5 100644
--- a/drivers/crypto/ccp/ccp-dev.h
+++ b/drivers/crypto/ccp/ccp-dev.h
@@ -27,6 +27,10 @@
 #include <linux/irqreturn.h>
 #include <linux/dmaengine.h>
 
+#ifndef CCP_LOG_LEVEL
+#define	CCP_LOG_LEVEL	KERN_INFO
+#endif
+
 #define MAX_CCP_NAME_LEN		16
 #define MAX_DMAPOOL_NAME_LEN		32
 
@@ -190,6 +194,9 @@
 #define CCP_XTS_AES_KEY_SB_COUNT	1
 #define CCP_XTS_AES_CTX_SB_COUNT	1
 
+#define CCP_DES3_KEY_SB_COUNT		1
+#define CCP_DES3_CTX_SB_COUNT		1
+
 #define CCP_SHA_SB_COUNT		1
 
 #define CCP_RSA_MAX_WIDTH		4096
@@ -475,6 +482,12 @@ struct ccp_xts_aes_op {
 	enum ccp_xts_aes_unit_size unit_size;
 };
 
+struct ccp_des3_op {
+	enum ccp_des3_type type;
+	enum ccp_des3_mode mode;
+	enum ccp_des3_action action;
+};
+
 struct ccp_sha_op {
 	enum ccp_sha_type type;
 	u64 msg_bits;
@@ -512,6 +525,7 @@ struct ccp_op {
 	union {
 		struct ccp_aes_op aes;
 		struct ccp_xts_aes_op xts;
+		struct ccp_des3_op des3;
 		struct ccp_sha_op sha;
 		struct ccp_rsa_op rsa;
 		struct ccp_passthru_op passthru;
@@ -620,13 +634,13 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp);
 struct ccp_actions {
 	int (*aes)(struct ccp_op *);
 	int (*xts_aes)(struct ccp_op *);
+	int (*des3)(struct ccp_op *);
 	int (*sha)(struct ccp_op *);
 	int (*rsa)(struct ccp_op *);
 	int (*passthru)(struct ccp_op *);
 	int (*ecc)(struct ccp_op *);
 	u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
-	void (*sbfree)(struct ccp_cmd_queue *, unsigned int,
-			       unsigned int);
+	void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int);
 	unsigned int (*get_free_slots)(struct ccp_cmd_queue *);
 	int (*init)(struct ccp_device *);
 	void (*destroy)(struct ccp_device *);
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index de28867..f9543f7 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -16,6 +16,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/des.h>
 #include <linux/ccp.h>
 #include <linux/delay.h>
 
@@ -882,8 +883,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 		return -EINVAL;
 
 	if (((aes->mode == CCP_AES_MODE_ECB) ||
-	     (aes->mode == CCP_AES_MODE_CBC) ||
-	     (aes->mode == CCP_AES_MODE_CFB)) &&
+	     (aes->mode == CCP_AES_MODE_CBC)) &&
 	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
 		return -EINVAL;
 
@@ -1194,6 +1194,200 @@ e_key:
 	return ret;
 }
 
+static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+{
+	struct ccp_des3_engine *des3 = &cmd->u.des3;
+
+	struct ccp_dm_workarea key, ctx;
+	struct ccp_data src, dst;
+	struct ccp_op op;
+	unsigned int dm_offset;
+	unsigned int len_singlekey;
+	bool in_place = false;
+	int ret;
+
+	/* Error checks */
+	if (!cmd_q->ccp->vdata->perform->des3)
+		return -EINVAL;
+
+	if (des3->key_len != DES3_EDE_KEY_SIZE)
+		return -EINVAL;
+
+	if (((des3->mode == CCP_DES3_MODE_ECB) ||
+		(des3->mode == CCP_DES3_MODE_CBC)) &&
+		(des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
+		return -EINVAL;
+
+	if (!des3->key || !des3->src || !des3->dst)
+		return -EINVAL;
+
+	if (des3->mode != CCP_DES3_MODE_ECB) {
+		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
+			return -EINVAL;
+
+		if (!des3->iv)
+			return -EINVAL;
+	}
+
+	ret = -EIO;
+	/* Zero out all the fields of the command desc */
+	memset(&op, 0, sizeof(op));
+
+	/* Set up the Function field */
+	op.cmd_q = cmd_q;
+	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
+	op.sb_key = cmd_q->sb_key;
+
+	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
+	op.u.des3.type = des3->type;
+	op.u.des3.mode = des3->mode;
+	op.u.des3.action = des3->action;
+
+	/*
+	 * All supported key sizes fit in a single (32-byte) KSB entry and
+	 * (like AES) must be in little endian format. Use the 256-bit byte
+	 * swap passthru option to convert from big endian to little endian.
+	 */
+	ret = ccp_init_dm_workarea(&key, cmd_q,
+				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
+				   DMA_TO_DEVICE);
+	if (ret)
+		return ret;
+
+	/*
+	 * The contents of the key triplet are in the reverse order of what
+	 * is required by the engine. Copy the 3 pieces individually to put
+	 * them where they belong.
+	 */
+	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
+
+	len_singlekey = des3->key_len / 3;
+	ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+			des3->key, 0, len_singlekey);
+	ccp_set_dm_area(&key, dm_offset + len_singlekey,
+			des3->key, len_singlekey, len_singlekey);
+	ccp_set_dm_area(&key, dm_offset,
+			des3->key, 2 * len_singlekey, len_singlekey);
+
+	/* Copy the key to the SB */
+	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+			     CCP_PASSTHRU_BYTESWAP_256BIT);
+	if (ret) {
+		cmd->engine_error = cmd_q->cmd_error;
+		goto e_key;
+	}
+
+	/*
+	 * The DES3 context fits in a single (32-byte) KSB entry and
+	 * must be in little endian format. Use the 256-bit byte swap
+	 * passthru option to convert from big endian to little endian.
+	 */
+	if (des3->mode != CCP_DES3_MODE_ECB) {
+		u32 load_mode;
+
+		op.sb_ctx = cmd_q->sb_ctx;
+
+		ret = ccp_init_dm_workarea(&ctx, cmd_q,
+					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
+					   DMA_BIDIRECTIONAL);
+		if (ret)
+			goto e_key;
+
+		/* Load the context into the LSB */
+		dm_offset = CCP_SB_BYTES - des3->iv_len;
+		ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
+
+		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+			load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
+		else
+			load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
+		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+				     load_mode);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_ctx;
+		}
+	}
+
+	/*
+	 * Prepare the input and output data workareas. For in-place
+	 * operations we need to set the dma direction to BIDIRECTIONAL
+	 * and copy the src workarea to the dst workarea.
+	 */
+	if (sg_virt(des3->src) == sg_virt(des3->dst))
+		in_place = true;
+
+	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
+			DES3_EDE_BLOCK_SIZE,
+			in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	if (ret)
+		goto e_ctx;
+
+	if (in_place)
+		dst = src;
+	else {
+		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
+				DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
+		if (ret)
+			goto e_src;
+	}
+
+	/* Send data to the CCP DES3 engine */
+	while (src.sg_wa.bytes_left) {
+		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
+		if (!src.sg_wa.bytes_left) {
+			op.eom = 1;
+
+			/* Since we don't retrieve the context in ECB mode
+			 * we have to wait for the operation to complete
+			 * on the last piece of data
+			 */
+			op.soc = 0;
+		}
+
+		ret = cmd_q->ccp->vdata->perform->des3(&op);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_dst;
+		}
+
+		ccp_process_data(&src, &dst, &op);
+	}
+
+	if (des3->mode != CCP_DES3_MODE_ECB) {
+		/* Retrieve the context and make BE */
+		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+				       CCP_PASSTHRU_BYTESWAP_256BIT);
+		if (ret) {
+			cmd->engine_error = cmd_q->cmd_error;
+			goto e_dst;
+		}
+
+		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
+		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+			dm_offset = CCP_SB_BYTES - des3->iv_len;
+		else
+			dm_offset = 0;
+		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
+				DES3_EDE_BLOCK_SIZE);
+	}
+e_dst:
+	if (!in_place)
+		ccp_free_data(&dst, cmd_q);
+
+e_src:
+	ccp_free_data(&src, cmd_q);
+
+e_ctx:
+	if (des3->mode != CCP_DES3_MODE_ECB)
+		ccp_dm_free(&ctx);
+
+e_key:
+	ccp_dm_free(&key);
+
+	return ret;
+}
+
 static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_sha_engine *sha = &cmd->u.sha;
@@ -2190,6 +2384,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	case CCP_ENGINE_XTS_AES_128:
 		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
 		break;
+	case CCP_ENGINE_DES3:
+		ret = ccp_run_des3_cmd(cmd_q, cmd);
+		break;
 	case CCP_ENGINE_SHA:
 		ret = ccp_run_sha_cmd(cmd_q, cmd);
 		break;
diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
index 28a9996..e9bdf6f 100644
--- a/drivers/crypto/ccp/ccp-pci.c
+++ b/drivers/crypto/ccp/ccp-pci.c
@@ -230,9 +230,11 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	dev_set_drvdata(dev, ccp);
 
+	/* Instance-specific required setup */
 	if (ccp->vdata->setup)
 		ccp->vdata->setup(ccp);
 
+	/* Initialize the CCP device */
 	ret = ccp->vdata->perform->init(ccp);
 	if (ret)
 		goto e_iomap;
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index f90f8ba..e7acc37 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -303,6 +303,60 @@ struct ccp_sha_engine {
 				 * final sha cmd */
 };
 
+/***** 3DES engine *****/
+enum ccp_des3_mode {
+	CCP_DES3_MODE_ECB = 0,
+	CCP_DES3_MODE_CBC,
+	CCP_DES3_MODE_CFB,
+	CCP_DES3_MODE__LAST,
+};
+
+enum ccp_des3_type {
+	CCP_DES3_TYPE_168 = 1,
+	CCP_DES3_TYPE__LAST,
+};
+
+enum ccp_des3_action {
+	CCP_DES3_ACTION_DECRYPT = 0,
+	CCP_DES3_ACTION_ENCRYPT,
+	CCP_DES3_ACTION__LAST,
+};
+
+/**
+ * struct ccp_des3_engine - CCP 3DES operation
+ * @type: Type of 3DES operation
+ * @mode: cipher mode
+ * @action: 3DES operation (decrypt/encrypt)
+ * @key: key to be used for this 3DES operation
+ * @key_len: length of key (in bytes)
+ * @iv: IV to be used for this 3DES operation
+ * @iv_len: length in bytes of iv
+ * @src: input data to be used for this operation
+ * @src_len: length of input data used for this operation (in bytes)
+ * @dst: output data produced by this operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ *   - type, mode, action, key, key_len, src, dst, src_len
+ *   - iv, iv_len for any mode other than ECB
+ *
+ * The iv variable is used as both input and output. On completion of the
+ * 3DES operation the new IV overwrites the old IV.
+ */
+struct ccp_des3_engine {
+	enum ccp_des3_type type;
+	enum ccp_des3_mode mode;
+	enum ccp_des3_action action;
+
+	struct scatterlist *key;
+	u32 key_len;	    /* In bytes */
+
+	struct scatterlist *iv;
+	u32 iv_len;	     /* In bytes */
+
+	struct scatterlist *src, *dst;
+	u64 src_len;	    /* In bytes */
+};
+
 /**
  * ccp_rsa_type - mode of RSA operation
  *
@@ -583,7 +637,7 @@ struct ccp_ecc_engine {
 enum ccp_engine {
 	CCP_ENGINE_AES = 0,
 	CCP_ENGINE_XTS_AES_128,
-	CCP_ENGINE_RSVD1,
+	CCP_ENGINE_DES3,
 	CCP_ENGINE_SHA,
 	CCP_ENGINE_RSA,
 	CCP_ENGINE_PASSTHRU,
@@ -631,6 +685,7 @@ struct ccp_cmd {
 	union {
 		struct ccp_aes_engine aes;
 		struct ccp_xts_aes_engine xts;
+		struct ccp_des3_engine des3;
 		struct ccp_sha_engine sha;
 		struct ccp_rsa_engine rsa;
 		struct ccp_passthru_engine passthru;

^ permalink raw reply related	[flat|nested] 16+ messages in thread
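
The three ccp_set_dm_area() calls in ccp_run_des3_cmd() above reverse the
order of the key thirds before the key is loaded into the storage block.
Here is a standalone model of just that copy, using plain memcpy() in place
of the driver's workarea helpers (the function name is invented for
illustration):

#include <stdio.h>
#include <string.h>

#define CCP_SB_BYTES		32
#define DES3_EDE_KEY_SIZE	24

/* K1 | K2 | K3 on input becomes K3 | K2 | K1, right-justified in a
 * zeroed 32-byte storage-block image, matching the driver's offsets.
 */
static void des3_pack_key(unsigned char sb[CCP_SB_BYTES],
			  const unsigned char key[DES3_EDE_KEY_SIZE])
{
	unsigned int one = DES3_EDE_KEY_SIZE / 3;
	unsigned int off = CCP_SB_BYTES - DES3_EDE_KEY_SIZE;

	memset(sb, 0, CCP_SB_BYTES);
	memcpy(sb + off + 2 * one, key, one);		/* K1 goes last */
	memcpy(sb + off + one, key + one, one);		/* K2 stays put */
	memcpy(sb + off, key + 2 * one, one);		/* K3 goes first */
}

int main(void)
{
	unsigned char key[DES3_EDE_KEY_SIZE], sb[CCP_SB_BYTES];
	int i;

	for (i = 0; i < DES3_EDE_KEY_SIZE; i++)
		key[i] = i;	/* K1 = 00..07, K2 = 08..0f, K3 = 10..17 */

	des3_pack_key(sb, key);
	for (i = 0; i < CCP_SB_BYTES; i++)
		printf("%02x%c", sb[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}

The 256-bit byteswap passthru then converts the whole entry into the
little-endian form the engine expects.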

* Re: [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP
  2016-10-13 14:53 ` [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP Gary R Hook
@ 2016-10-13 18:25   ` Stephan Mueller
  2016-10-13 20:08     ` Gary R Hook
  2016-10-13 21:06   ` Tom Lendacky
  1 sibling, 1 reply; 16+ messages in thread
From: Stephan Mueller @ 2016-10-13 18:25 UTC (permalink / raw)
  To: Gary R Hook; +Cc: linux-crypto, thomas.lendacky, herbert, davem

On Thursday, 13 October 2016 at 09:53:09 CEST, Gary R Hook wrote:

Hi Gary,

> Wire up the v3 CCP as a cipher provider.
> 
> Signed-off-by: Gary R Hook <gary.hook@amd.com>
> ---
>  drivers/crypto/ccp/Makefile          |    1
>  drivers/crypto/ccp/ccp-crypto-main.c |   15 ++
>  drivers/crypto/ccp/ccp-crypto-rsa.c  |  258 ++++++++++++++++++++++++++++++++++
>  drivers/crypto/ccp/ccp-crypto.h      |   24 +++
>  drivers/crypto/ccp/ccp-dev-v3.c      |   38 +++++
>  drivers/crypto/ccp/ccp-ops.c         |    1
>  include/linux/ccp.h                  |   34 ++++
>  7 files changed, 370 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c
> 
> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> index 346ceb8..23f89b7 100644
> --- a/drivers/crypto/ccp/Makefile
> +++ b/drivers/crypto/ccp/Makefile
> @@ -12,4 +12,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
>  		   ccp-crypto-aes.o \
>  		   ccp-crypto-aes-cmac.o \
>  		   ccp-crypto-aes-xts.o \
> +		   ccp-crypto-rsa.o \
>  		   ccp-crypto-sha.o
> diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
> index e0380e5..f3c4c25 100644
> --- a/drivers/crypto/ccp/ccp-crypto-main.c
> +++ b/drivers/crypto/ccp/ccp-crypto-main.c
> @@ -33,6 +33,10 @@ static unsigned int sha_disable;
>  module_param(sha_disable, uint, 0444);
>  MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
> 
> +static unsigned int rsa_disable;
> +module_param(rsa_disable, uint, 0444);
> +MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
> +
>  /* List heads for the supported algorithms */
>  static LIST_HEAD(hash_algs);
>  static LIST_HEAD(cipher_algs);
> @@ -343,6 +347,14 @@ static int ccp_register_algs(void)
>  			return ret;
>  	}
> 
> +	if (!rsa_disable) {
> +		ret = ccp_register_rsa_algs();
> +		if (ret) {
> +			rsa_disable = 1;
> +			return ret;
> +		}
> +	}
> +
>  	return 0;
>  }
> 
> @@ -362,6 +374,9 @@ static void ccp_unregister_algs(void)
>  		list_del(&ablk_alg->entry);
>  		kfree(ablk_alg);
>  	}
> +
> +	if (!rsa_disable)
> +		ccp_unregister_rsa_algs();
>  }
> 
>  static int ccp_crypto_init(void)
> diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
> new file mode 100644
> index 0000000..7dab43b
> --- /dev/null
> +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
> @@ -0,0 +1,258 @@
> +/*
> + * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
> + *
> + * Copyright (C) 2016 Advanced Micro Devices, Inc.
> + *
> + * Author: Gary R Hook <gary.hook@amd.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include <linux/module.h>
> +#include <linux/sched.h>
> +#include <linux/mpi.h>
> +#include <linux/scatterlist.h>
> +#include <linux/crypto.h>
> +#include <crypto/algapi.h>
> +#include <crypto/internal/akcipher.h>
> +#include <crypto/akcipher.h>
> +#include <crypto/scatterwalk.h>
> +
> +#include "ccp-crypto.h"
> +
> +static inline struct akcipher_request *akcipher_request_cast(
> +	struct crypto_async_request *req)
> +{
> +	return container_of(req, struct akcipher_request, base);
> +}
> +
> +static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
> +{
> +	struct akcipher_request *req = akcipher_request_cast(async_req);
> +	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> +
> +	if (!ret)
> +		req->dst_len = rctx->cmd.u.rsa.d_len;
> +
> +	ret = 0;
> +
> +	return ret;
> +}
> +
> +static int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
> +{
> +	return CCP_RSA_MAXMOD;
> +}
> +
> +static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
> +{
> +	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
> +	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> +	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> +	int ret = 0;
> +
> +	if (!ctx->u.rsa.pkey.d && !ctx->u.rsa.pkey.e)
> +		return -EINVAL;
> +
> +	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
> +	INIT_LIST_HEAD(&rctx->cmd.entry);
> +	rctx->cmd.engine = CCP_ENGINE_RSA;
> +	rctx->cmd.u.rsa.mode = encrypt ? CCP_RSA_ENCRYPT : CCP_RSA_DECRYPT;
> +
> +	rctx->cmd.u.rsa.pkey = ctx->u.rsa.pkey;
> +	rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len;
> +	rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
> +	rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
> +	rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
> +	rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
> +	if (ctx->u.rsa.pkey.d) {
> +		rctx->cmd.u.rsa.d_sg = &ctx->u.rsa.d_sg;
> +		rctx->cmd.u.rsa.d_len = ctx->u.rsa.d_len;
> +	}
> +
> +	rctx->cmd.u.rsa.src = req->src;
> +	rctx->cmd.u.rsa.src_len = req->src_len;
> +	rctx->cmd.u.rsa.dst = req->dst;
> +	rctx->cmd.u.rsa.dst_len = req->dst_len;
> +
> +	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
> +
> +	return ret;
> +}
> +
> +static int ccp_rsa_encrypt(struct akcipher_request *req)
> +{
> +	return ccp_rsa_crypt(req, true);
> +}
> +
> +static int ccp_rsa_decrypt(struct akcipher_request *req)
> +{
> +	return ccp_rsa_crypt(req, false);
> +}
> +
> +static void ccp_free_mpi_key(struct ccp_rsa_key *key)
> +{
> +	mpi_free(key->d);
> +	key->d = NULL;
> +	mpi_free(key->e);
> +	key->e = NULL;
> +	mpi_free(key->n);
> +	key->n = NULL;
> +}

Could you please see whether that function can be turned into a common 
function call? crypto/rsa.c implements the same code in rsa_free_mpi_key.
> +
> +static int ccp_check_key_length(unsigned int len)
> +{
> +	/* In bits */
> +	if (len < 8 || len > 16384)
> +		return -EINVAL;
> +	return 0;
> +}
> +
> +static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
> +{
> +	/* Clean up old key data */
> +	kfree(ctx->u.rsa.e_buf);
> +	ctx->u.rsa.e_buf = NULL;
> +	ctx->u.rsa.e_len = 0;
> +	kfree(ctx->u.rsa.n_buf);
> +	ctx->u.rsa.n_buf = NULL;
> +	ctx->u.rsa.n_len = 0;
> +	kfree(ctx->u.rsa.d_buf);

kzfree, please.

> +	ctx->u.rsa.d_buf = NULL;
> +	ctx->u.rsa.d_len = 0;
> +}
> +
> +static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
> +			  unsigned int keylen, bool public)
> +{
> +	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> +	struct rsa_key raw_key;
> +	unsigned int n_size;
> +	int ret;
> +
> +	if (!ctx)
> +		return -EINVAL;
> +
> +	ccp_rsa_free_key_bufs(ctx);
> +	memset(&raw_key, 0, sizeof(raw_key));
> +
> +	/* Code borrowed from crypto/rsa.c */
> +	if (public)
> +		ret = rsa_parse_pub_key(&raw_key, key, keylen);
> +	else
> +		ret = rsa_parse_priv_key(&raw_key, key, keylen);
> +	if (ret)
> +		goto e_ret;
> +
> +	ret = -EINVAL;
> +
> +	ctx->u.rsa.pkey.e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
> +	if (!ctx->u.rsa.pkey.e)
> +		goto e_ret;
> +	ctx->u.rsa.e_buf = mpi_get_buffer(ctx->u.rsa.pkey.e,
> +					  &ctx->u.rsa.e_len, NULL);
> +	if (!ctx->u.rsa.e_buf)
> +		goto e_key;
> +	sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len);
> +
> +
> +	ctx->u.rsa.pkey.n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
> +	n_size = mpi_get_size(ctx->u.rsa.pkey.n);
> +	if (ccp_check_key_length(n_size << 3))
> +		goto e_key;
> +	ctx->u.rsa.key_len = n_size;
> +	ctx->u.rsa.n_buf = mpi_get_buffer(ctx->u.rsa.pkey.n,
> +					  &ctx->u.rsa.n_len, NULL);
> +	if (!ctx->u.rsa.n_buf)
> +		goto e_nkey;
> +	sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);
> +
> +	if (!public) {
> +		ctx->u.rsa.pkey.d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
> +		if (!ctx->u.rsa.pkey.d)
> +			goto e_nkey;
> +		ctx->u.rsa.d_buf = mpi_get_buffer(ctx->u.rsa.pkey.d,
> +						  &ctx->u.rsa.d_len, NULL);
> +		if (!ctx->u.rsa.d_buf)
> +			goto e_dkey;
> +		sg_init_one(&ctx->u.rsa.d_sg, ctx->u.rsa.d_buf,
> +			    ctx->u.rsa.d_len);
> +	}
> +
> +	return 0;
> +
> +e_dkey:
> +	kfree(ctx->u.rsa.n_buf);
> +e_nkey:
> +	kfree(ctx->u.rsa.e_buf);
> +e_key:
> +	ccp_free_mpi_key(&ctx->u.rsa.pkey);
> +e_ret:
> +	return ret;
> +}
> +
> +static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
> +			      unsigned int keylen)
> +{
> +	return ccp_rsa_setkey(tfm, key, keylen, false);
> +}
> +
> +static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
> +			     unsigned int keylen)
> +{
> +	return ccp_rsa_setkey(tfm, key, keylen, true);
> +}
> +
> +static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
> +{
> +	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> +
> +	ctx->complete = ccp_rsa_complete;
> +
> +	return 0;
> +}
> +
> +static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
> +{
> +	struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
> +
> +	ccp_rsa_free_key_bufs(ctx);
> +}
> +
> +static struct akcipher_alg rsa = {
> +	.encrypt = ccp_rsa_encrypt,
> +	.decrypt = ccp_rsa_decrypt,
> +	.sign = NULL,
> +	.verify = NULL,
> +	.set_pub_key = ccp_rsa_setpubkey,
> +	.set_priv_key = ccp_rsa_setprivkey,
> +	.max_size = ccp_rsa_maxsize,
> +	.init = ccp_rsa_init_tfm,
> +	.exit = ccp_rsa_exit_tfm,
> +	.reqsize = sizeof(struct ccp_rsa_req_ctx),
> +	.base = {
> +		.cra_name = "rsa",
> +		.cra_driver_name = "rsa-ccp",
> +		.cra_priority = 100,

Are you sure you want to leave it at 100? With this value, it will contend
with the C implementation.

> +		.cra_module = THIS_MODULE,
> +		.cra_ctxsize = sizeof(struct ccp_ctx),
> +	},
> +};
> +
> +int ccp_register_rsa_algs(void)
> +{
> +	int ret;
> +
> +	/* Register the RSA algorithm in standard mode
> +	 * This works for CCP v3 and later
> +	 */
> +	ret = crypto_register_akcipher(&rsa);
> +	return ret;
> +}
> +
> +void ccp_unregister_rsa_algs(void)
> +{
> +	crypto_unregister_akcipher(&rsa);
> +}
> diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
> index ae442ac..4a1d206 100644
> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -22,6 +22,7 @@
>  #include <crypto/ctr.h>
>  #include <crypto/hash.h>
>  #include <crypto/sha.h>
> +#include <crypto/internal/rsa.h>
> 
>  #define CCP_CRA_PRIORITY	300
> 
> @@ -155,6 +156,26 @@ struct ccp_sha_ctx {
>  	struct crypto_shash *hmac_tfm;
>  };
> 
> +/***** RSA related defines *****/
> +
> +struct ccp_rsa_ctx {
> +	unsigned int key_len; /* in bytes */
> +	struct ccp_rsa_key pkey;
> +	struct scatterlist e_sg;
> +	u8 *e_buf;
> +	unsigned int e_len;
> +	struct scatterlist n_sg;
> +	u8 *n_buf;
> +	unsigned int n_len;
> +	struct scatterlist d_sg;
> +	u8 *d_buf;
> +	unsigned int d_len;
> +};
> +
> +struct ccp_rsa_req_ctx {
> +	struct ccp_cmd cmd;
> +};
> +
>  struct ccp_sha_req_ctx {
>  	enum ccp_sha_type type;
> 
> @@ -201,6 +222,7 @@ struct ccp_ctx {
> 
>  	union {
>  		struct ccp_aes_ctx aes;
> +		struct ccp_rsa_ctx rsa;
>  		struct ccp_sha_ctx sha;
>  	} u;
>  };
> @@ -214,5 +236,7 @@ int ccp_register_aes_algs(struct list_head *head);
>  int ccp_register_aes_cmac_algs(struct list_head *head);
>  int ccp_register_aes_xts_algs(struct list_head *head);
>  int ccp_register_sha_algs(struct list_head *head);
> +int ccp_register_rsa_algs(void);
> +void ccp_unregister_rsa_algs(void);
> 
>  #endif
> diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
> index 8d2dbac..75a0978 100644
> --- a/drivers/crypto/ccp/ccp-dev-v3.c
> +++ b/drivers/crypto/ccp/ccp-dev-v3.c
> @@ -20,6 +20,43 @@
> 
>  #include "ccp-dev.h"
> 
> +/* CCP version 3: Union to define the function field (cmd_reg1/dword0) */
> +union ccp_function {
> +	struct {
> +		u16 size:7;
> +		u16 encrypt:1;
> +		u16 mode:3;
> +		u16 type:2;
> +	} aes;
> +	struct {
> +		u16 size:7;
> +		u16 encrypt:1;
> +		u16 rsvd:5;
> +	} aes_xts;
> +	struct {
> +		u16 rsvd1:11;
> +		u16 type:2;
> +	} sha;
> +	struct {
> +		u16 size:13;
> +	} rsa;
> +	struct {
> +		u16 byteswap:2;
> +		u16 bitwise:3;
> +		u16 rsvd:8;
> +	} pt;
> +	struct  {
> +		u16 rsvd:13;
> +	} zlib;
> +	struct {
> +		u16 size:8;
> +		u16 mode:3;
> +		u16 rsvd1:1;
> +		u16 rsvd2:1;
> +	} ecc;
> +	u16 raw;
> +};
> +
>  static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
>  {
>  	int start;
> @@ -88,6 +125,7 @@ static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
>  	 * are actually available, but reading that register resets it
>  	 * and you could lose some error information.
>  	 */
> +
>  	cmd_q->free_slots--;
> 
>  	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 82cc637..826782d 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -17,6 +17,7 @@
>  #include <linux/interrupt.h>
>  #include <crypto/scatterwalk.h>
>  #include <linux/ccp.h>
> +#include <linux/delay.h>
> 
>  #include "ccp-dev.h"
> 
> diff --git a/include/linux/ccp.h b/include/linux/ccp.h
> index 1a3e0b5..d634565 100644
> --- a/include/linux/ccp.h
> +++ b/include/linux/ccp.h
> @@ -19,7 +19,8 @@
>  #include <linux/list.h>
>  #include <crypto/aes.h>
>  #include <crypto/sha.h>
> -
> +#include <linux/mpi.h>
> +#include <crypto/internal/rsa.h>
> 
>  struct ccp_device;
>  struct ccp_cmd;
> @@ -293,6 +294,27 @@ struct ccp_sha_engine {
>  				 * final sha cmd */
>  };
> 
> +/**
> + * ccp_rsa_type - mode of RSA operation
> + *
> + * @CCP_RSA_MODE_STD: standard mode
> + */
> +enum ccp_rsa_mode {
> +	CCP_RSA_ENCRYPT = 0,
> +	CCP_RSA_DECRYPT,
> +	CCP_RSA__LAST,
> +};
> +
> +struct ccp_rsa_key {
> +	MPI e;
> +	MPI n;
> +	MPI d;
> +};
> +
> +#define	CCP_RSA_MAXMOD	(4 * 1024 / 8)
> +#define	CCP5_RSA_MAXMOD	(16 * 1024 / 8)
> +#define	CCP5_RSA_MINMOD	(512 / 8)
> +
>  /***** RSA engine *****/
>  /**
>   * struct ccp_rsa_engine - CCP RSA operation
> @@ -309,16 +331,26 @@ struct ccp_sha_engine {
>   *   - key_size, exp, exp_len, mod, mod_len, src, dst, src_len
>   */
>  struct ccp_rsa_engine {
> +	enum ccp_rsa_mode mode;
>  	u32 key_size;		/* In bits */
> 
> +	struct ccp_rsa_key pkey;
> +
> +/* e */
>  	struct scatterlist *exp;
>  	u32 exp_len;		/* In bytes */
> 
> +/* n */
>  	struct scatterlist *mod;
>  	u32 mod_len;		/* In bytes */
> 
> +/* d */
> +	struct scatterlist *d_sg;
> +	unsigned int d_len;
> +
>  	struct scatterlist *src, *dst;
>  	u32 src_len;		/* In bytes */
> +	u32 dst_len;		/* In bytes */
>  };
> 
>  /***** Passthru engine *****/
> 



Ciao
Stephan

^ permalink raw reply	[flat|nested] 16+ messages in thread
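
For reference, the common helper Stephan points at would look something
like the sketch below if it were hoisted out of crypto/rsa.c. This is only
an illustration of the idea: rsa_mpi_key is currently private to
crypto/rsa.c, and where such a helper could live is exactly the open
question taken up later in this thread.

/* Hypothetical shared helper, assuming a visible rsa_mpi_key with
 * n, e and d as MPIs, as crypto/rsa.c defines it internally.
 */
#include <linux/mpi.h>

struct rsa_mpi_key {
	MPI n;
	MPI e;
	MPI d;
};

static inline void rsa_free_mpi_key(struct rsa_mpi_key *key)
{
	mpi_free(key->d);
	key->d = NULL;
	mpi_free(key->e);
	key->e = NULL;
	mpi_free(key->n);
	key->n = NULL;
}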

* Re: [PATCH 1/6] crypto: ccp - Add SHA-2 support
  2016-10-13 14:52 ` [PATCH 1/6] crypto: ccp - Add SHA-2 support Gary R Hook
@ 2016-10-13 19:35   ` Tom Lendacky
  0 siblings, 0 replies; 16+ messages in thread
From: Tom Lendacky @ 2016-10-13 19:35 UTC (permalink / raw)
  To: Gary R Hook, linux-crypto; +Cc: herbert, davem

On 10/13/2016 09:52 AM, Gary R Hook wrote:
> Incorporate 384-bit and 512-bit hashing for a version 5 CCP
> device
> 
> 
> Signed-off-by: Gary R Hook <gary.hook@amd.com>
> ---
>  drivers/crypto/ccp/ccp-crypto-sha.c |   22 +++++++++++
>  drivers/crypto/ccp/ccp-crypto.h     |    9 +++--
>  drivers/crypto/ccp/ccp-ops.c        |   70 +++++++++++++++++++++++++++++++++++
>  include/linux/ccp.h                 |    3 ++
>  4 files changed, 101 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
> index 84a652b..6b46eea 100644
> --- a/drivers/crypto/ccp/ccp-crypto-sha.c
> +++ b/drivers/crypto/ccp/ccp-crypto-sha.c
> @@ -146,6 +146,12 @@ static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
>  	case CCP_SHA_TYPE_256:
>  		rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
>  		break;
> +	case CCP_SHA_TYPE_384:
> +		rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
> +		break;
> +	case CCP_SHA_TYPE_512:
> +		rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
> +		break;
>  	default:
>  		/* Should never get here */
>  		break;
> @@ -393,6 +399,22 @@ static struct ccp_sha_def sha_algs[] = {
>  		.digest_size	= SHA256_DIGEST_SIZE,
>  		.block_size	= SHA256_BLOCK_SIZE,
>  	},
> +	{
> +		.version	= CCP_VERSION(5, 0),
> +		.name		= "sha384",
> +		.drv_name	= "sha384-ccp",
> +		.type		= CCP_SHA_TYPE_384,
> +		.digest_size	= SHA384_DIGEST_SIZE,
> +		.block_size	= SHA384_BLOCK_SIZE,
> +	},
> +	{
> +		.version	= CCP_VERSION(5, 0),
> +		.name		= "sha512",
> +		.drv_name	= "sha512-ccp",
> +		.type		= CCP_SHA_TYPE_512,
> +		.digest_size	= SHA512_DIGEST_SIZE,
> +		.block_size	= SHA512_BLOCK_SIZE,
> +	},
>  };
>  
>  static int ccp_register_hmac_alg(struct list_head *head,
> diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
> index 8335b32..ae442ac 100644
> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -137,9 +137,12 @@ struct ccp_aes_cmac_exp_ctx {
>  	u8 buf[AES_BLOCK_SIZE];
>  };
>  
> -/***** SHA related defines *****/
> -#define MAX_SHA_CONTEXT_SIZE	SHA256_DIGEST_SIZE
> -#define MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
> +/*
> + * SHA-related defines
> + * These values must be large enough to accommodate any variant
> + */
> +#define MAX_SHA_CONTEXT_SIZE	SHA512_DIGEST_SIZE
> +#define MAX_SHA_BLOCK_SIZE	SHA512_BLOCK_SIZE
>  
>  struct ccp_sha_ctx {
>  	struct scatterlist opad_sg;
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 50fae44..8fedb14 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -41,6 +41,20 @@ static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = {
>  	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
>  };
>  
> +static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
> +	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
> +	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
> +	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
> +	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7),
> +};
> +
> +static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = {
> +	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
> +	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
> +	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
> +	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7),
> +};
> +
>  #define	CCP_NEW_JOBID(ccp)	((ccp->vdata->version == CCP_VERSION(3, 0)) ? \
>  					ccp_gen_jobid(ccp) : 0)
>  
> @@ -963,6 +977,16 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  			return -EINVAL;
>  		block_size = SHA256_BLOCK_SIZE;
>  		break;
> +	case CCP_SHA_TYPE_384:
> +		if (sha->ctx_len < SHA384_DIGEST_SIZE)
> +			return -EINVAL;
> +		block_size = SHA384_BLOCK_SIZE;
> +		break;
> +	case CCP_SHA_TYPE_512:
> +		if (sha->ctx_len < SHA512_DIGEST_SIZE)
> +			return -EINVAL;
> +		block_size = SHA512_BLOCK_SIZE;
> +		break;

A version 3 CCP won't support these new sizes.  You should add a version
check and return an error if v3.

>  	default:
>  		return -EINVAL;
>  	}
> @@ -1050,6 +1074,21 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  		sb_count = 1;
>  		ooffset = ioffset = 0;
>  		break;
> +	case CCP_SHA_TYPE_384:
> +		digest_size = SHA384_DIGEST_SIZE;
> +		init = (void *) ccp_sha384_init;
> +		ctx_size = SHA512_DIGEST_SIZE;
> +		sb_count = 2;
> +		ioffset = 0;
> +		ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE;
> +		break;
> +	case CCP_SHA_TYPE_512:
> +		digest_size = SHA512_DIGEST_SIZE;
> +		init = (void *) ccp_sha512_init;
> +		ctx_size = SHA512_DIGEST_SIZE;
> +		sb_count = 2;
> +		ooffset = ioffset = 0;
> +		break;
>  	default:
>  		ret = -EINVAL;
>  		goto e_data;
> @@ -1068,6 +1107,11 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  	op.u.sha.type = sha->type;
>  	op.u.sha.msg_bits = sha->msg_bits;
>  
> +	/* For SHA1/224/256 the context fits in a single (32-byte) SB entry;
> +	 * SHA384/512 require 2 adjacent SB slots, with the right half in the
> +	 * first slot, and the left half in the second. Each portion must then
> +	 * be in little endian format: use the 256-bit byte swap option.
> +	 */
>  	ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES,
>  				   DMA_BIDIRECTIONAL);
>  	if (ret)
> @@ -1079,6 +1123,13 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  		case CCP_SHA_TYPE_256:
>  			memcpy(ctx.address + ioffset, init, ctx_size);
>  			break;
> +		case CCP_SHA_TYPE_384:
> +		case CCP_SHA_TYPE_512:
> +			memcpy(ctx.address + ctx_size / 2, init,
> +			       ctx_size / 2);
> +			memcpy(ctx.address, init + ctx_size / 2,
> +			       ctx_size / 2);
> +			break;
>  		default:
>  			ret = -EINVAL;
>  			goto e_ctx;
> @@ -1145,6 +1196,15 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  					sha->ctx, 0,
>  					digest_size);
>  			break;
> +		case CCP_SHA_TYPE_384:
> +		case CCP_SHA_TYPE_512:
> +			ccp_get_dm_area(&ctx, 0,
> +					sha->ctx, LSB_ITEM_SIZE - ooffset,
> +					LSB_ITEM_SIZE);
> +			ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset,
> +					sha->ctx, 0,
> +					LSB_ITEM_SIZE - ooffset);
> +			break;
>  		default:
>  			ret = -EINVAL;
>  			goto e_ctx;
> @@ -1182,6 +1242,16 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  			       ctx.address + ooffset,
>  			       digest_size);
>  			break;
> +		case CCP_SHA_TYPE_384:
> +		case CCP_SHA_TYPE_512:
> +			memcpy(hmac_buf + block_size,
> +			       ctx.address + LSB_ITEM_SIZE + ooffset,
> +			       LSB_ITEM_SIZE);
> +			memcpy(hmac_buf + block_size +
> +			       (LSB_ITEM_SIZE - ooffset),
> +			       ctx.address,
> +			       LSB_ITEM_SIZE);
> +			break;
>  		default:
>  			ret = -EINVAL;
>  			goto e_ctx;
> diff --git a/include/linux/ccp.h b/include/linux/ccp.h
> index a765333..1a3e0b5 100644
> --- a/include/linux/ccp.h
> +++ b/include/linux/ccp.h
> @@ -249,8 +249,11 @@ enum ccp_sha_type {
>  	CCP_SHA_TYPE_1 = 1,
>  	CCP_SHA_TYPE_224,
>  	CCP_SHA_TYPE_256,
> +	CCP_SHA_TYPE_384,
> +	CCP_SHA_TYPE_512,
>  	CCP_SHA_TYPE__LAST,
>  };
> +#define	CCP_SHA_CTXSIZE		SHA512_DIGEST_SIZE

This doesn't appear to be used anywhere.

Thanks,
Tom

>  
>  /**
>   * struct ccp_sha_engine - CCP SHA operation
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread
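
One way to add the version check Tom asks for is to gate the new switch
cases on the device version directly. A sketch against the quoted hunk,
not a final patch: rejecting anything older than a v4 device covers the
v3 case Tom raises, while a stricter check against CCP_VERSION(5, 0)
would match the registration tables in this series.

	case CCP_SHA_TYPE_384:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) ||
		    sha->ctx_len < SHA384_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA384_BLOCK_SIZE;
		break;
	case CCP_SHA_TYPE_512:
		if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) ||
		    sha->ctx_len < SHA512_DIGEST_SIZE)
			return -EINVAL;
		block_size = SHA512_BLOCK_SIZE;
		break;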

* Re: [PATCH 2/6] crypto: ccp - Remove unneeded sign-extension support
  2016-10-13 14:53 ` [PATCH 2/6] crypto: ccp - Remove unneeded sign-extension support Gary R Hook
@ 2016-10-13 19:57   ` Tom Lendacky
  0 siblings, 0 replies; 16+ messages in thread
From: Tom Lendacky @ 2016-10-13 19:57 UTC (permalink / raw)
  To: Gary R Hook, linux-crypto; +Cc: herbert, davem

On 10/13/2016 09:53 AM, Gary R Hook wrote:
> The reverse-get/set functions can be simplified by
> eliminating unused code.
> 
> 
> Signed-off-by: Gary R Hook <gary.hook@amd.com>
> ---
>  drivers/crypto/ccp/ccp-ops.c |  145 +++++++++++++++++-------------------------
>  1 file changed, 59 insertions(+), 86 deletions(-)
> 
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 8fedb14..82cc637 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -198,62 +198,46 @@ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
>  }
>  
>  static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
> +				   unsigned int wa_offset,
>  				   struct scatterlist *sg,
> -				   unsigned int len, unsigned int se_len,
> -				   bool sign_extend)
> +				   unsigned int sg_offset,
> +				   unsigned int len)
>  {
> -	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
> -	u8 buffer[CCP_REVERSE_BUF_SIZE];
> -
> -	if (WARN_ON(se_len > sizeof(buffer)))
> -		return -EINVAL;
> -
> -	sg_offset = len;
> -	dm_offset = 0;
> -	nbytes = len;
> -	while (nbytes) {
> -		sb_len = min_t(unsigned int, nbytes, se_len);
> -		sg_offset -= sb_len;
> -
> -		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 0);
> -		for (i = 0; i < sb_len; i++)
> -			wa->address[dm_offset + i] = buffer[sb_len - i - 1];
> -
> -		dm_offset += sb_len;
> -		nbytes -= sb_len;
> -
> -		if ((sb_len != se_len) && sign_extend) {
> -			/* Must sign-extend to nearest sign-extend length */
> -			if (wa->address[dm_offset - 1] & 0x80)
> -				memset(wa->address + dm_offset, 0xff,
> -				       se_len - sb_len);
> -		}
> +	u8 *p, *q;
> +
> +	ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
> +
> +	p = wa->address + wa_offset;
> +	q = p + len - 1;
> +	while (p < q) {
> +		*p = *p ^ *q;
> +		*q = *p ^ *q;
> +		*p = *p ^ *q;
> +		p++;
> +		q--;
>  	}
> -
>  	return 0;
>  }
>  
>  static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
> +				    unsigned int wa_offset,
>  				    struct scatterlist *sg,
> +				    unsigned int sg_offset,
>  				    unsigned int len)
>  {
> -	unsigned int nbytes, sg_offset, dm_offset, sb_len, i;
> -	u8 buffer[CCP_REVERSE_BUF_SIZE];
> -
> -	sg_offset = 0;
> -	dm_offset = len;
> -	nbytes = len;
> -	while (nbytes) {
> -		sb_len = min_t(unsigned int, nbytes, sizeof(buffer));
> -		dm_offset -= sb_len;
> -
> -		for (i = 0; i < sb_len; i++)
> -			buffer[sb_len - i - 1] = wa->address[dm_offset + i];
> -		scatterwalk_map_and_copy(buffer, sg, sg_offset, sb_len, 1);
> -
> -		sg_offset += sb_len;
> -		nbytes -= sb_len;
> +	u8 *p, *q;
> +
> +	p = wa->address + wa_offset;
> +	q = p + len - 1;
> +	while (p < q) {
> +		*p = *p ^ *q;
> +		*q = *p ^ *q;
> +		*p = *p ^ *q;
> +		p++;
> +		q--;
>  	}
> +
> +	ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len);
>  }
>  
>  static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
> @@ -1294,7 +1278,9 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  	struct ccp_data dst;
>  	struct ccp_op op;
>  	unsigned int sb_count, i_len, o_len;
> -	int ret;
> +	unsigned int dm_offset;
> +	int i = 0;

Is "dm_offset" and "i" used anywhere?  I don't see them used in this
function...

> +	int ret = 0;

No need to change this, is there?

Thanks,
Tom

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP
  2016-10-13 18:25   ` Stephan Mueller
@ 2016-10-13 20:08     ` Gary R Hook
  2016-10-13 20:14       ` Stephan Mueller
  0 siblings, 1 reply; 16+ messages in thread
From: Gary R Hook @ 2016-10-13 20:08 UTC (permalink / raw)
  To: Stephan Mueller, Gary R Hook
  Cc: linux-crypto, thomas.lendacky, herbert, davem

On 10/13/2016 01:25 PM, Stephan Mueller wrote:
> On Thursday, 13 October 2016, 09:53:09 CEST, Gary R Hook wrote:
>
> Hi Gary,
>
>> Wire up the v3 CCP as a cipher provider.
>>
>> Signed-off-by: Gary R Hook <gary.hook@amd.com>
>> ---
>>
>> ...snip...
>>
>> +}
>> +
>> +static void ccp_free_mpi_key(struct ccp_rsa_key *key)
>> +{
>> +	mpi_free(key->d);
>> +	key->d = NULL;
>> +	mpi_free(key->e);
>> +	key->e = NULL;
>> +	mpi_free(key->n);
>> +	key->n = NULL;
>> +}
>
> Could you please see whether that function can be turned into a common
> function call? crypto/rsa.c implements the same code in rsa_free_mpi_key.

I am happy to do so, but was unsure of protocol. rsa.c is in a module, which
makes my module depend upon another. I do not want to do that. And moving
this function elsewhere makes no sense.

I would go with an inline function, but there's no obvious place for it.
The RSA software implementation uses the MPI library, but there's no
requirement to do so (witness the qat driver). Thus, an inline function can't
be put in internal/rsa.h without moving the rsa_mpi_key definition and
referencing mpi.h.

I think that RSA+MPI things, such as rsa_mpi_key and this function, could go
into internal/rsa.h, but it would be necessary to #include mpi.h.

Or: create a new include file that contains these (and any other) RSA/MPI
amalgams.

Which would you prefer?

>> +
>> +static int ccp_check_key_length(unsigned int len)
>> +{
>> +	/* In bits */
>> +	if (len < 8 || len > 16384)
>> +		return -EINVAL;
>> +	return 0;
>> +}
>> +
>> +static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
>> +{
>> +	/* Clean up old key data */
>> +	kfree(ctx->u.rsa.e_buf);
>> +	ctx->u.rsa.e_buf = NULL;
>> +	ctx->u.rsa.e_len = 0;
>> +	kfree(ctx->u.rsa.n_buf);
>> +	ctx->u.rsa.n_buf = NULL;
>> +	ctx->u.rsa.n_len = 0;
>> +	kfree(ctx->u.rsa.d_buf);
>
> kzfree, please.

Of course. Done.

>>
>> ...snip...
>>
>> +}
>> +
>> +static struct akcipher_alg rsa = {
>> +	.encrypt = ccp_rsa_encrypt,
>> +	.decrypt = ccp_rsa_decrypt,
>> +	.sign = NULL,
>> +	.verify = NULL,
>> +	.set_pub_key = ccp_rsa_setpubkey,
>> +	.set_priv_key = ccp_rsa_setprivkey,
>> +	.max_size = ccp_rsa_maxsize,
>> +	.init = ccp_rsa_init_tfm,
>> +	.exit = ccp_rsa_exit_tfm,
>> +	.reqsize = sizeof(struct ccp_rsa_req_ctx),
>> +	.base = {
>> +		.cra_name = "rsa",
>> +		.cra_driver_name = "rsa-ccp",
>> +		.cra_priority = 100,
>
> Are you sure you want to leave it at 100? With this value, it will contend
> with the C implementation.

No, I don't. Our other functions are at 300 (CCP_CRA_PRIORITY), which is
what this should be.

>
>> +		.cra_module = THIS_MODULE,
>> +		.cra_ctxsize = sizeof(struct ccp_ctx),
>> +	},
>> +};
>> +
>> ...snip...
>>
>
>
>
> Ciao
> Stephan
>

Thank you. I hope snipping is acceptable...

-- 
This is my day job. Follow me at:
IG/Twitter/Facebook: @grhookphoto
IG/Twitter/Facebook: @grhphotographer

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP
  2016-10-13 20:08     ` Gary R Hook
@ 2016-10-13 20:14       ` Stephan Mueller
  0 siblings, 0 replies; 16+ messages in thread
From: Stephan Mueller @ 2016-10-13 20:14 UTC (permalink / raw)
  To: Gary R Hook; +Cc: Gary R Hook, linux-crypto, thomas.lendacky, herbert, davem

On Thursday, 13 October 2016, 15:08:41 CEST, Gary R Hook wrote:

Hi Gary,

> On 10/13/2016 01:25 PM, Stephan Mueller wrote:
> > On Thursday, 13 October 2016, 09:53:09 CEST, Gary R Hook wrote:
> > 
> > Hi Gary,
> > 
> >> Wire up the v3 CCP as a cipher provider.
> >> 
> >> Signed-off-by: Gary R Hook <gary.hook@amd.com>
> >> ---
> >> 
> >> ...snip...
> >> 
> >> +}
> >> +
> >> +static void ccp_free_mpi_key(struct ccp_rsa_key *key)
> >> +{
> >> +	mpi_free(key->d);
> >> +	key->d = NULL;
> >> +	mpi_free(key->e);
> >> +	key->e = NULL;
> >> +	mpi_free(key->n);
> >> +	key->n = NULL;
> >> +}
> > 
> > Could you please see whether that function can be turned into a common
> > function call? crypto/rsa.c implements the same code in rsa_free_mpi_key.
> 
> I am happy to do so, but was unsure of protocol. rsa.c is in a module, which
> makes my module depend upon another. I do not want to do that. And moving
> this function elsewhere makes no sense.
> 
> I would go with an inline function, but there's no obvious place for it.
> The RSA software implementation uses the MPI library, but there's no
> requirement to do so (witness the qat driver). Thus, an inline function can't
> be put in internal/rsa.h without moving the rsa_mpi_key definition and
> referencing mpi.h.
> 
> I think that RSA+MPI things, such as rsa_mpi_key and this function, could go
> into internal/rsa.h, but it would be necessary to #include mpi.h.
> 
> Or: create a new include file that contains these (and any other) RSA/MPI
> amalgams.
> 
> Which would you prefer?

I would guess it should go to include/crypto/internal/rsa.h as an inline 
considering that the "internal" header files are for crypto provider code.
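
Something like this, as an untested sketch (it assumes the rsa_mpi_key
definition and a #include <linux/mpi.h> move into that header):

static inline void rsa_free_mpi_key(struct rsa_mpi_key *key)
{
	mpi_free(key->d);
	key->d = NULL;
	mpi_free(key->e);
	key->e = NULL;
	mpi_free(key->n);
	key->n = NULL;
}

Then both crypto/rsa.c and your driver could share it.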


Ciao
Stephan

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP
  2016-10-13 14:53 ` [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP Gary R Hook
  2016-10-13 18:25   ` Stephan Mueller
@ 2016-10-13 21:06   ` Tom Lendacky
  1 sibling, 0 replies; 16+ messages in thread
From: Tom Lendacky @ 2016-10-13 21:06 UTC (permalink / raw)
  To: Gary R Hook, linux-crypto; +Cc: herbert, davem

On 10/13/2016 09:53 AM, Gary R Hook wrote:
> Wire up the v3 CCP as a cipher provider.
> 
> Signed-off-by: Gary R Hook <gary.hook@amd.com>
> ---
>  drivers/crypto/ccp/Makefile          |    1 
>  drivers/crypto/ccp/ccp-crypto-main.c |   15 ++
>  drivers/crypto/ccp/ccp-crypto-rsa.c  |  258 ++++++++++++++++++++++++++++++++++
>  drivers/crypto/ccp/ccp-crypto.h      |   24 +++
>  drivers/crypto/ccp/ccp-dev-v3.c      |   38 +++++
>  drivers/crypto/ccp/ccp-ops.c         |    1 
>  include/linux/ccp.h                  |   34 ++++
>  7 files changed, 370 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-rsa.c
> 
> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> index 346ceb8..23f89b7 100644
> --- a/drivers/crypto/ccp/Makefile
> +++ b/drivers/crypto/ccp/Makefile
> @@ -12,4 +12,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
>  		   ccp-crypto-aes.o \
>  		   ccp-crypto-aes-cmac.o \
>  		   ccp-crypto-aes-xts.o \
> +		   ccp-crypto-rsa.o \
>  		   ccp-crypto-sha.o
> diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
> index e0380e5..f3c4c25 100644
> --- a/drivers/crypto/ccp/ccp-crypto-main.c
> +++ b/drivers/crypto/ccp/ccp-crypto-main.c
> @@ -33,6 +33,10 @@ static unsigned int sha_disable;
>  module_param(sha_disable, uint, 0444);
>  MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");
>  
> +static unsigned int rsa_disable;
> +module_param(rsa_disable, uint, 0444);
> +MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
> +
>  /* List heads for the supported algorithms */
>  static LIST_HEAD(hash_algs);
>  static LIST_HEAD(cipher_algs);
> @@ -343,6 +347,14 @@ static int ccp_register_algs(void)
>  			return ret;
>  	}
>  
> +	if (!rsa_disable) {
> +		ret = ccp_register_rsa_algs();
> +		if (ret) {
> +			rsa_disable = 1;
> +			return ret;
> +		}
> +	}
> +
>  	return 0;
>  }
>  
> @@ -362,6 +374,9 @@ static void ccp_unregister_algs(void)
>  		list_del(&ablk_alg->entry);
>  		kfree(ablk_alg);
>  	}
> +
> +	if (!rsa_disable)
> +		ccp_unregister_rsa_algs();
>  }
>  
>  static int ccp_crypto_init(void)
> diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
> new file mode 100644
> index 0000000..7dab43b
> --- /dev/null
> +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
> @@ -0,0 +1,258 @@
> +/*
> + * AMD Cryptographic Coprocessor (CCP) RSA crypto API support
> + *
> + * Copyright (C) 2016 Advanced Micro Devices, Inc.
> + *
> + * Author: Gary R Hook <gary.hook@amd.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include <linux/module.h>
> +#include <linux/sched.h>
> +#include <linux/mpi.h>
> +#include <linux/scatterlist.h>
> +#include <linux/crypto.h>
> +#include <crypto/algapi.h>
> +#include <crypto/internal/akcipher.h>
> +#include <crypto/akcipher.h>
> +#include <crypto/scatterwalk.h>
> +
> +#include "ccp-crypto.h"
> +
> +static inline struct akcipher_request *akcipher_request_cast(
> +	struct crypto_async_request *req)
> +{
> +	return container_of(req, struct akcipher_request, base);
> +}
> +
> +static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret)
> +{
> +	struct akcipher_request *req = akcipher_request_cast(async_req);
> +	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> +
> +	if (!ret)
> +		req->dst_len = rctx->cmd.u.rsa.d_len;
> +
> +	ret = 0;
> +
> +	return ret;
> +}
> +
> +static int ccp_rsa_maxsize(struct crypto_akcipher *tfm)
> +{
> +	return CCP_RSA_MAXMOD;
> +}
> +
> +static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt)
> +{
> +	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
> +	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> +	struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx(req);
> +	int ret = 0;
> +
> +	if (!ctx->u.rsa.pkey.d && !ctx->u.rsa.pkey.e)
> +		return -EINVAL;
> +
> +	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
> +	INIT_LIST_HEAD(&rctx->cmd.entry);
> +	rctx->cmd.engine = CCP_ENGINE_RSA;
> +	rctx->cmd.u.rsa.mode = encrypt ? CCP_RSA_ENCRYPT : CCP_RSA_DECRYPT;
> +
> +	rctx->cmd.u.rsa.pkey = ctx->u.rsa.pkey;
> +	rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len;

The existing interface expects the key_size to be in bits, so you'll
need to multiply this by 8.
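
I.e. something like (sketch):

	rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len << 3; /* bytes to bits */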

> +	rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
> +	rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
> +	rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg;
> +	rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len;
> +	if (ctx->u.rsa.pkey.d) {
> +		rctx->cmd.u.rsa.d_sg = &ctx->u.rsa.d_sg;
> +		rctx->cmd.u.rsa.d_len = ctx->u.rsa.d_len;
> +	}
> +
> +	rctx->cmd.u.rsa.src = req->src;
> +	rctx->cmd.u.rsa.src_len = req->src_len;
> +	rctx->cmd.u.rsa.dst = req->dst;
> +	rctx->cmd.u.rsa.dst_len = req->dst_len;

So rsa.pkey, rsa.d_sg and rsa.d_len have been added and are being set,
but the ccp-ops function hasn't been updated to use them (yet). Will
this be successful then?  Will pkey be needed in the request context?

If the only difference between encrypt and decrypt is what key to
use, then the rsa request context doesn't need to change at all. Just
set the appropriate key at this layer.
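
An untested sketch of what I mean, reusing the existing exp fields:

	if (encrypt) {
		rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg;
		rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len;
	} else {
		rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg;
		rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len;
	}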

> +
> +	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
> +
> +	return ret;
> +}
> +
> +static int ccp_rsa_encrypt(struct akcipher_request *req)
> +{
> +	return ccp_rsa_crypt(req, true);
> +}
> +
> +static int ccp_rsa_decrypt(struct akcipher_request *req)
> +{
> +	return ccp_rsa_crypt(req, false);
> +}
> +
> +static void ccp_free_mpi_key(struct ccp_rsa_key *key)
> +{
> +	mpi_free(key->d);
> +	key->d = NULL;
> +	mpi_free(key->e);
> +	key->e = NULL;
> +	mpi_free(key->n);
> +	key->n = NULL;
> +}
> +
> +static int ccp_check_key_length(unsigned int len)
> +{
> +	/* In bits */
> +	if (len < 8 || len > 16384)
> +		return -EINVAL;
> +	return 0;
> +}
> +
> +static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
> +{
> +	/* Clean up old key data */
> +	kfree(ctx->u.rsa.e_buf);
> +	ctx->u.rsa.e_buf = NULL;
> +	ctx->u.rsa.e_len = 0;
> +	kfree(ctx->u.rsa.n_buf);
> +	ctx->u.rsa.n_buf = NULL;
> +	ctx->u.rsa.n_len = 0;
> +	kfree(ctx->u.rsa.d_buf);
> +	ctx->u.rsa.d_buf = NULL;
> +	ctx->u.rsa.d_len = 0;
> +}
> +
> +static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
> +			  unsigned int keylen, bool public)
> +{
> +	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> +	struct rsa_key raw_key;
> +	unsigned int n_size;
> +	int ret;
> +
> +	if (!ctx)
> +		return -EINVAL;
> +
> +	ccp_rsa_free_key_bufs(ctx);
> +	memset(&raw_key, 0, sizeof(raw_key));
> +
> +	/* Code borrowed from crypto/rsa.c */
> +	if (public)
> +		ret = rsa_parse_pub_key(&raw_key, key, keylen);
> +	else
> +		ret = rsa_parse_priv_key(&raw_key, key, keylen);
> +	if (ret)
> +		goto e_ret;
> +
> +	ret = -EINVAL;
> +
> +	ctx->u.rsa.pkey.e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
> +	if (!ctx->u.rsa.pkey.e)
> +		goto e_ret;
> +	ctx->u.rsa.e_buf = mpi_get_buffer(ctx->u.rsa.pkey.e,
> +					  &ctx->u.rsa.e_len, NULL);
> +	if (!ctx->u.rsa.e_buf)
> +		goto e_key;
> +	sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len);
> +
> +

Extra blank line.

> +	ctx->u.rsa.pkey.n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
> +	n_size = mpi_get_size(ctx->u.rsa.pkey.n);
> +	if (ccp_check_key_length(n_size << 3))
> +		goto e_key;

Should this be goto e_nkey?

> +	ctx->u.rsa.key_len = n_size;
> +	ctx->u.rsa.n_buf = mpi_get_buffer(ctx->u.rsa.pkey.n,
> +					  &ctx->u.rsa.n_len, NULL);
> +	if (!ctx->u.rsa.n_buf)
> +		goto e_nkey;
> +	sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);
> +
> +	if (!public) {
> +		ctx->u.rsa.pkey.d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
> +		if (!ctx->u.rsa.pkey.d)
> +			goto e_nkey;

Should this be goto e_dkey?

> +		ctx->u.rsa.d_buf = mpi_get_buffer(ctx->u.rsa.pkey.d,
> +						  &ctx->u.rsa.d_len, NULL);
> +		if (!ctx->u.rsa.d_buf)
> +			goto e_dkey;
> +		sg_init_one(&ctx->u.rsa.d_sg, ctx->u.rsa.d_buf,
> +			    ctx->u.rsa.d_len);
> +	}
> +
> +	return 0;
> +
> +e_dkey:
> +	kfree(ctx->u.rsa.n_buf);
> +e_nkey:
> +	kfree(ctx->u.rsa.e_buf);
> +e_key:
> +	ccp_free_mpi_key(&ctx->u.rsa.pkey);
> +e_ret:
> +	return ret;
> +}
> +
> +static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
> +			      unsigned int keylen)
> +{
> +	return ccp_rsa_setkey(tfm, key, keylen, false);
> +}
> +
> +static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
> +			     unsigned int keylen)
> +{
> +	return ccp_rsa_setkey(tfm, key, keylen, true);
> +}
> +
> +static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
> +{
> +	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
> +
> +	ctx->complete = ccp_rsa_complete;
> +
> +	return 0;
> +}
> +
> +static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm)
> +{
> +	struct ccp_ctx *ctx = crypto_tfm_ctx(&tfm->base);
> +
> +	ccp_rsa_free_key_bufs(ctx);
> +}
> +
> +static struct akcipher_alg rsa = {
> +	.encrypt = ccp_rsa_encrypt,
> +	.decrypt = ccp_rsa_decrypt,
> +	.sign = NULL,
> +	.verify = NULL,
> +	.set_pub_key = ccp_rsa_setpubkey,
> +	.set_priv_key = ccp_rsa_setprivkey,
> +	.max_size = ccp_rsa_maxsize,
> +	.init = ccp_rsa_init_tfm,
> +	.exit = ccp_rsa_exit_tfm,
> +	.reqsize = sizeof(struct ccp_rsa_req_ctx),
> +	.base = {
> +		.cra_name = "rsa",
> +		.cra_driver_name = "rsa-ccp",
> +		.cra_priority = 100,
> +		.cra_module = THIS_MODULE,
> +		.cra_ctxsize = sizeof(struct ccp_ctx),
> +	},
> +};
> +
> +int ccp_register_rsa_algs(void)
> +{
> +	int ret;
> +
> +	/* Register the RSA algorithm in standard mode
> +	 * This works for CCP v3 and later
> +	 */
> +	ret = crypto_register_akcipher(&rsa);
> +	return ret;
> +}
> +
> +void ccp_unregister_rsa_algs(void)
> +{
> +	crypto_unregister_akcipher(&rsa);
> +}
> diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
> index ae442ac..4a1d206 100644
> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -22,6 +22,7 @@
>  #include <crypto/ctr.h>
>  #include <crypto/hash.h>
>  #include <crypto/sha.h>
> +#include <crypto/internal/rsa.h>
>  
>  #define CCP_CRA_PRIORITY	300
>  
> @@ -155,6 +156,26 @@ struct ccp_sha_ctx {
>  	struct crypto_shash *hmac_tfm;
>  };
>  
> +/***** RSA related defines *****/
> +
> +struct ccp_rsa_ctx {
> +	unsigned int key_len; /* in bytes */
> +	struct ccp_rsa_key pkey;
> +	struct scatterlist e_sg;
> +	u8 *e_buf;
> +	unsigned int e_len;
> +	struct scatterlist n_sg;
> +	u8 *n_buf;
> +	unsigned int n_len;
> +	struct scatterlist d_sg;
> +	u8 *d_buf;
> +	unsigned int d_len;
> +};
> +
> +struct ccp_rsa_req_ctx {
> +	struct ccp_cmd cmd;
> +};
> +

Is this block of RSA info dropped in the middle of the SHA
related info?

>  struct ccp_sha_req_ctx {
>  	enum ccp_sha_type type;
>  
> @@ -201,6 +222,7 @@ struct ccp_ctx {
>  
>  	union {
>  		struct ccp_aes_ctx aes;
> +		struct ccp_rsa_ctx rsa;
>  		struct ccp_sha_ctx sha;
>  	} u;
>  };
> @@ -214,5 +236,7 @@ int ccp_register_aes_algs(struct list_head *head);
>  int ccp_register_aes_cmac_algs(struct list_head *head);
>  int ccp_register_aes_xts_algs(struct list_head *head);
>  int ccp_register_sha_algs(struct list_head *head);
> +int ccp_register_rsa_algs(void);
> +void ccp_unregister_rsa_algs(void);
>  
>  #endif
> diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
> index 8d2dbac..75a0978 100644
> --- a/drivers/crypto/ccp/ccp-dev-v3.c
> +++ b/drivers/crypto/ccp/ccp-dev-v3.c
> @@ -20,6 +20,43 @@
>  
>  #include "ccp-dev.h"
>  
> +/* CCP version 3: Union to define the function field (cmd_reg1/dword0) */
> +union ccp_function {
> +	struct {
> +		u16 size:7;
> +		u16 encrypt:1;
> +		u16 mode:3;
> +		u16 type:2;
> +	} aes;
> +	struct {
> +		u16 size:7;
> +		u16 encrypt:1;
> +		u16 rsvd:5;
> +	} aes_xts;
> +	struct {
> +		u16 rsvd1:11;
> +		u16 type:2;
> +	} sha;
> +	struct {
> +		u16 size:13;
> +	} rsa;
> +	struct {
> +		u16 byteswap:2;
> +		u16 bitwise:3;
> +		u16 rsvd:8;
> +	} pt;
> +	struct  {
> +		u16 rsvd:13;
> +	} zlib;
> +	struct {
> +		u16 size:8;
> +		u16 mode:3;
> +		u16 rsvd1:1;
> +		u16 rsvd2:1;
> +	} ecc;
> +	u16 raw;
> +};
> +

This whole block should be removed.

>  static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
>  {
>  	int start;
> @@ -88,6 +125,7 @@ static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
>  	 * are actually available, but reading that register resets it
>  	 * and you could lose some error information.
>  	 */
> +

As should this.  No changes for ccp-dev-v3.c should be in this patch.

>  	cmd_q->free_slots--;
>  
>  	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 82cc637..826782d 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -17,6 +17,7 @@
>  #include <linux/interrupt.h>
>  #include <crypto/scatterwalk.h>
>  #include <linux/ccp.h>
> +#include <linux/delay.h>

What's this here for?

>  
>  #include "ccp-dev.h"
>  
> diff --git a/include/linux/ccp.h b/include/linux/ccp.h
> index 1a3e0b5..d634565 100644
> --- a/include/linux/ccp.h
> +++ b/include/linux/ccp.h
> @@ -19,7 +19,8 @@
>  #include <linux/list.h>
>  #include <crypto/aes.h>
>  #include <crypto/sha.h>
> -
> +#include <linux/mpi.h>
> +#include <crypto/internal/rsa.h>
>  
>  struct ccp_device;
>  struct ccp_cmd;
> @@ -293,6 +294,27 @@ struct ccp_sha_engine {
>  				 * final sha cmd */
>  };
>  
> +/**
> + * ccp_rsa_type - mode of RSA operation
> + *
> + * @CCP_RSA_MODE_STD: standard mode
> + */
> +enum ccp_rsa_mode {
> +	CCP_RSA_ENCRYPT = 0,
> +	CCP_RSA_DECRYPT,
> +	CCP_RSA__LAST,
> +};
> +
> +struct ccp_rsa_key {
> +	MPI e;
> +	MPI n;
> +	MPI d;
> +};
> +
> +#define	CCP_RSA_MAXMOD	(4 * 1024 / 8)
> +#define	CCP5_RSA_MAXMOD	(16 * 1024 / 8)
> +#define	CCP5_RSA_MINMOD	(512 / 8)
> +
>  /***** RSA engine *****/
>  /**
>   * struct ccp_rsa_engine - CCP RSA operation
> @@ -309,16 +331,26 @@ struct ccp_sha_engine {
>   *   - key_size, exp, exp_len, mod, mod_len, src, dst, src_len
>   */
>  struct ccp_rsa_engine {
> +	enum ccp_rsa_mode mode;
>  	u32 key_size;		/* In bits */
>  
> +	struct ccp_rsa_key pkey;
> +
> +/* e */
>  	struct scatterlist *exp;
>  	u32 exp_len;		/* In bytes */
>  
> +/* n */
>  	struct scatterlist *mod;
>  	u32 mod_len;		/* In bytes */
>  
> +/* d */
> +	struct scatterlist *d_sg;
> +	unsigned int d_len;
> +
>  	struct scatterlist *src, *dst;
>  	u32 src_len;		/* In bytes */
> +	u32 dst_len;		/* In bytes */
>  };
>  

As mentioned above, I think you don't need to make any changes to this
request context if you take care of things in the ccp crypto api layer
(except for maybe dst_len?).

Thanks,
Tom

>  /***** Passthru engine *****/
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 4/6] crypto: ccp - Add RSA support for a v5 ccp
  2016-10-13 14:53 ` [PATCH 4/6] crypto: ccp - Add RSA support for a v5 ccp Gary R Hook
@ 2016-10-13 21:23   ` Tom Lendacky
  0 siblings, 0 replies; 16+ messages in thread
From: Tom Lendacky @ 2016-10-13 21:23 UTC (permalink / raw)
  To: Gary R Hook, linux-crypto; +Cc: herbert, davem

On 10/13/2016 09:53 AM, Gary R Hook wrote:
> Take into account device implementation differences for
> RSA.
> 
> Signed-off-by: Gary R Hook <gary.hook@amd.com>
> ---
>  drivers/crypto/ccp/ccp-crypto-rsa.c |   14 +++--
>  drivers/crypto/ccp/ccp-crypto.h     |    3 -
>  drivers/crypto/ccp/ccp-dev.h        |    2 -
>  drivers/crypto/ccp/ccp-ops.c        |   97 +++++++++++++++++++++++------------
>  4 files changed, 73 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/crypto/ccp/ccp-crypto-rsa.c b/drivers/crypto/ccp/ccp-crypto-rsa.c
> index 7dab43b..94411de 100644
> --- a/drivers/crypto/ccp/ccp-crypto-rsa.c
> +++ b/drivers/crypto/ccp/ccp-crypto-rsa.c
> @@ -125,7 +125,7 @@ static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx)
>  }
>  
>  static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
> -			  unsigned int keylen, bool public)
> +			  unsigned int keylen, bool private)
>  {
>  	struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm);
>  	struct rsa_key raw_key;
> @@ -139,10 +139,10 @@ static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
>  	memset(&raw_key, 0, sizeof(raw_key));
>  
>  	/* Code borrowed from crypto/rsa.c */
> -	if (public)
> -		ret = rsa_parse_pub_key(&raw_key, key, keylen);
> -	else
> +	if (private)
>  		ret = rsa_parse_priv_key(&raw_key, key, keylen);
> +	else
> +		ret = rsa_parse_pub_key(&raw_key, key, keylen);
>  	if (ret)
>  		goto e_ret;
>  
> @@ -169,7 +169,7 @@ static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
>  		goto e_nkey;
>  	sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len);
>  
> -	if (!public) {
> +	if (private) {
>  		ctx->u.rsa.pkey.d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
>  		if (!ctx->u.rsa.pkey.d)
>  			goto e_nkey;
> @@ -196,13 +196,13 @@ e_ret:
>  static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
>  			      unsigned int keylen)
>  {
> -	return ccp_rsa_setkey(tfm, key, keylen, false);
> +	return ccp_rsa_setkey(tfm, key, keylen, true);
>  }
>  
>  static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
>  			     unsigned int keylen)
>  {
> -	return ccp_rsa_setkey(tfm, key, keylen, true);
> +	return ccp_rsa_setkey(tfm, key, keylen, false);
>  }
>  
>  static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm)
> diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
> index 4a1d206..c6cf318 100644
> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -138,8 +138,7 @@ struct ccp_aes_cmac_exp_ctx {
>  	u8 buf[AES_BLOCK_SIZE];
>  };
>  
> -/*
> - * SHA-related defines
> +/* SHA-related defines

Shouldn't be part of this patch.

>   * These values must be large enough to accommodate any variant
>   */
>  #define MAX_SHA_CONTEXT_SIZE	SHA512_DIGEST_SIZE
> diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
> index 0d996fe..143f00f 100644
> --- a/drivers/crypto/ccp/ccp-dev.h
> +++ b/drivers/crypto/ccp/ccp-dev.h
> @@ -193,6 +193,7 @@
>  #define CCP_SHA_SB_COUNT		1
>  
>  #define CCP_RSA_MAX_WIDTH		4096
> +#define CCP5_RSA_MAX_WIDTH		16384
>  
>  #define CCP_PASSTHRU_BLOCKSIZE		256
>  #define CCP_PASSTHRU_MASKSIZE		32
> @@ -515,7 +516,6 @@ struct ccp_op {
>  		struct ccp_passthru_op passthru;
>  		struct ccp_ecc_op ecc;
>  	} u;
> -	struct ccp_mem key;

This should probably be part of a cleanup patch.

>  };
>  
>  static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 826782d..07b8dfb 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -1283,49 +1283,72 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  	int i = 0;
>  	int ret = 0;
>  
> -	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
> -		return -EINVAL;
> +	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {
> +		if (rsa->key_size > CCP_RSA_MAX_WIDTH)
> +			return -EINVAL;
> +	} else {
> +		if (rsa->key_size > CCP5_RSA_MAX_WIDTH)
> +			return -EINVAL;
> +	}

Might be able to actually add the max supported key size to the
version data and simplify the check here.
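
Something like (sketch; "rsamax" would be a hypothetical new field in
struct ccp_vdata):

	if (rsa->key_size > cmd_q->ccp->vdata->rsamax)
		return -EINVAL;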

>  
>  	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
>  		return -EINVAL;
>  
> -	/* The RSA modulus must precede the message being acted upon, so
> -	 * it must be copied to a DMA area where the message and the
> -	 * modulus can be concatenated.  Therefore the input buffer
> -	 * length required is twice the output buffer length (which
> -	 * must be a multiple of 256-bits).
> -	 */
> -	o_len = ((rsa->key_size + 255) / 256) * 32;
> -	i_len = o_len * 2;
> -
> -	sb_count = o_len / CCP_SB_BYTES;
> -
>  	memset(&op, 0, sizeof(op));
>  	op.cmd_q = cmd_q;
> -	op.jobid = ccp_gen_jobid(cmd_q->ccp);
> -	op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count);
> +	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
>  
> -	if (!op.sb_key)
> -		return -EIO;
> +	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {
> +		/* The RSA modulus must precede the message being acted upon, so
> +		 * it must be copied to a DMA area where the message and the
> +		 * modulus can be concatenated.  Therefore the input buffer
> +		 * length required is twice the output buffer length (which
> +		 * must be a multiple of 256-bits).
> +		 */
> +		sb_count = (rsa->key_size + CCP_SB_BYTES - 1) / CCP_SB_BYTES;
> +		o_len = sb_count * 32; /* bytes */
> +		i_len = o_len * 2; /* bytes */
> +
> +		op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q,
> +								sb_count);
> +		if (!op.sb_key)
> +			return -EIO;
> +	} else {
> +		/* A version 5 device allows the key to be in memory */
> +		o_len = rsa->mod_len;
> +		i_len = o_len * 2; /* bytes */
> +		op.sb_key = cmd_q->sb_key;
> +	}
>  
> -	/* The RSA exponent may span multiple (32-byte) SB entries and must
> -	 * be in little endian format. Reverse copy each 32-byte chunk
> -	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
> -	 * and each byte within that chunk and do not perform any byte swap
> -	 * operations on the passthru operation.
> -	 */
>  	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
>  	if (ret)
>  		goto e_sb;
>  
> -	ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len);
> +	if (rsa->mode == CCP_RSA_ENCRYPT)
> +		ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0,
> +					      rsa->exp_len);
> +	else
> +		ret = ccp_reverse_set_dm_area(&exp, 0, rsa->d_sg, 0,
> +					      rsa->d_len);

This goes with the comment in the previous patch where you just need to
pass in one of these - the one to be used in the operation.

>  	if (ret)
>  		goto e_exp;
> -	ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
> -			     CCP_PASSTHRU_BYTESWAP_NOOP);
> -	if (ret) {
> -		cmd->engine_error = cmd_q->cmd_error;
> -		goto e_exp;
> +
> +	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0)) {
> +		/* The RSA exponent may span multiple (32-byte) KSB entries and
> +		 * must be in little endian format. Reverse copy each 32-byte
> +		 * chunk of the exponent (En chunk to E0 chunk, E(n-1) chunk to
> +		 * E1 chunk) and each byte within that chunk and do not perform
> +		 * any byte swap operations on the passthru operation.
> +		 */
> +		ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key,
> +				     CCP_PASSTHRU_BYTESWAP_NOOP);
> +		if (ret) {
> +			cmd->engine_error = cmd_q->cmd_error;
> +			goto e_exp;
> +		}
> +	} else {
> +		op.exp.u.dma.address = exp.dma.address;
> +		op.exp.u.dma.offset = 0;
>  	}
>  
>  	/* Concatenate the modulus and the message. Both the modulus and
> @@ -1345,7 +1368,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  	src.address -= o_len;	/* Reset the address to original value */
>  
>  	/* Prepare the output area for the operation */
> -	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
> +	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->dst_len,
>  			    o_len, DMA_FROM_DEVICE);
>  	if (ret)
>  		goto e_src;
> @@ -1358,7 +1381,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  	op.dst.u.dma.offset = 0;
>  	op.dst.u.dma.length = o_len;
>  
> -	op.u.rsa.mod_size = rsa->key_size;
> +	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0))
> +		op.u.rsa.mod_size = rsa->key_size * 8; /* In bits */
> +	else
> +		op.u.rsa.mod_size = rsa->key_size;
>  	op.u.rsa.input_len = i_len;
>  
>  	ret = cmd_q->ccp->vdata->perform->rsa(&op);
> @@ -1366,8 +1392,12 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  		cmd->engine_error = cmd_q->cmd_error;
>  		goto e_dst;
>  	}
> +	/* Return the length of the result, too */
> +	for (i = o_len; !dst.dm_wa.address[--i]; )
> +		;
> +	rsa->d_len = i + 1;

The output length will always be o_len in size.  If the crypto api
requires the removal of leading zeroes you should do that at the
ccp crypto api layer.
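
If that trim is needed, an untested sketch (buf and len being
hypothetical locals holding a linear copy of the result at that layer):

	/* Strip leading zero bytes, keeping at least one byte */
	while (len > 1 && !buf[0]) {
		buf++;
		len--;
	}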

Thanks,
Tom

>  
> -	ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->mod_len);
> +	ccp_reverse_get_dm_area(&dst.dm_wa, 0, rsa->dst, 0, rsa->d_len);
>  
>  e_dst:
>  	ccp_free_data(&dst, cmd_q);
> @@ -1379,7 +1409,8 @@ e_exp:
>  	ccp_dm_free(&exp);
>  
>  e_sb:
> -	cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
> +	if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0))
> +		cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count);
>  
>  	return ret;
>  }
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 5/6] crypto: ccp - Enable support for AES GCM on v5 CCPs
  2016-10-13 14:53 ` [PATCH 5/6] crypto: ccp - Enable support for AES GCM on v5 CCPs Gary R Hook
@ 2016-10-13 21:54   ` Tom Lendacky
  0 siblings, 0 replies; 16+ messages in thread
From: Tom Lendacky @ 2016-10-13 21:54 UTC (permalink / raw)
  To: Gary R Hook, linux-crypto; +Cc: herbert, davem

On 10/13/2016 09:53 AM, Gary R Hook wrote:
> A version 5 device provides the primitive commands
> required for AES GCM. This patch adds support for
> en/decryption.
> 
> Signed-off-by: Gary R Hook <gary.hook@amd.com>
> ---
>  drivers/crypto/ccp/Makefile                |    1 
>  drivers/crypto/ccp/ccp-crypto-aes-galois.c |  252 +++++++++++++++++++++++++++
>  drivers/crypto/ccp/ccp-crypto-main.c       |   12 +
>  drivers/crypto/ccp/ccp-crypto.h            |   14 +
>  drivers/crypto/ccp/ccp-dev-v5.c            |    2 
>  drivers/crypto/ccp/ccp-dev.h               |    1 
>  drivers/crypto/ccp/ccp-ops.c               |  262 ++++++++++++++++++++++++++++
>  include/linux/ccp.h                        |    9 +
>  8 files changed, 553 insertions(+)
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-aes-galois.c
> 
> diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
> index 23f89b7..fd77225 100644
> --- a/drivers/crypto/ccp/Makefile
> +++ b/drivers/crypto/ccp/Makefile
> @@ -13,4 +13,5 @@ ccp-crypto-objs := ccp-crypto-main.o \
>  		   ccp-crypto-aes-cmac.o \
>  		   ccp-crypto-aes-xts.o \
>  		   ccp-crypto-rsa.o \
> +		   ccp-crypto-aes-galois.o \
>  		   ccp-crypto-sha.o
> diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
> new file mode 100644
> index 0000000..5da324f
> --- /dev/null
> +++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
> @@ -0,0 +1,252 @@
> +/*
> + * AMD Cryptographic Coprocessor (CCP) AES crypto API support
> + *
> + * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
> + *
> + * Author: Tom Lendacky <thomas.lendacky@amd.com>

Maybe put your name here...

> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include <linux/module.h>
> +#include <linux/sched.h>
> +#include <linux/delay.h>
> +#include <linux/scatterlist.h>
> +#include <linux/crypto.h>
> +#include <crypto/internal/aead.h>
> +#include <crypto/algapi.h>
> +#include <crypto/aes.h>
> +#include <crypto/ctr.h>
> +#include <crypto/scatterwalk.h>
> +#include <linux/delay.h>
> +
> +#include "ccp-crypto.h"
> +
> +#define	AES_GCM_IVSIZE	12
> +
> +static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
> +{
> +	return ret;
> +}
> +
> +static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
> +			      unsigned int key_len)
> +{
> +	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
> +
> +	switch (key_len) {
> +	case AES_KEYSIZE_128:
> +		ctx->u.aes.type = CCP_AES_TYPE_128;
> +		break;
> +	case AES_KEYSIZE_192:
> +		ctx->u.aes.type = CCP_AES_TYPE_192;
> +		break;
> +	case AES_KEYSIZE_256:
> +		ctx->u.aes.type = CCP_AES_TYPE_256;
> +		break;
> +	default:
> +		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
> +		return -EINVAL;
> +	}
> +
> +	ctx->u.aes.mode = CCP_AES_MODE_GCM;
> +	ctx->u.aes.key_len = key_len;
> +
> +	memcpy(ctx->u.aes.key, key, key_len);
> +	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
> +
> +	return 0;
> +}
> +
> +static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
> +				   unsigned int authsize)
> +{
> +	return 0;
> +}
> +
> +static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
> +{
> +	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
> +	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
> +	struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
> +	struct scatterlist *iv_sg = NULL;
> +	unsigned int iv_len = 0;
> +	int i;
> +	int ret = 0;
> +
> +	if (!ctx->u.aes.key_len)
> +		return -EINVAL;
> +
> +	if (ctx->u.aes.mode != CCP_AES_MODE_GCM)
> +		return -EINVAL;
> +
> +	if (!req->iv)
> +		return -EINVAL;
> +
> +	/*
> +	 * 5 parts:
> +	 *   plaintext/ciphertext input
> +	 *   AAD
> +	 *   key
> +	 *   IV
> +	 *   Destination+tag buffer
> +	 */
> +
> +	/* Copy the IV and initialize a scatterlist */
> +	memset(rctx->iv, 0, AES_BLOCK_SIZE);
> +	memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
> +	for (i = 0; i < 3; i++)
> +		rctx->iv[i + AES_GCM_IVSIZE] = 0;

Is this needed if you did the memset to zero above?

> +	rctx->iv[AES_BLOCK_SIZE - 1] = 1;
> +	iv_sg = &rctx->iv_sg;
> +	iv_len = AES_BLOCK_SIZE;
> +	sg_init_one(iv_sg, rctx->iv, iv_len);
> +
> +	/* The AAD + plaintext are concatenated in the src buffer */
> +	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
> +	INIT_LIST_HEAD(&rctx->cmd.entry);
> +	rctx->cmd.engine = CCP_ENGINE_AES;
> +	rctx->cmd.u.aes.type = ctx->u.aes.type;
> +	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
> +	rctx->cmd.u.aes.action =
> +		(encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT;
> +	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
> +	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
> +	rctx->cmd.u.aes.iv = iv_sg;
> +	rctx->cmd.u.aes.iv_len = iv_len;
> +	rctx->cmd.u.aes.src = req->src;
> +	rctx->cmd.u.aes.src_len = req->cryptlen;
> +	rctx->cmd.u.aes.aad_len = req->assoclen;
> +
> +	/* The cipher text + the tag are in the dst buffer */
> +	rctx->cmd.u.aes.dst = req->dst;
> +
> +	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
> +
> +	return ret;
> +}
> +
> +static int ccp_aes_gcm_encrypt(struct aead_request *req)
> +{
> +	return ccp_aes_gcm_crypt(req, true);
> +}
> +
> +static int ccp_aes_gcm_decrypt(struct aead_request *req)
> +{
> +	return ccp_aes_gcm_crypt(req, false);
> +}
> +
> +static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
> +{
> +	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
> +
> +	ctx->complete = ccp_aes_gcm_complete;
> +	ctx->u.aes.key_len = 0;
> +
> +	crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
> +
> +	return 0;
> +}
> +
> +static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm)
> +{
> +}
> +
> +static struct aead_alg ccp_aes_gcm_defaults = {
> +	.setkey = ccp_aes_gcm_setkey,
> +	.setauthsize = ccp_aes_gcm_setauthsize,
> +	.encrypt = ccp_aes_gcm_encrypt,
> +	.decrypt = ccp_aes_gcm_decrypt,
> +	.init = ccp_aes_gcm_cra_init,
> +	.ivsize = AES_GCM_IVSIZE,
> +	.maxauthsize = AES_BLOCK_SIZE,
> +	.base = {
> +		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
> +				  CRYPTO_ALG_ASYNC |
> +				  CRYPTO_ALG_KERN_DRIVER_ONLY |
> +				  CRYPTO_ALG_NEED_FALLBACK,
> +		.cra_blocksize	= AES_BLOCK_SIZE,
> +		.cra_ctxsize	= sizeof(struct ccp_ctx),
> +		.cra_priority	= CCP_CRA_PRIORITY,
> +		.cra_type	= &crypto_ablkcipher_type,
> +		.cra_exit	= ccp_aes_gcm_cra_exit,
> +		.cra_module	= THIS_MODULE,
> +	},
> +};
> +
> +struct ccp_aes_aead_def {
> +	enum ccp_aes_mode mode;
> +	unsigned int version;
> +	const char *name;
> +	const char *driver_name;
> +	unsigned int blocksize;
> +	unsigned int ivsize;
> +	struct aead_alg *alg_defaults;
> +};
> +
> +static struct ccp_aes_aead_def aes_aead_algs[] = {
> +	{
> +		.mode		= CCP_AES_MODE_GHASH,
> +		.version	= CCP_VERSION(5, 0),
> +		.name		= "gcm(aes)",
> +		.driver_name	= "gcm-aes-ccp",
> +		.blocksize	= 1,
> +		.ivsize		= AES_BLOCK_SIZE,
> +		.alg_defaults	= &ccp_aes_gcm_defaults,
> +	},
> +};
> +
> +static int ccp_register_aes_aead(struct list_head *head,
> +				 const struct ccp_aes_aead_def *def)
> +{
> +	struct ccp_crypto_aead *ccp_aead;
> +	struct aead_alg *alg;
> +	int ret;
> +
> +	ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL);
> +	if (!ccp_aead)
> +		return -ENOMEM;
> +
> +	INIT_LIST_HEAD(&ccp_aead->entry);
> +
> +	ccp_aead->mode = def->mode;
> +
> +	/* Copy the defaults and override as necessary */
> +	alg = &ccp_aead->alg;
> +	*alg = *def->alg_defaults;
> +	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
> +	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
> +		 def->driver_name);
> +	alg->base.cra_blocksize = def->blocksize;
> +	alg->base.cra_ablkcipher.ivsize = def->ivsize;
> +
> +	ret = crypto_register_aead(alg);
> +	if (ret) {
> +		pr_err("%s ablkcipher algorithm registration error (%d)\n",
> +		       alg->base.cra_name, ret);
> +		kfree(ccp_aead);
> +		return ret;
> +	}
> +
> +	list_add(&ccp_aead->entry, head);
> +
> +	return 0;
> +}
> +
> +int ccp_register_aes_aeads(struct list_head *head)
> +{
> +	int i, ret;
> +	unsigned int ccpversion = ccp_version();
> +
> +	for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) {
> +		if (aes_aead_algs[i].version > ccpversion)
> +			continue;
> +		ret = ccp_register_aes_aead(head, &aes_aead_algs[i]);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	return 0;
> +}
> diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
> index f3c4c25..103a7b3 100644
> --- a/drivers/crypto/ccp/ccp-crypto-main.c
> +++ b/drivers/crypto/ccp/ccp-crypto-main.c
> @@ -40,6 +40,7 @@ MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
>  /* List heads for the supported algorithms */
>  static LIST_HEAD(hash_algs);
>  static LIST_HEAD(cipher_algs);
> +static LIST_HEAD(aead_algs);
>  
>  /* For any tfm, requests for that tfm must be returned on the order
>   * received.  With multiple queues available, the CCP can process more
> @@ -339,6 +340,10 @@ static int ccp_register_algs(void)
>  		ret = ccp_register_aes_xts_algs(&cipher_algs);
>  		if (ret)
>  			return ret;
> +
> +		ret = ccp_register_aes_aeads(&aead_algs);
> +		if (ret)
> +			return ret;
>  	}
>  
>  	if (!sha_disable) {
> @@ -362,6 +367,7 @@ static void ccp_unregister_algs(void)
>  {
>  	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
>  	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
> +	struct ccp_crypto_aead *aead_alg, *aead_tmp;
>  
>  	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
>  		crypto_unregister_ahash(&ahash_alg->alg);
> @@ -377,6 +383,12 @@ static void ccp_unregister_algs(void)
>  
>  	if (!rsa_disable)
>  		ccp_unregister_rsa_algs();
> +
> +	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
> +		crypto_unregister_aead(&aead_alg->alg);
> +		list_del(&aead_alg->entry);
> +		kfree(aead_alg);
> +	}
>  }
>  
>  static int ccp_crypto_init(void)
> diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
> index c6cf318..b2918f6 100644
> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -19,6 +19,8 @@
>  #include <linux/ccp.h>
>  #include <crypto/algapi.h>
>  #include <crypto/aes.h>
> +#include <crypto/internal/aead.h>
> +#include <crypto/aead.h>
>  #include <crypto/ctr.h>
>  #include <crypto/hash.h>
>  #include <crypto/sha.h>
> @@ -34,6 +36,14 @@ struct ccp_crypto_ablkcipher_alg {
>  	struct crypto_alg alg;
>  };
>  
> +struct ccp_crypto_aead {
> +	struct list_head entry;
> +
> +	u32 mode;
> +
> +	struct aead_alg alg;
> +};
> +
>  struct ccp_crypto_ahash_alg {
>  	struct list_head entry;
>  
> @@ -96,6 +106,9 @@ struct ccp_aes_req_ctx {
>  	struct scatterlist iv_sg;
>  	u8 iv[AES_BLOCK_SIZE];
>  
> +	struct scatterlist tag_sg;
> +	u8 tag[AES_BLOCK_SIZE];
> +
>  	/* Fields used for RFC3686 requests */
>  	u8 *rfc3686_info;
>  	u8 rfc3686_iv[AES_BLOCK_SIZE];
> @@ -234,6 +247,7 @@ struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
>  int ccp_register_aes_algs(struct list_head *head);
>  int ccp_register_aes_cmac_algs(struct list_head *head);
>  int ccp_register_aes_xts_algs(struct list_head *head);
> +int ccp_register_aes_aeads(struct list_head *head);
>  int ccp_register_sha_algs(struct list_head *head);
>  int ccp_register_rsa_algs(void);
>  void ccp_unregister_rsa_algs(void);
> diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
> index faf3cb3..dcae391 100644
> --- a/drivers/crypto/ccp/ccp-dev-v5.c
> +++ b/drivers/crypto/ccp/ccp-dev-v5.c
> @@ -279,6 +279,8 @@ static int ccp5_perform_aes(struct ccp_op *op)
>  	CCP_AES_TYPE(&function) = op->u.aes.type;
>  	if (op->u.aes.mode == CCP_AES_MODE_CFB)
>  		CCP_AES_SIZE(&function) = 0x7f;
> +	if ((op->u.aes.mode == CCP_AES_MODE_GCTR) && op->eom)
> +		CCP_AES_SIZE(&function) = op->u.aes.size;
>  
>  	CCP5_CMD_FUNCTION(&desc) = function.raw;
>  
> diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
> index 143f00f..a2214ac 100644
> --- a/drivers/crypto/ccp/ccp-dev.h
> +++ b/drivers/crypto/ccp/ccp-dev.h
> @@ -467,6 +467,7 @@ struct ccp_aes_op {
>  	enum ccp_aes_type type;
>  	enum ccp_aes_mode mode;
>  	enum ccp_aes_action action;
> +	unsigned int size;
>  };
>  
>  struct ccp_xts_aes_op {
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index 07b8dfb..de28867 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -601,6 +601,265 @@ e_key:
>  	return ret;
>  }
>  
> +static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
> +			       struct ccp_cmd *cmd)
> +{
> +	struct ccp_aes_engine *aes = &cmd->u.aes;
> +	struct ccp_dm_workarea key, ctx, final_wa, tag;
> +	struct ccp_data src, dst;
> +	struct ccp_data aad;
> +	struct ccp_op op;
> +
> +	unsigned long long *final;
> +	unsigned int dm_offset;
> +	unsigned int ilen;
> +	bool in_place = true; /* Default value */
> +	int ret;
> +
> +	struct scatterlist *p_inp, sg_inp[2];
> +	struct scatterlist *p_tag, sg_tag[2];
> +	struct scatterlist *p_outp, sg_outp[2];
> +	struct scatterlist *p_aad;
> +
> +	if (!aes->iv)
> +		return -EINVAL;
> +
> +	if (!((aes->key_len == AES_KEYSIZE_128) ||
> +		(aes->key_len == AES_KEYSIZE_192) ||
> +		(aes->key_len == AES_KEYSIZE_256)))
> +		return -EINVAL;
> +
> +	if (!aes->key) /* Gotta have a key SGL */
> +		return -EINVAL;
> +
> +	/* First, decompose the source buffer into AAD & PT,
> +	 * and the destination buffer into AAD, CT & tag, or
> +	 * the input into CT & tag.
> +	 * It is expected that the input and output SGs will
> +	 * be valid, even if the AAD and input lengths are 0.
> +	 */
> +	p_aad = aes->src;
> +	p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len);
> +	p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len);
> +	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
> +		ilen = aes->src_len;
> +		p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
> +	} else {
> +		/* Input length for decryption includes tag */
> +		ilen = aes->src_len - AES_BLOCK_SIZE;
> +		p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
> +	}
> +
> +	ret = -EIO;

No need to set ret here since it will be overwritten immediately below.

> +	memset(&op, 0, sizeof(op));
> +	op.cmd_q = cmd_q;
> +	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
> +	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
> +	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
> +	op.init = 1;
> +	op.u.aes.type = aes->type;
> +
> +	/* Copy the key to the LSB */
> +	ret = ccp_init_dm_workarea(&key, cmd_q,
> +				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
> +				   DMA_TO_DEVICE);
> +	if (ret)
> +		return ret;
> +
> +	dm_offset = CCP_SB_BYTES - aes->key_len;
> +	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
> +	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
> +			     CCP_PASSTHRU_BYTESWAP_256BIT);
> +	if (ret) {
> +		cmd->engine_error = cmd_q->cmd_error;
> +		goto e_key;
> +	}
> +
> +	/* Copy the context (IV) to the LSB.
> +	 * There is an assumption here that the IV is 96 bits in length, plus
> +	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
> +	 */
> +	ret = ccp_init_dm_workarea(&ctx, cmd_q,
> +				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
> +				   DMA_BIDIRECTIONAL);
> +	if (ret)
> +		goto e_key;
> +
> +	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
> +	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
> +
> +	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
> +			     CCP_PASSTHRU_BYTESWAP_256BIT);
> +	if (ret) {
> +		cmd->engine_error = cmd_q->cmd_error;
> +		goto e_ctx;
> +	}
> +
> +	op.init = 1;
> +	if (aes->aad_len > 0) {
> +		/* Step 1: Run a GHASH over the Additional Authenticated Data */
> +		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
> +				    AES_BLOCK_SIZE,
> +				    DMA_TO_DEVICE);
> +		if (ret)
> +			goto e_ctx;
> +
> +		op.u.aes.mode = CCP_AES_MODE_GHASH;
> +		op.u.aes.action = CCP_AES_GHASHAAD;
> +
> +		while (aad.sg_wa.bytes_left) {
> +			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
> +
> +			ret = cmd_q->ccp->vdata->perform->aes(&op);
> +			if (ret) {
> +				cmd->engine_error = cmd_q->cmd_error;
> +				goto e_aad;
> +			}
> +
> +			ccp_process_data(&aad, NULL, &op);
> +			op.init = 0;
> +		}
> +	}
> +
> +	op.u.aes.mode = CCP_AES_MODE_GCTR;
> +	if (aes->action == CCP_AES_ACTION_ENCRYPT)
> +		op.u.aes.action = CCP_AES_ACTION_ENCRYPT;
> +	else
> +		op.u.aes.action = CCP_AES_ACTION_DECRYPT;

Can't you just do op.u.aes.action = aes->action?

> +
> +	if (ilen > 0) {
> +		/* Step 2: Run a GCTR over the plaintext */
> +		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
> +
> +

Extra blank line.

> +		ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
> +				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
> +		if (ret)
> +			goto e_src;

I don't think you want this here since you do it again below if
!in_place.

Thanks,
Tom

> +
> +		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
> +				    AES_BLOCK_SIZE,
> +				    in_place ? DMA_BIDIRECTIONAL
> +					     : DMA_TO_DEVICE);
> +		if (ret)
> +			goto e_ctx;
> +
> +		if (in_place) {
> +			dst = src;
> +		} else {
> +			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
> +					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
> +			if (ret)
> +				goto e_src;
> +		}
> +
> +		op.soc = 0;
> +		op.eom = 0;
> +		op.init = 1;
> +		while (src.sg_wa.bytes_left) {
> +			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
> +			if (!src.sg_wa.bytes_left) {
> +				unsigned int nbytes = aes->src_len
> +						      % AES_BLOCK_SIZE;
> +
> +				if (nbytes) {
> +					op.eom = 1;
> +					op.u.aes.size = (nbytes * 8) - 1;
> +				}
> +			}
> +
> +			ret = cmd_q->ccp->vdata->perform->aes(&op);
> +			if (ret) {
> +				cmd->engine_error = cmd_q->cmd_error;
> +				goto e_dst;
> +			}
> +
> +			ccp_process_data(&src, &dst, &op);
> +			op.init = 0;
> +		}
> +	}
> +
> +	/* Step 3: Update the IV portion of the context with the original IV */
> +	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
> +			       CCP_PASSTHRU_BYTESWAP_256BIT);
> +	if (ret) {
> +		cmd->engine_error = cmd_q->cmd_error;
> +		goto e_dst;
> +	}
> +
> +	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
> +
> +	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
> +			     CCP_PASSTHRU_BYTESWAP_256BIT);
> +	if (ret) {
> +		cmd->engine_error = cmd_q->cmd_error;
> +		goto e_dst;
> +	}
> +
> +	/* Step 4: Concatenate the lengths of the AAD and source, and
> +	 * hash that 16 byte buffer.
> +	 */
> +	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
> +				   DMA_BIDIRECTIONAL);
> +	if (ret)
> +		goto e_dst;
> +	final = (unsigned long long *) final_wa.address;
> +	final[0] = cpu_to_be64(aes->aad_len * 8);
> +	final[1] = cpu_to_be64(ilen * 8);
> +
> +	op.u.aes.mode = CCP_AES_MODE_GHASH;
> +	op.u.aes.action = CCP_AES_GHASHFINAL;
> +	op.src.type = CCP_MEMTYPE_SYSTEM;
> +	op.src.u.dma.address = final_wa.dma.address;
> +	op.src.u.dma.length = AES_BLOCK_SIZE;
> +	op.dst.type = CCP_MEMTYPE_SYSTEM;
> +	op.dst.u.dma.address = final_wa.dma.address;
> +	op.dst.u.dma.length = AES_BLOCK_SIZE;
> +	op.eom = 1;
> +	op.u.aes.size = 0;
> +	ret = cmd_q->ccp->vdata->perform->aes(&op);
> +	if (ret)
> +		goto e_dst;
> +
> +	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
> +		/* Put the ciphered tag after the ciphertext. */
> +		ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
> +	} else {
> +		/* Does this ciphered tag match the input? */
> +		ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
> +					   DMA_BIDIRECTIONAL);
> +		if (ret)
> +			goto e_tag;
> +		ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
> +
> +		ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
> +		ccp_dm_free(&tag);
> +	}
> +
> +e_tag:
> +	ccp_dm_free(&final_wa);
> +
> +e_dst:
> +	if (aes->src_len && !in_place)
> +		ccp_free_data(&dst, cmd_q);
> +
> +e_src:
> +	if (aes->src_len)
> +		ccp_free_data(&src, cmd_q);
> +
> +e_aad:
> +	if (aes->aad_len)
> +		ccp_free_data(&aad, cmd_q);
> +
> +e_ctx:
> +	ccp_dm_free(&ctx);
> +
> +e_key:
> +	ccp_dm_free(&key);
> +
> +	return ret;
> +}
> +
>  static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  {
>  	struct ccp_aes_engine *aes = &cmd->u.aes;
> @@ -614,6 +873,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  	if (aes->mode == CCP_AES_MODE_CMAC)
>  		return ccp_run_aes_cmac_cmd(cmd_q, cmd);
>  
> +	if (aes->mode == CCP_AES_MODE_GCM)
> +		return ccp_run_aes_gcm_cmd(cmd_q, cmd);
> +
>  	if (!((aes->key_len == AES_KEYSIZE_128) ||
>  	      (aes->key_len == AES_KEYSIZE_192) ||
>  	      (aes->key_len == AES_KEYSIZE_256)))
> diff --git a/include/linux/ccp.h b/include/linux/ccp.h
> index d634565..f90f8ba 100644
> --- a/include/linux/ccp.h
> +++ b/include/linux/ccp.h
> @@ -124,6 +124,10 @@ enum ccp_aes_mode {
>  	CCP_AES_MODE_CFB,
>  	CCP_AES_MODE_CTR,
>  	CCP_AES_MODE_CMAC,
> +	CCP_AES_MODE_GHASH,
> +	CCP_AES_MODE_GCTR,
> +	CCP_AES_MODE_GCM,
> +	CCP_AES_MODE_GMAC,
>  	CCP_AES_MODE__LAST,
>  };
>  
> @@ -138,6 +142,9 @@ enum ccp_aes_action {
>  	CCP_AES_ACTION_ENCRYPT,
>  	CCP_AES_ACTION__LAST,
>  };
> +/* Overloaded field */
> +#define	CCP_AES_GHASHAAD	CCP_AES_ACTION_DECRYPT
> +#define	CCP_AES_GHASHFINAL	CCP_AES_ACTION_ENCRYPT
>  
>  /**
>   * struct ccp_aes_engine - CCP AES operation
> @@ -182,6 +189,8 @@ struct ccp_aes_engine {
>  	struct scatterlist *cmac_key;	/* K1/K2 cmac key required for
>  					 * final cmac cmd */
>  	u32 cmac_key_len;	/* In bytes */
> +
> +	u32 aad_len;		/* In bytes */
>  };
>  
>  /***** XTS-AES engine *****/
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH 6/6] crypto: ccp - Enable 3DES function on v5 CCPs
  2016-10-13 14:53 ` [PATCH 6/6] crypto: ccp - Enable 3DES function " Gary R Hook
@ 2016-10-13 22:13   ` Tom Lendacky
  0 siblings, 0 replies; 16+ messages in thread
From: Tom Lendacky @ 2016-10-13 22:13 UTC (permalink / raw)
  To: Gary R Hook, linux-crypto; +Cc: herbert, davem

On 10/13/2016 09:53 AM, Gary R Hook wrote:
> Wire up support for Triple DES in ECB mode.
> 
> Signed-off-by: Gary R Hook <gary.hook@amd.com>
> ---
>  drivers/crypto/ccp/Makefile          |    1 
>  drivers/crypto/ccp/ccp-crypto-des3.c |  254 ++++++++++++++++++++++++++++++++++
>  drivers/crypto/ccp/ccp-crypto-main.c |   10 +
>  drivers/crypto/ccp/ccp-crypto.h      |   25 +++
>  drivers/crypto/ccp/ccp-dev-v3.c      |    1 
>  drivers/crypto/ccp/ccp-dev-v5.c      |   65 ++++++++-
>  drivers/crypto/ccp/ccp-dev.h         |   18 ++
>  drivers/crypto/ccp/ccp-ops.c         |  201 +++++++++++++++++++++++++++
>  drivers/crypto/ccp/ccp-pci.c         |    2 
>  include/linux/ccp.h                  |   57 +++++++-
>  10 files changed, 624 insertions(+), 10 deletions(-)
>  create mode 100644 drivers/crypto/ccp/ccp-crypto-des3.c
> 

... <SNIP> ...

> --- a/drivers/crypto/ccp/ccp-crypto.h
> +++ b/drivers/crypto/ccp/ccp-crypto.h
> @@ -26,6 +26,8 @@
>  #include <crypto/sha.h>
>  #include <crypto/internal/rsa.h>
>  
> +#define	CCP_LOG_LEVEL	KERN_INFO
> +

Not used anywhere that I can tell.

>  #define CCP_CRA_PRIORITY	300
>  
>  struct ccp_crypto_ablkcipher_alg {
> @@ -151,7 +153,26 @@ struct ccp_aes_cmac_exp_ctx {
>  	u8 buf[AES_BLOCK_SIZE];
>  };
>  
> -/* SHA-related defines
> +/***** 3DES related defines *****/
> +struct ccp_des3_ctx {
> +	enum ccp_engine engine;
> +	enum ccp_des3_type type;
> +	enum ccp_des3_mode mode;
> +
> +	struct scatterlist key_sg;
> +	unsigned int key_len;
> +	u8 key[AES_MAX_KEY_SIZE];
> +};
> +
> +struct ccp_des3_req_ctx {
> +	struct scatterlist iv_sg;
> +	u8 iv[AES_BLOCK_SIZE];
> +
> +	struct ccp_cmd cmd;
> +};
> +
> +/*
> + * SHA-related defines
>   * These values must be large enough to accommodate any variant
>   */
>  #define MAX_SHA_CONTEXT_SIZE	SHA512_DIGEST_SIZE
> @@ -236,6 +257,7 @@ struct ccp_ctx {
>  		struct ccp_aes_ctx aes;
>  		struct ccp_rsa_ctx rsa;
>  		struct ccp_sha_ctx sha;
> +		struct ccp_des3_ctx des3;
>  	} u;
>  };
>  
> @@ -251,5 +273,6 @@ int ccp_register_aes_aeads(struct list_head *head);
>  int ccp_register_sha_algs(struct list_head *head);
>  int ccp_register_rsa_algs(void);
>  void ccp_unregister_rsa_algs(void);
> +int ccp_register_des3_algs(struct list_head *head);
>  
>  #endif
> diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
> index 75a0978..fccca16 100644
> --- a/drivers/crypto/ccp/ccp-dev-v3.c
> +++ b/drivers/crypto/ccp/ccp-dev-v3.c
> @@ -595,6 +595,7 @@ static irqreturn_t ccp_irq_handler(int irq, void *data)
>  static const struct ccp_actions ccp3_actions = {
>  	.aes = ccp_perform_aes,
>  	.xts_aes = ccp_perform_xts_aes,
> +	.des3 = NULL,
>  	.sha = ccp_perform_sha,
>  	.rsa = ccp_perform_rsa,
>  	.passthru = ccp_perform_passthru,
> diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
> index dcae391..85387dc 100644
> --- a/drivers/crypto/ccp/ccp-dev-v5.c
> +++ b/drivers/crypto/ccp/ccp-dev-v5.c
> @@ -101,6 +101,12 @@ union ccp_function {
>  		u16 type:2;
>  	} aes_xts;
>  	struct {
> +		u16 size:7;
> +		u16 encrypt:1;
> +		u16 mode:5;
> +		u16 type:2;
> +	} des3;
> +	struct {
>  		u16 rsvd1:10;
>  		u16 type:4;
>  		u16 rsvd2:1;
> @@ -132,6 +138,10 @@ union ccp_function {
>  #define	CCP_AES_TYPE(p)		((p)->aes.type)
>  #define	CCP_XTS_SIZE(p)		((p)->aes_xts.size)
>  #define	CCP_XTS_ENCRYPT(p)	((p)->aes_xts.encrypt)
> +#define	CCP_DES3_SIZE(p)	((p)->des3.size)
> +#define	CCP_DES3_ENCRYPT(p)	((p)->des3.encrypt)
> +#define	CCP_DES3_MODE(p)	((p)->des3.mode)
> +#define	CCP_DES3_TYPE(p)	((p)->des3.type)
>  #define	CCP_SHA_TYPE(p)		((p)->sha.type)
>  #define	CCP_RSA_SIZE(p)		((p)->rsa.size)
>  #define	CCP_PT_BYTESWAP(p)	((p)->pt.byteswap)
> @@ -242,13 +252,16 @@ static int ccp5_do_cmd(struct ccp5_desc *desc,
>  		/* Wait for the job to complete */
>  		ret = wait_event_interruptible(cmd_q->int_queue,
>  					       cmd_q->int_rcvd);
> -		if (ret || cmd_q->cmd_error) {
> +		if (cmd_q->cmd_error) {
> +			/*
> +			 * Log the error and flush the queue by
> +			 * moving the head pointer
> +			 */

I don't think you wanted to remove the check for ret in the if
statement above.
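
Something like this (untested sketch, reusing the tail/reg_head_lo
names from your patch) would keep the interrupt handling and still
flush the queue on a device error:

		if (ret || cmd_q->cmd_error) {
			if (cmd_q->cmd_error)
				ccp_log_error(cmd_q->ccp,
					      cmd_q->cmd_error);
			/* Flush the queue by moving the head pointer */
			iowrite32(tail, cmd_q->reg_head_lo);
			if (!ret)
				ret = -EIO;
		}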

>  			if (cmd_q->cmd_error)
>  				ccp_log_error(cmd_q->ccp,
>  					      cmd_q->cmd_error);
> -			/* A version 5 device doesn't use Job IDs... */
> -			if (!ret)
> -				ret = -EIO;
> +			iowrite32(tail, cmd_q->reg_head_lo);
> +			ret = -EIO;
>  		}

Hmmm... I think this block needs to be looked at some more.

>  		cmd_q->int_rcvd = 0;
>  	}
> @@ -381,6 +394,47 @@ static int ccp5_perform_sha(struct ccp_op *op)
>  	return ccp5_do_cmd(&desc, op->cmd_q);
>  }
>  
> +static int ccp5_perform_des3(struct ccp_op *op)
> +{
> +	struct ccp5_desc desc;
> +	union ccp_function function;
> +	u32 key_addr = op->sb_key * LSB_ITEM_SIZE;
> +
> +	/* Zero out all the fields of the command desc */
> +	memset(&desc, 0, sizeof(struct ccp5_desc));
> +
> +	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;
> +
> +	CCP5_CMD_SOC(&desc) = op->soc;
> +	CCP5_CMD_IOC(&desc) = 1;
> +	CCP5_CMD_INIT(&desc) = op->init;
> +	CCP5_CMD_EOM(&desc) = op->eom;
> +	CCP5_CMD_PROT(&desc) = 0;
> +
> +	function.raw = 0;
> +	CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
> +	CCP_DES3_MODE(&function) = op->u.des3.mode;
> +	CCP_DES3_TYPE(&function) = op->u.des3.type;
> +	CCP5_CMD_FUNCTION(&desc) = cpu_to_le32(function.raw);
> +
> +	CCP5_CMD_LEN(&desc) = cpu_to_le32(op->src.u.dma.length);
> +
> +	CCP5_CMD_SRC_LO(&desc) = cpu_to_le32(ccp_addr_lo(&op->src.u.dma));
> +	CCP5_CMD_SRC_HI(&desc) = cpu_to_le32(ccp_addr_hi(&op->src.u.dma));
> +	CCP5_CMD_SRC_MEM(&desc) = cpu_to_le32(CCP_MEMTYPE_SYSTEM);
> +
> +	CCP5_CMD_DST_LO(&desc) = cpu_to_le32(ccp_addr_lo(&op->dst.u.dma));
> +	CCP5_CMD_DST_HI(&desc) = cpu_to_le32(ccp_addr_hi(&op->dst.u.dma));
> +	CCP5_CMD_DST_MEM(&desc) = cpu_to_le32(CCP_MEMTYPE_SYSTEM);
> +
> +	CCP5_CMD_KEY_LO(&desc) = cpu_to_le32(lower_32_bits(key_addr));
> +	CCP5_CMD_KEY_HI(&desc) = 0;
> +	CCP5_CMD_KEY_MEM(&desc) = cpu_to_le32(CCP_MEMTYPE_SB);
> +	CCP5_CMD_LSB_ID(&desc) = cpu_to_le32(op->sb_ctx);
> +
> +	return ccp5_do_cmd(&desc, op->cmd_q);
> +}
> +
>  static int ccp5_perform_rsa(struct ccp_op *op)
>  {
>  	struct ccp5_desc desc;
> @@ -428,6 +482,7 @@ static int ccp5_perform_passthru(struct ccp_op *op)
>  	struct ccp_dma_info *saddr = &op->src.u.dma;
>  	struct ccp_dma_info *daddr = &op->dst.u.dma;
>  
> +

Extra blank line.

>  	memset(&desc, 0, Q_DESC_SIZE);
>  
>  	CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;
> @@ -722,6 +777,7 @@ static int ccp5_init(struct ccp_device *ccp)
>  
>  		dev_dbg(dev, "queue #%u available\n", i);
>  	}
> +

Not needed for this patch.

>  	if (ccp->cmd_q_count == 0) {
>  		dev_notice(dev, "no command queues available\n");
>  		ret = -EIO;
> @@ -991,6 +1047,7 @@ static const struct ccp_actions ccp5_actions = {
>  	.aes = ccp5_perform_aes,
>  	.xts_aes = ccp5_perform_xts_aes,
>  	.sha = ccp5_perform_sha,
> +	.des3 = ccp5_perform_des3,
>  	.rsa = ccp5_perform_rsa,
>  	.passthru = ccp5_perform_passthru,
>  	.ecc = ccp5_perform_ecc,
> diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
> index a2214ac..12a92d5 100644
> --- a/drivers/crypto/ccp/ccp-dev.h
> +++ b/drivers/crypto/ccp/ccp-dev.h
> @@ -27,6 +27,10 @@
>  #include <linux/irqreturn.h>
>  #include <linux/dmaengine.h>
>  
> +#ifndef CCP_LOG_LEVEL
> +#define	CCP_LOG_LEVEL	KERN_INFO
> +#endif
> +

Not used anywhere that I can tell.

>  #define MAX_CCP_NAME_LEN		16
>  #define MAX_DMAPOOL_NAME_LEN		32
>  
> @@ -190,6 +194,9 @@
>  #define CCP_XTS_AES_KEY_SB_COUNT	1
>  #define CCP_XTS_AES_CTX_SB_COUNT	1
>  
> +#define CCP_DES3_KEY_SB_COUNT		1
> +#define CCP_DES3_CTX_SB_COUNT		1
> +
>  #define CCP_SHA_SB_COUNT		1
>  
>  #define CCP_RSA_MAX_WIDTH		4096
> @@ -475,6 +482,12 @@ struct ccp_xts_aes_op {
>  	enum ccp_xts_aes_unit_size unit_size;
>  };
>  
> +struct ccp_des3_op {
> +	enum ccp_des3_type type;
> +	enum ccp_des3_mode mode;
> +	enum ccp_des3_action action;
> +};
> +
>  struct ccp_sha_op {
>  	enum ccp_sha_type type;
>  	u64 msg_bits;
> @@ -512,6 +525,7 @@ struct ccp_op {
>  	union {
>  		struct ccp_aes_op aes;
>  		struct ccp_xts_aes_op xts;
> +		struct ccp_des3_op des3;
>  		struct ccp_sha_op sha;
>  		struct ccp_rsa_op rsa;
>  		struct ccp_passthru_op passthru;
> @@ -620,13 +634,13 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp);
>  struct ccp_actions {
>  	int (*aes)(struct ccp_op *);
>  	int (*xts_aes)(struct ccp_op *);
> +	int (*des3)(struct ccp_op *);
>  	int (*sha)(struct ccp_op *);
>  	int (*rsa)(struct ccp_op *);
>  	int (*passthru)(struct ccp_op *);
>  	int (*ecc)(struct ccp_op *);
>  	u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int);
> -	void (*sbfree)(struct ccp_cmd_queue *, unsigned int,
> -			       unsigned int);
> +	void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int);
>  	unsigned int (*get_free_slots)(struct ccp_cmd_queue *);
>  	int (*init)(struct ccp_device *);
>  	void (*destroy)(struct ccp_device *);
> diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
> index de28867..f9543f7 100644
> --- a/drivers/crypto/ccp/ccp-ops.c
> +++ b/drivers/crypto/ccp/ccp-ops.c
> @@ -16,6 +16,7 @@
>  #include <linux/pci.h>
>  #include <linux/interrupt.h>
>  #include <crypto/scatterwalk.h>
> +#include <crypto/des.h>
>  #include <linux/ccp.h>
>  #include <linux/delay.h>
>  
> @@ -882,8 +883,7 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  		return -EINVAL;
>  
>  	if (((aes->mode == CCP_AES_MODE_ECB) ||
> -	     (aes->mode == CCP_AES_MODE_CBC) ||
> -	     (aes->mode == CCP_AES_MODE_CFB)) &&
> +	     (aes->mode == CCP_AES_MODE_CBC)) &&

Why are you removing AES modes?
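
If dropping the CFB check wasn't intended, the original condition
would simply be restored, i.e.:

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;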

>  	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
>  		return -EINVAL;
>  
> @@ -1194,6 +1194,200 @@ e_key:
>  	return ret;
>  }
>  
> +static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
> +{
> +	struct ccp_des3_engine *des3 = &cmd->u.des3;
> +
> +	struct ccp_dm_workarea key, ctx;
> +	struct ccp_data src, dst;
> +	struct ccp_op op;
> +	unsigned int dm_offset;
> +	unsigned int len_singlekey;
> +	bool in_place = false;
> +	int ret;
> +
> +	/* Error checks */
> +	if (!cmd_q->ccp->vdata->perform->des3)
> +		return -EINVAL;
> +
> +	if (des3->key_len != DES3_EDE_KEY_SIZE)
> +		return -EINVAL;
> +
> +	if (((des3->mode == CCP_DES3_MODE_ECB) ||
> +		(des3->mode == CCP_DES3_MODE_CBC)) &&
> +		(des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))

These need to be lined up with the first if condition.
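
That is, continuation lines aligned under the opening parenthesis of
the first condition:

	if (((des3->mode == CCP_DES3_MODE_ECB) ||
	     (des3->mode == CCP_DES3_MODE_CBC)) &&
	    (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1)))
		return -EINVAL;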

> +		return -EINVAL;
> +
> +	if (!des3->key || !des3->src || !des3->dst)
> +		return -EINVAL;
> +
> +	if (des3->mode != CCP_DES3_MODE_ECB) {
> +		if (des3->iv_len != DES3_EDE_BLOCK_SIZE)
> +			return -EINVAL;
> +
> +		if (!des3->iv)
> +			return -EINVAL;
> +	}
> +
> +	ret = -EIO;

Not needed since it will be immediately overwritten in the operation
below.

> +	/* Zero out all the fields of the command desc */
> +	memset(&op, 0, sizeof(op));
> +
> +	/* Set up the Function field */
> +	op.cmd_q = cmd_q;
> +	op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
> +	op.sb_key = cmd_q->sb_key;
> +
> +	op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1;
> +	op.u.des3.type = des3->type;
> +	op.u.des3.mode = des3->mode;
> +	op.u.des3.action = des3->action;
> +
> +	/*
> +	 * All supported key sizes fit in a single (32-byte) KSB entry and
> +	 * (like AES) must be in little endian format. Use the 256-bit byte
> +	 * swap passthru option to convert from big endian to little endian.
> +	 */
> +	ret = ccp_init_dm_workarea(&key, cmd_q,
> +				   CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES,
> +				   DMA_TO_DEVICE);
> +	if (ret)
> +		return ret;
> +
> +	/*
> +	 * The contents of the key triplet are in the reverse order of what
> +	 * is required by the engine. Copy the 3 pieces individually to put
> +	 * them where they belong.
> +	 */
> +	dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
> +
> +	len_singlekey = des3->key_len / 3;
> +	ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
> +			des3->key, 0, len_singlekey);
> +	ccp_set_dm_area(&key, dm_offset + len_singlekey,
> +			des3->key, len_singlekey, len_singlekey);
> +	ccp_set_dm_area(&key, dm_offset,
> +			des3->key, 2 * len_singlekey, len_singlekey);
> +
> +	/* Copy the key to the SB */
> +	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
> +			     CCP_PASSTHRU_BYTESWAP_256BIT);
> +	if (ret) {
> +		cmd->engine_error = cmd_q->cmd_error;
> +		goto e_key;
> +	}
> +
> +	/*
> +	 * The DES3 context fits in a single (32-byte) KSB entry and
> +	 * must be in little endian format. Use the 256-bit byte swap
> +	 * passthru option to convert from big endian to little endian.
> +	 */
> +	if (des3->mode != CCP_DES3_MODE_ECB) {
> +		u32 load_mode;
> +
> +		op.sb_ctx = cmd_q->sb_ctx;
> +
> +		ret = ccp_init_dm_workarea(&ctx, cmd_q,
> +					   CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES,
> +					   DMA_BIDIRECTIONAL);
> +		if (ret)
> +			goto e_key;
> +
> +		/* Load the context into the LSB */
> +		dm_offset = CCP_SB_BYTES - des3->iv_len;
> +		ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
> +
> +		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
> +			load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
> +		else
> +			load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
> +		ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
> +				     load_mode);
> +		if (ret) {
> +			cmd->engine_error = cmd_q->cmd_error;
> +			goto e_ctx;
> +		}
> +	}
> +
> +	/*
> +	 * Prepare the input and output data workareas. For in-place
> +	 * operations we need to set the dma direction to BIDIRECTIONAL
> +	 * and copy the src workarea to the dst workarea.
> +	 */
> +	if (sg_virt(des3->src) == sg_virt(des3->dst))
> +		in_place = true;
> +
> +	ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len,
> +			DES3_EDE_BLOCK_SIZE,
> +			in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
> +	if (ret)
> +		goto e_ctx;
> +
> +	if (in_place)
> +		dst = src;
> +	else {
> +		ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len,
> +				DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE);
> +		if (ret)
> +			goto e_src;
> +	}
> +
> +	/* Send data to the CCP DES3 engine */
> +	while (src.sg_wa.bytes_left) {
> +		ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true);
> +		if (!src.sg_wa.bytes_left) {
> +			op.eom = 1;
> +
> +			/* Since we don't retrieve the context in ECB mode
> +			 * we have to wait for the operation to complete
> +			 * on the last piece of data
> +			 */
> +			op.soc = 0;
> +		}
> +
> +		ret = cmd_q->ccp->vdata->perform->des3(&op);
> +		if (ret) {
> +			cmd->engine_error = cmd_q->cmd_error;
> +			goto e_dst;
> +		}
> +
> +		ccp_process_data(&src, &dst, &op);
> +	}
> +
> +	if (des3->mode != CCP_DES3_MODE_ECB) {
> +		/* Retrieve the context and make BE */
> +		ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
> +				       CCP_PASSTHRU_BYTESWAP_256BIT);
> +		if (ret) {
> +			cmd->engine_error = cmd_q->cmd_error;
> +			goto e_dst;
> +		}
> +
> +		/* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
> +		if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))

V3 doesn't support des3 and has a perform_des3 of NULL so can never get
here.  Just use an offset of 0 in the ccp_get_dm_area() call.
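
For example (sketch, with the version check dropped entirely):

		ccp_get_dm_area(&ctx, 0, des3->iv, 0,
				DES3_EDE_BLOCK_SIZE);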

Thanks,
Tom

> +			dm_offset = CCP_SB_BYTES - des3->iv_len;
> +		else
> +			dm_offset = 0;
> +		ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
> +				DES3_EDE_BLOCK_SIZE);
> +	}
> +e_dst:
> +	if (!in_place)
> +		ccp_free_data(&dst, cmd_q);
> +
> +e_src:
> +	ccp_free_data(&src, cmd_q);
> +
> +e_ctx:
> +	if (des3->mode != CCP_DES3_MODE_ECB)
> +		ccp_dm_free(&ctx);
> +
> +e_key:
> +	ccp_dm_free(&key);
> +
> +	return ret;
> +}
> +
>  static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  {
>  	struct ccp_sha_engine *sha = &cmd->u.sha;
> @@ -2190,6 +2384,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
>  	case CCP_ENGINE_XTS_AES_128:
>  		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
>  		break;
> +	case CCP_ENGINE_DES3:
> +		ret = ccp_run_des3_cmd(cmd_q, cmd);
> +		break;
>  	case CCP_ENGINE_SHA:
>  		ret = ccp_run_sha_cmd(cmd_q, cmd);
>  		break;
> diff --git a/drivers/crypto/ccp/ccp-pci.c b/drivers/crypto/ccp/ccp-pci.c
> index 28a9996..e9bdf6f 100644
> --- a/drivers/crypto/ccp/ccp-pci.c
> +++ b/drivers/crypto/ccp/ccp-pci.c
> @@ -230,9 +230,11 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
>  
>  	dev_set_drvdata(dev, ccp);
>  
> +	/* Instance-specific required setup */
>  	if (ccp->vdata->setup)
>  		ccp->vdata->setup(ccp);
>  
> +	/* Initialize the CCP device */
>  	ret = ccp->vdata->perform->init(ccp);
>  	if (ret)
>  		goto e_iomap;
> diff --git a/include/linux/ccp.h b/include/linux/ccp.h
> index f90f8ba..e7acc37 100644
> --- a/include/linux/ccp.h
> +++ b/include/linux/ccp.h
> @@ -303,6 +303,60 @@ struct ccp_sha_engine {
>  				 * final sha cmd */
>  };
>  
> +/***** 3DES engine *****/
> +enum ccp_des3_mode {
> +	CCP_DES3_MODE_ECB = 0,
> +	CCP_DES3_MODE_CBC,
> +	CCP_DES3_MODE_CFB,
> +	CCP_DES3_MODE__LAST,
> +};
> +
> +enum ccp_des3_type {
> +	CCP_DES3_TYPE_168 = 1,
> +	CCP_DES3_TYPE__LAST,
> +};
> +
> +enum ccp_des3_action {
> +	CCP_DES3_ACTION_DECRYPT = 0,
> +	CCP_DES3_ACTION_ENCRYPT,
> +	CCP_DES3_ACTION__LAST,
> +};
> +
> +/**
> + * struct ccp_des3_engine - CCP 3DES operation
> + * @type: Type of 3DES operation
> + * @mode: cipher mode
> + * @action: 3DES operation (decrypt/encrypt)
> + * @key: key to be used for this 3DES operation
> + * @key_len: length of key (in bytes)
> + * @iv: IV to be used for this 3DES operation
> + * @iv_len: length in bytes of iv
> + * @src: input data to be used for this operation
> + * @src_len: length of input data used for this operation (in bytes)
> + * @dst: output data produced by this operation
> + *
> + * Variables required to be set when calling ccp_enqueue_cmd():
> + *   - type, mode, action, key, key_len, src, dst, src_len
> + *   - iv, iv_len for any mode other than ECB
> + *
> + * The iv variable is used as both input and output. On completion of the
> + * 3DES operation the new IV overwrites the old IV.
> + */
> +struct ccp_des3_engine {
> +	enum ccp_des3_type type;
> +	enum ccp_des3_mode mode;
> +	enum ccp_des3_action action;
> +
> +	struct scatterlist *key;
> +	u32 key_len;	    /* In bytes */
> +
> +	struct scatterlist *iv;
> +	u32 iv_len;	     /* In bytes */
> +
> +	struct scatterlist *src, *dst;
> +	u64 src_len;	    /* In bytes */
> +};
> +
>  /**
>   * ccp_rsa_type - mode of RSA operation
>   *
> @@ -583,7 +637,7 @@ struct ccp_ecc_engine {
>  enum ccp_engine {
>  	CCP_ENGINE_AES = 0,
>  	CCP_ENGINE_XTS_AES_128,
> -	CCP_ENGINE_RSVD1,
> +	CCP_ENGINE_DES3,
>  	CCP_ENGINE_SHA,
>  	CCP_ENGINE_RSA,
>  	CCP_ENGINE_PASSTHRU,
> @@ -631,6 +685,7 @@ struct ccp_cmd {
>  	union {
>  		struct ccp_aes_engine aes;
>  		struct ccp_xts_aes_engine xts;
> +		struct ccp_des3_engine des3;
>  		struct ccp_sha_engine sha;
>  		struct ccp_rsa_engine rsa;
>  		struct ccp_passthru_engine passthru;
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2016-10-14  3:35 UTC | newest]

Thread overview: 16+ messages
2016-10-13 14:52 [PATCH 0/6] Enable hashing and ciphers for v5 CCP Gary R Hook
2016-10-13 14:52 ` [PATCH 1/6] crypto: ccp - Add SHA-2 support Gary R Hook
2016-10-13 19:35   ` Tom Lendacky
2016-10-13 14:53 ` [PATCH 2/6] crypto: ccp - Remove unneeded sign-extension support Gary R Hook
2016-10-13 19:57   ` Tom Lendacky
2016-10-13 14:53 ` [PATCH 3/6] crypto: ccp - Add support for RSA on the CCP Gary R Hook
2016-10-13 18:25   ` Stephan Mueller
2016-10-13 20:08     ` Gary R Hook
2016-10-13 20:14       ` Stephan Mueller
2016-10-13 21:06   ` Tom Lendacky
2016-10-13 14:53 ` [PATCH 4/6] crypto: ccp - Add RSA support for a v5 ccp Gary R Hook
2016-10-13 21:23   ` Tom Lendacky
2016-10-13 14:53 ` [PATCH 5/6] crypto: ccp - Enable support for AES GCM on v5 CCPs Gary R Hook
2016-10-13 21:54   ` Tom Lendacky
2016-10-13 14:53 ` [PATCH 6/6] crypto: ccp - Enable 3DES function " Gary R Hook
2016-10-13 22:13   ` Tom Lendacky
