* [PATCH 1/4] crypto: caam - add support for ctr(aes)
@ 2014-10-31 10:45 Catalin Vasile
From: Catalin Vasile @ 2014-10-31 10:45 UTC
  To: linux-crypto; +Cc: herbert

Add support for AES operating in Counter Mode (CTR).
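
As a hedged illustration (not part of this patch), here is how a kernel
caller could reach the new transform through the 3.18-era ablkcipher
API. The function and variable names are made up for the example, and
the completion handling a real caller needs for the asynchronous CAAM
backend is elided:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int ctr_aes_example(const u8 *key, unsigned int keylen, u8 *iv,
			   struct scatterlist *src, struct scatterlist *dst,
			   unsigned int nbytes)
{
	/* "ctr(aes)" resolves to ctr-aes-caam when CAAM has priority */
	struct crypto_ablkcipher *tfm =
		crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
	struct ablkcipher_request *req;
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* CTR is a stream cipher, so nbytes need not be block-aligned */
	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
	ret = crypto_ablkcipher_encrypt(req);	/* -EINPROGRESS when queued */

	/* a real caller waits for async completion before freeing */
	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}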

Signed-off-by: Catalin Vasile <catalin.vasile@freescale.com>
---
 drivers/crypto/caam/caamalg.c     | 40 ++++++++++++++++++++++++++++++++++-----
 drivers/crypto/caam/desc_constr.h |  2 ++
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index c17154c..859ebe8 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1301,11 +1301,21 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	int ret = 0;
 	u32 *key_jump_cmd;
 	u32 *desc;
+	u32 ctx1_iv_off = 0;
+	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
 
 	memcpy(ctx->key, key, keylen);
 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
@@ -1331,8 +1341,8 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* Load iv */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | tfm->ivsize);
+	append_seq_load(desc, tfm->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
 
 	/* Load operation */
 	append_operation(desc, ctx->class1_alg_type |
@@ -1370,11 +1380,15 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* load IV */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | tfm->ivsize);
+	append_seq_load(desc, tfm->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
 
 	/* Choose operation */
-	append_dec_op1(desc, ctx->class1_alg_type);
+	if (ctr_mode)
+		append_operation(desc, ctx->class1_alg_type |
+				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, ctx->class1_alg_type);
 
 	/* Perform operation */
 	ablkcipher_append_src_dst(desc);
@@ -3076,6 +3090,22 @@ static struct caam_alg_template driver_algs[] = {
 			.ivsize = DES_BLOCK_SIZE,
 			},
 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "chainiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
 	}
 };
 
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 7eec20b..9f79fd7 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -192,6 +192,8 @@ static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
 	PRINT_POS; \
 	append_cmd(desc, CMD_##op | len | options); \
 }
+
+APPEND_CMD_LEN(seq_load, SEQ_LOAD)
 APPEND_CMD_LEN(seq_store, SEQ_STORE)
 APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
 APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
-- 
1.8.3.1


* [PATCH 2/4] crypto: caam - add support for rfc3686(ctr(aes))
@ 2014-10-31 10:45 Catalin Vasile
From: Catalin Vasile @ 2014-10-31 10:45 UTC
  To: linux-crypto; +Cc: herbert

Add support for Advanced Encryption Standard (AES) in Counter Mode
(CTR), as specified for IPsec in RFC3686.

The ablkcipher shared descriptors now save the context registers after
job execution. This allows the RFC3686-specific nonce to be loaded only
on the first execution of the shared job descriptor.
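
A hedged sketch of the resulting setkey convention (the helper below is
hypothetical; CTR_RFC3686_NONCE_SIZE and AES_MAX_KEY_SIZE come from
<crypto/ctr.h> and <crypto/aes.h>): the caller appends the 4-byte nonce
to the AES key, and the driver splits them again, as ablkcipher_setkey
does in the diff below:

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <linux/crypto.h>
#include <linux/string.h>

static int rfc3686_setkey_example(struct crypto_ablkcipher *tfm,
				  const u8 *aes_key, unsigned int aes_keylen,
				  const u8 *nonce)
{
	/* key material layout is {KEY, NONCE}, nonce last */
	u8 material[AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE];

	if (aes_keylen > AES_MAX_KEY_SIZE)
		return -EINVAL;

	memcpy(material, aes_key, aes_keylen);
	memcpy(material + aes_keylen, nonce, CTR_RFC3686_NONCE_SIZE);

	return crypto_ablkcipher_setkey(tfm, material,
					aes_keylen + CTR_RFC3686_NONCE_SIZE);
}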

Signed-off-by: Catalin Vasile <catalin.vasile@freescale.com>
---
 drivers/crypto/caam/caamalg.c | 83 ++++++++++++++++++++++++++++++++++++++++---
 drivers/crypto/caam/compat.h  |  1 +
 2 files changed, 79 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 859ebe8..2909efc 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1296,14 +1296,19 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 			     const u8 *key, unsigned int keylen)
 {
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
 	struct device *jrdev = ctx->jrdev;
 	int ret = 0;
 	u32 *key_jump_cmd;
 	u32 *desc;
+	u32 *nonce;
 	u32 ctx1_iv_off = 0;
 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = (ctr_mode &&
+				 (strstr(alg_name, "rfc3686") != NULL));
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -1317,6 +1322,16 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	if (ctr_mode)
 		ctx1_iv_off = 16;
 
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		keylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+
 	memcpy(ctx->key, key, keylen);
 	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
 				      DMA_TO_DEVICE);
@@ -1328,7 +1343,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 
 	/* ablkcipher_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
 	/* Skip if already shared */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 				   JUMP_COND_SHRD);
@@ -1338,12 +1353,32 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 			  ctx->enckeylen, CLASS_1 |
 			  KEY_DEST_CLASS_REG);
 
+	/* Load nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		nonce = (u32 *)(key + keylen);
+		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP |
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* Load iv */
-	append_seq_load(desc, tfm->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
 
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
+
 	/* Load operation */
 	append_operation(desc, ctx->class1_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
@@ -1367,7 +1402,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	/* ablkcipher_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
 
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
 	/* Skip if already shared */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 				   JUMP_COND_SHRD);
@@ -1377,12 +1412,32 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 			  ctx->enckeylen, CLASS_1 |
 			  KEY_DEST_CLASS_REG);
 
+	/* Load nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		nonce = (u32 *)(key + keylen);
+		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP |
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* load IV */
-	append_seq_load(desc, tfm->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
 			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
 
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
+
 	/* Choose operation */
 	if (ctr_mode)
 		append_operation(desc, ctx->class1_alg_type |
@@ -3106,6 +3161,24 @@ static struct caam_alg_template driver_algs[] = {
 			.ivsize = AES_BLOCK_SIZE,
 			},
 		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "rfc3686(ctr(aes))",
+		.driver_name = "rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "seqiv",
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
 	}
 };
 
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index f227922..acd7743 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -28,6 +28,7 @@
 #include <crypto/algapi.h>
 #include <crypto/null.h>
 #include <crypto/aes.h>
+#include <crypto/ctr.h>
 #include <crypto/des.h>
 #include <crypto/sha.h>
 #include <crypto/md5.h>
-- 
1.8.3.1


* [PATCH 3/4] crypto: caam - add support for rfc3686 with authenc md5, sha1 and sha2
@ 2014-10-31 10:45 Catalin Vasile
From: Catalin Vasile @ 2014-10-31 10:45 UTC
  To: linux-crypto; +Cc: herbert

Add support for AES in Counter Mode (CTR), compliant with RFC3686, used
together with the authenc algorithms (hmac with md5, sha1, sha224,
sha256, sha384 and sha512) as one-shot AEAD algorithms.
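
For reference, the 16-byte CONTEXT1[255:128] block these descriptors
assemble matches the RFC3686 counter block. A sketch of the layout (the
struct is illustrative only, not taken from the driver):

#include <linux/types.h>

/* RFC3686 counter block as built in CONTEXT1[255:128] */
struct ctr_rfc3686_block {
	__be32 nonce;	/* 4-byte per-key nonce, supplied via setkey */
	u8 iv[8];	/* 8-byte per-packet IV from the request */
	__be32 counter;	/* 32-bit block counter, initialized to 1 */
};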

Signed-off-by: Catalin Vasile <catalin.vasile@freescale.com>
---
 drivers/crypto/caam/caamalg.c | 262 +++++++++++++++++++++++++++++++++++++-----
 1 file changed, 236 insertions(+), 26 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2909efc..cdcc6cb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -60,6 +60,7 @@
 #define CAAM_CRA_PRIORITY		3000
 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
 #define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
+					 CTR_RFC3686_NONCE_SIZE + \
 					 SHA512_DIGEST_SIZE * 2)
 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 #define CAAM_MAX_IV_LENGTH		16
@@ -70,6 +71,9 @@
 #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
 #define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
 
+/* Note: Nonce is counted in enckeylen */
+#define DESC_AEAD_CTR_RFC3686_LEN	(6 * CAAM_CMD_SZ)
+
 #define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
 #define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
@@ -137,11 +141,13 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
 /*
  * For aead encrypt and decrypt, read iv for both classes
  */
-static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
 {
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | ivsize);
-	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+	append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+			LDST_SRCDST_BYTE_CONTEXT |
+			(ivoffset << LDST_OFFSET_SHIFT));
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+		    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
 }
 
 /*
@@ -187,35 +193,60 @@ struct caam_ctx {
 };
 
 static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
-			    int keys_fit_inline)
+			    int keys_fit_inline, bool is_rfc3686)
 {
+	u32 *nonce;
+	unsigned int enckeylen = ctx->enckeylen;
+
+	/*
+	 * RFC3686 specific:
+	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
+	 *	| enckeylen = encryption key size + nonce size
+	 */
+	if (is_rfc3686)
+		enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
 	if (keys_fit_inline) {
 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
 				  ctx->split_key_len, CLASS_2 |
 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
 		append_key_as_imm(desc, (void *)ctx->key +
-				  ctx->split_key_pad_len, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+				  ctx->split_key_pad_len, enckeylen,
+				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
 	} else {
 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
 		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
-			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	}
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686) {
+		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
+			       enckeylen);
+		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc,
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
 	}
 }
 
 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
-				  int keys_fit_inline)
+				  int keys_fit_inline, bool is_rfc3686)
 {
 	u32 *key_jump_cmd;
 
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
+	/* Note: Context registers are saved. */
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
 
 	/* Skip if already shared */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 				   JUMP_COND_SHRD);
 
-	append_key_aead(desc, ctx, keys_fit_inline);
+	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	set_jump_tgt_here(desc, key_jump_cmd);
 }
@@ -415,10 +446,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 {
 	struct aead_tfm *tfm = &aead->base.crt_aead;
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
+	const char *alg_name = crypto_tfm_alg_name(ctfm);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
+	bool keys_fit_inline;
 	u32 geniv, moveiv;
+	u32 ctx1_iv_off = 0;
 	u32 *desc;
+	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = (ctr_mode &&
+				 (strstr(alg_name, "rfc3686") != NULL));
 
 	if (!ctx->authsize)
 		return 0;
@@ -428,18 +466,36 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 		return aead_null_set_sh_desc(aead);
 
 	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686)
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+
+	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    ctx->split_key_pad_len + ctx->enckeylen +
+	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
 	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
 
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	/* Class 2 operation */
 	append_operation(desc, ctx->class2_alg_type |
@@ -457,7 +513,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	/* read assoc before reading payload */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
-	aead_append_ld_iv(desc, tfm->ivsize);
+	aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
 
 	/* Class 1 operation */
 	append_operation(desc, ctx->class1_alg_type |
@@ -491,14 +555,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 */
 	keys_fit_inline = false;
 	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    ctx->split_key_pad_len + ctx->enckeylen +
+	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
 	/* aead_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
 
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	/* Class 2 operation */
 	append_operation(desc, ctx->class2_alg_type |
@@ -515,9 +581,22 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
 
-	aead_append_ld_iv(desc, tfm->ivsize);
+	aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
 
-	append_dec_op1(desc, ctx->class1_alg_type);
+	/* Choose operation */
+	if (ctr_mode)
+		append_operation(desc, ctx->class1_alg_type |
+				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, ctx->class1_alg_type);
 
 	/* Read and write cryptlen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
@@ -547,14 +626,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 */
 	keys_fit_inline = false;
 	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    ctx->split_key_pad_len + ctx->enckeylen +
+	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
 	/* aead_givencrypt shared descriptor */
 	desc = ctx->sh_desc_givenc;
 
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	/* Generate IV */
 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
@@ -563,13 +644,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	append_move(desc, MOVE_SRC_INFIFO |
-		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+	append_move(desc, MOVE_WAITCOMP |
+		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (tfm->ivsize << MOVE_LEN_SHIFT));
 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
 
 	/* Copy IV to class 1 context */
-	append_move(desc, MOVE_SRC_CLASS1CTX |
-		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (tfm->ivsize << MOVE_LEN_SHIFT));
 
 	/* Return to encryption */
 	append_operation(desc, ctx->class2_alg_type |
@@ -585,7 +669,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
 
-	/* Copy iv from class 1 ctx to class 2 fifo*/
+	/* Copy iv from outfifo to class 2 fifo */
 	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
 		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
 	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
@@ -593,6 +677,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
 			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
 
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
+
 	/* Class 1 operation */
 	append_operation(desc, ctx->class1_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
@@ -3063,6 +3155,124 @@ static struct caam_alg_template driver_algs[] = {
 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 	},
 	{
+		.name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+	},
+	{
 		.name = "rfc4106(gcm(aes))",
 		.driver_name = "rfc4106-gcm-aes-caam",
 		.blocksize = 1,
-- 
1.8.3.1


* [PATCH 4/4] crypto: caam - add support for givencrypt cbc(aes) and rfc3686(ctr(aes))
@ 2014-10-31 10:45 Catalin Vasile
From: Catalin Vasile @ 2014-10-31 10:45 UTC
  To: linux-crypto; +Cc: herbert

Add support for one-shot givencrypt algorithms.
Givencrypt algorithms generate their IV and encrypt the data within the
same shared job descriptor.
The algorithms converted from ablkcipher to givencrypt (a usage sketch
follows the list) are:
 - AES in Cipher Block Chaining mode (CBC)
 - AES in Counter Mode (CTR), compliant with RFC3686
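
A hedged usage sketch with the 3.18-era givcrypt helpers from
<crypto/skcipher.h> (the wrapper function is hypothetical, the helper
names are assumed from that era's header, and the completion handling
an asynchronous driver needs is elided):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int givencrypt_example(struct crypto_ablkcipher *tfm,
			      struct scatterlist *src,
			      struct scatterlist *dst,
			      unsigned int nbytes, u8 *giv, u64 seq)
{
	struct skcipher_givcrypt_request *req;
	int ret;

	req = skcipher_givcrypt_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* the driver writes the generated IV into giv */
	skcipher_givcrypt_set_crypt(req, src, dst, nbytes, giv);
	skcipher_givcrypt_set_giv(req, giv, seq);

	ret = crypto_skcipher_givencrypt(req);	/* -EINPROGRESS when queued */

	/* a real caller waits for async completion before freeing */
	skcipher_givcrypt_free(req);
	return ret;
}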

Signed-off-by: Catalin Vasile <catalin.vasile@freescale.com>
---
 drivers/crypto/caam/caamalg.c | 285 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 281 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index cdcc6cb..3e0492c 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1396,6 +1396,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	u32 *key_jump_cmd;
 	u32 *desc;
 	u32 *nonce;
+	u32 geniv;
 	u32 ctx1_iv_off = 0;
 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
@@ -1554,6 +1555,83 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
+	/* ablkcipher_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	/* Load Nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		nonce = (u32 *)(key + keylen);
+		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP |
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_WAITCOMP |
+		    MOVE_SRC_INFIFO |
+		    MOVE_DEST_CLASS1CTX |
+		    (crt->ivsize << MOVE_LEN_SHIFT) |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy generated IV to memory */
+	append_seq_store(desc, crt->ivsize,
+			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			 (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, (u32)1, LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
+
+	if (ctx1_iv_off)
+		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+			    (1 << JUMP_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+						 desc_bytes(desc),
+						 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
 
 	return ret;
 }
@@ -2041,6 +2119,54 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 }
 
 /*
+ * Fill in ablkcipher givencrypt job descriptor
+ */
+static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+				    struct ablkcipher_edesc *edesc,
+				    struct ablkcipher_request *req,
+				    bool iv_contig)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc = edesc->hw_desc;
+	u32 out_options, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (!edesc->src_nents) {
+		src_dma = sg_dma_address(req->src);
+		in_options = 0;
+	} else {
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+		in_options = LDST_SGF;
+	}
+	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+
+	if (iv_contig) {
+		dst_dma = edesc->iv_dma;
+		out_options = 0;
+	} else {
+		dst_dma = edesc->sec4_sg_dma +
+			  sec4_sg_index * sizeof(struct sec4_sg_entry);
+		out_options = LDST_SGF;
+	}
+	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+}
+
+/*
  * allocate and map the aead extended descriptor
  */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
@@ -2659,6 +2785,151 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
 	return ret;
 }
 
+/*
+ * allocate and map the ablkcipher extended descriptor
+ * for ablkcipher givencrypt
+ */
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+				struct skcipher_givcrypt_request *greq,
+				int desc_bytes,
+				bool *iv_contig_out)
+{
+	struct ablkcipher_request *req = &greq->creq;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, dst_nents = 0, sec4_sg_bytes;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma = 0;
+	bool iv_contig = false;
+	int sgc;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	bool src_chained = false, dst_chained = false;
+	int sec4_sg_index;
+
+	src_nents = sg_count(req->src, req->nbytes, &src_chained);
+
+	if (unlikely(req->dst != req->src))
+		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+
+	if (likely(req->src == req->dst)) {
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_BIDIRECTIONAL, src_chained);
+	} else {
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_TO_DEVICE, src_chained);
+		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+					 DMA_FROM_DEVICE, dst_chained);
+	}
+
+	/*
+	 * Check if iv can be contiguous with source and destination.
+	 * If so, include it. If not, create scatterlist.
+	 */
+	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
+		iv_contig = true;
+	else
+		dst_nents = dst_nents ? : 1;
+	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+			sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = kmalloc(sizeof(*edesc) + desc_bytes +
+			sec4_sg_bytes, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->src_chained = src_chained;
+	edesc->dst_nents = dst_nents;
+	edesc->dst_chained = dst_chained;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+			 desc_bytes;
+
+	sec4_sg_index = 0;
+	if (src_nents) {
+		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+		sec4_sg_index += src_nents;
+	}
+
+	if (!iv_contig) {
+		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+				   iv_dma, ivsize, 0);
+		sec4_sg_index += 1;
+		sg_to_sec4_sg_last(req->dst, dst_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+	}
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+		       sec4_sg_bytes, 1);
+#endif
+
+	*iv_contig_out = iv_contig;
+	return edesc;
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+	struct ablkcipher_request *req = &creq->creq;
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	bool iv_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
+				       CAAM_CMD_SZ, &iv_contig);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor*/
+	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+				edesc, req, iv_contig);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
 #define template_aead		template_u.aead
 #define template_ablkcipher	template_u.ablkcipher
 struct caam_alg_template {
@@ -3312,12 +3583,13 @@ static struct caam_alg_template driver_algs[] = {
 		.name = "cbc(aes)",
 		.driver_name = "cbc-aes-caam",
 		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
 		.template_ablkcipher = {
 			.setkey = ablkcipher_setkey,
 			.encrypt = ablkcipher_encrypt,
 			.decrypt = ablkcipher_decrypt,
-			.geniv = "eseqiv",
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -3376,12 +3648,13 @@ static struct caam_alg_template driver_algs[] = {
 		.name = "rfc3686(ctr(aes))",
 		.driver_name = "rfc3686-ctr-aes-caam",
 		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
 		.template_ablkcipher = {
 			.setkey = ablkcipher_setkey,
 			.encrypt = ablkcipher_encrypt,
 			.decrypt = ablkcipher_decrypt,
-			.geniv = "seqiv",
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
 			.min_keysize = AES_MIN_KEY_SIZE +
 				       CTR_RFC3686_NONCE_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE +
@@ -3489,6 +3762,10 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
 			 template->type;
 	switch (template->type) {
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+		alg->cra_type = &crypto_givcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
 		alg->cra_type = &crypto_ablkcipher_type;
 		alg->cra_ablkcipher = template->template_ablkcipher;
-- 
1.8.3.1


* Re: [PATCH 1/4] crypto: caam - add support for ctr(aes)
@ 2014-11-06 15:17 Herbert Xu
From: Herbert Xu @ 2014-11-06 15:17 UTC
  To: Catalin Vasile; +Cc: linux-crypto

On Fri, Oct 31, 2014 at 12:45:35PM +0200, Catalin Vasile wrote:
> Add support for AES working in Counter Mode
> 
> Signed-off-by: Catalin Vasile <catalin.vasile@freescale.com>

All applied.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

