From: Gilad Ben-Yossef <gilad@benyossef.com>
To: Herbert Xu <herbert@gondor.apana.org.au>,
	"David S. Miller" <davem@davemloft.net>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-crypto@vger.kernel.org, devel@driverdev.osuosl.org,
	linux-kernel@vger.kernel.org, Ofir Drang <ofir.drang@arm.com>
Subject: [PATCH 3/7] crypto: ccree: add ablkcipher support
Date: Thu, 11 Jan 2018 09:17:10 +0000
Message-ID: <1515662239-1714-4-git-send-email-gilad@benyossef.com>
In-Reply-To: <1515662239-1714-1-git-send-email-gilad@benyossef.com>

Add ablkcipher support to the CryptoCell driver: AES in ECB, CBC, OFB,
CTR, XTS, ESSIV, BitLocker and CBC-CTS modes, and DES/3DES in ECB and
CBC modes.
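
As a sketch for reviewers (not part of the patch itself), this is
roughly how a kernel consumer would exercise one of the algorithms
registered here, e.g. "cbc(aes)", through the ablkcipher API that was
current at the time; error handling and the completion callback are
omitted for brevity:

	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	u8 key[16] = { 0 }, iv[16] = { 0 }, buf[16] = { 0 };

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_ablkcipher_setkey(tfm, key, sizeof(key));

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	sg_init_one(&sg, buf, sizeof(buf));
	/* in-place encrypt of one block; on completion the driver
	 * copies the last ciphertext block back into iv
	 */
	ablkcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	crypto_ablkcipher_encrypt(req); /* -EINPROGRESS when queued */

	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);

Since the algorithms are registered with CRYPTO_ALG_ASYNC, a real
caller also sets a completion callback with
ablkcipher_request_set_callback() and waits for it to fire before
freeing the request.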

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
 drivers/crypto/ccree/Makefile        |    2 +-
 drivers/crypto/ccree/cc_buffer_mgr.c |  125 ++++
 drivers/crypto/ccree/cc_buffer_mgr.h |   10 +
 drivers/crypto/ccree/cc_cipher.c     | 1167 ++++++++++++++++++++++++++++++++++
 drivers/crypto/ccree/cc_cipher.h     |   74 +++
 drivers/crypto/ccree/cc_driver.c     |   11 +
 drivers/crypto/ccree/cc_driver.h     |    2 +
 7 files changed, 1390 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/ccree/cc_cipher.c
 create mode 100644 drivers/crypto/ccree/cc_cipher.h

diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index 6b204ab..a7fecad 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_ivgen.o cc_sram_mgr.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_ivgen.o cc_sram_mgr.o
 ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
 ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 4c67579..510993e 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -8,6 +8,7 @@
 
 #include "cc_buffer_mgr.h"
 #include "cc_lli_defs.h"
+#include "cc_cipher.h"
 
 enum dma_buffer_type {
 	DMA_NULL_TYPE = -1,
@@ -347,6 +348,130 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 	return 0;
 }
 
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+				unsigned int ivsize, struct scatterlist *src,
+				struct scatterlist *dst)
+{
+	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
+
+	if (req_ctx->gen_ctx.iv_dma_addr) {
+		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
+		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+				 ivsize,
+				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+				 DMA_TO_DEVICE);
+	}
+	/* Release pool */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+	    req_ctx->mlli_params.mlli_virt_addr) {
+		dma_pool_free(req_ctx->mlli_params.curr_pool,
+			      req_ctx->mlli_params.mlli_virt_addr,
+			      req_ctx->mlli_params.mlli_dma_addr);
+	}
+
+	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+
+	if (src != dst) {
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+	}
+}
+
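+/*
+ * Buffer mapping for a cipher request: a src/dst scatterlist that maps
+ * to a single DMA entry is handed to the engine directly (DLLI), while
+ * anything longer is described by an MLLI table that is built here and
+ * later copied into CryptoCell SRAM through a BYPASS descriptor.
+ */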
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+			     unsigned int ivsize, unsigned int nbytes,
+			     void *info, struct scatterlist *src,
+			     struct scatterlist *dst, gfp_t flags)
+{
+	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
+	struct mlli_params *mlli_params = &req_ctx->mlli_params;
+	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct buffer_array sg_data;
+	u32 dummy = 0;
+	int rc = 0;
+	u32 mapped_nents = 0;
+
+	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
+	mlli_params->curr_pool = NULL;
+	sg_data.num_of_buffers = 0;
+
+	/* Map IV buffer */
+	if (ivsize) {
+		dump_byte_array("iv", (u8 *)info, ivsize);
+		req_ctx->gen_ctx.iv_dma_addr =
+			dma_map_single(dev, (void *)info,
+				       ivsize,
+				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
+			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+				ivsize, info);
+			return -ENOMEM;
+		}
+		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
+	} else {
+		req_ctx->gen_ctx.iv_dma_addr = 0;
+	}
+
+	/* Map the src SGL */
+	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+	if (rc) {
+		rc = -ENOMEM;
+		goto ablkcipher_exit;
+	}
+	if (mapped_nents > 1)
+		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+	if (src == dst) {
+		/* Handle inplace operation */
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			req_ctx->out_nents = 0;
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+		}
+	} else {
+		/* Map the dst sg */
+		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			      &dummy, &mapped_nents)) {
+			rc = -ENOMEM;
+			goto ablkcipher_exit;
+		}
+		if (mapped_nents > 1)
+			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+					nbytes, 0, true,
+					&req_ctx->out_mlli_nents);
+		}
+	}
+
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
+			goto ablkcipher_exit;
+	}
+
+	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+		cc_dma_buf_type(req_ctx->dma_buf_type));
+
+	return 0;
+
+ablkcipher_exit:
+	cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+	return rc;
+}
+
 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
 {
 	struct buff_mgr_handle *buff_mgr_handle;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 93ef765..ff4a5ec 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -40,6 +40,16 @@ int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
 
 int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
 
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+			     unsigned int ivsize, unsigned int nbytes,
+			     void *info, struct scatterlist *src,
+			     struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+				unsigned int ivsize,
+				struct scatterlist *src,
+				struct scatterlist *dst);
+
 int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 			      struct scatterlist *src, unsigned int nbytes,
 			      bool do_update, gfp_t flags);
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
new file mode 100644
index 0000000..8afdbc1
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -0,0 +1,1167 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
+
+#include "cc_driver.h"
+#include "cc_lli_defs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
+
+#define MAX_ABLKCIPHER_SEQ_LEN 6
+
+#define template_ablkcipher	template_u.ablkcipher
+
+#define CC_MIN_AES_XTS_SIZE 0x10
+#define CC_MAX_AES_XTS_SIZE 0x2000
+struct cc_cipher_handle {
+	struct list_head blkcipher_alg_list;
+};
+
+struct cc_user_key_info {
+	u8 *key;
+	dma_addr_t key_dma_addr;
+};
+
+struct cc_hw_key_info {
+	enum cc_hw_crypto_key key1_slot;
+	enum cc_hw_crypto_key key2_slot;
+};
+
+struct cc_cipher_ctx {
+	struct cc_drvdata *drvdata;
+	int keylen;
+	int key_round_number;
+	int cipher_mode;
+	int flow_mode;
+	unsigned int flags;
+	struct blkcipher_req_ctx *sync_ctx;
+	struct cc_user_key_info user;
+	struct cc_hw_key_info hw;
+	struct crypto_shash *shash_tfm;
+};
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
+
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		switch (size) {
+		case CC_AES_128_BIT_KEY_SIZE:
+		case CC_AES_192_BIT_KEY_SIZE:
+			if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+			    ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+			    ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
+				return 0;
+			break;
+		case CC_AES_256_BIT_KEY_SIZE:
+			return 0;
+		case (CC_AES_192_BIT_KEY_SIZE * 2):
+		case (CC_AES_256_BIT_KEY_SIZE * 2):
+			if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+			    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+			    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
+				return 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case S_DIN_to_DES:
+		if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
+			return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+			      unsigned int size)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		switch (ctx_p->cipher_mode) {
+		case DRV_CIPHER_XTS:
+			if (size >= CC_MIN_AES_XTS_SIZE &&
+			    size <= CC_MAX_AES_XTS_SIZE &&
+			    IS_ALIGNED(size, AES_BLOCK_SIZE))
+				return 0;
+			break;
+		case DRV_CIPHER_CBC_CTS:
+			if (size >= AES_BLOCK_SIZE)
+				return 0;
+			break;
+		case DRV_CIPHER_OFB:
+		case DRV_CIPHER_CTR:
+			return 0;
+		case DRV_CIPHER_ECB:
+		case DRV_CIPHER_CBC:
+		case DRV_CIPHER_ESSIV:
+		case DRV_CIPHER_BITLOCKER:
+			if (IS_ALIGNED(size, AES_BLOCK_SIZE))
+				return 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case S_DIN_to_DES:
+		if (IS_ALIGNED(size, DES_BLOCK_SIZE))
+			return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static unsigned int get_max_keysize(struct crypto_tfm *tfm)
+{
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg,
+			     crypto_alg);
+
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_ABLKCIPHER)
+		return cc_alg->crypto_alg.cra_ablkcipher.max_keysize;
+
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return cc_alg->crypto_alg.cra_blkcipher.max_keysize;
+
+	return 0;
+}
+
+static int cc_cipher_init(struct crypto_tfm *tfm)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct cc_crypto_alg *cc_alg =
+			container_of(alg, struct cc_crypto_alg, crypto_alg);
+	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+	int rc = 0;
+	unsigned int max_key_buf_size = get_max_keysize(tfm);
+	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
+
+	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+		crypto_tfm_alg_name(tfm));
+
+	ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
+
+	ctx_p->cipher_mode = cc_alg->cipher_mode;
+	ctx_p->flow_mode = cc_alg->flow_mode;
+	ctx_p->drvdata = cc_alg->drvdata;
+
+	/* Allocate key buffer, cache line aligned */
+	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+	if (!ctx_p->user.key)
+		return -ENOMEM;
+
+	dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+		ctx_p->user.key);
+
+	/* Map key buffer */
+	ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+						  max_key_buf_size,
+						  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
+		dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+			max_key_buf_size, ctx_p->user.key);
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+		max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* Alloc hash tfm for essiv */
+		ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+		if (IS_ERR(ctx_p->shash_tfm)) {
+			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+			return PTR_ERR(ctx_p->shash_tfm);
+		}
+	}
+
+	return rc;
+}
+
+static void cc_cipher_exit(struct crypto_tfm *tfm)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	unsigned int max_key_buf_size = get_max_keysize(tfm);
+
+	dev_dbg(dev, "Clearing context @%p for %s\n",
+		crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* Free hash tfm for essiv */
+		crypto_free_shash(ctx_p->shash_tfm);
+		ctx_p->shash_tfm = NULL;
+	}
+
+	/* Unmap key buffer */
+	dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
+			 DMA_TO_DEVICE);
+	dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
+		&ctx_p->user.key_dma_addr);
+
+	/* Free key buffer in context */
+	kfree(ctx_p->user.key);
+	dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+}
+
+struct tdes_keys {
+	u8	key1[DES_KEY_SIZE];
+	u8	key2[DES_KEY_SIZE];
+	u8	key3[DES_KEY_SIZE];
+};
+
+static const u8 zero_buff[] = {	0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+
+/* Verify that the 3DES key is not degenerate: key1 != key2 and key2 != key3. */
+static int cc_verify_3des_keys(const u8 *key, unsigned int keylen)
+{
+	struct tdes_keys *tdes_key = (struct tdes_keys *)key;
+
+	/* verify key1 != key2 and key3 != key2*/
+	if ((memcmp((u8 *)tdes_key->key1, (u8 *)tdes_key->key2,
+		    sizeof(tdes_key->key1)) == 0) ||
+	    (memcmp((u8 *)tdes_key->key3, (u8 *)tdes_key->key2,
+		    sizeof(tdes_key->key3)) == 0)) {
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
+static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
+{
+	switch (slot_num) {
+	case 0:
+		return KFDE0_KEY;
+	case 1:
+		return KFDE1_KEY;
+	case 2:
+		return KFDE2_KEY;
+	case 3:
+		return KFDE3_KEY;
+	}
+	return END_OF_KEYS;
+}
+
+static int cc_cipher_setkey(struct crypto_ablkcipher *atfm, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	u32 tmp[DES_EXPKEY_WORDS];
+	unsigned int max_key_buf_size = get_max_keysize(tfm);
+
+	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+		ctx_p, crypto_tfm_alg_name(tfm), keylen);
+	dump_byte_array("key", (u8 *)key, keylen);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	if (validate_keys_sizes(ctx_p, keylen)) {
+		dev_err(dev, "Unsupported key size %d.\n", keylen);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (cc_is_hw_key(tfm)) {
+		/* setting HW key slots */
+		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;
+
+		if (ctx_p->flow_mode != S_DIN_to_AES) {
+			dev_err(dev, "HW key not supported for non-AES flows\n");
+			return -EINVAL;
+		}
+
+		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
+		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+			dev_err(dev, "Unsupported hw key1 number (%d)\n",
+				hki->hw_key1);
+			return -EINVAL;
+		}
+
+		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+			if (hki->hw_key1 == hki->hw_key2) {
+				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+					hki->hw_key1, hki->hw_key2);
+				return -EINVAL;
+			}
+			ctx_p->hw.key2_slot =
+				hw_key_to_cc_hw_key(hki->hw_key2);
+			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+				dev_err(dev, "Unsupported hw key2 number (%d)\n",
+					hki->hw_key2);
+				return -EINVAL;
+			}
+		}
+
+		ctx_p->keylen = keylen;
+		dev_dbg(dev, "cc_is_hw_key ret 0");
+
+		return 0;
+	}
+
+	// verify weak keys
+	if (ctx_p->flow_mode == S_DIN_to_DES) {
+		if (!des_ekey(tmp, key) &&
+		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+			dev_dbg(dev, "weak DES key");
+			return -EINVAL;
+		}
+	}
+	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+	    xts_check_key(tfm, key, keylen)) {
+		dev_dbg(dev, "weak XTS key");
+		return -EINVAL;
+	}
+	if (ctx_p->flow_mode == S_DIN_to_DES &&
+	    keylen == DES3_EDE_KEY_SIZE &&
+	    cc_verify_3des_keys(key, keylen)) {
+		dev_dbg(dev, "weak 3DES key");
+		return -EINVAL;
+	}
+
+	/* STAT_PHASE_1: Copy key to ctx */
+	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
+				max_key_buf_size, DMA_TO_DEVICE);
+
+	memcpy(ctx_p->user.key, key, keylen);
+	if (keylen == 24)
+		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* ESSIV: derive the second key half as the SHA-256 digest
+		 * of the first, using a software hash implementation
+		 */
+		int key_len = keylen >> 1;
+		int err;
+
+		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
+		desc->tfm = ctx_p->shash_tfm;
+
+		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
+					  ctx_p->user.key + key_len);
+		if (err) {
+			dev_err(dev, "Failed to hash ESSIV key.\n");
+			return err;
+		}
+	}
+	dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
+				   max_key_buf_size, DMA_TO_DEVICE);
+	ctx_p->keylen = keylen;
+
+	dev_dbg(dev, "return safely");
+	return 0;
+}
+
+static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+				 struct blkcipher_req_ctx *req_ctx,
+				 unsigned int ivsize, unsigned int nbytes,
+				 struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	int cipher_mode = ctx_p->cipher_mode;
+	int flow_mode = ctx_p->flow_mode;
+	int direction = req_ctx->gen_ctx.op_type;
+	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+	unsigned int key_len = ctx_p->keylen;
+	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+	unsigned int du_size = nbytes;
+
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg,
+			     crypto_alg);
+
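+	/*
+	 * Data-unit size for XTS/ESSIV/BITLOCKER: defaults to the request
+	 * size unless this alg variant pins it to 512 or 4096 bytes.
+	 */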
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+	    CRYPTO_ALG_BULK_DU_512)
+		du_size = 512;
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+	    CRYPTO_ALG_BULK_DU_4096)
+		du_size = 4096;
+
+	switch (cipher_mode) {
+	case DRV_CIPHER_CBC:
+	case DRV_CIPHER_CBC_CTS:
+	case DRV_CIPHER_CTR:
+	case DRV_CIPHER_OFB:
+		/* Load cipher state */
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
+			     NS_BIT);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		if (cipher_mode == DRV_CIPHER_CTR ||
+		    cipher_mode == DRV_CIPHER_OFB) {
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+		} else {
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
+		}
+		(*seq_size)++;
+		/*FALLTHROUGH*/
+	case DRV_CIPHER_ECB:
+		/* Load key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (flow_mode == S_DIN_to_AES) {
+			if (cc_is_hw_key(tfm)) {
+				set_hw_crypto_key(&desc[*seq_size],
+						  ctx_p->hw.key1_slot);
+			} else {
+				set_din_type(&desc[*seq_size], DMA_DLLI,
+					     key_dma_addr, ((key_len == 24) ?
+							    AES_MAX_KEY_SIZE :
+							    key_len), NS_BIT);
+			}
+			set_key_size_aes(&desc[*seq_size], key_len);
+		} else {
+			/*des*/
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     key_len, NS_BIT);
+			set_key_size_des(&desc[*seq_size], key_len);
+		}
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		(*seq_size)++;
+		break;
+	case DRV_CIPHER_XTS:
+	case DRV_CIPHER_ESSIV:
+	case DRV_CIPHER_BITLOCKER:
+		/* Load AES key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (cc_is_hw_key(tfm)) {
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key1_slot);
+		} else {
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     (key_len / 2), NS_BIT);
+		}
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		(*seq_size)++;
+
+		/* load XEX key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (cc_is_hw_key(tfm)) {
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key2_slot);
+		} else {
+			set_din_type(&desc[*seq_size], DMA_DLLI,
+				     (key_dma_addr + (key_len / 2)),
+				     (key_len / 2), NS_BIT);
+		}
+		set_xex_data_unit_size(&desc[*seq_size], du_size);
+		set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+		(*seq_size)++;
+
+		/* Set state */
+		hw_desc_init(&desc[*seq_size]);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+			     CC_AES_BLOCK_SIZE, NS_BIT);
+		(*seq_size)++;
+		break;
+	default:
+		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+	}
+}
+
+static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+				 struct blkcipher_req_ctx *req_ctx,
+				 struct scatterlist *dst,
+				 struct scatterlist *src, unsigned int nbytes,
+				 void *areq, struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	unsigned int flow_mode = ctx_p->flow_mode;
+
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		flow_mode = DIN_AES_DOUT;
+		break;
+	case S_DIN_to_DES:
+		flow_mode = DIN_DES_DOUT;
+		break;
+	default:
+		dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
+		return;
+	}
+	/* Process */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(src), nbytes);
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(dst), nbytes);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+			     nbytes, NS_BIT);
+		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+			      nbytes, NS_BIT, (!areq ? 0 : 1));
+		if (areq)
+			set_queue_last_ind(&desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		(*seq_size)++;
+	} else {
+		/* bypass */
+		dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+			&req_ctx->mlli_params.mlli_dma_addr,
+			req_ctx->mlli_params.mlli_len,
+			(unsigned int)ctx_p->drvdata->mlli_sram_addr);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI,
+			     req_ctx->mlli_params.mlli_dma_addr,
+			     req_ctx->mlli_params.mlli_len, NS_BIT);
+		set_dout_sram(&desc[*seq_size],
+			      ctx_p->drvdata->mlli_sram_addr,
+			      req_ctx->mlli_params.mlli_len);
+		set_flow_mode(&desc[*seq_size], BYPASS);
+		(*seq_size)++;
+
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_MLLI,
+			     ctx_p->drvdata->mlli_sram_addr,
+			     req_ctx->in_mlli_nents, NS_BIT);
+		if (req_ctx->out_nents == 0) {
+			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr);
+			set_dout_mlli(&desc[*seq_size],
+				      ctx_p->drvdata->mlli_sram_addr,
+				      req_ctx->in_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
+		} else {
+			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr +
+				(u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
+			set_dout_mlli(&desc[*seq_size],
+				      (ctx_p->drvdata->mlli_sram_addr +
+				       (LLI_ENTRY_BYTE_SIZE *
+					req_ctx->in_mlli_nents)),
+				      req_ctx->out_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
+		}
+		if (areq)
+			set_queue_last_ind(&desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		(*seq_size)++;
+	}
+}
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
+{
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)cc_req;
+	struct scatterlist *dst = areq->dst;
+	struct scatterlist *src = areq->src;
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct ablkcipher_request *req = (struct ablkcipher_request *)areq;
+
+	cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+	kfree(req_ctx->iv);
+
+	/*
+	 * The crypto API expects us to set the req->info to the last
+	 * ciphertext block. For encrypt, simply copy from the result.
+	 * For decrypt, we must copy from a saved buffer since this
+	 * could be an in-place decryption operation and the src is
+	 * lost by this point.
+	 */
+	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT)  {
+		memcpy(req->info, req_ctx->backup_info, ivsize);
+		kfree(req_ctx->backup_info);
+	} else if (!err) {
+		scatterwalk_map_and_copy(req->info, req->dst,
+					 (req->nbytes - ivsize),
+					 ivsize, 0);
+	}
+
+	ablkcipher_request_complete(areq, err);
+}
+
+static int cc_cipher_process(struct ablkcipher_request *req,
+			     enum drv_crypto_direction direction)
+{
+	struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *src = req->src;
+	unsigned int nbytes = req->nbytes;
+	void *info = req->info;
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+	struct cc_crypto_req cc_req = {};
+	int rc, seq_len = 0, cts_restore_flag = 0;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	dev_dbg(dev, "%s req=%p info=%p nbytes=%d\n",
+		((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+		"Encrypt" : "Decrypt"), req, info, nbytes);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	/* TODO: check data length according to mode */
+	if (validate_data_size(ctx_p, nbytes)) {
+		dev_err(dev, "Unsupported data size %d.\n", nbytes);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+		rc = -EINVAL;
+		goto exit_process;
+	}
+	if (nbytes == 0) {
+		/* No data to process is valid */
+		rc = 0;
+		goto exit_process;
+	}
+
+	/* The IV we are handed may be allocated from the stack so
+	 * we must copy it to a DMAable buffer before use.
+	 */
+	req_ctx->iv = kmalloc(ivsize, flags);
+	if (!req_ctx->iv) {
+		rc = -ENOMEM;
+		goto exit_process;
+	}
+	memcpy(req_ctx->iv, info, ivsize);
+
+	/* For CTS: when the data size is aligned to the block size, use CBC mode */
+	if (((nbytes % AES_BLOCK_SIZE) == 0) &&
+	    ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
+		ctx_p->cipher_mode = DRV_CIPHER_CBC;
+		cts_restore_flag = 1;
+	}
+
+	/* Setup DX request structure */
+	cc_req.user_cb = (void *)cc_cipher_complete;
+	cc_req.user_arg = (void *)req;
+
+#ifdef ENABLE_CYCLE_COUNT
+	cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
+
+#endif
+
+	/* Setup request context */
+	req_ctx->gen_ctx.op_type = direction;
+
+	/* STAT_PHASE_1: Map buffers */
+
+	rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+				      req_ctx->iv, src, dst, flags);
+	if (rc) {
+		dev_err(dev, "map_request() failed\n");
+		goto exit_process;
+	}
+
+	/* STAT_PHASE_2: Create sequence */
+
+	/* Setup processing */
+	cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+	/* Data processing */
+	cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
+			     &seq_len);
+
+	/* do we need to generate IV? */
+	if (req_ctx->is_giv) {
+		cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+		cc_req.ivgen_dma_addr_len = 1;
+		/* set the IV size (8/16 B long)*/
+		cc_req.ivgen_size = ivsize;
+	}
+
+	/* STAT_PHASE_3: Lock HW and push sequence */
+
+	rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+			     &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		/* Failed to send the request or request completed
+		 * synchronously
+		 */
+		cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+	}
+
+exit_process:
+	if (cts_restore_flag)
+		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
+
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		kfree(req_ctx->backup_info);
+		kfree(req_ctx->iv);
+	}
+
+	return rc;
+}
+
+static int cc_cipher_encrypt(struct ablkcipher_request *req)
+{
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->is_giv = false;
+	req_ctx->backup_info = NULL;
+
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+}
+
+static int cc_cipher_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	/*
+	 * Allocate and save the last IV sized bytes of the source, which will
+	 * be lost in case of in-place decryption and might be needed for CTS.
+	 */
+	req_ctx->backup_info = kmalloc(ivsize, flags);
+	if (!req_ctx->backup_info)
+		return -ENOMEM;
+
+	scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+				 (req->nbytes - ivsize), ivsize, 0);
+	req_ctx->is_giv = false;
+
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+}
+
+/* DX Block cipher alg */
+static struct cc_alg_template blkcipher_algs[] = {
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			.geniv = "eseqiv",
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-du512-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-du4096-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "essiv(aes)",
+		.driver_name = "essiv-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "essiv(aes)",
+		.driver_name = "essiv-aes-du512-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "essiv(aes)",
+		.driver_name = "essiv-aes-du4096-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "bitlocker(aes)",
+		.driver_name = "bitlocker-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "bitlocker(aes)",
+		.driver_name = "bitlocker-aes-du512-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "bitlocker(aes)",
+		.driver_name = "bitlocker-aes-du4096-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "ecb(aes)",
+		.driver_name = "ecb-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "ofb(aes)",
+		.driver_name = "ofb-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_OFB,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "cts1(cbc(aes))",
+		.driver_name = "cts1-cbc-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC_CTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-dx",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CTR,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-dx",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+	},
+	{
+		.name = "ecb(des3_ede)",
+		.driver_name = "ecb-3des-dx",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_DES,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-dx",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+	},
+	{
+		.name = "ecb(des)",
+		.driver_name = "ecb-des-dx",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_DES,
+	},
+};
+
+static
+struct cc_crypto_alg *cc_cipher_create_alg(struct cc_alg_template *template,
+					   struct device *dev)
+{
+	struct cc_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	alg = &t_alg->crypto_alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 template->driver_name);
+	alg->cra_module = THIS_MODULE;
+	alg->cra_priority = CC_CRA_PRIO;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct cc_cipher_ctx);
+
+	alg->cra_init = cc_cipher_init;
+	alg->cra_exit = cc_cipher_exit;
+	alg->cra_type = &crypto_ablkcipher_type;
+	alg->cra_ablkcipher = template->template_ablkcipher;
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+				template->type;
+
+	t_alg->cipher_mode = template->cipher_mode;
+	t_alg->flow_mode = template->flow_mode;
+
+	return t_alg;
+}
+
+int cc_cipher_free(struct cc_drvdata *drvdata)
+{
+	struct cc_crypto_alg *t_alg, *n;
+	struct cc_cipher_handle *blkcipher_handle =
+						drvdata->blkcipher_handle;
+	if (blkcipher_handle) {
+		/* Remove registered algs */
+		list_for_each_entry_safe(t_alg, n,
+					 &blkcipher_handle->blkcipher_alg_list,
+					 entry) {
+			crypto_unregister_alg(&t_alg->crypto_alg);
+			list_del(&t_alg->entry);
+			kfree(t_alg);
+		}
+		kfree(blkcipher_handle);
+		drvdata->blkcipher_handle = NULL;
+	}
+	return 0;
+}
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
+{
+	struct cc_cipher_handle *ablkcipher_handle;
+	struct cc_crypto_alg *t_alg;
+	struct device *dev = drvdata_to_dev(drvdata);
+	int rc = -ENOMEM;
+	int alg;
+
+	ablkcipher_handle = kmalloc(sizeof(*ablkcipher_handle), GFP_KERNEL);
+	if (!ablkcipher_handle)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ablkcipher_handle->blkcipher_alg_list);
+	drvdata->blkcipher_handle = ablkcipher_handle;
+
+	/* Linux crypto */
+	dev_dbg(dev, "Number of algorithms = %zu\n",
+		ARRAY_SIZE(blkcipher_algs));
+	for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
+		dev_dbg(dev, "creating %s\n", blkcipher_algs[alg].driver_name);
+		t_alg = cc_cipher_create_alg(&blkcipher_algs[alg], dev);
+		if (IS_ERR(t_alg)) {
+			rc = PTR_ERR(t_alg);
+			dev_err(dev, "%s alg allocation failed\n",
+				blkcipher_algs[alg].driver_name);
+			goto fail0;
+		}
+		t_alg->drvdata = drvdata;
+
+		dev_dbg(dev, "registering %s\n",
+			blkcipher_algs[alg].driver_name);
+		rc = crypto_register_alg(&t_alg->crypto_alg);
+		dev_dbg(dev, "%s alg registration rc = %x\n",
+			t_alg->crypto_alg.cra_driver_name, rc);
+		if (rc) {
+			dev_err(dev, "%s alg registration failed\n",
+				t_alg->crypto_alg.cra_driver_name);
+			kfree(t_alg);
+			goto fail0;
+		} else {
+			list_add_tail(&t_alg->entry,
+				      &ablkcipher_handle->blkcipher_alg_list);
+			dev_dbg(dev, "Registered %s\n",
+				t_alg->crypto_alg.cra_driver_name);
+		}
+	}
+	return 0;
+
+fail0:
+	cc_cipher_free(drvdata);
+	return rc;
+}
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
new file mode 100644
index 0000000..4c181c7
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+/* Crypto cipher flags */
+#define CC_CRYPTO_CIPHER_KEY_KFDE0	BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1	BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2	BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3	BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B	BIT(4)
+
+#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE1 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE2 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE3)
+
+struct blkcipher_req_ctx {
+	struct async_gen_req_ctx gen_ctx;
+	enum cc_req_dma_buf_type dma_buf_type;
+	u32 in_nents;
+	u32 in_mlli_nents;
+	u32 out_nents;
+	u32 out_mlli_nents;
+	u8 *backup_info; /*store iv for generated IV flow*/
+	u8 *iv;
+	bool is_giv;
+	struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
+#ifndef CRYPTO_ALG_BULK_MASK
+
+#define CRYPTO_ALG_BULK_DU_512	0x00002000
+#define CRYPTO_ALG_BULK_DU_4096	0x00004000
+#define CRYPTO_ALG_BULK_MASK	(CRYPTO_ALG_BULK_DU_512 |\
+				CRYPTO_ALG_BULK_DU_4096)
+#endif /* CRYPTO_ALG_BULK_MASK */
+
+#ifdef CRYPTO_TFM_REQ_HW_KEY
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+	return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
+}
+
+#else
+
+struct arm_hw_key_info {
+	int hw_key1;
+	int hw_key2;
+};
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+	return false;
+}
+
+#endif /* CRYPTO_TFM_REQ_HW_KEY */
+
+#endif /*__CC_CIPHER_H__*/
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 941ca86..77393a6 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -19,6 +19,7 @@
 #include "cc_request_mgr.h"
 #include "cc_buffer_mgr.h"
 #include "cc_debugfs.h"
+#include "cc_cipher.h"
 #include "cc_ivgen.h"
 #include "cc_sram_mgr.h"
 #include "cc_pm.h"
@@ -279,8 +280,17 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		goto post_power_mgr_err;
 	}
 
+	/* Allocate crypto algs */
+	rc = cc_cipher_alloc(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_cipher_alloc failed\n");
+		goto post_ivgen_err;
+	}
+
 	return 0;
 
+post_ivgen_err:
+	cc_ivgen_fini(new_drvdata);
 post_power_mgr_err:
 	cc_pm_fini(new_drvdata);
 post_buf_mgr_err:
@@ -309,6 +319,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
 	struct cc_drvdata *drvdata =
 		(struct cc_drvdata *)platform_get_drvdata(plat_dev);
 
+	cc_cipher_free(drvdata);
 	cc_ivgen_fini(drvdata);
 	cc_pm_fini(drvdata);
 	cc_buffer_mgr_fini(drvdata);
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index b3be754..2e94957 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -15,6 +15,7 @@
 #endif
 #include <linux/dma-mapping.h>
 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/aes.h>
 #include <crypto/sha.h>
 #include <crypto/authenc.h>
@@ -109,6 +110,7 @@ struct cc_drvdata {
 	struct platform_device *plat_dev;
 	cc_sram_addr_t mlli_sram_addr;
 	void *buff_mgr_handle;
+	void *blkcipher_handle;
 	void *request_mgr_handle;
 	void *ivgen_handle;
 	void *sram_mgr_handle;
-- 
2.7.4

WARNING: multiple messages have this Message-ID (diff)
From: Gilad Ben-Yossef <gilad@benyossef.com>
To: Herbert Xu <herbert@gondor.apana.org.au>,
	"David S. Miller" <davem@davemloft.net>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ofir Drang <ofir.drang@arm.com>,
	linux-kernel@vger.kernel.org, linux-crypto@vger.kernel.org,
	devel@driverdev.osuosl.org
Subject: [PATCH 3/7] crypto: ccree: add ablkcipher support
Date: Thu, 11 Jan 2018 09:17:10 +0000	[thread overview]
Message-ID: <1515662239-1714-4-git-send-email-gilad@benyossef.com> (raw)
In-Reply-To: <1515662239-1714-1-git-send-email-gilad@benyossef.com>

Add CryptoCell ablkcipher support

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
 drivers/crypto/ccree/Makefile        |    2 +-
 drivers/crypto/ccree/cc_buffer_mgr.c |  125 ++++
 drivers/crypto/ccree/cc_buffer_mgr.h |   10 +
 drivers/crypto/ccree/cc_cipher.c     | 1167 ++++++++++++++++++++++++++++++++++
 drivers/crypto/ccree/cc_cipher.h     |   74 +++
 drivers/crypto/ccree/cc_driver.c     |   11 +
 drivers/crypto/ccree/cc_driver.h     |    2 +
 7 files changed, 1390 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/ccree/cc_cipher.c
 create mode 100644 drivers/crypto/ccree/cc_cipher.h

diff --git a/drivers/crypto/ccree/Makefile b/drivers/crypto/ccree/Makefile
index 6b204ab..a7fecad 100644
--- a/drivers/crypto/ccree/Makefile
+++ b/drivers/crypto/ccree/Makefile
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
-ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_ivgen.o cc_sram_mgr.o
+ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_ivgen.o cc_sram_mgr.o
 ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
 ccree-$(CONFIG_PM) += cc_pm.o
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 4c67579..510993e 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -8,6 +8,7 @@
 
 #include "cc_buffer_mgr.h"
 #include "cc_lli_defs.h"
+#include "cc_cipher.h"
 
 enum dma_buffer_type {
 	DMA_NULL_TYPE = -1,
@@ -347,6 +348,130 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 	return 0;
 }
 
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+				unsigned int ivsize, struct scatterlist *src,
+				struct scatterlist *dst)
+{
+	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
+
+	if (req_ctx->gen_ctx.iv_dma_addr) {
+		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
+			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
+		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
+				 ivsize,
+				 req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+				 DMA_TO_DEVICE);
+	}
+	/* Release pool */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
+	    req_ctx->mlli_params.mlli_virt_addr) {
+		dma_pool_free(req_ctx->mlli_params.curr_pool,
+			      req_ctx->mlli_params.mlli_virt_addr,
+			      req_ctx->mlli_params.mlli_dma_addr);
+	}
+
+	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+
+	if (src != dst) {
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+	}
+}
+
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+			     unsigned int ivsize, unsigned int nbytes,
+			     void *info, struct scatterlist *src,
+			     struct scatterlist *dst, gfp_t flags)
+{
+	struct blkcipher_req_ctx *req_ctx = (struct blkcipher_req_ctx *)ctx;
+	struct mlli_params *mlli_params = &req_ctx->mlli_params;
+	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
+	struct buffer_array sg_data;
+	u32 dummy = 0;
+	int rc = 0;
+	u32 mapped_nents = 0;
+
+	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
+	mlli_params->curr_pool = NULL;
+	sg_data.num_of_buffers = 0;
+
+	/* Map IV buffer */
+	if (ivsize) {
+		dump_byte_array("iv", (u8 *)info, ivsize);
+		req_ctx->gen_ctx.iv_dma_addr =
+			dma_map_single(dev, (void *)info,
+				       ivsize,
+				       req_ctx->is_giv ? DMA_BIDIRECTIONAL :
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
+			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
+				ivsize, info);
+			return -ENOMEM;
+		}
+		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
+			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
+	} else {
+		req_ctx->gen_ctx.iv_dma_addr = 0;
+	}
+
+	/* Map the src SGL */
+	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
+	if (rc) {
+		rc = -ENOMEM;
+		goto ablkcipher_exit;
+	}
+	if (mapped_nents > 1)
+		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+	if (src == dst) {
+		/* Handle inplace operation */
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			req_ctx->out_nents = 0;
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+		}
+	} else {
+		/* Map the dst sg */
+		if (cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+			      &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
+			      &dummy, &mapped_nents)) {
+			rc = -ENOMEM;
+			goto ablkcipher_exit;
+		}
+		if (mapped_nents > 1)
+			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;
+
+		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
+					nbytes, 0, true,
+					&req_ctx->in_mlli_nents);
+			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
+					nbytes, 0, true,
+					&req_ctx->out_mlli_nents);
+		}
+	}
+
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
+		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
+		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
+		if (rc)
+			goto ablkcipher_exit;
+	}
+
+	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
+		cc_dma_buf_type(req_ctx->dma_buf_type));
+
+	return 0;
+
+ablkcipher_exit:
+	cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+	return rc;
+}
+
 int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
 {
 	struct buff_mgr_handle *buff_mgr_handle;
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.h b/drivers/crypto/ccree/cc_buffer_mgr.h
index 93ef765..ff4a5ec 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.h
+++ b/drivers/crypto/ccree/cc_buffer_mgr.h
@@ -40,6 +40,16 @@ int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
 
 int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
 
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
+			     unsigned int ivsize, unsigned int nbytes,
+			     void *info, struct scatterlist *src,
+			     struct scatterlist *dst, gfp_t flags);
+
+void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
+				unsigned int ivsize,
+				struct scatterlist *src,
+				struct scatterlist *dst);
+
 int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
 			      struct scatterlist *src, unsigned int nbytes,
 			      bool do_update, gfp_t flags);
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
new file mode 100644
index 0000000..8afdbc1
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -0,0 +1,1167 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
+
+#include "cc_driver.h"
+#include "cc_lli_defs.h"
+#include "cc_buffer_mgr.h"
+#include "cc_cipher.h"
+#include "cc_request_mgr.h"
+
+#define MAX_ABLKCIPHER_SEQ_LEN 6
+
+#define template_ablkcipher	template_u.ablkcipher
+
+#define CC_MIN_AES_XTS_SIZE 0x10
+#define CC_MAX_AES_XTS_SIZE 0x2000
+struct cc_cipher_handle {
+	struct list_head blkcipher_alg_list;
+};
+
+struct cc_user_key_info {
+	u8 *key;
+	dma_addr_t key_dma_addr;
+};
+
+struct cc_hw_key_info {
+	enum cc_hw_crypto_key key1_slot;
+	enum cc_hw_crypto_key key2_slot;
+};
+
+struct cc_cipher_ctx {
+	struct cc_drvdata *drvdata;
+	int keylen;
+	int key_round_number;
+	int cipher_mode;
+	int flow_mode;
+	unsigned int flags;
+	struct blkcipher_req_ctx *sync_ctx;
+	struct cc_user_key_info user;
+	struct cc_hw_key_info hw;
+	struct crypto_shash *shash_tfm;
+};
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
+
+static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		switch (size) {
+		case CC_AES_128_BIT_KEY_SIZE:
+		case CC_AES_192_BIT_KEY_SIZE:
+			if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
+			    ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
+			    ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
+				return 0;
+			break;
+		case CC_AES_256_BIT_KEY_SIZE:
+			return 0;
+		case (CC_AES_192_BIT_KEY_SIZE * 2):
+		case (CC_AES_256_BIT_KEY_SIZE * 2):
+			if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+			    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+			    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
+				return 0;
+			break;
+		default:
+			break;
+		}
+	case S_DIN_to_DES:
+		if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
+			return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+			      unsigned int size)
+{
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		switch (ctx_p->cipher_mode) {
+		case DRV_CIPHER_XTS:
+			if (size >= CC_MIN_AES_XTS_SIZE &&
+			    size <= CC_MAX_AES_XTS_SIZE &&
+			    IS_ALIGNED(size, AES_BLOCK_SIZE))
+				return 0;
+			break;
+		case DRV_CIPHER_CBC_CTS:
+			if (size >= AES_BLOCK_SIZE)
+				return 0;
+			break;
+		case DRV_CIPHER_OFB:
+		case DRV_CIPHER_CTR:
+				return 0;
+		case DRV_CIPHER_ECB:
+		case DRV_CIPHER_CBC:
+		case DRV_CIPHER_ESSIV:
+		case DRV_CIPHER_BITLOCKER:
+			if (IS_ALIGNED(size, AES_BLOCK_SIZE))
+				return 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case S_DIN_to_DES:
+		if (IS_ALIGNED(size, DES_BLOCK_SIZE))
+			return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
+
+static unsigned int get_max_keysize(struct crypto_tfm *tfm)
+{
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg,
+			     crypto_alg);
+
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_ABLKCIPHER)
+		return cc_alg->crypto_alg.cra_ablkcipher.max_keysize;
+
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return cc_alg->crypto_alg.cra_blkcipher.max_keysize;
+
+	return 0;
+}
+
+static int cc_cipher_init(struct crypto_tfm *tfm)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct cc_crypto_alg *cc_alg =
+			container_of(alg, struct cc_crypto_alg, crypto_alg);
+	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
+	int rc = 0;
+	unsigned int max_key_buf_size = get_max_keysize(tfm);
+	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
+
+	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
+		crypto_tfm_alg_name(tfm));
+
+	ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
+
+	ctx_p->cipher_mode = cc_alg->cipher_mode;
+	ctx_p->flow_mode = cc_alg->flow_mode;
+	ctx_p->drvdata = cc_alg->drvdata;
+
+	/* Allocate key buffer, cache line aligned */
+	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
+	if (!ctx_p->user.key)
+		return -ENOMEM;
+
+	dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
+		ctx_p->user.key);
+
+	/* Map key buffer */
+	ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
+						  max_key_buf_size,
+						  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
+		dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
+			max_key_buf_size, ctx_p->user.key);
+		kfree(ctx_p->user.key);
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
+		max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* Alloc hash tfm for essiv */
+		ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
+		if (IS_ERR(ctx_p->shash_tfm)) {
+			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
+			dma_unmap_single(dev, ctx_p->user.key_dma_addr,
+					 max_key_buf_size, DMA_TO_DEVICE);
+			kfree(ctx_p->user.key);
+			return PTR_ERR(ctx_p->shash_tfm);
+		}
+	}
+
+	return rc;
+}
+
+static void cc_cipher_exit(struct crypto_tfm *tfm)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	unsigned int max_key_buf_size = get_max_keysize(tfm);
+
+	dev_dbg(dev, "Clearing context @%p for %s\n",
+		crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* Free hash tfm for essiv */
+		crypto_free_shash(ctx_p->shash_tfm);
+		ctx_p->shash_tfm = NULL;
+	}
+
+	/* Unmap key buffer */
+	dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
+			 DMA_TO_DEVICE);
+	dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
+		&ctx_p->user.key_dma_addr);
+
+	/* Free key buffer in context */
+	kfree(ctx_p->user.key);
+	dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
+}
+
+struct tdes_keys {
+	u8	key1[DES_KEY_SIZE];
+	u8	key2[DES_KEY_SIZE];
+	u8	key3[DES_KEY_SIZE];
+};
+
+static const u8 zero_buff[] = {	0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+				0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+
+/* Verify that no two consecutive 3DES subkeys are equal (degenerate key) */
+static int cc_verify_3des_keys(const u8 *key, unsigned int keylen)
+{
+	struct tdes_keys *tdes_key = (struct tdes_keys *)key;
+
+	/* verify key1 != key2 and key3 != key2 */
+	if (!memcmp(tdes_key->key1, tdes_key->key2, sizeof(tdes_key->key1)) ||
+	    !memcmp(tdes_key->key3, tdes_key->key2, sizeof(tdes_key->key3)))
+		return -EINVAL;
+
+	return 0;
+}
+
+static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
+{
+	switch (slot_num) {
+	case 0:
+		return KFDE0_KEY;
+	case 1:
+		return KFDE1_KEY;
+	case 2:
+		return KFDE2_KEY;
+	case 3:
+		return KFDE3_KEY;
+	}
+	return END_OF_KEYS;
+}
+
+static int cc_cipher_setkey(struct crypto_ablkcipher *atfm, const u8 *key,
+			    unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	u32 tmp[DES_EXPKEY_WORDS];
+	unsigned int max_key_buf_size = get_max_keysize(tfm);
+
+	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
+		ctx_p, crypto_tfm_alg_name(tfm), keylen);
+	dump_byte_array("key", (u8 *)key, keylen);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	if (validate_keys_sizes(ctx_p, keylen)) {
+		dev_err(dev, "Unsupported key size %u.\n", keylen);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (cc_is_hw_key(tfm)) {
+		/* setting HW key slots */
+		const struct arm_hw_key_info *hki =
+					(const struct arm_hw_key_info *)key;
+
+		if (ctx_p->flow_mode != S_DIN_to_AES) {
+			dev_err(dev, "HW key not supported for non-AES flows\n");
+			return -EINVAL;
+		}
+
+		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
+		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
+			dev_err(dev, "Unsupported hw key1 number (%d)\n",
+				hki->hw_key1);
+			return -EINVAL;
+		}
+
+		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
+		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
+		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
+			if (hki->hw_key1 == hki->hw_key2) {
+				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
+					hki->hw_key1, hki->hw_key2);
+				return -EINVAL;
+			}
+			ctx_p->hw.key2_slot =
+				hw_key_to_cc_hw_key(hki->hw_key2);
+			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
+				dev_err(dev, "Unsupported hw key2 number (%d)\n",
+					hki->hw_key2);
+				return -EINVAL;
+			}
+		}
+
+		ctx_p->keylen = keylen;
+		dev_dbg(dev, "cc_is_hw_key ret 0\n");
+
+		return 0;
+	}
+
+	/* verify weak keys */
+	if (ctx_p->flow_mode == S_DIN_to_DES) {
+		if (!des_ekey(tmp, key) &&
+		    (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
+			crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+			dev_dbg(dev, "weak DES key\n");
+			return -EINVAL;
+		}
+	}
+	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
+	    xts_check_key(tfm, key, keylen)) {
+		dev_dbg(dev, "weak XTS key\n");
+		return -EINVAL;
+	}
+	if (ctx_p->flow_mode == S_DIN_to_DES &&
+	    keylen == DES3_EDE_KEY_SIZE &&
+	    cc_verify_3des_keys(key, keylen)) {
+		dev_dbg(dev, "weak 3DES key\n");
+		return -EINVAL;
+	}
+
+	/* STAT_PHASE_1: Copy key to ctx */
+	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
+				max_key_buf_size, DMA_TO_DEVICE);
+
+	memcpy(ctx_p->user.key, key, keylen);
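+	/* 192 bit keys are padded with zeros to the 256 bit HW key buffer */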
+	if (keylen == 24)
+		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
+
+	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
+		/* ESSIV mode: derive key2 as the SHA-256 of key1, in SW */
+		int key_len = keylen >> 1;
+		int err;
+
+		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
+		desc->tfm = ctx_p->shash_tfm;
+
+		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
+					  ctx_p->user.key + key_len);
+		if (err) {
+			dev_err(dev, "Failed to hash ESSIV key.\n");
+			return err;
+		}
+	}
+	dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
+				   max_key_buf_size, DMA_TO_DEVICE);
+	ctx_p->keylen = keylen;
+
+	dev_dbg(dev, "key set successfully\n");
+	return 0;
+}
+
+static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
+				 struct blkcipher_req_ctx *req_ctx,
+				 unsigned int ivsize, unsigned int nbytes,
+				 struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	int cipher_mode = ctx_p->cipher_mode;
+	int flow_mode = ctx_p->flow_mode;
+	int direction = req_ctx->gen_ctx.op_type;
+	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
+	unsigned int key_len = ctx_p->keylen;
+	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+	unsigned int du_size = nbytes;
+
+	struct cc_crypto_alg *cc_alg =
+		container_of(tfm->__crt_alg, struct cc_crypto_alg,
+			     crypto_alg);
+
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+	    CRYPTO_ALG_BULK_DU_512)
+		du_size = 512;
+	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+	    CRYPTO_ALG_BULK_DU_4096)
+		du_size = 4096;
+
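+	/*
+	 * Build the setup descriptors: load the cipher state (IV) where
+	 * needed, then the key; XTS/ESSIV/BITLOCKER modes also load the
+	 * second (XEX) key and the tweak state.
+	 */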
+	switch (cipher_mode) {
+	case DRV_CIPHER_CBC:
+	case DRV_CIPHER_CBC_CTS:
+	case DRV_CIPHER_CTR:
+	case DRV_CIPHER_OFB:
+		/* Load cipher state */
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
+			     NS_BIT);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		if (cipher_mode == DRV_CIPHER_CTR ||
+		    cipher_mode == DRV_CIPHER_OFB) {
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+		} else {
+			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
+		}
+		(*seq_size)++;
+		/* FALLTHROUGH */
+	case DRV_CIPHER_ECB:
+		/* Load key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (flow_mode == S_DIN_to_AES) {
+			if (cc_is_hw_key(tfm)) {
+				set_hw_crypto_key(&desc[*seq_size],
+						  ctx_p->hw.key1_slot);
+			} else {
+				set_din_type(&desc[*seq_size], DMA_DLLI,
+					     key_dma_addr, ((key_len == 24) ?
+							    AES_MAX_KEY_SIZE :
+							    key_len), NS_BIT);
+			}
+			set_key_size_aes(&desc[*seq_size], key_len);
+		} else {
+			/*des*/
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     key_len, NS_BIT);
+			set_key_size_des(&desc[*seq_size], key_len);
+		}
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		(*seq_size)++;
+		break;
+	case DRV_CIPHER_XTS:
+	case DRV_CIPHER_ESSIV:
+	case DRV_CIPHER_BITLOCKER:
+		/* Load AES key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (cc_is_hw_key(tfm)) {
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key1_slot);
+		} else {
+			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
+				     (key_len / 2), NS_BIT);
+		}
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
+		(*seq_size)++;
+
+		/* load XEX key */
+		hw_desc_init(&desc[*seq_size]);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		if (cc_is_hw_key(tfm)) {
+			set_hw_crypto_key(&desc[*seq_size],
+					  ctx_p->hw.key2_slot);
+		} else {
+			set_din_type(&desc[*seq_size], DMA_DLLI,
+				     (key_dma_addr + (key_len / 2)),
+				     (key_len / 2), NS_BIT);
+		}
+		set_xex_data_unit_size(&desc[*seq_size], du_size);
+		set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
+		(*seq_size)++;
+
+		/* Set state */
+		hw_desc_init(&desc[*seq_size]);
+		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
+		set_cipher_mode(&desc[*seq_size], cipher_mode);
+		set_cipher_config0(&desc[*seq_size], direction);
+		set_key_size_aes(&desc[*seq_size], (key_len / 2));
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
+			     CC_AES_BLOCK_SIZE, NS_BIT);
+		(*seq_size)++;
+		break;
+	default:
+		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
+	}
+}
+
+static void cc_setup_cipher_data(struct crypto_tfm *tfm,
+				 struct blkcipher_req_ctx *req_ctx,
+				 struct scatterlist *dst,
+				 struct scatterlist *src, unsigned int nbytes,
+				 void *areq, struct cc_hw_desc desc[],
+				 unsigned int *seq_size)
+{
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	unsigned int flow_mode = ctx_p->flow_mode;
+
+	switch (ctx_p->flow_mode) {
+	case S_DIN_to_AES:
+		flow_mode = DIN_AES_DOUT;
+		break;
+	case S_DIN_to_DES:
+		flow_mode = DIN_DES_DOUT;
+		break;
+	default:
+		dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
+		return;
+	}
+	/* Process */
+	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(src), nbytes);
+		dev_dbg(dev, " data params addr %pad length 0x%X\n",
+			&sg_dma_address(dst), nbytes);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
+			     nbytes, NS_BIT);
+		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
+			      nbytes, NS_BIT, (!areq ? 0 : 1));
+		if (areq)
+			set_queue_last_ind(&desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		(*seq_size)++;
+	} else {
+		/* bypass: DMA the MLLI table into SRAM before processing */
+		dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
+			&req_ctx->mlli_params.mlli_dma_addr,
+			req_ctx->mlli_params.mlli_len,
+			(unsigned int)ctx_p->drvdata->mlli_sram_addr);
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_DLLI,
+			     req_ctx->mlli_params.mlli_dma_addr,
+			     req_ctx->mlli_params.mlli_len, NS_BIT);
+		set_dout_sram(&desc[*seq_size],
+			      ctx_p->drvdata->mlli_sram_addr,
+			      req_ctx->mlli_params.mlli_len);
+		set_flow_mode(&desc[*seq_size], BYPASS);
+		(*seq_size)++;
+
+		hw_desc_init(&desc[*seq_size]);
+		set_din_type(&desc[*seq_size], DMA_MLLI,
+			     ctx_p->drvdata->mlli_sram_addr,
+			     req_ctx->in_mlli_nents, NS_BIT);
+		if (req_ctx->out_nents == 0) {
+			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr);
+			set_dout_mlli(&desc[*seq_size],
+				      ctx_p->drvdata->mlli_sram_addr,
+				      req_ctx->in_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
+		} else {
+			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
+				(unsigned int)ctx_p->drvdata->mlli_sram_addr +
+				(u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
+			set_dout_mlli(&desc[*seq_size],
+				      (ctx_p->drvdata->mlli_sram_addr +
+				       (LLI_ENTRY_BYTE_SIZE *
+					req_ctx->in_mlli_nents)),
+				      req_ctx->out_mlli_nents, NS_BIT,
+				      (!areq ? 0 : 1));
+		}
+		if (areq)
+			set_queue_last_ind(&desc[*seq_size]);
+
+		set_flow_mode(&desc[*seq_size], flow_mode);
+		(*seq_size)++;
+	}
+}
+
+static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
+{
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)cc_req;
+	struct scatterlist *dst = areq->dst;
+	struct scatterlist *src = areq->src;
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+
+	cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+	kfree(req_ctx->iv);
+
+	/*
+	 * The crypto API expects us to set the req->info to the last
+	 * ciphertext block. For encrypt, simply copy from the result.
+	 * For decrypt, we must copy from a saved buffer since this
+	 * could be an in-place decryption operation and the src is
+	 * lost by this point.
+	 */
+	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
+		memcpy(areq->info, req_ctx->backup_info, ivsize);
+		kfree(req_ctx->backup_info);
+	} else if (!err) {
+		scatterwalk_map_and_copy(areq->info, areq->dst,
+					 (areq->nbytes - ivsize),
+					 ivsize, 0);
+	}
+
+	ablkcipher_request_complete(areq, err);
+}
+
+static int cc_cipher_process(struct ablkcipher_request *req,
+			     enum drv_crypto_direction direction)
+{
+	struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk_tfm);
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+	struct scatterlist *dst = req->dst;
+	struct scatterlist *src = req->src;
+	unsigned int nbytes = req->nbytes;
+	void *info = req->info;
+	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
+	struct cc_crypto_req cc_req = {};
+	int rc, seq_len = 0, cts_restore_flag = 0;
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	dev_dbg(dev, "%s req=%p info=%p nbytes=%d\n",
+		((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
+		"Encrypt" : "Decrypt"), req, info, nbytes);
+
+	/* STAT_PHASE_0: Init and sanity checks */
+
+	if (validate_data_size(ctx_p, nbytes)) {
+		dev_err(dev, "Unsupported data size %u.\n", nbytes);
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
+		rc = -EINVAL;
+		goto exit_process;
+	}
+	if (nbytes == 0) {
+		/* No data to process is valid */
+		rc = 0;
+		goto exit_process;
+	}
+
+	/* The IV we are handed may be allocated from the stack so
+	 * we must copy it to a DMAable buffer before use.
+	 */
+	req_ctx->iv = kmalloc(ivsize, flags);
+	if (!req_ctx->iv) {
+		rc = -ENOMEM;
+		goto exit_process;
+	}
+	memcpy(req_ctx->iv, info, ivsize);
+
+	/* For CTS, fall back to plain CBC when the data size is block aligned */
+	if (((nbytes % AES_BLOCK_SIZE) == 0) &&
+	    ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
+		ctx_p->cipher_mode = DRV_CIPHER_CBC;
+		cts_restore_flag = 1;
+	}
+
+	/* Setup DX request structure */
+	cc_req.user_cb = (void *)cc_cipher_complete;
+	cc_req.user_arg = (void *)req;
+
+#ifdef ENABLE_CYCLE_COUNT
+	cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
+
+#endif
+
+	/* Setup request context */
+	req_ctx->gen_ctx.op_type = direction;
+
+	/* STAT_PHASE_1: Map buffers */
+
+	rc = cc_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
+				      req_ctx->iv, src, dst, flags);
+	if (rc) {
+		dev_err(dev, "map_request() failed\n");
+		goto exit_process;
+	}
+
+	/* STAT_PHASE_2: Create sequence */
+
+	/* Setup processing */
+	cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+	/* Data processing */
+	cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
+			     &seq_len);
+
+	/* do we need to generate IV? */
+	if (req_ctx->is_giv) {
+		cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+		cc_req.ivgen_dma_addr_len = 1;
+		/* set the IV size (8/16 B long)*/
+		cc_req.ivgen_size = ivsize;
+	}
+
+	/* STAT_PHASE_3: Lock HW and push sequence */
+
+	rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
+			     &req->base);
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		/* Failed to send the request or request completed
+		 * synchronously
+		 */
+		cc_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
+	}
+
+exit_process:
+	if (cts_restore_flag)
+		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;
+
+	if (rc != -EINPROGRESS && rc != -EBUSY) {
+		kfree(req_ctx->backup_info);
+		kfree(req_ctx->iv);
+	}
+
+	return rc;
+}
+
+static int cc_cipher_encrypt(struct ablkcipher_request *req)
+{
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->is_giv = false;
+	req_ctx->backup_info = NULL;
+	/* the error path frees the IV copy, so it must start out NULL */
+	req_ctx->iv = NULL;
+
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
+}
+
+static int cc_cipher_decrypt(struct ablkcipher_request *req)
+{
+	struct crypto_ablkcipher *ablk_tfm = crypto_ablkcipher_reqtfm(req);
+	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	unsigned int ivsize = crypto_ablkcipher_ivsize(ablk_tfm);
+	gfp_t flags = cc_gfp_flags(&req->base);
+
+	/*
+	 * Allocate and save the last IV sized bytes of the source, which will
+	 * be lost in case of in-place decryption. They are needed to return
+	 * the chaining IV in cc_cipher_complete().
+	 */
+	req_ctx->backup_info = kmalloc(ivsize, flags);
+	if (!req_ctx->backup_info)
+		return -ENOMEM;
+
+	scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
+				 (req->nbytes - ivsize), ivsize, 0);
+	req_ctx->is_giv = false;
+	req_ctx->iv = NULL;
+
+	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
+}
+
+/* DX Block cipher alg */
+static struct cc_alg_template blkcipher_algs[] = {
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			.geniv = "eseqiv",
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-du512-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "xts(aes)",
+		.driver_name = "xts-aes-du4096-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_XTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "essiv(aes)",
+		.driver_name = "essiv-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "essiv(aes)",
+		.driver_name = "essiv-aes-du512-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "essiv(aes)",
+		.driver_name = "essiv-aes-du4096-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_ESSIV,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "bitlocker(aes)",
+		.driver_name = "bitlocker-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "bitlocker(aes)",
+		.driver_name = "bitlocker-aes-du512-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_512,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "bitlocker(aes)",
+		.driver_name = "bitlocker-aes-du4096-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_BULK_DU_4096,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE * 2,
+			.max_keysize = AES_MAX_KEY_SIZE * 2,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_BITLOCKER,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "ecb(aes)",
+		.driver_name = "ecb-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "cbc(aes)",
+		.driver_name = "cbc-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+		},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "ofb(aes)",
+		.driver_name = "ofb-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_OFB,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "cts1(cbc(aes))",
+		.driver_name = "cts1-cbc-aes-dx",
+		.blocksize = AES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC_CTS,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-dx",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CTR,
+		.flow_mode = S_DIN_to_AES,
+	},
+	{
+		.name = "cbc(des3_ede)",
+		.driver_name = "cbc-3des-dx",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+	},
+	{
+		.name = "ecb(des3_ede)",
+		.driver_name = "ecb-3des-dx",
+		.blocksize = DES3_EDE_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES3_EDE_KEY_SIZE,
+			.max_keysize = DES3_EDE_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_DES,
+	},
+	{
+		.name = "cbc(des)",
+		.driver_name = "cbc-des-dx",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = DES_BLOCK_SIZE,
+			},
+		.cipher_mode = DRV_CIPHER_CBC,
+		.flow_mode = S_DIN_to_DES,
+	},
+	{
+		.name = "ecb(des)",
+		.driver_name = "ecb-des-dx",
+		.blocksize = DES_BLOCK_SIZE,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = cc_cipher_setkey,
+			.encrypt = cc_cipher_encrypt,
+			.decrypt = cc_cipher_decrypt,
+			.min_keysize = DES_KEY_SIZE,
+			.max_keysize = DES_KEY_SIZE,
+			.ivsize = 0,
+			},
+		.cipher_mode = DRV_CIPHER_ECB,
+		.flow_mode = S_DIN_to_DES,
+	},
+};
+
+static
+struct cc_crypto_alg *cc_cipher_create_alg(struct cc_alg_template *template,
+					   struct device *dev)
+{
+	struct cc_crypto_alg *t_alg;
+	struct crypto_alg *alg;
+
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+	if (!t_alg)
+		return ERR_PTR(-ENOMEM);
+
+	alg = &t_alg->crypto_alg;
+
+	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
+	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+		 template->driver_name);
+	alg->cra_module = THIS_MODULE;
+	alg->cra_priority = CC_CRA_PRIO;
+	alg->cra_blocksize = template->blocksize;
+	alg->cra_alignmask = 0;
+	alg->cra_ctxsize = sizeof(struct cc_cipher_ctx);
+
+	alg->cra_init = cc_cipher_init;
+	alg->cra_exit = cc_cipher_exit;
+	alg->cra_type = &crypto_ablkcipher_type;
+	alg->cra_ablkcipher = template->template_ablkcipher;
+	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+				template->type;
+
+	t_alg->cipher_mode = template->cipher_mode;
+	t_alg->flow_mode = template->flow_mode;
+
+	return t_alg;
+}
+
+int cc_cipher_free(struct cc_drvdata *drvdata)
+{
+	struct cc_crypto_alg *t_alg, *n;
+	struct cc_cipher_handle *blkcipher_handle =
+						drvdata->blkcipher_handle;
+
+	if (blkcipher_handle) {
+		/* Remove registered algs */
+		list_for_each_entry_safe(t_alg, n,
+					 &blkcipher_handle->blkcipher_alg_list,
+					 entry) {
+			crypto_unregister_alg(&t_alg->crypto_alg);
+			list_del(&t_alg->entry);
+			kfree(t_alg);
+		}
+		kfree(blkcipher_handle);
+		drvdata->blkcipher_handle = NULL;
+	}
+	return 0;
+}
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
+{
+	struct cc_cipher_handle *ablkcipher_handle;
+	struct cc_crypto_alg *t_alg;
+	struct device *dev = drvdata_to_dev(drvdata);
+	int rc = -ENOMEM;
+	int alg;
+
+	ablkcipher_handle = kmalloc(sizeof(*ablkcipher_handle), GFP_KERNEL);
+	if (!ablkcipher_handle)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&ablkcipher_handle->blkcipher_alg_list);
+	drvdata->blkcipher_handle = ablkcipher_handle;
+
+	/* Linux crypto */
+	dev_dbg(dev, "Number of algorithms = %zu\n",
+		ARRAY_SIZE(blkcipher_algs));
+	for (alg = 0; alg < ARRAY_SIZE(blkcipher_algs); alg++) {
+		dev_dbg(dev, "creating %s\n", blkcipher_algs[alg].driver_name);
+		t_alg = cc_cipher_create_alg(&blkcipher_algs[alg], dev);
+		if (IS_ERR(t_alg)) {
+			rc = PTR_ERR(t_alg);
+			dev_err(dev, "%s alg allocation failed\n",
+				blkcipher_algs[alg].driver_name);
+			goto fail0;
+		}
+		t_alg->drvdata = drvdata;
+
+		dev_dbg(dev, "registering %s\n",
+			blkcipher_algs[alg].driver_name);
+		rc = crypto_register_alg(&t_alg->crypto_alg);
+		dev_dbg(dev, "%s alg registration rc = %x\n",
+			t_alg->crypto_alg.cra_driver_name, rc);
+		if (rc) {
+			dev_err(dev, "%s alg registration failed\n",
+				t_alg->crypto_alg.cra_driver_name);
+			kfree(t_alg);
+			goto fail0;
+		}
+
+		list_add_tail(&t_alg->entry,
+			      &ablkcipher_handle->blkcipher_alg_list);
+		dev_dbg(dev, "Registered %s\n",
+			t_alg->crypto_alg.cra_driver_name);
+	}
+	return 0;
+
+fail0:
+	cc_cipher_free(drvdata);
+	return rc;
+}
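
For reviewers who want to exercise the code above: a minimal, untested
sketch of a kernel-side user going through the skcipher API (which
layers over ablkcipher implementations). The function name is made up
and error reporting is kept to a minimum:

	#include <crypto/aes.h>
	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int cc_cbc_aes_example(void)
	{
		DECLARE_CRYPTO_WAIT(wait);
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		u8 key[AES_KEYSIZE_128] = { 0 };
		u8 iv[AES_BLOCK_SIZE] = { 0 };
		u8 *buf;
		int err;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* the data buffer must be DMA-able, i.e. not on the stack */
		buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!buf || !req) {
			err = -ENOMEM;
			goto out;
		}

		err = crypto_skcipher_setkey(tfm, key, sizeof(key));
		if (err)
			goto out;

		sg_init_one(&sg, buf, AES_BLOCK_SIZE);
		skcipher_request_set_callback(req,
					      CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

		/* the driver completes asynchronously; wait for the cb */
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	out:
		skcipher_request_free(req);
		kfree(buf);
		crypto_free_skcipher(tfm);
		return err;
	}

Asking for "cbc(aes)" may be served by any registered provider; asking
for the driver name "cbc-aes-dx" pins the request to this driver. Once
the driver probes, the entries are also listed in /proc/crypto.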
diff --git a/drivers/crypto/ccree/cc_cipher.h b/drivers/crypto/ccree/cc_cipher.h
new file mode 100644
index 0000000..4c181c7
--- /dev/null
+++ b/drivers/crypto/ccree/cc_cipher.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+
+/* \file cc_cipher.h
+ * ARM CryptoCell Cipher Crypto API
+ */
+
+#ifndef __CC_CIPHER_H__
+#define __CC_CIPHER_H__
+
+#include <linux/kernel.h>
+#include <crypto/algapi.h>
+#include "cc_driver.h"
+#include "cc_buffer_mgr.h"
+
+/* Crypto cipher flags */
+#define CC_CRYPTO_CIPHER_KEY_KFDE0	BIT(0)
+#define CC_CRYPTO_CIPHER_KEY_KFDE1	BIT(1)
+#define CC_CRYPTO_CIPHER_KEY_KFDE2	BIT(2)
+#define CC_CRYPTO_CIPHER_KEY_KFDE3	BIT(3)
+#define CC_CRYPTO_CIPHER_DU_SIZE_512B	BIT(4)
+
+#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE1 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE2 | \
+					CC_CRYPTO_CIPHER_KEY_KFDE3)
+
+struct blkcipher_req_ctx {
+	struct async_gen_req_ctx gen_ctx;
+	enum cc_req_dma_buf_type dma_buf_type;
+	u32 in_nents;
+	u32 in_mlli_nents;
+	u32 out_nents;
+	u32 out_mlli_nents;
+	u8 *backup_info; /* IV-sized tail of src, copied to req->info on decrypt */
+	u8 *iv;
+	bool is_giv;
+	struct mlli_params mlli_params;
+};
+
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
+
+int cc_cipher_free(struct cc_drvdata *drvdata);
+
+#ifndef CRYPTO_ALG_BULK_MASK
+
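+/*
+ * Fallback definitions for kernels whose crypto headers do not provide
+ * the bulk data-unit-size flags used by the du512/du4096 alg variants.
+ */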
+#define CRYPTO_ALG_BULK_DU_512	0x00002000
+#define CRYPTO_ALG_BULK_DU_4096	0x00004000
+#define CRYPTO_ALG_BULK_MASK	(CRYPTO_ALG_BULK_DU_512 |\
+				CRYPTO_ALG_BULK_DU_4096)
+#endif /* CRYPTO_ALG_BULK_MASK */
+
+#ifdef CRYPTO_TFM_REQ_HW_KEY
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+	return (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_HW_KEY);
+}
+
+#else
+
+struct arm_hw_key_info {
+	int hw_key1;
+	int hw_key2;
+};
+
+static inline bool cc_is_hw_key(struct crypto_tfm *tfm)
+{
+	return false;
+}
+
+#endif /* CRYPTO_TFM_REQ_HW_KEY */
+
+#endif /*__CC_CIPHER_H__*/
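
As for the hardware key path: on a platform kernel that defines
CRYPTO_TFM_REQ_HW_KEY, a caller selects the KFDE key slots instead of
passing key material. A rough, untested sketch (the flag and struct
arm_hw_key_info come from an out-of-tree patch, and the slot numbers
are illustrative):

	struct arm_hw_key_info hki = { .hw_key1 = 0, .hw_key2 = 1 };

	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_HW_KEY);
	/* keylen still describes the size of the keys held in the slots */
	err = crypto_skcipher_setkey(tfm, (const u8 *)&hki,
				     AES_KEYSIZE_256 * 2);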
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 941ca86..77393a6 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -19,6 +19,7 @@
 #include "cc_request_mgr.h"
 #include "cc_buffer_mgr.h"
 #include "cc_debugfs.h"
+#include "cc_cipher.h"
 #include "cc_ivgen.h"
 #include "cc_sram_mgr.h"
 #include "cc_pm.h"
@@ -279,8 +280,17 @@ static int init_cc_resources(struct platform_device *plat_dev)
 		goto post_power_mgr_err;
 	}
 
+	/* Allocate crypto algs */
+	rc = cc_cipher_alloc(new_drvdata);
+	if (rc) {
+		dev_err(dev, "cc_cipher_alloc failed\n");
+		goto post_ivgen_err;
+	}
+
 	return 0;
 
+post_ivgen_err:
+	cc_ivgen_fini(new_drvdata);
 post_power_mgr_err:
 	cc_pm_fini(new_drvdata);
 post_buf_mgr_err:
@@ -309,6 +319,7 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
 	struct cc_drvdata *drvdata =
 		(struct cc_drvdata *)platform_get_drvdata(plat_dev);
 
+	cc_cipher_free(drvdata);
 	cc_ivgen_fini(drvdata);
 	cc_pm_fini(drvdata);
 	cc_buffer_mgr_fini(drvdata);
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index b3be754..2e94957 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -15,6 +15,7 @@
 #endif
 #include <linux/dma-mapping.h>
 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/aes.h>
 #include <crypto/sha.h>
 #include <crypto/authenc.h>
@@ -109,6 +110,7 @@ struct cc_drvdata {
 	struct platform_device *plat_dev;
 	cc_sram_addr_t mlli_sram_addr;
 	void *buff_mgr_handle;
+	void *blkcipher_handle;
 	void *request_mgr_handle;
 	void *ivgen_handle;
 	void *sram_mgr_handle;
-- 
2.7.4
