* [WIP] crypto: add support for Orion5X crypto engine
@ 2009-05-07 21:03 Sebastian Andrzej Siewior
  2009-05-07 21:39 ` Ben Dooks
From: Sebastian Andrzej Siewior @ 2009-05-07 21:03 UTC (permalink / raw)
  To: linux-arm-kernel; +Cc: Nicolas Pitre, linux-crypto

update since last post, unfortunately not much:
- interrupt handler fix
- s/mav/mv

dm-crypt still crashes, but a few delays seem to help. Argh...

Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
---
 drivers/crypto/Kconfig      |    9 +
 drivers/crypto/Makefile     |    1 +
 drivers/crypto/mv_crypto.c  |  725 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 735 insertions(+), 0 deletions(-)
 create mode 100644 drivers/crypto/mv_crypto.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index e522144..fa564b5 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -157,6 +157,15 @@ config S390_PRNG
 	  ANSI X9.17 standard. The PRNG is usable via the char device
 	  /dev/prandom.
 
+config CRYPTO_DEV_MARVELL_CRYPTO_ENGINE
+	tristate "Marvell's Cryptographic Engine"
+	depends on PLAT_ORION
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+	help
+	  This driver allows you to utilize the cryptographic engine which can
+	  be found on certain SoCs like QNAP's TS-209.
+
 config CRYPTO_DEV_HIFN_795X
 	tristate "Driver HIFN 795x crypto accelerator chips"
 	select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 73557b2..6020a58 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,5 +2,6 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CRYPTO_ENGINE) += mv_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
diff --git a/drivers/crypto/mv_crypto.c b/drivers/crypto/mv_crypto.c
new file mode 100644
index 0000000..40eb083
--- /dev/null
+++ b/drivers/crypto/mv_crypto.c
@@ -0,0 +1,725 @@
+/*
+ * Support for Marvell's crypto engine which can be found on some Orion5X
+ * boards.
+ *
+ * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ * License: GPL
+ *
+ */
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+
+enum engine_status {
+	engine_idle,
+	engine_busy,
+	engine_w_dequeue,
+};
+
+struct req_progress {
+	struct sg_mapping_iter src_sg_it;
+	struct sg_mapping_iter dst_sg_it;
+
+	/* src mostly */
+	int this_sg_b_left;
+	int src_start;
+	int crypt_len;
+	/* dst mostly */
+	int this_dst_sg_b_left;
+	int dst_start;
+	int total_req_bytes;
+};
+
+struct crypto_priv {
+	void __iomem *reg;
+	void __iomem *sram;
+	int irq;
+	struct task_struct *queue_th;
+
+	spinlock_t lock;
+	struct crypto_queue queue;
+	enum engine_status eng_st;
+	struct ablkcipher_request *cur_req;
+	struct req_progress p;
+};
+
+static struct crypto_priv *cpg;
+
+static void reg_write(void __iomem *mem, u32 val)
+{
+	__raw_writel(val, mem);
+}
+
+static u32 reg_read(void __iomem *mem)
+{
+	return __raw_readl(mem);
+}
+
+#define DIGEST_INITIAL_VAL_A	0xdd00
+#define DES_CMD_REG		0xdd58
+
+#define SEC_ACCEL_CMD		0xde00
+#define SEC_CMD_EN_SEC_ACCL0	(1 << 0)
+#define SEC_CMD_EN_SEC_ACCL1	(1 << 1)
+#define SEC_CMD_DISABLE_SEC	(1 << 2)
+
+#define SEC_ACCEL_DESC_P0	0xde04
+#define SEC_DESC_P0_PTR(x)	(x)
+
+#define SEC_ACCEL_DESC_P1	0xde14
+#define SEC_DESC_P1_PTR(x)	(x)
+
+#define SEC_ACCEL_CFG		0xde08
+#define SEC_CFG_STOP_DIG_ERR	(1 << 0)
+#define SEC_CFG_CH0_W_IDMA	(1 << 7)
+#define SEC_CFG_CH1_W_IDMA	(1 << 8)
+#define SEC_CFG_ACT_CH0_IDMA	(1 << 9)
+#define SEC_CFG_ACT_CH1_IDMA	(1 << 10)
+
+#define SEC_ACCEL_STATUS	0xde0c
+#define SEC_ST_ACT_0		(1 << 0)
+#define SEC_ST_ACT_1		(1 << 1)
+
+
+/*
+ * FPGA_INT_STATUS looks like an FPGA leftover and is undocumented. I assume
+ * that it was part of an IRQ controller in the FPGA and someone forgot to
+ * remove it while switching to the core and moving to SEC_ACCEL_INT_STATUS.
+ */
+#define FPGA_INT_STATUS		0xdd68
+#define SEC_ACCEL_INT_STATUS	0xde20
+#define SEC_INT_AUTH_DONE	(1 << 0)
+#define SEC_INT_DES_E_DONE	(1 << 1)
+#define SEC_INT_AES_E_DONE	(1 << 2)
+#define SEC_INT_AES_D_DONE	(1 << 3)
+#define SEC_INT_ENC_DONE	(1 << 4)
+#define SEC_INT_ACCEL0_DONE	(1 << 5)
+#define SEC_INT_ACCEL1_DONE	(1 << 6)
+#define SEC_INT_ACC0_IDMA_DONE	(1 << 7)
+#define SEC_INT_ACC1_IDMA_DONE	(1 << 8)
+
+#define SEC_ACCEL_INT_MASK	0xde24
+
+#define AES_KEY_LEN	(8 * 4)
+
+struct sec_accel_config {
+
+	u32 config;
+#define CFG_OP_MAC_ONLY		0
+#define CFG_OP_CRYPT_ONLY	1
+#define CFG_OP_MAC_CRYPT	2
+#define CFG_OP_CRYPT_MAC	3
+#define CFG_MACM_MD5		(4 << 4)
+#define CFG_MACM_SHA1		(5 << 4)
+#define CFG_MACM_HMAC_MD5	(6 << 4)
+#define CFG_MACM_HMAC_SHA1	(7 << 4)
+#define CFG_ENCM_DES		(1 << 8)
+#define CFG_ENCM_3DES		(2 << 8)
+#define CFG_ENCM_AES		(3 << 8)
+#define CFG_DIR_ENC		(0 << 12)
+#define CFG_DIR_DEC		(1 << 12)
+#define CFG_ENC_MODE_ECB	(0 << 16)
+#define CFG_ENC_MODE_CBC	(1 << 16)
+#define CFG_3DES_EEE		(0 << 20)
+#define CFG_3DES_EDE		(1 << 20)
+#define CFG_AES_LEN_128		(0 << 24)
+#define CFG_AES_LEN_192		(1 << 24)
+#define CFG_AES_LEN_256		(2 << 24)
+
+	u32 enc_p;
+#define ENC_P_SRC(x)		(x)
+#define ENC_P_DST(x)		((x) << 16)
+
+	u32 enc_len;
+#define ENC_LEN(x)		(x)
+
+	u32 enc_key_p;
+#define ENC_KEY_P(x)		(x)
+
+	u32 enc_iv;
+#define ENC_IV_POINT(x)		((x) << 0)
+#define ENC_IV_BUF_POINT(x)	((x) << 16)
+
+	u32 mac_src_p;
+#define MAC_SRC_DATA_P(x)	(x)
+#define MAC_SRC_TOTAL_LEN(x)	((x) << 16)
+
+	u32 mac_digest;
+	u32 mac_iv;
+} __attribute__((packed));
+	/*
+	 * /-----------\ 0
+	 * | ACCEL CFG |	4 * 8
+	 * |-----------| 0x20
+	 * | CRYPT KEY |	8 * 4
+	 * |-----------| 0x40
+	 * |  IV   IN  |	4 * 4
+	 * |-----------| 0x40 (inplace)
+	 * |  IV BUF   |	4 * 4
+	 * |-----------| 0x50
+	 * |  DATA IN  |	16 * x
+	 * |-----------| 0x50 (inplace operation)
+	 * |  DATA OUT |	16 * x
+	 * \-----------/
+	 */
+#define SRAM_CONFIG		0x00
+#define SRAM_DATA_KEY_P		0x20
+#define SRAM_DATA_IV		0x40
+#define SRAM_DATA_IV_BUF	0x40
+#define SRAM_DATA_IN_START	0x50
+#define SRAM_DATA_OUT_START	0x50
+
+struct mv_ctx {
+	u8 aes_enc_key[AES_KEY_LEN];
+	u32 aes_dec_key[8];
+	int key_len;
+	u32 need_calc_aes_dkey;
+};
+
+enum crypto_op {
+	COP_AES_ECB,
+	COP_AES_CBC,
+};
+
+struct mv_req_ctx {
+	enum crypto_op op;
+	int decrypt;
+};
+
+#if 0
+static void hex_dump(unsigned char *info, unsigned char *buf, unsigned int len)
+{
+	printk(KERN_ERR "%s\n", info);
+	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET,
+			16, 1,
+			buf, len, false);
+	printk(KERN_CONT "\n");
+}
+#endif
+static void compute_aes_dec_key(struct mv_ctx *ctx)
+{
+	struct crypto_aes_ctx gen_aes_key;
+	int key_pos;
+
+	if (!ctx->need_calc_aes_dkey)
+		return;
+
+	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
+
+	key_pos = ctx->key_len + 24;
+	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
+	switch (ctx->key_len) {
+	case AES_KEYSIZE_256:
+		key_pos -= 2;
+		/* fall through */
+	case AES_KEYSIZE_192:
+		key_pos -= 2;
+		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos], 4 * 4);
+		break;
+	}
+	ctx->need_calc_aes_dkey = 0;
+}
+
+static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	switch (len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_192:
+	case AES_KEYSIZE_256:
+		break;
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->key_len = len;
+	ctx->need_calc_aes_dkey = 1;
+
+	memcpy(ctx->aes_enc_key, key, len);
+	return 0;
+}
+static void mv_process_current_q(int first_block);
+
+#define MAX_REQ_SIZE	(8000)
+
+static void setup_data_in(struct ablkcipher_request *req)
+{
+	int ret;
+	void *buf;
+
+	if (!cpg->p.this_sg_b_left) {
+		ret = sg_miter_next(&cpg->p.src_sg_it);
+		BUG_ON(!ret);
+		cpg->p.this_sg_b_left = cpg->p.src_sg_it.length;
+		cpg->p.src_start = 0;
+	}
+
+	cpg->p.crypt_len = min(cpg->p.this_sg_b_left, MAX_REQ_SIZE);
+
+	buf = cpg->p.src_sg_it.addr;
+	buf += cpg->p.src_start;
+
+	memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
+	cpg->p.this_sg_b_left -= cpg->p.crypt_len;
+	cpg->p.src_start += cpg->p.crypt_len;
+}
+
+static void mv_crypto_algo_completion(void)
+{
+	struct ablkcipher_request *req = cpg->cur_req;
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	if (req_ctx->op != COP_AES_CBC)
+		return;
+
+	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
+}
+
+static void dequeue_complete_req(void)
+{
+	struct ablkcipher_request *req = cpg->cur_req;
+//	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+//	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	void *buf;
+	int ret;
+
+	cpg->p.total_req_bytes += cpg->p.crypt_len;
+	do {
+		int dst_copy;
+
+		if (!cpg->p.this_dst_sg_b_left) {
+			ret = sg_miter_next(&cpg->p.dst_sg_it);
+			BUG_ON(!ret);
+			cpg->p.this_dst_sg_b_left = cpg->p.dst_sg_it.length;
+			cpg->p.dst_start = 0;
+		}
+
+		buf = cpg->p.dst_sg_it.addr;
+		buf += cpg->p.dst_start;
+
+		dst_copy = min(cpg->p.crypt_len, cpg->p.this_dst_sg_b_left);
+
+		memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
+
+		cpg->p.this_dst_sg_b_left -= dst_copy;
+		cpg->p.crypt_len -= dst_copy;
+		cpg->p.dst_start += dst_copy;
+	} while (cpg->p.crypt_len > 0);
+
+	BUG_ON(cpg->eng_st != engine_w_dequeue);
+	if (cpg->p.total_req_bytes < req->nbytes) {
+		/* process next scatter list entry */
+		cpg->eng_st = engine_busy;
+		mv_process_current_q(0);
+	} else {
+		sg_miter_stop(&cpg->p.src_sg_it);
+		sg_miter_stop(&cpg->p.dst_sg_it);
+		mv_crypto_algo_completion();
+		cpg->eng_st = engine_idle;
+		req->base.complete(&req->base, 0);
+	}
+}
+
+static void mv_process_current_q(int first_block)
+{
+	struct ablkcipher_request *req = cpg->cur_req;
+	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	struct sec_accel_config op;
+
+	switch (req_ctx->op) {
+	case COP_AES_ECB:
+		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+		break;
+	case COP_AES_CBC:
+		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) | ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
+		if (first_block)
+			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
+		break;
+	}
+	if (req_ctx->decrypt) {
+		op.config |= CFG_DIR_DEC;
+		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key, AES_KEY_LEN);
+	} else {
+		op.config |= CFG_DIR_ENC;
+		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key, AES_KEY_LEN);
+	}
+
+	switch (ctx->key_len) {
+	case AES_KEYSIZE_128:
+		op.config |= CFG_AES_LEN_128;
+		break;
+	case AES_KEYSIZE_192:
+		op.config |= CFG_AES_LEN_192;
+		break;
+	case AES_KEYSIZE_256:
+		op.config |= CFG_AES_LEN_256;
+		break;
+	}
+	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
+		ENC_P_DST(SRAM_DATA_OUT_START);
+	op.enc_key_p = SRAM_DATA_KEY_P;
+
+	setup_data_in(req);
+	op.enc_len = cpg->p.crypt_len;
+	memcpy(cpg->sram + SRAM_CONFIG, &op,
+			sizeof(struct sec_accel_config));
+
+	reg_write(cpg->reg + SEC_ACCEL_DESC_P0, SRAM_CONFIG);
+	/* GO */
+	reg_write(cpg->reg + SEC_ACCEL_CMD, SEC_CMD_EN_SEC_ACCL0);
+
+	/*
+	 * XXX: add timer if the interrupt does not occur for some mystery
+	 * reason
+	 */
+}
+
+static int count_sgs(struct ablkcipher_request *req)
+{
+	int total_bytes;
+	int i = 0;
+
+	total_bytes = req->nbytes;
+
+	do {
+		total_bytes -= req->src[i].length;
+		i++;
+
+	} while (total_bytes > 0);
+
+	return i;
+}
+
+static void mv_enqueue_new_req(struct ablkcipher_request *req)
+{
+	int num_sgs;
+
+	cpg->cur_req = req;
+	memset(&cpg->p, 0, sizeof(struct req_progress));
+
+	num_sgs = count_sgs(req);
+	sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, 0);
+	sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, 0);
+	mv_process_current_q(1);
+}
+
+static int queue_manag(void *data)
+{
+	unsigned long flags;
+	enum engine_status old_st;
+
+	do {
+		struct ablkcipher_request *req;
+		struct crypto_async_request *async_req = NULL;
+		struct crypto_async_request *backlog;
+
+		__set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock_irqsave(&cpg->lock, flags);
+		old_st = cpg->eng_st;
+
+		backlog = crypto_get_backlog(&cpg->queue);
+		spin_unlock_irqrestore(&cpg->lock, flags);
+
+		if (old_st == engine_w_dequeue)
+			dequeue_complete_req();
+
+		spin_lock_irqsave(&cpg->lock, flags);
+		if (cpg->eng_st == engine_idle) {
+			async_req = crypto_dequeue_request(&cpg->queue);
+			if (async_req) {
+				BUG_ON(cpg->eng_st != engine_idle);
+				cpg->eng_st = engine_busy;
+			}
+		}
+		spin_unlock_irqrestore(&cpg->lock, flags);
+
+		if (backlog) {
+			backlog->complete(backlog, -EINPROGRESS);
+			backlog = NULL;
+		}
+
+		if (async_req) {
+			req = container_of(async_req, struct ablkcipher_request, base);
+			mv_enqueue_new_req(req);
+			async_req = NULL;
+		}
+
+		schedule();
+
+	} while (!kthread_should_stop());
+	return 0;
+}
+
+static int mv_handle_req(struct ablkcipher_request *req)
+{
+//	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	unsigned long flags;
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+	int ret;
+
+	spin_lock_irqsave(&cpg->lock, flags);
+	ret = ablkcipher_enqueue_request(&cpg->queue, req);
+/*	if (cpg->eng_st == engine_idle) */
+	wake_up_process(cpg->queue_th);
+	spin_unlock_irqrestore(&cpg->lock, flags);
+	return ret;
+}
+
+static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+{
+//	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->op = COP_AES_ECB;
+	req_ctx->decrypt = 0;
+
+	return mv_handle_req(req);
+}
+
+static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+{
+	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->op = COP_AES_ECB;
+	req_ctx->decrypt = 1;
+
+	compute_aes_dec_key(ctx);
+	return mv_handle_req(req);
+}
+
+static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+{
+//	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->op = COP_AES_CBC;
+	req_ctx->decrypt = 0;
+
+	return mv_handle_req(req);
+}
+
+static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+{
+	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+	req_ctx->op = COP_AES_CBC;
+	req_ctx->decrypt = 1;
+
+	compute_aes_dec_key(ctx);
+	return mv_handle_req(req);
+}
+
+static int mv_cra_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
+	return 0;
+}
+
+irqreturn_t crypto_int(int irq, void *priv)
+{
+//	struct crypto_priv *cp = priv;
+	u32 val;
+
+	val = reg_read(cpg->reg + SEC_ACCEL_INT_STATUS);
+	if (!(val & SEC_INT_ACCEL0_DONE))
+		return IRQ_NONE;
+
+	val &= ~SEC_INT_ACCEL0_DONE;
+	reg_write(cpg->reg + FPGA_INT_STATUS, val);
+	reg_write(cpg->reg + SEC_ACCEL_INT_STATUS, val);
+	BUG_ON(cpg->eng_st != engine_busy);
+	cpg->eng_st = engine_w_dequeue;
+	wake_up_process(cpg->queue_th);
+	return IRQ_HANDLED;
+}
+
+struct crypto_alg mv_aes_alg_ecb = {
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "mv-ecb-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= 16,
+	.cra_ctxsize	= sizeof(struct mv_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= mv_cra_init,
+	.cra_u		= {
+		.ablkcipher = {
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	mv_setkey_aes,
+			.encrypt	=	mv_enc_aes_ecb,
+			.decrypt	=	mv_dec_aes_ecb,
+		},
+	},
+};
+
+struct crypto_alg mv_aes_alg_cbc = {
+	.cra_name		= "cbc(aes)",
+	.cra_driver_name	= "mv-cbc-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct mv_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= mv_cra_init,
+	.cra_u		= {
+		.ablkcipher = {
+			.ivsize		=	AES_BLOCK_SIZE,
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	mv_setkey_aes,
+			.encrypt	=	mv_enc_aes_cbc,
+			.decrypt	=	mv_dec_aes_cbc,
+		},
+	},
+};
+
+static int m_probe(struct platform_device *pdev)
+{
+	struct crypto_priv *cp;
+	struct resource *res;
+	int irq;
+	int ret;
+
+	if (cpg) {
+		printk(KERN_ERR "Second crypto dev?\n");
+		return -EBUSY;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	if (!res)
+		return -ENODEV;
+
+	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	if (!cp)
+		return -ENOMEM;
+
+	spin_lock_init(&cp->lock);
+	crypto_init_queue(&cp->queue, 50);
+	cp->reg = ioremap(res->start, res->end - res->start + 1);
+	if (!cp->reg) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+	if (!res) {
+		ret = -ENODEV;
+		goto err_unmap_reg;
+	}
+
+	cp->sram = ioremap(res->start, res->end - res->start + 1);
+	if (!cp->sram) {
+		ret = -ENOMEM;
+		goto err_unmap_reg;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0 || irq == NO_IRQ) {
+		ret = irq;
+		goto err_unmap_sram;
+	}
+	cp->irq = irq;
+
+	platform_set_drvdata(pdev, cp);
+	cpg = cp;
+
+	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
+	if (IS_ERR(cp->queue_th)) {
+		ret = PTR_ERR(cp->queue_th);
+		goto err_unmap_sram;
+	}
+
+	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), cp);
+	if (ret)
+		goto err_thread;
+
+	reg_write(cpg->reg + SEC_ACCEL_INT_MASK, SEC_INT_ACCEL0_DONE);
+	reg_write(cpg->reg + SEC_ACCEL_CFG, SEC_CFG_STOP_DIG_ERR);
+
+	ret = crypto_register_alg(&mv_aes_alg_ecb);
+	if (ret) {
+		printk(KERN_ERR "Reg of algo failed: %d\n", ret);
+		goto err_irq;
+	}
+	ret = crypto_register_alg(&mv_aes_alg_cbc);
+	if (ret) {
+		printk(KERN_ERR "Reg of algo failed: %d\n", ret);
+		goto err_unreg_ecb;
+	}
+	return 0;
+err_unreg_ecb:
+	crypto_unregister_alg(&mv_aes_alg_ecb);
+err_irq:
+	free_irq(irq, cp);
+err_thread:
+	kthread_stop(cp->queue_th);
+err_unmap_sram:
+	iounmap(cp->sram);
+err_unmap_reg:
+	iounmap(cp->reg);
+err:
+	kfree(cp);
+	cpg = NULL;
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int m_remove(struct platform_device *pdev)
+{
+	struct crypto_priv *cp = platform_get_drvdata(pdev);
+
+	crypto_unregister_alg(&mv_aes_alg_ecb);
+	crypto_unregister_alg(&mv_aes_alg_cbc);
+	kthread_stop(cp->queue_th);
+	free_irq(cp->irq, cp);
+	memset(cp->sram, 0, 8 * 1024);
+	iounmap(cp->sram);
+	iounmap(cp->reg);
+	kfree(cp);
+	cpg = NULL;
+	return 0;
+}
+
+static struct platform_driver marvell_crypto = {
+	.probe          = m_probe,
+	.remove         = m_remove,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "mv,orion5x-crypto",
+	},
+};
+
+static int __init crypto_init(void)
+{
+	return platform_driver_register(&marvell_crypto);
+}
+module_init(crypto_init);
+
+static void __exit crypto_exit(void)
+{
+	platform_driver_unregister(&marvell_crypto);
+}
+module_exit(crypto_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL");
-- 
1.6.0.6



* Re: [WIP] crypto: add support for Orion5X crypto engine
  2009-05-07 21:03 [WIP] crypto: add support for Orion5X crypto engine Sebastian Andrzej Siewior
@ 2009-05-07 21:39 ` Ben Dooks
  2009-06-11 11:40   ` Sebastian Andrzej Siewior
From: Ben Dooks @ 2009-05-07 21:39 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior; +Cc: linux-arm-kernel, Nicolas Pitre, linux-crypto

On Thu, May 07, 2009 at 11:03:21PM +0200, Sebastian Andrzej Siewior wrote:
> [...]
> diff --git a/drivers/crypto/mv_crypto.c b/drivers/crypto/mv_crypto.c
> new file mode 100644
> index 0000000..40eb083
> --- /dev/null
> +++ b/drivers/crypto/mv_crypto.c
> [...]
> +struct req_progress {
> +	struct sg_mapping_iter src_sg_it;
> +	struct sg_mapping_iter dst_sg_it;
> +
> +	/* src mostly */
> +	int this_sg_b_left;
> +	int src_start;
> +	int crypt_len;
> +	/* dst mostly */
> +	int this_dst_sg_b_left;
> +	int dst_start;
> +	int total_req_bytes;
> +};

kerneldoc style documentation wouldn't go amiss here.
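
e.g. something along these lines (the field meanings are inferred from
how the code uses them, so please correct where I guessed wrong):

/**
 * struct req_progress - state of the request currently on the engine
 * @src_sg_it:		mapping iterator over the source scatterlist
 * @dst_sg_it:		mapping iterator over the destination scatterlist
 * @this_sg_b_left:	bytes left in the current source sg entry
 * @src_start:		offset into the current source sg entry
 * @crypt_len:		bytes handed to the engine in this round
 * @this_dst_sg_b_left:	bytes left in the current destination sg entry
 * @dst_start:		offset into the current destination sg entry
 * @total_req_bytes:	bytes of the request processed so far
 */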
> +
> +static void reg_write(void __iomem *mem, u32 val)
> +{
> +	__raw_writel(val, mem);
> +}
> +
> +static u32 reg_read(void __iomem *mem)
> +{
> +	return __raw_readl(mem);
> +}

do you really need to wrap these?
it is also readl()/writel() for pointers obtained from ioremap().
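
i.e. something like this (untested sketch):

	u32 val;

	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);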

> [...]
> +#if 0
> +static void hex_dump(unsigned char *info, unsigned char *buf, unsigned int len)
> +{
> +	printk(KERN_ERR "%s\n", info);
> +	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET,
> +			16, 1,
> +			buf, len, false);
> +	printk(KERN_CONT "\n");
> +}
> +#endif

#if 0 considered bad.
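
If you still need it now and then, calling the stock helper at the few
call sites, gated by a local debug switch, keeps it out of the way.
A sketch (MV_CRYPTO_DEBUG is made up here):

#ifdef MV_CRYPTO_DEBUG
	print_hex_dump_bytes("mv_crypto: ", DUMP_PREFIX_OFFSET, buf, len);
#endif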

> [...]
> +static int m_probe(struct platform_device *pdev)
> +{
> +	struct crypto_priv *cp;
> +	struct resource *res;
> +	int irq;
> +	int ret;
> +
> +	if (cpg) {
> +		printk(KERN_ERR "Second crypto dev?\n");
> +		return -EBUSY;
> +	}
> +
> +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
> +	if (!res)
> +		return -ENODEV;

Returning -ENODEV is considered harmful; it will not trigger any warning
from the driver core.
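
Something like this at least leaves a trace in the log (untested sketch;
-ENXIO matches "No such device or address"):

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res) {
		dev_err(&pdev->dev, "no 'regs' resource defined\n");
		return -ENXIO;
	}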

> +	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
> +	if (!cp)
> +		return -ENOMEM;
> +
> +	spin_lock_init(&cp->lock);
> +	crypto_init_queue(&cp->queue, 50);
> +	cp->reg = ioremap(res->start, res->end - res->start + 1);
> +	if (!cp->reg) {
> +		ret = -ENOMEM;
> +		goto err;
> +	}
> +
> +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
> +	if (!res) {
> +		ret = -ENODEV;
> +		goto err_unmap_reg;
> +	}

and see above.

> [...]

-- 
Ben

Q:      What's a light-year?
A:      One-third less calories than a regular year.



* Re: [WIP] crypto: add support for Orion5X crypto engine
  2009-05-07 21:39 ` Ben Dooks
@ 2009-06-11 11:40   ` Sebastian Andrzej Siewior
From: Sebastian Andrzej Siewior @ 2009-06-11 11:40 UTC (permalink / raw)
  To: Ben Dooks; +Cc: linux-arm-kernel, Nicolas Pitre, linux-crypto

* Ben Dooks | 2009-05-07 22:39:22 [+0100]:

Sorry for the late reply.

>> diff --git a/drivers/crypto/mv_crypto.c b/drivers/crypto/mv_crypto.c
>> new file mode 100644
>> index 0000000..40eb083
>> --- /dev/null
>> +++ b/drivers/crypto/mv_crypto.c

>> +struct req_progress {
>> +	struct sg_mapping_iter src_sg_it;
>> +	struct sg_mapping_iter dst_sg_it;
>> +
>> +	/* src mostly */
>> +	int this_sg_b_left;
>> +	int src_start;
>> +	int crypt_len;
>> +	/* dst mostly */
>> +	int this_dst_sg_b_left;
>> +	int dst_start;
>> +	int total_req_bytes;
>> +};
>
>kerneldoc style documentation wouldn't go amiss here.
Added.

>> +
>> +static void reg_write(void __iomem *mem, u32 val)
>> +{
>> +	__raw_writel(val, mem);
>> +}
>> +
>> +static u32 reg_read(void __iomem *mem)
>> +{
>> +	return __raw_readl(mem);
>> +}
>
>do you really need to wrap these?
Not really. Initially I planned to pass the device handle instead of the
memory pointer. Also using (addr, val) looks better than the other way
around.

>it is also readl()/writel() for pointers obtained from ioremap().
Correct. So I'll get rid of my wrapper and switch to readl()/writel().

>> +
>> +#if 0
>> +static void hex_dump(unsigned char *info, unsigned char *buf, unsigned int len)
>> +{
>> +	printk(KERN_ERR "%s\n", info);
>> +	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET,
>> +			16, 1,
>> +			buf, len, false);
>> +	printk(KERN_CONT "\n");
>> +}
>> +#endif
>
>#if 0 considered bad.
I needed this a few times. Now I don't, and it's gone.

>> +
>> +static int m_probe(struct platform_device *pdev)
>> +{
>> +	struct crypto_priv *cp;
>> +	struct resource *res;
>> +	int irq;
>> +	int ret;
>> +
>> +	if (cpg) {
>> +		printk(KERN_ERR "Second crypto dev?\n");
>> +		return -EBUSY;
>> +	}
>> +
>> +	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
>> +	if (!res)
>> +		return -ENODEV;
>
>Returning -ENODEV is considered harmful; it will not trigger any warning
>from the driver core.
I switched to ENXIO because that fits better (No such device or address),
I think. However, this also doesn't trigger any warning from the core.
What do you suggest?

>
>-- 
>Ben

Thanks for the review Ben.

Sebastian


* Re: [WIP] crypto: add support for Orion5X crypto engine
  2009-03-04 16:05 ` Ronen Shitrit
@ 2009-03-04 16:35   ` Sebastian Andrzej Siewior
From: Sebastian Andrzej Siewior @ 2009-03-04 16:35 UTC (permalink / raw)
  To: Ronen Shitrit; +Cc: linux-arm-kernel, linux-crypto

* Ronen Shitrit | 2009-03-04 18:05:12 [+0200]:

>However a SW calculation is also possible.
>Chapter 11.1.4 in [1] says that the decrypt key is the last 16/24/32 bytes
>created by the expansion algorithm. So I picked the key expand routine
>from the generic aes module and just passed the crypto test for decryption
>with a 16 byte long key. The other two key sizes failed. Probably the
>key slots are different. Currently I have no idea what's wrong.
>
>[Ronen Shitrit] In our driver we also chose the SW calculation workaround. I'm not sure why your code fails, but I can refer you to our driver as a reference; maybe you will find it a good reference for other issues as well.
>
>This is an old LSP for the 5182, but the crypto driver supposed to work on the 5182 just fine:
>http://downloads.buffalo.nas-central.org/KBPro_ARM9/GPL/source/linux-2.6.12_lsp.1.10.3.src.tar.gz
>
>Look for aesMakeKey API under arch/arm/mach-mv88fxx81/Soc/cesa/
Oh, thanks a lot. I'll take a look at this.

>I also wanted to point out that the crypto engine passed through 2 more evolutions after the 5182, which included:
>- Add a dedicated DMA to the crypto unit.
Does this mean that the crypto unit itself is now able to copy data and
I don't have to use the idma for that? That sounds great.

>- Support only one channel.
>- Fix main erratas.
>- Decrease SRAM size to 2K.
>- Add support for chain mode.
I can understand that, since SRAM is not that cheap and with chaining
support it should not matter.

>Maybe you should take those into account in your design to allow support for other crypto versions in the future.
>If you need more details pls check the security chapter on:
>http://www.marvell.com/files/products/embedded_processors/kirkwood/FS_88F6180_9x_6281_OpenSource.pdf
I'll take a look at this as well.

Sebastian


* RE: [WIP] crypto: add support for Orion5X crypto engine
  2009-03-02 22:10 Sebastian Andrzej Siewior
  2009-03-03 17:49 ` Jason
@ 2009-03-04 16:05 ` Ronen Shitrit
  2009-03-04 16:35   ` Sebastian Andrzej Siewior
From: Ronen Shitrit @ 2009-03-04 16:05 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior, linux-arm-kernel; +Cc: linux-crypto

The only functional part is ecb-aes in encryption mode. The decryption
seems to work in 16 byte key mode. According to the spec [1] the
decryption key is different and has to be computed by the HW.
Chapter 11.1.4 says that the decryption key is computed by performing a
dummy encrypt operation. This does not alter my key at all. Point 1 on
the next page refers to the AesKeyRdMode bit, which must be set prior
to reading the key. I can't find a definition of this bit, so I guess
the spec is out of date here.
[Ronen Shitrit] You are right; this should be fixed accordingly:

" To decrypt a data block with a given key, the host must first load this key into the decryption engine, then start the key generation process setting <AesDecMakeKey> field in the AES Decryption Command Register bit to 1. At the end of the key generation process, the host reads the key registers from the Encryption engine. This decryption key is loaded by the host into the decryption key registers, to start the required description process.
To read the decryption key from the encryption engine, the host must set the
<AesDecKeyReady> field in the AES Decryption Command Register to 1 prior to the reading of the AES encryption key registers. Setting this bit enables reading of the internal key in the AES Encryption engine, which at the end of an encryption process, is the key for the decryption start point."
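
For illustration, a rough sketch of that sequence using the register names
from your mav_crypto.c (untested; the key word order, the fixed 256 bit key
length and using CRYPTOE_AES_CMD_READY as the <AesDecKeyReady> bit are my
assumptions):

static void mav_hw_make_dec_key(struct mav_ctx *ctx)
{
	u32 *key = (u32 *)ctx->aes_key;
	int i;

	/* load the cipher key; D_KEY0..D_KEY7 sit at descending offsets */
	for (i = 0; i < 8; i++)
		reg_write(cpg->reg + CRYPTOE_AES_D_KEY0 - 4 * i, key[i]);

	/* start the key generation (<AesDecMakeKey>) */
	reg_write(cpg->reg + CRYPTOE_AES_DCMD,
			CRYPTOE_AES_DCMD_MAKE_D_KEY | CRYPTOE_AES_CMD_LEN_256);
	udelay(10);	/* no completion bit documented; crude stand-in */

	/* enable read-back, then fetch the key from the encryption engine */
	reg_write(cpg->reg + CRYPTOE_AES_DCMD, CRYPTOE_AES_CMD_READY);
	for (i = 0; i < 8; i++)
		ctx->key_out[i] = reg_read(cpg->reg + CRYPTOE_AES_E_KEY0 - 4 * i);
}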


However a SW calculation is also possible.
Chapter 11.1.4 in [1] says that the decrypt key is the last 16/24/32 bytes
created by the expansion algorithm. So I picked the key expand routine
from the generic aes module and just passed the crypto test for decryption
with a 16 byte long key. The other two key sizes failed. Probably the
key slots are different. Currently I have no idea what's wrong.

[Ronen Shitrit] In our driver we also chose the SW calculation workaround. I'm not sure why your code fails, but I can refer you to our driver as a reference; maybe you will find it a good reference for other issues as well.

This is an old LSP for the 5182, but the crypto driver supposed to work on the 5182 just fine:
http://downloads.buffalo.nas-central.org/KBPro_ARM9/GPL/source/linux-2.6.12_lsp.1.10.3.src.tar.gz

Look for aesMakeKey API under arch/arm/mach-mv88fxx81/Soc/cesa/

I also wanted to point out that the crypto engine passed through 2 more evolutions after the 5182, which included:
- Add a dedicated DMA to the crypto unit.
- Support only one channel.
- Fix main erratas.
- Decrease SRAM size to 2K.
- Add support for chain mode.
Maybe you should take those into account in your design to allow support for other crypto versions in the future.
If you need more details pls check the security chapter on:
http://www.marvell.com/files/products/embedded_processors/kirkwood/FS_88F6180_9x_6281_OpenSource.pdf




* Re: [WIP] crypto: add support for Orion5X crypto engine
  2009-03-03 17:49 ` Jason
@ 2009-03-03 22:08   ` Sebastian Andrzej Siewior
From: Sebastian Andrzej Siewior @ 2009-03-03 22:08 UTC (permalink / raw)
  To: Jason; +Cc: linux-crypto

* Jason | 2009-03-03 12:49:37 [-0500]:

> I found the nuts and bolts starting at page 174 of [3], with registers 
> listed starting on page 634.
they look the same to me. The registers seem to be in the same spots, and
the description is the same from what I recall.

> thx,
>
> Jason.

Sebastian


* Re: [WIP] crypto: add support for Orion5X crypto engine
  2009-03-02 22:10 Sebastian Andrzej Siewior
@ 2009-03-03 17:49 ` Jason
  2009-03-03 22:08   ` Sebastian Andrzej Siewior
  2009-03-04 16:05 ` Ronen Shitrit
From: Jason @ 2009-03-03 17:49 UTC (permalink / raw)
  To: Sebastian Andrzej Siewior; +Cc: linux-crypto

Sebastian Andrzej Siewior wrote:
> From: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
> 
> This is my current status of an async crypto driver for the Orion5X crypto
> unit. The driver uses the crypto accelerator. 

Is this the same security engine found in the new SheevaPlug Devkit [1]?  It has the Marvell 88F6281 (Kirkwood) processor in it.  This diagram [2] shows a "Security Engine", but doesn't give any detail...

I found the nuts and bolts starting at page 174 of [3], with registers listed starting on page 634.

thx,

Jason.

[1] - http://www.marvell.com/products/embedded_processors/developer/kirkwood/sheevaplug.jsp
[2] - http://www.marvell.com/products/embedded_processors/kirkwood/index.jsp
[3] - http://www.marvell.com/files/products/embedded_processors/kirkwood/FS_88F6180_9x_6281_OpenSource.pdf


* [WIP] crypto: add support for Orion5X crypto engine
@ 2009-03-02 22:10 Sebastian Andrzej Siewior
  2009-03-03 17:49 ` Jason
  2009-03-04 16:05 ` Ronen Shitrit
From: Sebastian Andrzej Siewior @ 2009-03-02 22:10 UTC (permalink / raw)
  To: linux-arm-kernel; +Cc: linux-crypto

From: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>

This is my current status of an async crypto driver for the Orion5X crypto
unit. The driver uses the crypto accelerator. The data is copied via
memcpy() and will be replaced with idma once the infrastructure around
it is working. This patch depends on "arm/orion5x: add sram support for
crypto" posted earler to the arm ml [0].

The only functional part is ecb-aes in encryption mode. The decryption
seems to work in 16 byte key mode. According to the spec [1] the
decryption key is different and has to be computed by the HW.
Chapter 11.1.4 says that the decryption key is computed by performing a
dummy encrypt operation. This does not alter my key at all. Point 1 on
the next page refers to the AesKeyRdMode bit, which must be set prior
to reading the key. I can't find a definition of this bit, so I guess
the spec is out of date here.
The register definition of the decryption unit has a bit called AesDecMakeKey.
Setting this bit alters the dec key after writing to it and reading it
back. The dummy encrypt operation seems not to be required. However, with
this key the result of the decrypt operation does not match the expected
result. The errata [2] in chapter 3.2 says that the computed key may be
wrong. The workaround is to "write the first key twice". This did not help,
so I must have done it wrong. However, if I write parts of the key again I
get different results, which are also wrong.
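
For what it's worth, my reading of that workaround is roughly this
(register names from the patch below; how literally to take "the first
key" is exactly what I am unsure about):

	u32 *key = (u32 *)ctx->aes_key;

	reg_write(cpg->reg + CRYPTOE_AES_D_KEY0, key[0]);
	reg_write(cpg->reg + CRYPTOE_AES_D_KEY0, key[0]);	/* write twice */
	/* ...then the remaining key words as usual */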
However a SW calculation is also possible.
Chapter 11.1.4 in [1] says that the decrypt key is the last 16/24/32 bytes
created by the expansion algorithm. So I picked the key expand routine
from the generic aes module and just passed the crypto test for decryption
with a 16 byte long key. The other two key sizes failed. Probably the
key slots are different. Currently I have no idea what's wrong.
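
For reference, the straightforward reading of that chapter in code (a
minimal sketch; this is the variant that passes for 16 byte keys and
fails for the other two sizes here):

static void sw_make_dec_key(u32 *dec_key, const u8 *key, int key_len)
{
	struct crypto_aes_ctx gen;
	int nwords = key_len / 4;
	int total = 4 * (6 + nwords + 1);	/* words in the expanded schedule */

	crypto_aes_expand_key(&gen, key, key_len);
	/* "the decrypt key is the last 16/24/32 bytes" */
	memcpy(dec_key, &gen.key_enc[total - nwords], key_len);
}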

[0] http://lists.arm.linux.org.uk/lurker/message/20090301.231447.6af0663c.en.html
[1] 88F5182, Doc. No. MV-S103345-01, Rev. C, April 29, 2008, Preliminary
[2] 88F5182, Doc. No. MV-S500802-00, Rev. E, April 29, 2008, Preliminary

Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
---
 drivers/crypto/Kconfig      |    9 +
 drivers/crypto/Makefile     |    1 +
 drivers/crypto/mav_crypto.c |  578 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 588 insertions(+), 0 deletions(-)
 create mode 100644 drivers/crypto/mav_crypto.c

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 01afd75..514fe78 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -157,6 +157,15 @@ config S390_PRNG
 	  ANSI X9.17 standard. The PRNG is usable via the char device
 	  /dev/prandom.
 
+config CRYPTO_DEV_MARVELL_CRYPTO_ENGINE
+	tristate "Marvell's Cryptographic Engine"
+	depends on PLAT_ORION
+	select CRYPTO_ALGAPI
+	select CRYPTO_AES
+	help
+	  This driver allows you to utilize the cryptographic engine which can
+	  be found on certain SoCs like QNAP's TS-209.
+
 config CRYPTO_DEV_HIFN_795X
 	tristate "Driver HIFN 795x crypto accelerator chips"
 	select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9bf4a2b..9c7053c 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CRYPTO_ENGINE) += mav_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/mav_crypto.c b/drivers/crypto/mav_crypto.c
new file mode 100644
index 0000000..969edcf
--- /dev/null
+++ b/drivers/crypto/mav_crypto.c
@@ -0,0 +1,578 @@
+/*
+ * Support for Marvell's crypto engine which can be found on some Orion5X
+ * boards.
+ *
+ * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
+ * License: GPL
+ *
+ */
+#include <linux/io.h>
+#include <linux/crypto.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/scatterlist.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+
+struct crypto_priv {
+	void __iomem *reg;
+	void __iomem *sram;
+	wait_queue_head_t wq;
+	int irq;
+};
+
+static struct crypto_priv *cpg;
+
+static void reg_write(void __iomem *mem, u32 val)
+{
+	__raw_writel(val, mem);
+}
+
+static u32 reg_read(void __iomem *mem)
+{
+	return __raw_readl(mem);
+}
+
+#define DIGEST_INITIAL_VAL_A	0xdd00
+#define DES_CMD_REG		0xdd58
+
+#define SEC_ACCEL_CMD		0xde00
+#define SEC_CMD_EN_SEC_ACCL0	(1 << 0)
+#define SEC_CMD_EN_SEC_ACCL1	(1 << 1)
+#define SEC_CMD_DISABLE_SEC	(1 << 2)
+
+#define SEC_ACCEL_DESC_P0	0xde04
+#define SEC_DESC_P0_PTR(x)	(x)
+
+#define SEC_ACCEL_DESC_P1	0xde14
+#define SEC_DESC_P1_PTR(x)	(x)
+
+#define SEC_ACCEL_CFG		0xde08
+#define SEC_CFG_STOP_DIG_ERR	(1 << 0)
+#define SEC_CFG_CH0_W_IDMA	(1 << 7)
+#define SEC_CFG_CH1_W_IDMA	(1 << 8)
+#define SEC_CFG_ACT_CH0_IDMA	(1 << 9)
+#define SEC_CFG_ACT_CH1_IDMA	(1 << 10)
+
+#define SEC_ACCEL_STATUS	0xde0c
+#define SEC_ST_ACT_0		(1 << 0)
+#define SEC_ST_ACT_1		(1 << 1)
+
+
+#define SEC_ACCEL_INT_STATUS	0xde20
+#define SEC_INT_AUTH_DONE	(1 << 0)
+#define SEC_INT_DES_E_DONE	(1 << 1)
+#define SEC_INT_AES_E_DONE	(1 << 2)
+#define SEC_INT_AES_D_DONE	(1 << 3)
+#define SEC_INT_ENC_DONE	(1 << 4)
+#define SEC_INT_ACCEL0_DONE	(1 << 5)
+#define SEC_INT_ACCEL1_DONE	(1 << 6)
+#define SEC_INT_ACC0_IDMA_DONE	(1 << 7)
+#define SEC_INT_ACC1_IDMA_DONE	(1 << 8)
+
+#define SEC_ACCEL_INT_MASK	0xde24
+
+
+#define AES_KEY_LEN	(8 * 4)
+
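+/*
+ * Operation descriptor. It is copied into the engine SRAM and the
+ * accelerator is pointed at it via SEC_ACCEL_DESC_P0.
+ */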
+struct sec_accel_config {
+
+	u32 config;
+#define CFG_OP_MAC_ONLY		(0)
+#define CFG_OP_CRYPT_ONLY	(1)
+#define CFG_OP_MAC_CRYPT	(2)
+#define CFG_OP_CRYPT_MAC	(3)
+#define CFG_MACM_MD5		(4 << 4)
+#define CFG_MACM_SHA1		(5 << 4)
+#define CFG_MACM_HMAC_MD5	(6 << 4)
+#define CFG_MACM_HMAC_SHA1	(7 << 4)
+#define CFG_ENCM_DES		(1 << 8)
+#define CFG_ENCM_3DES		(2 << 8)
+#define CFG_ENCM_AES		(3 << 8)
+#define CFG_DIR_ENC		(0 << 12)
+#define CFG_DIR_DEC		(1 << 12)
+#define CFG_ENC_MODE_ECB	(0 << 16)
+#define CFG_ENC_MODE_CBC	(1 << 16)
+#define CFG_3DES_EEE		(0 << 20)
+#define CFG_3DES_EDE		(1 << 20)
+#define CFG_AES_LEN_128		(0 << 24)
+#define CFG_AES_LEN_192		(1 << 24)
+#define CFG_AES_LEN_256		(2 << 24)
+
+	u32 enc_p;
+#define ENC_P_SRC(x)		(x)
+#define ENC_P_DST(x)		((x) << 16)
+
+	u32 enc_len;
+#define ENC_LEN(x)		(x)
+
+	u32 enc_key_p;
+#define ENC_KEY_P(x)		(x)
+
+	u32 enc_iv;
+#define ENC_IV_BUF_POINT(x)	((x) << 16)
+
+	u32 mac_src_p;
+#define MAC_SRC_DATA_P(x)	(x)
+#define MAC_SRC_TOTAL_LEN(x)	((x) << 16)
+
+	u32 mac_digest;
+	u32 mac_iv;
+} __attribute__ ((packed));
+
+struct mav_ctx {
+	struct sec_accel_config op;
+	u8 aes_key[AES_KEY_LEN];
+	u32 key_columns[8];
+	u32 key_out[8];
+	int key_len;
+	int hw_key_len;
+	u32 need_calc_dec_key;
+};
+
+static void hex_dump(unsigned char *info, unsigned char *buf, unsigned int len)
+{
+	printk(KERN_ERR "%s\n", info);
+	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET,
+			16, 1,
+			buf, len, false);
+	printk(KERN_CONT "\n");
+}
+
+#define CRYPTOE_AES_E_DATA0		0xddac
+#define CRYPTOE_AES_E_DATA1		0xdda8
+#define CRYPTOE_AES_E_DATA2		0xdda4
+#define CRYPTOE_AES_E_DATA3		0xdda0
+
+#define CRYPTOE_AES_E_KEY0		0xdd9c
+#define CRYPTOE_AES_E_KEY1		0xdd98
+#define CRYPTOE_AES_E_KEY2		0xdd94
+#define CRYPTOE_AES_E_KEY3		0xdd90
+#define CRYPTOE_AES_E_KEY4		0xdd8c
+#define CRYPTOE_AES_E_KEY5		0xdd88
+#define CRYPTOE_AES_E_KEY6		0xdd84
+#define CRYPTOE_AES_E_KEY7		0xdd80
+
+#define CRYPTOE_AES_D_DATA0		0xddec
+#define CRYPTOE_AES_D_DATA1		0xdde8
+#define CRYPTOE_AES_D_DATA2		0xdde4
+#define CRYPTOE_AES_D_DATA3		0xdde0
+
+#define CRYPTOE_AES_D_KEY0		0xdddc
+#define CRYPTOE_AES_D_KEY1		0xddd8
+#define CRYPTOE_AES_D_KEY2		0xddd4
+#define CRYPTOE_AES_D_KEY3		0xddd0
+#define CRYPTOE_AES_D_KEY4		0xddcc
+#define CRYPTOE_AES_D_KEY5		0xddc8
+#define CRYPTOE_AES_D_KEY6		0xddc4
+#define CRYPTOE_AES_D_KEY7		0xddc0
+
+#define CRYPTOE_AES_ECMD		0xddb0
+
+#define CRYPTOE_AES_DCMD		0xddf0
+#define CRYPTOE_AES_CMD_LEN_128		(0 << 0)
+#define CRYPTOE_AES_CMD_LEN_192		(1 << 0)
+#define CRYPTOE_AES_CMD_LEN_256		(2 << 0)
+#define CRYPTOE_AES_DCMD_MAKE_D_KEY	(1 << 2)
+#define CRYPTOE_AES_CMD_READY		(1 << 31)
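+
+/*
+ * Derive the AES decrypt key into ctx->key_out. The HW assisted path
+ * is currently compiled out (see the errata note in the changelog);
+ * the SW path expands the cipher key and copies the tail of the
+ * schedule.
+ */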
+static int compute_hw_dec_key(struct mav_ctx *ctx)
+{
+	struct crypto_aes_ctx gen_aes_key;
+	int timeout = 5;
+	int status;
+	int ret;
+	int dec_key_start;
+
+	if (!ctx->need_calc_dec_key)
+		return 0;
+#if 0
+#if 0
+	reg_write(cpg->reg + CRYPTOE_AES_DCMD, ctx->hw_key_len);
+
+	reg_write(cpg->reg + CRYPTOE_AES_D_KEY0, ctx->key_columns[0]);
+	reg_write(cpg->reg + CRYPTOE_AES_D_KEY1, ctx->key_columns[1]);
+	reg_write(cpg->reg + CRYPTOE_AES_D_KEY2, ctx->key_columns[2]);
+	reg_write(cpg->reg + CRYPTOE_AES_D_KEY3, ctx->key_columns[3]);
+
+#endif
+	reg_write(cpg->reg + CRYPTOE_AES_DCMD, ctx->hw_key_len | CRYPTOE_AES_DCMD_MAKE_D_KEY);
+
+	reg_write(cpg->reg + CRYPTOE_AES_D_KEY0, ctx->key_columns[0]);
+	reg_write(cpg->reg + CRYPTOE_AES_D_KEY1, ctx->key_columns[1]);
+	reg_write(cpg->reg + CRYPTOE_AES_D_KEY2, ctx->key_columns[2]);
+	reg_write(cpg->reg + CRYPTOE_AES_D_KEY3, ctx->key_columns[3]);
+
+	if (ctx->key_len >= AES_KEYSIZE_192) {
+		reg_write(cpg->reg + CRYPTOE_AES_D_KEY4, ctx->key_columns[4]);
+		reg_write(cpg->reg + CRYPTOE_AES_D_KEY5, ctx->key_columns[5]);
+	}
+
+	if (ctx->key_len > AES_KEYSIZE_192) {
+		reg_write(cpg->reg + CRYPTOE_AES_D_KEY6, ctx->key_columns[6]);
+		reg_write(cpg->reg + CRYPTOE_AES_D_KEY7, ctx->key_columns[7]);
+	}
+
+#if 1
+	reg_write(cpg->reg + CRYPTOE_AES_D_DATA0, 0);
+	reg_write(cpg->reg + CRYPTOE_AES_D_DATA1, 0);
+	reg_write(cpg->reg + CRYPTOE_AES_D_DATA2, 0);
+	reg_write(cpg->reg + CRYPTOE_AES_D_DATA3, 0);
+
+	do {
+		status = reg_read(cpg->reg + CRYPTOE_AES_DCMD);
+		if (status & CRYPTOE_AES_CMD_READY)
+			break;
+		timeout--;
+		if (!timeout)
+			break;
+		ndelay(1);
+	} while (1);
+
+	if (!timeout) {
+		printk(KERN_ERR "dec key not computed.\n");
+		return -ETIMEDOUT;
+	}
+#endif
+
+	ctx->key_out[0] = reg_read(cpg->reg + CRYPTOE_AES_D_KEY0);
+	ctx->key_out[1] = reg_read(cpg->reg + CRYPTOE_AES_D_KEY1);
+	ctx->key_out[2] = reg_read(cpg->reg + CRYPTOE_AES_D_KEY2);
+	ctx->key_out[3] = reg_read(cpg->reg + CRYPTOE_AES_D_KEY3);
+	if (ctx->key_len >= AES_KEYSIZE_192) {
+		ctx->key_out[4] = reg_read(cpg->reg + CRYPTOE_AES_D_KEY4);
+		ctx->key_out[5] = reg_read(cpg->reg + CRYPTOE_AES_D_KEY5);
+	}
+	if (ctx->key_len > AES_KEYSIZE_192) {
+		ctx->key_out[6] = reg_read(cpg->reg + CRYPTOE_AES_D_KEY6);
+		ctx->key_out[7] = reg_read(cpg->reg + CRYPTOE_AES_D_KEY7);
+	}
+
+	hex_dump("Key in : ", (unsigned char *)&ctx->key_columns[0], 8 * 4);
+	hex_dump("Key out: ", (unsigned char *)&ctx->key_out[0], 8 * 4);
+#endif
+
+	ctx->need_calc_dec_key = 0;
+	memset(&gen_aes_key, 0, sizeof(gen_aes_key));
+	ret = crypto_aes_expand_key(&gen_aes_key,
+			(const u8 *)ctx->key_columns, ctx->key_len);
+	if (ret < 0) {
+		printk(KERN_ERR "key expansion failed: %d\n", ret);
+		return ret;
+	}
+
+	/* XXX the 16-byte key works, the other two don't */
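+	/* dec_key_start indexes u32 words of key_enc: for a 128-bit key,
+	 * 16 + 24 = 40 selects the last 4 of the 44 schedule words, i.e.
+	 * the last 16 bytes as described in [1]. */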
+	switch (ctx->key_len) {
+	case AES_KEYSIZE_128:
+		dec_key_start = ctx->key_len + 24;
+		break;
+	case AES_KEYSIZE_192:
+		dec_key_start = ctx->key_len + 24;
+		break;
+	case AES_KEYSIZE_256:
+		dec_key_start = ctx->key_len + 20;
+		break;
+	default:
+		return -EINVAL;
+	}
+	memcpy(ctx->key_out, &gen_aes_key.key_enc[dec_key_start], 8 * 4);
+	hex_dump("Key enc:  ", (unsigned char *)gen_aes_key.key_enc, AES_MAX_KEYLENGTH);
+	hex_dump("Key dec:  ", (unsigned char *)gen_aes_key.key_dec, AES_MAX_KEYLENGTH);
+	hex_dump("The copy: ", (unsigned char *)ctx->key_out, 8 * 4);
+	return 0;
+}
+
+static int mav_setkey_aes_ecb(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct mav_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	printk(KERN_ERR "%s() keylen: %d\n", __func__, len);
+	ctx->op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+
+	switch (len) {
+	case AES_KEYSIZE_128:
+		ctx->op.config |= CFG_AES_LEN_128;
+		ctx->hw_key_len = CRYPTOE_AES_CMD_LEN_128;
+		break;
+	case AES_KEYSIZE_192:
+		ctx->op.config |= CFG_AES_LEN_192;
+		ctx->hw_key_len = CRYPTOE_AES_CMD_LEN_192;
+		break;
+	case AES_KEYSIZE_256:
+		ctx->op.config |= CFG_AES_LEN_256;
+		ctx->hw_key_len = CRYPTOE_AES_CMD_LEN_256;
+		break;
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->key_len = len;
+	ctx->need_calc_dec_key = 1;
+
+	memcpy(ctx->aes_key, key, len);
+	memcpy(ctx->key_columns, key, len);
+	hex_dump("Key #1: ", ctx->aes_key, len);
+	return 0;
+}
+
+static void mav_exec_aes_crypt(struct ablkcipher_request *req, int dec_key)
+{
+	struct mav_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	signed long time_out;
+	void *src_p;
+	void *dst_p;
+	DEFINE_WAIT(wait);
+
+	printk(KERN_ERR "%s()\n", __func__);
+	src_p = sg_virt(req->src);
+	dst_p = sg_virt(req->dst);
+	/*
+	 * currently just POC and that means:
+	 * - data is copied via memcpy() into SRAM and back
+	 * - the SG list may have only one entry
+	 * - we must not be called from ATOMIC context
+	 * - request len must be <= 4066 bytes
+	 */
+	BUG_ON(!src_p);
+	BUG_ON(!dst_p);
+	BUG_ON(req->src->length != req->nbytes);
+	BUG_ON(req->nbytes > 4066);
+
+	prepare_to_wait(&cpg->wq, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * /-----------\ 0
+	 * | ACCEL CFG |	4 * 8
+	 * |-----------| 0x20
+	 * | CRYPT KEY |	8 * 4
+	 * |-----------| 0x40
+	 * |  DATA IN  |	16 * x
+	 * |-----------| 0x40 (inplace operation)
+	 * |  DATA OUT |	16 * x
+	 * \-----------/
+	 */
+#define SRAM_CONFIG		(0x00)
+#define SRAM_DATA_KEY_P		(0x20)
+#define SRAM_DATA_IN_START	(0x40)
+#define SRAM_DATA_OUT_START	(0x40)
+
+	ctx->op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
+		ENC_P_DST(SRAM_DATA_OUT_START);
+	ctx->op.enc_key_p = SRAM_DATA_KEY_P;
+	ctx->op.enc_len = req->nbytes;
+
+	memcpy(cpg->sram + SRAM_CONFIG, &ctx->op,
+			sizeof(struct sec_accel_config));
+	memcpy(cpg->sram + SRAM_DATA_IN_START, src_p, req->nbytes);
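+	/* decryption needs the precomputed decrypt key, see
+	 * compute_hw_dec_key() */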
+	if (dec_key)
+		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->key_out, AES_KEY_LEN);
+	else
+		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_key, AES_KEY_LEN);
+
+	hex_dump("KEY: #2", cpg->sram + SRAM_DATA_KEY_P, AES_KEY_LEN);
+	hex_dump("SRC: #1", cpg->sram + SRAM_DATA_IN_START, req->nbytes);
+	hex_dump("DST: #1 ", cpg->sram + SRAM_DATA_OUT_START, req->nbytes);
+
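+	/* point the engine at the descriptor, unmask the ACCEL0 completion
+	 * interrupt and kick channel 0 */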
+	reg_write(cpg->reg + SEC_ACCEL_DESC_P0, SRAM_CONFIG);
+	reg_write(cpg->reg + SEC_ACCEL_INT_MASK, SEC_INT_ACCEL0_DONE);
+	reg_write(cpg->reg + SEC_ACCEL_CMD, SEC_CMD_EN_SEC_ACCL0);
+
+	time_out = schedule_timeout(HZ * 5);
+	if (!time_out)
+		printk(KERN_ERR "Timeout occurred, no interrupt :/\n");
+
+	hex_dump("DST: #2 ", cpg->sram + SRAM_DATA_OUT_START, req->nbytes);
+	memcpy(dst_p, cpg->sram + SRAM_DATA_OUT_START, req->nbytes);
+	finish_wait(&cpg->wq, &wait);
+	printk(KERN_ERR "%s() done\n", __func__);
+}
+
+static int mav_enc_aes_ecb(struct ablkcipher_request *req)
+{
+	struct mav_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	printk(KERN_ERR "%s()\n", __func__);
+	ctx->op.config &= ~CFG_DIR_DEC;
+	ctx->op.config |= CFG_DIR_ENC;
+
+	mav_exec_aes_crypt(req, 0);
+	return 0;
+}
+
+static int mav_dec_aes_ecb(struct ablkcipher_request *req)
+{
+	struct mav_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	printk(KERN_ERR "%s()\n", __func__);
+	compute_hw_dec_key(ctx);
+
+	ctx->op.config &= ~CFG_DIR_ENC;
+	ctx->op.config |= CFG_DIR_DEC;
+
+	mav_exec_aes_crypt(req, 1);
+	return 0;
+}
+
+static int mav_cra_init(struct crypto_tfm *tfm)
+{
+	printk(KERN_ERR "%s()\n", __func__);
+	return 0;
+}
+
+static irqreturn_t crypto_int(int irq, void *priv)
+{
+	struct crypto_priv *cp = priv;
+	u32 val;
+
+	val = reg_read(cp->reg + SEC_ACCEL_INT_STATUS);
+	printk(KERN_ERR "%s() %08x\n", __func__, val);
+	if (!(val & SEC_INT_ACCEL0_DONE))
+		return IRQ_NONE;
+	/* mask further engine interrupts only once we know it was ours */
+	reg_write(cp->reg + SEC_ACCEL_INT_MASK, 0);
+	wake_up(&cp->wq);
+
+	return IRQ_HANDLED;
+}
+
+static struct crypto_alg mav_aes_alg = {
+	.cra_name		= "ecb(aes)",
+	.cra_driver_name	= "mav-ecb-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct mav_ctx),
+	.cra_alignmask	= 7,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= mav_cra_init,
+	.cra_u		= {
+		.ablkcipher = {
+			.min_keysize	=	AES_MIN_KEY_SIZE,
+			.max_keysize	=	AES_MAX_KEY_SIZE,
+			.setkey		=	mav_setkey_aes_ecb,
+			.encrypt	=	mav_enc_aes_ecb,
+			.decrypt	=	mav_dec_aes_ecb,
+		},
+	},
+};
+
+static int m_probe(struct platform_device *pdev)
+{
+	struct crypto_priv *cp;
+	struct resource *res;
+	int irq;
+	int ret;
+
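+	/* only one engine instance is supported for now, tracked via cpg */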
+	if (cpg) {
+		printk(KERN_ERR "Second crypto dev?\n");
+		return -EBUSY;
+	}
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+	if (!res)
+		return -ENODEV;
+
+	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	if (!cp)
+		return -ENOMEM;
+
+	init_waitqueue_head(&cp->wq);
+	printk(KERN_ERR "regs @%x\n", res->start);
+	cp->reg = ioremap(res->start, resource_size(res));
+	if (!cp->reg) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	printk(KERN_ERR "regs remap @%p\n", cp->reg);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+	if (!res) {
+		ret = -ENODEV;
+		goto err_unmap_reg;
+	}
+	printk(KERN_ERR "sram @%x\n", res->start);
+
+	cp->sram = ioremap(res->start, resource_size(res));
+	if (!cp->sram) {
+		ret = -ENOMEM;
+		goto err_unmap_reg;
+	}
+
+	memset_io(cp->sram, 0, 8 * 1024);
+	printk(KERN_ERR "sram remap @%p\n", cp->sram);
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0 || irq == NO_IRQ) {
+		ret = irq < 0 ? irq : -ENXIO;
+		goto err_unmap_sram;
+	}
+	cp->irq = irq;
+
+	ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev), cp);
+	if (ret)
+		goto err_unmap_sram;
+
+	platform_set_drvdata(pdev, cp);
+	cpg = cp;
+
+	ret = crypto_register_alg(&mav_aes_alg);
+	if (ret) {
+		printk(KERN_ERR "Reg of algo failed: %d\n", ret);
+		goto err_reg;
+	}
+	return 0;
+err_reg:
+	free_irq(irq, cp);
+err_unmap_sram:
+	iounmap(cp->sram);
+err_unmap_reg:
+	iounmap(cp->reg);
+err:
+	kfree(cp);
+	cpg = NULL;
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int m_remove(struct platform_device *pdev)
+{
+	struct crypto_priv *cp = platform_get_drvdata(pdev);
+
+	crypto_unregister_alg(&mav_aes_alg);
+	free_irq(cp->irq, cp);
+	iounmap(cp->sram);
+	iounmap(cp->reg);
+	kfree(cp);
+	cpg = NULL;
+	return 0;
+}
+
+static struct platform_driver marvell_crypto = {
+	.probe          = m_probe,
+	.remove         = m_remove,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "mav,orion5x-crypto",
+	},
+};
+
+static int __init crypto_init(void)
+{
+	return platform_driver_register(&marvell_crypto);
+}
+module_init(crypto_init);
+
+static void __exit crypto_exit(void)
+{
+	platform_driver_unregister(&marvell_crypto);
+}
+
+module_exit(crypto_exit);
+
+MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
+MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
+MODULE_LICENSE("GPL");
-- 
1.6.0.6

