* [U-Boot] [PATCH v2 2/3] fit: allow fit to call hardware accelerated hash
2018-06-06 16:03 [U-Boot] [PATCH v2 1/3] crypto: add md5 to common hash functions Ben Whitten
@ 2018-06-06 16:03 ` Ben Whitten
2018-06-18 16:59 ` Tom Rini
2018-06-06 16:03 ` [U-Boot] [PATCH v2 3/3] crypto: add Atmel hardware acceleration for SHA1 & 256 Ben Whitten
1 sibling, 1 reply; 6+ messages in thread
From: Ben Whitten @ 2018-06-06 16:03 UTC (permalink / raw)
To: u-boot
Move to calling the abstraction which allows for hardware acceleration.
We also remove unneeded defines and only include objects if required.
Signed-off-by: Ben Whitten <ben.whitten@lairdtech.com>
---
common/hash.c | 4 ++++
common/image-fit.c | 27 +++++++++------------------
include/image.h | 42 ++++++++++++++++++++++--------------------
lib/Makefile | 5 ++++-
4 files changed, 39 insertions(+), 39 deletions(-)
diff --git a/common/hash.c b/common/hash.c
index d2f4b3f..ceee124 100644
--- a/common/hash.c
+++ b/common/hash.c
@@ -85,6 +85,7 @@ static int hash_finish_sha256(struct hash_algo *algo, void *ctx, void
}
#endif
+#if defined(CONFIG_CRC32)
static int hash_init_crc32(struct hash_algo *algo, void **ctxp)
{
uint32_t *ctx = malloc(sizeof(uint32_t));
@@ -110,6 +111,7 @@ static int hash_finish_crc32(struct hash_algo *algo, void *ctx, void *dest_buf,
free(ctx);
return 0;
}
+#endif
/*
* These are the hash algorithms we support. If we have hardware acceleration
@@ -159,6 +161,7 @@ static struct hash_algo hash_algo[] = {
#endif
},
#endif
+#ifdef CONFIG_CRC32
{
.name = "crc32",
.digest_size = 4,
@@ -168,6 +171,7 @@ static struct hash_algo hash_algo[] = {
.hash_update = hash_update_crc32,
.hash_finish = hash_finish_crc32,
},
+#endif
#ifdef CONFIG_MD5
{
.name = "md5",
diff --git a/common/image-fit.c b/common/image-fit.c
index 8c15ed1..7d8c961 100644
--- a/common/image-fit.c
+++ b/common/image-fit.c
@@ -1082,26 +1082,18 @@ int fit_set_timestamp(void *fit, int noffset, time_t timestamp)
int calculate_hash(const void *data, int data_len, const char *algo,
uint8_t *value, int *value_len)
{
- if (IMAGE_ENABLE_CRC32 && strcmp(algo, "crc32") == 0) {
- *((uint32_t *)value) = crc32_wd(0, data, data_len,
- CHUNKSZ_CRC32);
- *((uint32_t *)value) = cpu_to_uimage(*((uint32_t *)value));
- *value_len = 4;
- } else if (IMAGE_ENABLE_SHA1 && strcmp(algo, "sha1") == 0) {
- sha1_csum_wd((unsigned char *)data, data_len,
- (unsigned char *)value, CHUNKSZ_SHA1);
- *value_len = 20;
- } else if (IMAGE_ENABLE_SHA256 && strcmp(algo, "sha256") == 0) {
- sha256_csum_wd((unsigned char *)data, data_len,
- (unsigned char *)value, CHUNKSZ_SHA256);
- *value_len = SHA256_SUM_LEN;
- } else if (IMAGE_ENABLE_MD5 && strcmp(algo, "md5") == 0) {
- md5_wd((unsigned char *)data, data_len, value, CHUNKSZ_MD5);
- *value_len = 16;
- } else {
+ struct hash_algo *hash_algo;
+ int ret;
+
+ ret = hash_lookup_algo(algo, &hash_algo);
+ if (ret) {
debug("Unsupported hash alogrithm\n");
- return -1;
+ return ret;
}
+ hash_algo->hash_func_ws((unsigned char *)data, data_len,
+ (unsigned char *)value, hash_algo->chunk_size);
+ *value_len = hash_algo->digest_size;
+
return 0;
}
diff --git a/include/image.h b/include/image.h
index a5a5807..16bc097 100644
--- a/include/image.h
+++ b/include/image.h
@@ -32,6 +32,7 @@ struct fdt_region;
#define CONFIG_FIT_ENABLE_SHA256_SUPPORT
#define CONFIG_SHA1
#define CONFIG_SHA256
+#define CONFIG_CRC32
#define CONFIG_MD5
#define IMAGE_ENABLE_IGNORE 0
@@ -58,38 +59,39 @@ struct fdt_region;
#include <fdt_support.h>
# ifdef CONFIG_SPL_BUILD
# ifdef CONFIG_SPL_CRC32_SUPPORT
-# define IMAGE_ENABLE_CRC32 1
+# define CONFIG_CRC32
+# else
+# undef CONFIG_CRC32
# endif
# ifdef CONFIG_SPL_MD5_SUPPORT
-# define IMAGE_ENABLE_MD5 1
+# define CONFIG_MD5
+# else
+# undef CONFIG_MD5
# endif
# ifdef CONFIG_SPL_SHA1_SUPPORT
-# define IMAGE_ENABLE_SHA1 1
+# define CONFIG_SHA1
+# else
+# undef CONFIG_SHA1
# endif
# else
+# ifndef CONFIG_CRC32
# define CONFIG_CRC32 /* FIT images need CRC32 support */
-# define IMAGE_ENABLE_CRC32 1
-# define IMAGE_ENABLE_MD5 1
-# define IMAGE_ENABLE_SHA1 1
+# endif
+# ifndef CONFIG_MD5
+# define CONFIG_MD5
+# endif
+# ifndef CONFIG_SHA1
+# define CONFIG_SHA1
+# endif
# endif
-#ifndef IMAGE_ENABLE_CRC32
-#define IMAGE_ENABLE_CRC32 0
-#endif
-
-#ifndef IMAGE_ENABLE_MD5
-#define IMAGE_ENABLE_MD5 0
-#endif
-
-#ifndef IMAGE_ENABLE_SHA1
-#define IMAGE_ENABLE_SHA1 0
-#endif
-
#if defined(CONFIG_FIT_ENABLE_SHA256_SUPPORT) || \
defined(CONFIG_SPL_SHA256_SUPPORT)
-#define IMAGE_ENABLE_SHA256 1
+#ifndef CONFIG_SHA256
+#define CONFIG_SHA256
+#endif
#else
-#define IMAGE_ENABLE_SHA256 0
+#undef CONFIG_SHA256
#endif
#endif /* IMAGE_ENABLE_FIT */
diff --git a/lib/Makefile b/lib/Makefile
index 5c4aa73..5b40444 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_USB_TTY) += circbuf.o
obj-y += crc7.o
obj-y += crc8.o
obj-y += crc16.o
+obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_ERRNO_STR) += errno_str.o
obj-$(CONFIG_FIT) += fdtdec_common.o
obj-$(CONFIG_TEST_FDTDEC) += fdtdec_test.o
@@ -61,6 +62,8 @@ obj-$(CONFIG_$(SPL_TPL_)OF_CONTROL) += fdtdec_common.o
obj-$(CONFIG_$(SPL_TPL_)OF_CONTROL) += fdtdec.o
endif
+
+
ifdef CONFIG_SPL_BUILD
obj-$(CONFIG_SPL_YMODEM_SUPPORT) += crc16.o
obj-$(CONFIG_SPL_NET_SUPPORT) += net_utils.o
@@ -71,7 +74,7 @@ obj-y += errno.o
obj-y += display_options.o
CFLAGS_display_options.o := $(if $(BUILD_TAG),-DBUILD_TAG='"$(BUILD_TAG)"')
obj-$(CONFIG_BCH) += bch.o
-obj-y += crc32.o
+obj-$(CONFIG_$(SPL_TPL_)CRC32_SUPPORT) += crc32.o
obj-$(CONFIG_CRC32C) += crc32c.o
obj-y += ctype.o
obj-y += div64.o
--
2.7.4
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [U-Boot] [PATCH v2 3/3] crypto: add Atmel hardware acceleration for SHA1 & 256
2018-06-06 16:03 [U-Boot] [PATCH v2 1/3] crypto: add md5 to common hash functions Ben Whitten
2018-06-06 16:03 ` [U-Boot] [PATCH v2 2/3] fit: allow fit to call hardware accelerated hash Ben Whitten
@ 2018-06-06 16:03 ` Ben Whitten
1 sibling, 0 replies; 6+ messages in thread
From: Ben Whitten @ 2018-06-06 16:03 UTC (permalink / raw)
To: u-boot
We can use the hardware hash block to reduce space, particularly useful
for verifying FIT signatures from SPL.
Signed-off-by: Ben Whitten <ben.whitten@lairdtech.com>
---
drivers/crypto/Kconfig | 5 +
drivers/crypto/Makefile | 1 +
drivers/crypto/atmel_sha.c | 289 +++++++++++++++++++++++++++++++++++++++++++++
drivers/crypto/atmel_sha.h | 52 ++++++++
lib/Makefile | 2 +
5 files changed, 349 insertions(+)
create mode 100644 drivers/crypto/atmel_sha.c
create mode 100644 drivers/crypto/atmel_sha.h
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 1ea116b..7a20edb 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -2,4 +2,9 @@ menu "Hardware crypto devices"
source drivers/crypto/fsl/Kconfig
+config ATMEL_SHA
+ bool "Atmel SHA Driver support"
+ help
+ Enables the Atmel SHA accelerator.
+
endmenu
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index efbd1d3..07af449 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -4,5 +4,6 @@
# http://www.samsung.com
obj-$(CONFIG_EXYNOS_ACE_SHA) += ace_sha.o
+obj-$(CONFIG_ATMEL_SHA) += atmel_sha.o
obj-y += rsa_mod_exp/
obj-y += fsl/
diff --git a/drivers/crypto/atmel_sha.c b/drivers/crypto/atmel_sha.c
new file mode 100644
index 0000000..ef969eb
--- /dev/null
+++ b/drivers/crypto/atmel_sha.c
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Atmel SHA engine
+ * Copyright (c) 2018 Laird
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include "atmel_sha.h"
+
+#ifdef CONFIG_SHA_HW_ACCEL
+#include <u-boot/sha256.h>
+#include <u-boot/sha1.h>
+#include <hw_sha.h>
+
+#include <asm/io.h>
+#include <asm/arch/clk.h>
+#include <asm/arch/at91_pmc.h>
+
+enum atmel_hash_algos {
+ ATMEL_HASH_SHA1,
+ ATMEL_HASH_SHA256
+};
+
+struct sha_ctx {
+ enum atmel_hash_algos algo;
+ u32 length;
+ u8 buffer[64];
+};
+
+const u8 sha256_der_prefix[SHA256_DER_LEN] = {
+ 0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86,
+ 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05,
+ 0x00, 0x04, 0x20
+};
+
+const u8 sha1_der_prefix[SHA1_DER_LEN] = {
+ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e,
+ 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14
+};
+
+static enum atmel_hash_algos get_hash_type(struct hash_algo *algo)
+{
+ if (!strcmp(algo->name, "sha1"))
+ return ATMEL_HASH_SHA1;
+ else
+ return ATMEL_HASH_SHA256;
+}
+
+static int atmel_sha_process(const u8 *in_addr, u8 buflen)
+{
+ struct atmel_sha *sha = (struct atmel_sha *)ATMEL_BASE_SHA;
+ int i;
+ u32 *addr_buf;
+
+ /* Copy data in */
+ addr_buf = (u32 *)in_addr;
+ for (i = 0; i < (buflen / 4); i++)
+ sha->idatarx[i] = addr_buf[i];
+ debug("Atmel sha, engine is loaded\n");
+
+ /* Wait for hash to complete */
+ while ((readl(&sha->isr) & ATMEL_HASH_ISR_MASK)
+ != ATMEL_HASH_ISR_DATRDY)
+ ;
+ debug("Atmel sha, engine signaled completion\n");
+
+ return 0;
+}
+
+static int atmel_sha_chunk(struct sha_ctx *ctx, const u8 *buf, unsigned int size)
+{
+ u8 remaining, fill;
+
+ /* Chunk to 64 byte blocks */
+ remaining = ctx->length & 0x3F;
+ fill = 64 - remaining;
+
+ /* If we have things in the buffer transfer the remaining into it */
+ if (remaining && size >= fill) {
+ memcpy(ctx->buffer + remaining, buf, fill);
+
+ /* Process 64 byte chunk */
+ atmel_sha_process(ctx->buffer, 64);
+
+ size -= fill;
+ buf += fill;
+ ctx->length += fill;
+ remaining = 0;
+ }
+
+ /* We are aligned take from source for any additional */
+ while (size >= 64) {
+ /* Process 64 byte chunk */
+ atmel_sha_process(buf, 64);
+
+ size -= 64;
+ buf += 64;
+ ctx->length += 64;
+ }
+
+ if (size) {
+ memcpy(ctx->buffer + remaining, buf, size);
+ ctx->length += size;
+ }
+
+ return 0;
+}
+
+static int atmel_sha_fill_padding(struct sha_ctx *ctx)
+{
+ unsigned int index, padlen;
+ u64 size, bits;
+ u8 sha256_padding[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+
+ size = ctx->length;
+
+ bits = cpu_to_be64(size << 3);
+
+ /* 64 byte, 512 bit block size */
+ index = ctx->length & 0x3F;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+
+ /* set last entry to be 0x80 then 0's*/
+ atmel_sha_chunk(ctx, sha256_padding, padlen);
+ /* Bolt number of bits to the end */
+ atmel_sha_chunk(ctx, (u8 *)&bits, 8);
+
+ if (ctx->length & 0x3F)
+ debug("ERROR, Remainder after PADDING");
+
+ return 0;
+}
+
+/**
+ * Computes hash value of input pbuf using h/w acceleration
+ *
+ * @param in_addr A pointer to the input buffer
+ * @param buflen Byte length of input buffer
+ * @param out_addr A pointer to the output buffer. When complete
+ * 32 bytes are copied to pout[0]...pout[31]. Thus, a user
+ * should allocate at least 32 bytes at pOut in advance.
+ * @param chunk_size chunk size for sha256
+ */
+void hw_sha256(const uchar *in_addr, uint buflen,
+ uchar *out_addr, uint chunk_size)
+{
+ struct hash_algo *algo;
+ struct sha_ctx *ctx;
+
+ hash_lookup_algo("sha256", &algo);
+ hw_sha_init(algo, (void *)&ctx);
+ atmel_sha_chunk((void *)ctx, in_addr, buflen);
+ atmel_sha_fill_padding(ctx);
+ hw_sha_finish(algo, (void *)ctx, out_addr, buflen);
+}
+
+/**
+ * Computes hash value of input pbuf using h/w acceleration
+ *
+ * @param in_addr A pointer to the input buffer
+ * @param buflen Byte length of input buffer
+ * @param out_addr A pointer to the output buffer. When complete
+ * 32 bytes are copied to pout[0]...pout[31]. Thus, a user
+ * should allocate at least 32 bytes at pOut in advance.
+ * @param chunk_size chunk_size for sha1
+ */
+void hw_sha1(const uchar *in_addr, uint buflen,
+ uchar *out_addr, uint chunk_size)
+{
+ struct hash_algo *algo;
+ struct sha_ctx *ctx;
+
+ hash_lookup_algo("sha1", &algo);
+ hw_sha_init(algo, (void *)&ctx);
+ atmel_sha_chunk((void *)ctx, in_addr, buflen);
+ atmel_sha_fill_padding(ctx);
+ hw_sha_finish(algo, (void *)ctx, out_addr, buflen);
+}
+
+/*
+ * Create the context for sha progressive hashing using h/w acceleration
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctxp: Pointer to the pointer of the context for hashing
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_init(struct hash_algo *algo, void **ctxp)
+{
+ struct atmel_sha *sha = (struct atmel_sha *)ATMEL_BASE_SHA;
+ struct sha_ctx *ctx;
+ u32 reg;
+
+ ctx = malloc(sizeof(struct sha_ctx));
+ if (!ctx) {
+ debug("Failed to allocate context\n");
+ return -ENOMEM;
+ }
+ *ctxp = ctx;
+
+ ctx->algo = get_hash_type(algo);
+ ctx->length = 0;
+
+ debug("Atmel sha init\n");
+ at91_periph_clk_enable(ATMEL_ID_SHA);
+
+ /* Reset the SHA engine */
+ writel(ATMEL_HASH_CR_SWRST, &sha->cr);
+
+ /* Set AUTO mode and fastest operation */
+ reg = ATMEL_HASH_MR_SMOD_AUTO | ATMEL_HASH_MR_PROCDLY_SHORT;
+ if (ctx->algo == ATMEL_HASH_SHA1)
+ reg |= ATMEL_HASH_MR_ALGO_SHA1;
+ else
+ reg |= ATMEL_HASH_MR_ALGO_SHA256;
+ writel(reg, &sha->mr);
+
+ /* Set ready to receive first */
+ writel(ATMEL_HASH_CR_FIRST, &sha->cr);
+
+ /* Ready to roll */
+ return 0;
+}
+
+/*
+ * Update buffer for sha progressive hashing using h/w acceleration
+ *
+ * The context is not freed by this function; it always returns 0.
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctx: Pointer to the context for hashing
+ * @buf: Pointer to the buffer being hashed
+ * @size: Size of the buffer being hashed
+ * @is_last: 1 if this is the last update; 0 otherwise
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf,
+ unsigned int size, int is_last)
+{
+ struct sha_ctx *sha_ctx = ctx;
+
+ debug("Atmel sha update: %d bytes\n", size);
+
+ /* Send down in chunks */
+ atmel_sha_chunk(sha_ctx, buf, size);
+
+ if (is_last)
+ atmel_sha_fill_padding(sha_ctx);
+
+ return 0;
+}
+
+/*
+ * Copy sha hash result at destination location
+ *
+ * The context is freed after completion of hash operation or after an error.
+ *
+ * @algo: Pointer to the hash_algo struct
+ * @ctx: Pointer to the context for hashing
+ * @dest_buf: Pointer to the destination buffer where hash is to be copied
+ * @size: Size of the buffer being hashed
+ * @return 0 if ok, -ve on error
+ */
+int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf,
+ int size)
+{
+ struct atmel_sha *sha = (struct atmel_sha *)ATMEL_BASE_SHA;
+ struct sha_ctx *sha_ctx = ctx;
+ unsigned int len, i;
+ u32 *addr_buf;
+
+ /* Copy data back */
+ len = (sha_ctx->algo == ATMEL_HASH_SHA1) ?
+ SHA1_SUM_LEN : SHA256_SUM_LEN;
+ addr_buf = (u32 *)dest_buf;
+ for (i = 0; i < (len / 4); i++)
+ addr_buf[i] = sha->iodatarx[i];
+
+ free(ctx);
+
+ return 0;
+}
+
+#endif /* CONFIG_SHA_HW_ACCEL */
diff --git a/drivers/crypto/atmel_sha.h b/drivers/crypto/atmel_sha.h
new file mode 100644
index 0000000..68ed988
--- /dev/null
+++ b/drivers/crypto/atmel_sha.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Header file for Atmel SHA engine - SFR definitions
+ *
+ * Copyright (c) 2018 Laird
+ */
+
+#ifndef __DRIVERS_ATMEL_SHA_H__
+#define __DRIVERS_ATMEL_SHA_H__
+
+/* SHA register footprint */
+
+struct atmel_sha {
+ u32 cr;
+ u32 mr;
+ u32 reserved0[2];
+ u32 ier;
+ u32 idr;
+ u32 imr;
+ u32 isr;
+ u32 reserved1[8];
+ u32 idatarx[16];
+ u32 iodatarx[16];
+ u32 reserved2[16];
+};
+
+/* CR */
+#define ATMEL_HASH_CR_MASK (0xffff << 0)
+#define ATMEL_HASH_CR_START (1 << 0)
+#define ATMEL_HASH_CR_FIRST (1 << 4)
+#define ATMEL_HASH_CR_SWRST (1 << 8)
+
+/* MR */
+#define ATMEL_HASH_MR_MASK (0xffff << 0)
+#define ATMEL_HASH_MR_SMOD_MANUAL (0 << 0)
+#define ATMEL_HASH_MR_SMOD_AUTO (1 << 0)
+#define ATMEL_HASH_MR_SMOD_IDATAR0 (2 << 0)
+#define ATMEL_HASH_MR_PROCDLY_SHORT (0 << 4)
+#define ATMEL_HASH_MR_PROCDLY_LONG (1 << 4)
+#define ATMEL_HASH_MR_ALGO_SHA1 (0 << 8)
+#define ATMEL_HASH_MR_ALGO_SHA256 (1 << 8)
+#define ATMEL_HASH_MR_ALGO_SHA384 (2 << 8)
+#define ATMEL_HASH_MR_ALGO_SHA512 (3 << 8)
+#define ATMEL_HASH_MR_ALGO_SHA224 (4 << 8)
+#define ATMEL_HASH_MR_DUALBUFF_INACTIVE (0 << 16)
+#define ATMEL_HASH_MR_DUALBUFF_ACTIVE (1 << 16)
+
+/* ISR */
+#define ATMEL_HASH_ISR_MASK (1 << 0)
+#define ATMEL_HASH_ISR_DATRDY (1 << 0)
+
+#endif /* __DRIVERS_ATMEL_SHA_H__ */
diff --git a/lib/Makefile b/lib/Makefile
index 5b40444..834826c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -47,8 +47,10 @@ obj-y += list_sort.o
endif
obj-$(CONFIG_RSA) += rsa/
+ifneq ($(CONFIG_SHA_PROG_HW_ACCEL),y)
obj-$(CONFIG_SHA1) += sha1.o
obj-$(CONFIG_SHA256) += sha256.o
+endif
obj-$(CONFIG_$(SPL_)ZLIB) += zlib/
obj-$(CONFIG_$(SPL_)GZIP) += gunzip.o
--
2.7.4
^ permalink raw reply related [flat|nested] 6+ messages in thread