* [PATCH] arm64: crypto: Add ARM64 CRC32 hw accelerated module
@ 2014-11-19 17:19 Yazen Ghannam
From: Yazen Ghannam @ 2014-11-19 17:19 UTC
To: linux-kernel, linux-crypto; +Cc: herbert, Yazen Ghannam
This module registers a crc32 algorithm and a crc32c algorithm
that use the optional CRC32 and CRC32C instructions in ARMv8.
Tested on AMD Seattle.
Improvement compared to crc32c-generic algorithm:
TCRYPT CRC32C speed test shows ~450% speedup.
Simple dd write tests to btrfs filesystem show ~30% speedup.
Signed-off-by: Yazen Ghannam <yazen.ghannam@linaro.org>
Acked-by: Steve Capper <steve.capper@linaro.org>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/crypto/Kconfig | 4 +
arch/arm64/crypto/Makefile | 4 +
arch/arm64/crypto/crc32-arm64.c | 274 ++++++++++++++++++++++++++++++++++++++++
3 files changed, 282 insertions(+)
create mode 100644 arch/arm64/crypto/crc32-arm64.c
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 5562652..c1a0468 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -50,4 +50,8 @@ config CRYPTO_AES_ARM64_NEON_BLK
select CRYPTO_AES
select CRYPTO_ABLK_HELPER
+config CRYPTO_CRC32_ARM64
+ tristate "CRC32 and CRC32C using optional ARMv8 instructions"
+ depends on ARM64
+ select CRYPTO_HASH
endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index a3f935f..5720608 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -34,5 +34,9 @@ AFLAGS_aes-neon.o := -DINTERLEAVE=4
CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
+obj-$(CONFIG_CRYPTO_CRC32_ARM64) += crc32-arm64.o
+
+CFLAGS_crc32-arm64.o := -mcpu=generic+crc
+
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c
new file mode 100644
index 0000000..9499199
--- /dev/null
+++ b/arch/arm64/crypto/crc32-arm64.c
@@ -0,0 +1,274 @@
+/*
+ * crc32-arm64.c - CRC32 and CRC32C using optional ARMv8 instructions
+ *
+ * Module based on crypto/crc32c_generic.c
+ *
+ * CRC32 loop taken from Ed Nevill's Hadoop CRC patch
+ * http://mail-archives.apache.org/mod_mbox/hadoop-common-dev/201406.mbox/%3C1403687030.3355.19.camel%40localhost.localdomain%3E
+ *
+ * Using inline assembly instead of intrinsics in order to be backwards
+ * compatible with older compilers.
+ *
+ * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/unaligned/access_ok.h>
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+MODULE_AUTHOR("Yazen Ghannam <yazen.ghannam@linaro.org>");
+MODULE_DESCRIPTION("CRC32 and CRC32C using optional ARMv8 instructions");
+MODULE_LICENSE("GPL v2");
+
+#define CRC32X(crc, value) __asm__("crc32x %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32W(crc, value) __asm__("crc32w %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32H(crc, value) __asm__("crc32h %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32B(crc, value) __asm__("crc32b %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CX(crc, value) __asm__("crc32cx %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CW(crc, value) __asm__("crc32cw %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CH(crc, value) __asm__("crc32ch %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CB(crc, value) __asm__("crc32cb %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+
+static u32 crc32_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
+{
+ s64 length = len;
+
+ while ((length -= sizeof(u64)) >= 0) {
+ CRC32X(crc, get_unaligned_le64(p));
+ p += sizeof(u64);
+ }
+
+ /* The following is more efficient than the straight loop */
+ if (length & sizeof(u32)) {
+ CRC32W(crc, get_unaligned_le32(p));
+ p += sizeof(u32);
+ }
+ if (length & sizeof(u16)) {
+ CRC32H(crc, get_unaligned_le16(p));
+ p += sizeof(u16);
+ }
+ if (length & sizeof(u8))
+ CRC32B(crc, *p);
+
+ return crc;
+}
+
+static u32 crc32c_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
+{
+ s64 length = len;
+
+ while ((length -= sizeof(u64)) >= 0) {
+ CRC32CX(crc, get_unaligned_le64(p));
+ p += sizeof(u64);
+ }
+
+ /* The following is more efficient than the straight loop */
+ if (length & sizeof(u32)) {
+ CRC32CW(crc, get_unaligned_le32(p));
+ p += sizeof(u32);
+ }
+ if (length & sizeof(u16)) {
+ CRC32CH(crc, get_unaligned_le16(p));
+ p += sizeof(u16);
+ }
+ if (length & sizeof(u8))
+ CRC32CB(crc, *p);
+
+ return crc;
+}
+
+#define CHKSUM_BLOCK_SIZE 1
+#define CHKSUM_DIGEST_SIZE 4
+
+struct chksum_ctx {
+ u32 key;
+};
+
+struct chksum_desc_ctx {
+ u32 crc;
+};
+
+static int chksum_init(struct shash_desc *desc)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = mctx->key;
+
+ return 0;
+}
+
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy.
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
+
+ if (keylen != sizeof(mctx->key)) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ mctx->key = get_unaligned_le32(key);
+ return 0;
+}
+
+static int chksum_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = crc32_arm64_le_hw(ctx->crc, data, length);
+ return 0;
+}
+
+static int chksumc_update(struct shash_desc *desc, const u8 *data,
+ unsigned int length)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ ctx->crc = crc32c_arm64_le_hw(ctx->crc, data, length);
+ return 0;
+}
+
+static int chksum_final(struct shash_desc *desc, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ put_unaligned_le32(~ctx->crc, out);
+ return 0;
+}
+
+static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+ put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
+ return 0;
+}
+
+static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+ put_unaligned_le32(~crc32c_arm64_le_hw(crc, data, len), out);
+ return 0;
+}
+
+static int chksum_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ return __chksum_finup(ctx->crc, data, len, out);
+}
+
+static int chksumc_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out)
+{
+ struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+ return __chksumc_finup(ctx->crc, data, len, out);
+}
+
+static int chksum_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+ return __chksum_finup(mctx->key, data, length, out);
+}
+
+static int chksumc_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int length, u8 *out)
+{
+ struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+ return __chksumc_finup(mctx->key, data, length, out);
+}
+
+static int crc32_cra_init(struct crypto_tfm *tfm)
+{
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
+ mctx->key = ~0;
+ return 0;
+}
+
+static struct shash_alg crc32_alg = {
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .setkey = chksum_setkey,
+ .init = chksum_init,
+ .update = chksum_update,
+ .final = chksum_final,
+ .finup = chksum_finup,
+ .digest = chksum_digest,
+ .descsize = sizeof(struct chksum_desc_ctx),
+ .base = {
+ .cra_name = "crc32",
+ .cra_driver_name = "crc32-arm64-hw",
+ .cra_priority = 300,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 0,
+ .cra_ctxsize = sizeof(struct chksum_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32_cra_init,
+ }
+};
+
+static struct shash_alg crc32c_alg = {
+ .digestsize = CHKSUM_DIGEST_SIZE,
+ .setkey = chksum_setkey,
+ .init = chksum_init,
+ .update = chksumc_update,
+ .final = chksum_final,
+ .finup = chksumc_finup,
+ .digest = chksumc_digest,
+ .descsize = sizeof(struct chksum_desc_ctx),
+ .base = {
+ .cra_name = "crc32c",
+ .cra_driver_name = "crc32c-arm64-hw",
+ .cra_priority = 300,
+ .cra_blocksize = CHKSUM_BLOCK_SIZE,
+ .cra_alignmask = 0,
+ .cra_ctxsize = sizeof(struct chksum_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = crc32_cra_init,
+ }
+};
+
+static int __init crc32_mod_init(void)
+{
+ int err;
+
+ err = crypto_register_shash(&crc32_alg);
+
+ if (err)
+ return err;
+
+ err = crypto_register_shash(&crc32c_alg);
+
+ if (err) {
+ crypto_unregister_shash(&crc32_alg);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit crc32_mod_exit(void)
+{
+ crypto_unregister_shash(&crc32_alg);
+ crypto_unregister_shash(&crc32c_alg);
+}
+
+module_cpu_feature_match(CRC32, crc32_mod_init);
+module_exit(crc32_mod_exit);
--
2.1.0
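
A note on the tail handling in crc32_arm64_le_hw() and crc32c_arm64_le_hw()
above: after the u64 loop, length holds (len % 8) - 8, and in two's
complement the low three bits of that negative value equal the low three
bits of len % 8, so the three masked tests consume exactly the remaining
0-7 bytes. A minimal standalone sketch (plain C, a hypothetical demo, not
part of the patch) that exercises the same control flow:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	for (unsigned int len = 0; len < 16; len++) {
		int64_t length = len;	/* signed, as in the patch */
		unsigned int consumed = 0;

		/* same structure as the patch, counting bytes instead of CRCing */
		while ((length -= sizeof(uint64_t)) >= 0)
			consumed += 8;
		if (length & sizeof(uint32_t))
			consumed += 4;
		if (length & sizeof(uint16_t))
			consumed += 2;
		if (length & sizeof(uint8_t))
			consumed += 1;

		/* prints consumed == len for every length */
		printf("len=%2u consumed=%2u\n", len, consumed);
	}
	return 0;
}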
* Re: [PATCH] arm64: crypto: Add ARM64 CRC32 hw accelerated module
@ 2014-11-20 14:22 ` Yazen Ghannam
From: Yazen Ghannam @ 2014-11-20 14:22 UTC
To: linux-kernel, linux-crypto, linux-arm-kernel; +Cc: Herbert Xu, Yazen Ghannam
+linux-arm-kernel@lists.infradead.org
On Wed, Nov 19, 2014 at 11:19 AM, Yazen Ghannam
<yazen.ghannam@linaro.org> wrote:
> This module registers a crc32 algorithm and a crc32c algorithm
> that use the optional CRC32 and CRC32C instructions in ARMv8.
>
> Tested on AMD Seattle.
>
> Improvement compared to crc32c-generic algorithm:
> TCRYPT CRC32C speed test shows ~450% speedup.
> Simple dd write tests to btrfs filesystem show ~30% speedup.
>
> Signed-off-by: Yazen Ghannam <yazen.ghannam@linaro.org>
> Acked-by: Steve Capper <steve.capper@linaro.org>
> Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> [...]
* Re: [PATCH] arm64: crypto: Add ARM64 CRC32 hw accelerated module
@ 2014-11-20 14:43 ` Herbert Xu
From: Herbert Xu @ 2014-11-20 14:43 UTC
To: Yazen Ghannam; +Cc: linux-kernel, linux-crypto, linux-arm-kernel
On Thu, Nov 20, 2014 at 07:42:23AM -0600, Yazen Ghannam wrote:
> +linux-arm-kernel@lists.infradead.org
>
> On Wed, Nov 19, 2014 at 11:19 AM, Yazen Ghannam <yazen.ghannam@linaro.org>
> wrote:
>
> > This module registers a crc32 algorithm and a crc32c algorithm
> > that use the optional CRC32 and CRC32C instructions in ARMv8.
> >
> > Tested on AMD Seattle.
> >
> > Improvement compared to crc32c-generic algorithm:
> > TCRYPT CRC32C speed test shows ~450% speedup.
> > Simple dd write tests to btrfs filesystem show ~30% speedup.
> >
> > Signed-off-by: Yazen Ghannam <yazen.ghannam@linaro.org>
> > Acked-by: Steve Capper <steve.capper@linaro.org>
> > Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Patch applied. Thanks!
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
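
For context: once loaded, this driver is reached through the generic shash
API by algorithm name, and its cra_priority of 300 makes it preferred over
crc32c-generic. Below is a minimal sketch of a one-shot kernel caller; the
helper name is hypothetical, and it assumes the shash API as it existed in
this era:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical helper: one-shot CRC32C of a buffer via the shash API. */
static int crc32c_digest_buf(const u8 *buf, unsigned int len, u8 out[4])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	/* resolves to the highest-priority "crc32c", e.g. crc32c-arm64-hw */
	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* descriptor = shash_desc header plus per-request state */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	/* init + update + final in one call; writes the 4-byte LE digest */
	err = crypto_shash_digest(desc, buf, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}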
* Re: [PATCH] arm64: crypto: Add ARM64 CRC32 hw accelerated module
@ 2014-11-21 21:39 ` Ard Biesheuvel
From: Ard Biesheuvel @ 2014-11-21 21:39 UTC
To: Yazen Ghannam; +Cc: linux-kernel, linux-crypto, linux-arm-kernel, Herbert Xu
On 20 November 2014 15:22, Yazen Ghannam <yazen.ghannam@linaro.org> wrote:
> +linux-arm-kernel@lists.infradead.org
>
> On Wed, Nov 19, 2014 at 11:19 AM, Yazen Ghannam
> <yazen.ghannam@linaro.org> wrote:
>> [...]
>> +#include <linux/unaligned/access_ok.h>
One final nit: you should not be including this file directly.
You should include <asm/unaligned.h> instead, and it is up to the
architecture to include either access_ok.h or another implementation
of get_unaligned_leXX
Granted, the distinction is fairly artificial on arm64, but it does
increase the portability of the code.
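
Concretely, the suggested change would be a one-line replacement in the
include block, something like:

-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>

<asm/unaligned.h> then picks the right get_unaligned_le16/32/64()
implementation for the architecture, so the callers stay unchanged.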
--
Ard.
>> [...]
* Re: [PATCH] arm64: crypto: Add ARM64 CRC32 hw accelerated module
@ 2014-11-25 16:50 ` Yazen Ghannam
From: Yazen Ghannam @ 2014-11-25 16:50 UTC
To: Ard Biesheuvel; +Cc: linux-kernel, linux-crypto, linux-arm-kernel, Herbert Xu
Herbert,
I have a couple of questions.
1) To which release has the patch been applied? We're just curious for
tracking purposes.
2) I'd like to apply Ard's suggestion. Do you prefer a second version
of this patch or a separate fixup patch?
Thanks,
Yazen
On Fri, Nov 21, 2014 at 3:39 PM, Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
> On 20 November 2014 15:22, Yazen Ghannam <yazen.ghannam@linaro.org> wrote:
>> +linux-arm-kernel@lists.infradead.org
>>
>> On Wed, Nov 19, 2014 at 11:19 AM, Yazen Ghannam
>> <yazen.ghannam@linaro.org> wrote:
>>> [...]
>>> +#include <linux/unaligned/access_ok.h>
>
> One final nit: you should not be including this file directly.
> You should include <asm/unaligned.h> instead, and it is up to the
> architecture to include either access_ok.h or another implementation
> of get_unaligned_leXX
>
> Granted, the distinction is fairly artificial on arm64, but it does
> increase the portability of the code.
>
> --
> Ard.
>
>>> [...]
* Re: [PATCH] arm64: crypto: Add ARM64 CRC32 hw accelerated module
@ 2014-11-26 12:00 ` Herbert Xu
From: Herbert Xu @ 2014-11-26 12:00 UTC
To: Yazen Ghannam
Cc: Ard Biesheuvel, linux-kernel, linux-crypto, linux-arm-kernel
On Tue, Nov 25, 2014 at 10:50:12AM -0600, Yazen Ghannam wrote:
> Herbert,
>
> I have a couple of questions.
>
> 1) To which release has the patch been applied? We're just curious for
> tracking purposes.
3.19
> 2) I'd like to apply Ard's suggestion. Do you prefer a second version
> of this patch or a separate fixup patch?
Please make your patches against the cryptodev tree.
Thanks,
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt