linux-kernel.vger.kernel.org archive mirror
* [PATCH RESEND 0/9] crypto: caam - xts(aes) updates
@ 2020-08-06 16:35 Andrei Botila
  2020-08-06 16:35 ` [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV Andrei Botila
                   ` (8 more replies)
  0 siblings, 9 replies; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

This patch series fixes some problems in CAAM's implementation of xts(aes):
 - CAAM until Era 9 can't process XTS with a 16B IV
 - CAAM can process in hardware only XTS AES keys of 16B and 32B
   (i.e. XTS-AES-128 and XTS-AES-256)
 - These hardware limitations are resolved through a fallback, summarized
   in the sketch below

This patch series also adds a new feature in CAAM's xts(aes):
 - CAAM is now able to process XTS with 16B IV in HW
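
As a rough summary (illustrative only, not a literal quote from any
single patch), the dispatch logic at the end of the series boils down to:

	/*
	 * Illustrative: combined fallback condition after patches 1-9.
	 * era, keylen and xts_skcipher_ivsize() stand for ctrlpriv->era,
	 * ctx->cdata.keylen and the non-zero-upper-IV-half check added
	 * by the diffs below.
	 */
	bool use_fallback = (era <= 8 && xts_skcipher_ivsize(req)) ||
			    (keylen != 2 * AES_KEYSIZE_128 &&
			     keylen != 2 * AES_KEYSIZE_256);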

Resending the patch series because it didn't reach the linux-crypto and
linux-kernel mailing lists.

Andrei Botila (9):
  crypto: caam/jr - add fallback for XTS with more than 8B IV
  crypto: caam/qi - add fallback for XTS with more than 8B IV
  crypto: caam/qi2 - add fallback for XTS with more than 8B IV
  crypto: caam/jr - add support for more XTS key lengths
  crypto: caam/qi - add support for more XTS key lengths
  crypto: caam/qi2 - add support for more XTS key lengths
  crypto: caam/jr - add support for XTS with 16B IV
  crypto: caam/qi - add support for XTS with 16B IV
  crypto: caam/qi2 - add support for XTS with 16B IV

 drivers/crypto/caam/caamalg.c      | 81 +++++++++++++++++++++++--
 drivers/crypto/caam/caamalg_desc.c | 27 +++++----
 drivers/crypto/caam/caamalg_qi.c   | 86 ++++++++++++++++++++++++---
 drivers/crypto/caam/caamalg_qi2.c  | 95 ++++++++++++++++++++++++++++--
 drivers/crypto/caam/caamalg_qi2.h  |  2 +
 5 files changed, 261 insertions(+), 30 deletions(-)

-- 
2.17.1


^ permalink raw reply	[flat|nested] 31+ messages in thread

* [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
@ 2020-08-06 16:35 ` Andrei Botila
  2020-08-11 14:30   ` Horia Geantă
                     ` (2 more replies)
  2020-08-06 16:35 ` [PATCH RESEND 2/9] crypto: caam/qi " Andrei Botila
                   ` (7 subsequent siblings)
  8 siblings, 3 replies; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

A hardware limitation exists for CAAM until Era 9 which restricts
the accelerator to IVs of only 8 bytes. When the CAAM era is lower
than 9, a fallback is necessary to process 16-byte IVs.
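
In other words: pre-Era-9 hardware consumes only the low 8 bytes of the
IV as the sector index, so any request whose upper IV half is non-zero
must take the fallback path. Condensed from the diff below (the patch
itself additionally special-cases u64-aligned IVs):

	/* True when the upper 8 bytes (bytes 8..15) of the IV are non-zero,
	 * i.e. the request needs the full 16-byte tweak the HW can't do.
	 */
	static bool xts_skcipher_ivsize(struct skcipher_request *req)
	{
		unsigned int ivsize =
			crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));

		return !!get_unaligned((u64 *)(req->iv + ivsize / 2));
	}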

Fixes: c6415a6016bf ("crypto: caam - add support for acipher xts(aes)")
Cc: <stable@vger.kernel.org> # v4.4+
Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
---
 drivers/crypto/caam/caamalg.c | 68 ++++++++++++++++++++++++++++++++---
 1 file changed, 64 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 91feda5b63f6..ebf4dc87ca2e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -57,6 +57,7 @@
 #include "key_gen.h"
 #include "caamalg_desc.h"
 #include <crypto/engine.h>
+#include <asm/unaligned.h>
 
 /*
  * crypto alg
@@ -114,10 +115,12 @@ struct caam_ctx {
 	struct alginfo adata;
 	struct alginfo cdata;
 	unsigned int authsize;
+	struct crypto_skcipher *fallback;
 };
 
 struct caam_skcipher_req_ctx {
 	struct skcipher_edesc *edesc;
+	struct skcipher_request fallback_req;
 };
 
 struct caam_aead_req_ctx {
@@ -830,12 +833,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	u32 *desc;
+	int err;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
 		dev_dbg(jrdev, "key size mismatch\n");
 		return -EINVAL;
 	}
 
+	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (err)
+		return err;
+
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
 	ctx->cdata.key_inline = true;
@@ -1755,6 +1763,20 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
 	return ret;
 }
 
+static bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+	u64 size = 0;
+
+	if (IS_ALIGNED((unsigned long)req->iv, __alignof__(u64)))
+		size = *(u64 *)(req->iv + (ivsize / 2));
+	else
+		size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+
+	return !!size;
+}
+
 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 {
 	struct skcipher_edesc *edesc;
@@ -1768,6 +1790,21 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	if (!req->cryptlen)
 		return 0;
 
+	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&rctx->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
+
+		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+				  crypto_skcipher_decrypt(&rctx->fallback_req);
+	}
+
 	/* allocate extended descriptor */
 	edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
 	if (IS_ERR(edesc))
@@ -1905,6 +1942,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.base = {
 				.cra_name = "xts(aes)",
 				.cra_driver_name = "xts-aes-caam",
+				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 				.cra_blocksize = AES_BLOCK_SIZE,
 			},
 			.setkey = xts_skcipher_setkey,
@@ -3344,12 +3382,30 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
 	struct caam_skcipher_alg *caam_alg =
 		container_of(alg, typeof(*caam_alg), skcipher);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
 
 	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
 
 	ctx->enginectx.op.do_one_request = skcipher_do_one_req;
 
-	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+	if (alg_aai == OP_ALG_AAI_XTS) {
+		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+		struct crypto_skcipher *fallback;
+
+		fallback = crypto_alloc_skcipher(tfm_name, 0,
+						 CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(fallback)) {
+			pr_err("Failed to allocate %s fallback: %ld\n",
+			       tfm_name, PTR_ERR(fallback));
+			return PTR_ERR(fallback);
+		}
+
+		ctx->fallback = fallback;
+		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+					    crypto_skcipher_reqsize(fallback));
+	}
+
+	return caam_init_common(ctx, &caam_alg->caam,
 				false);
 }
 
@@ -3378,7 +3434,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
 
 static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-	caam_exit_common(crypto_skcipher_ctx(tfm));
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+	caam_exit_common(ctx);
 }
 
 static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3412,8 +3472,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
-			      CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			      CRYPTO_ALG_KERN_DRIVER_ONLY);
 
 	alg->init = caam_cra_init;
 	alg->exit = caam_cra_exit;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH RESEND 2/9] crypto: caam/qi - add fallback for XTS with more than 8B IV
  2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
  2020-08-06 16:35 ` [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV Andrei Botila
@ 2020-08-06 16:35 ` Andrei Botila
  2020-08-19 23:56   ` Sasha Levin
  2020-08-06 16:35 ` [PATCH RESEND 3/9] crypto: caam/qi2 " Andrei Botila
                   ` (6 subsequent siblings)
  8 siblings, 1 reply; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

A hardware limitation exists for CAAM until Era 9 which restricts
the accelerator to IVs of only 8 bytes. When the CAAM era is lower
than 9, a fallback is necessary to process 16-byte IVs.

Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
Cc: <stable@vger.kernel.org> # v4.12+
Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
---
 drivers/crypto/caam/caamalg_qi.c | 73 +++++++++++++++++++++++++++++---
 1 file changed, 68 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index bb1c0106a95c..05cb50561381 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -18,6 +18,7 @@
 #include "qi.h"
 #include "jr.h"
 #include "caamalg_desc.h"
+#include <asm/unaligned.h>
 
 /*
  * crypto alg
@@ -67,6 +68,11 @@ struct caam_ctx {
 	struct device *qidev;
 	spinlock_t lock;	/* Protects multiple init of driver context */
 	struct caam_drv_ctx *drv_ctx[NUM_OP];
+	struct crypto_skcipher *fallback;
+};
+
+struct caam_skcipher_req_ctx {
+	struct skcipher_request fallback_req;
 };
 
 static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -726,12 +732,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	int ret = 0;
+	int err;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
 		dev_dbg(jrdev, "key size mismatch\n");
 		return -EINVAL;
 	}
 
+	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (err)
+		return err;
+
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
 	ctx->cdata.key_inline = true;
@@ -1373,6 +1384,20 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	return edesc;
 }
 
+static bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+	u64 size = 0;
+
+	if (IS_ALIGNED((unsigned long)req->iv, __alignof__(u64)))
+		size = *(u64 *)(req->iv + (ivsize / 2));
+	else
+		size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+
+	return !!size;
+}
+
 static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 {
 	struct skcipher_edesc *edesc;
@@ -1383,6 +1408,21 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	if (!req->cryptlen)
 		return 0;
 
+	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+
+		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&rctx->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
+
+		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
+				  crypto_skcipher_decrypt(&rctx->fallback_req);
+	}
+
 	if (unlikely(caam_congested))
 		return -EAGAIN;
 
@@ -1507,6 +1547,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.base = {
 				.cra_name = "xts(aes)",
 				.cra_driver_name = "xts-aes-caam-qi",
+				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 				.cra_blocksize = AES_BLOCK_SIZE,
 			},
 			.setkey = xts_skcipher_setkey,
@@ -2440,9 +2481,27 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct caam_skcipher_alg *caam_alg =
 		container_of(alg, typeof(*caam_alg), skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+	if (alg_aai == OP_ALG_AAI_XTS) {
+		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+		struct crypto_skcipher *fallback;
+
+		fallback = crypto_alloc_skcipher(tfm_name, 0,
+						 CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(fallback)) {
+			pr_err("Failed to allocate %s fallback: %ld\n",
+			       tfm_name, PTR_ERR(fallback));
+			return PTR_ERR(fallback);
+		}
+
+		ctx->fallback = fallback;
+		crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
+					    crypto_skcipher_reqsize(fallback));
+	}
 
-	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
-				false);
+	return caam_init_common(ctx, &caam_alg->caam, false);
 }
 
 static int caam_aead_init(struct crypto_aead *tfm)
@@ -2468,7 +2527,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
 
 static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-	caam_exit_common(crypto_skcipher_ctx(tfm));
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+	caam_exit_common(ctx);
 }
 
 static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2502,8 +2565,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
-			      CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+				CRYPTO_ALG_KERN_DRIVER_ONLY);
 
 	alg->init = caam_cra_init;
 	alg->exit = caam_cra_exit;
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH RESEND 3/9] crypto: caam/qi2 - add fallback for XTS with more than 8B IV
  2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
  2020-08-06 16:35 ` [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV Andrei Botila
  2020-08-06 16:35 ` [PATCH RESEND 2/9] crypto: caam/qi " Andrei Botila
@ 2020-08-06 16:35 ` Andrei Botila
  2020-08-19 23:56   ` Sasha Levin
  2020-08-06 16:35 ` [PATCH RESEND 4/9] crypto: caam/jr - add support for more XTS key lengths Andrei Botila
                   ` (5 subsequent siblings)
  8 siblings, 1 reply; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

A hardware limitation exists for CAAM until Era 9 which restricts
the accelerator to IVs of only 8 bytes. When the CAAM era is lower
than 9, a fallback is necessary to process 16-byte IVs.

Fixes: 226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms")
Cc: <stable@vger.kernel.org> # v4.20+
Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
---
 drivers/crypto/caam/caamalg_qi2.c | 79 +++++++++++++++++++++++++++++--
 drivers/crypto/caam/caamalg_qi2.h |  2 +
 2 files changed, 77 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 66ae1d581168..a0b13bf6b528 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -19,6 +19,7 @@
 #include <linux/fsl/mc.h>
 #include <soc/fsl/dpaa2-io.h>
 #include <soc/fsl/dpaa2-fd.h>
+#include <asm/unaligned.h>
 
 #define CAAM_CRA_PRIORITY	2000
 
@@ -80,6 +81,7 @@ struct caam_ctx {
 	struct alginfo adata;
 	struct alginfo cdata;
 	unsigned int authsize;
+	struct crypto_skcipher *fallback;
 };
 
 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
@@ -1056,12 +1058,17 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	struct device *dev = ctx->dev;
 	struct caam_flc *flc;
 	u32 *desc;
+	int err;
 
 	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
 		dev_dbg(dev, "key size mismatch\n");
 		return -EINVAL;
 	}
 
+	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+	if (err)
+		return err;
+
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
 	ctx->cdata.key_inline = true;
@@ -1443,6 +1450,20 @@ static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
 	skcipher_request_complete(req, ecode);
 }
 
+static bool xts_skcipher_ivsize(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+	u64 size = 0;
+
+	if (IS_ALIGNED((unsigned long)req->iv, __alignof__(u64)))
+		size = *(u64 *)(req->iv + (ivsize / 2));
+	else
+		size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
+
+	return !!size;
+}
+
 static int skcipher_encrypt(struct skcipher_request *req)
 {
 	struct skcipher_edesc *edesc;
@@ -1454,6 +1475,18 @@ static int skcipher_encrypt(struct skcipher_request *req)
 	if (!req->cryptlen)
 		return 0;
 
+	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&caam_req->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
+
+		return crypto_skcipher_encrypt(&caam_req->fallback_req);
+	}
+
 	/* allocate extended descriptor */
 	edesc = skcipher_edesc_alloc(req);
 	if (IS_ERR(edesc))
@@ -1484,6 +1517,19 @@ static int skcipher_decrypt(struct skcipher_request *req)
 
 	if (!req->cryptlen)
 		return 0;
+
+	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
+		skcipher_request_set_callback(&caam_req->fallback_req,
+					      req->base.flags,
+					      req->base.complete,
+					      req->base.data);
+		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
+					   req->dst, req->cryptlen, req->iv);
+
+		return crypto_skcipher_decrypt(&caam_req->fallback_req);
+	}
+
 	/* allocate extended descriptor */
 	edesc = skcipher_edesc_alloc(req);
 	if (IS_ERR(edesc))
@@ -1537,9 +1583,29 @@ static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct caam_skcipher_alg *caam_alg =
 		container_of(alg, typeof(*caam_alg), skcipher);
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
 
 	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
-	return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+
+	if (alg_aai == OP_ALG_AAI_XTS) {
+		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
+		struct crypto_skcipher *fallback;
+
+		fallback = crypto_alloc_skcipher(tfm_name, 0,
+						 CRYPTO_ALG_NEED_FALLBACK);
+		if (IS_ERR(fallback)) {
+			pr_err("Failed to allocate %s fallback: %ld\n",
+			       tfm_name, PTR_ERR(fallback));
+			return PTR_ERR(fallback);
+		}
+
+		ctx->fallback = fallback;
+		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
+					    crypto_skcipher_reqsize(fallback));
+	}
+
+	return caam_cra_init(ctx, &caam_alg->caam, false);
 }
 
 static int caam_cra_init_aead(struct crypto_aead *tfm)
@@ -1562,7 +1628,11 @@ static void caam_exit_common(struct caam_ctx *ctx)
 
 static void caam_cra_exit(struct crypto_skcipher *tfm)
 {
-	caam_exit_common(crypto_skcipher_ctx(tfm));
+	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	if (ctx->fallback)
+		crypto_free_skcipher(ctx->fallback);
+	caam_exit_common(ctx);
 }
 
 static void caam_cra_exit_aead(struct crypto_aead *tfm)
@@ -1665,6 +1735,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.base = {
 				.cra_name = "xts(aes)",
 				.cra_driver_name = "xts-aes-caam-qi2",
+				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 				.cra_blocksize = AES_BLOCK_SIZE,
 			},
 			.setkey = xts_skcipher_setkey,
@@ -2912,8 +2983,8 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 	alg->base.cra_module = THIS_MODULE;
 	alg->base.cra_priority = CAAM_CRA_PRIORITY;
 	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
-			      CRYPTO_ALG_KERN_DRIVER_ONLY;
+	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
+			      CRYPTO_ALG_KERN_DRIVER_ONLY);
 
 	alg->init = caam_cra_init_skcipher;
 	alg->exit = caam_cra_exit;
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index f29cb7bd7dd3..d35253407ade 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -13,6 +13,7 @@
 #include <linux/netdevice.h>
 #include "dpseci.h"
 #include "desc_constr.h"
+#include <crypto/skcipher.h>
 
 #define DPAA2_CAAM_STORE_SIZE	16
 /* NAPI weight *must* be a multiple of the store size. */
@@ -186,6 +187,7 @@ struct caam_request {
 	void (*cbk)(void *ctx, u32 err);
 	void *ctx;
 	void *edesc;
+	struct skcipher_request fallback_req;
 };
 
 /**
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH RESEND 4/9] crypto: caam/jr - add support for more XTS key lengths
  2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
                   ` (2 preceding siblings ...)
  2020-08-06 16:35 ` [PATCH RESEND 3/9] crypto: caam/qi2 " Andrei Botila
@ 2020-08-06 16:35 ` Andrei Botila
  2020-08-11 14:36   ` Horia Geantă
  2020-08-19 23:56   ` Sasha Levin
  2020-08-06 16:35 ` [PATCH RESEND 5/9] crypto: caam/qi " Andrei Botila
                   ` (4 subsequent siblings)
  8 siblings, 2 replies; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

The CAAM accelerator supports only XTS-AES-128 and XTS-AES-256, since
it adheres strictly to the standard. All other key lengths are
accepted and processed through a fallback, as long as they pass the
xts_verify_key() checks.
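
For reference, xts_verify_key() lives in include/crypto/xts.h; roughly
paraphrased, it enforces two things (the real helper also honours the
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS tfm flag):

	/* The XTS key is two equally sized sub-keys concatenated,
	 * so its total length must be even.
	 */
	if (keylen % 2)
		return -EINVAL;

	/* In FIPS mode, identical halves are rejected as weak. */
	if (fips_enabled && !crypto_memneq(key, key + keylen / 2, keylen / 2))
		return -EINVAL;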

Fixes: c6415a6016bf ("crypto: caam - add support for acipher xts(aes)")
Cc: <stable@vger.kernel.org> # v4.4+
Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
---
 drivers/crypto/caam/caamalg.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ebf4dc87ca2e..a5447ae430b0 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -57,6 +57,7 @@
 #include "key_gen.h"
 #include "caamalg_desc.h"
 #include <crypto/engine.h>
+#include <crypto/xts.h>
 #include <asm/unaligned.h>
 
 /*
@@ -835,9 +836,10 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	u32 *desc;
 	int err;
 
-	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
+	err = xts_verify_key(skcipher, key, keylen);
+	if (err) {
 		dev_dbg(jrdev, "key size mismatch\n");
-		return -EINVAL;
+		return err;
 	}
 
 	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
@@ -1790,7 +1792,9 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	if (!req->cryptlen)
 		return 0;
 
-	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
+			      (ctx->cdata.keylen != 2 * AES_KEYSIZE_128 &&
+			       ctx->cdata.keylen != 2 * AES_KEYSIZE_256))) {
 		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
 
 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH RESEND 5/9] crypto: caam/qi - add support for more XTS key lengths
  2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
                   ` (3 preceding siblings ...)
  2020-08-06 16:35 ` [PATCH RESEND 4/9] crypto: caam/jr - add support for more XTS key lengths Andrei Botila
@ 2020-08-06 16:35 ` Andrei Botila
  2020-08-19 23:56   ` Sasha Levin
  2020-08-06 16:35 ` [PATCH RESEND 6/9] crypto: caam/qi2 " Andrei Botila
                   ` (3 subsequent siblings)
  8 siblings, 1 reply; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

The CAAM accelerator supports only XTS-AES-128 and XTS-AES-256, since
it adheres strictly to the standard. All other key lengths are
accepted and processed through a fallback, as long as they pass the
xts_verify_key() checks.

Fixes: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms")
Cc: <stable@vger.kernel.org> # v4.12+
Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
---
 drivers/crypto/caam/caamalg_qi.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 05cb50561381..1d775a55fcf5 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -18,6 +18,7 @@
 #include "qi.h"
 #include "jr.h"
 #include "caamalg_desc.h"
+#include <crypto/xts.h>
 #include <asm/unaligned.h>
 
 /*
@@ -734,9 +735,10 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	int ret = 0;
 	int err;
 
-	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
+	err = xts_verify_key(skcipher, key, keylen);
+	if (err) {
 		dev_dbg(jrdev, "key size mismatch\n");
-		return -EINVAL;
+		return err;
 	}
 
 	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
@@ -1408,7 +1410,9 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	if (!req->cryptlen)
 		return 0;
 
-	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
+			      (ctx->cdata.keylen != 2 * AES_KEYSIZE_128 &&
+			       ctx->cdata.keylen != 2 * AES_KEYSIZE_256))) {
 		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
 
 		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH RESEND 6/9] crypto: caam/qi2 - add support for more XTS key lengths
  2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
                   ` (4 preceding siblings ...)
  2020-08-06 16:35 ` [PATCH RESEND 5/9] crypto: caam/qi " Andrei Botila
@ 2020-08-06 16:35 ` Andrei Botila
  2020-08-19 23:56   ` Sasha Levin
  2020-08-06 16:35 ` [PATCH RESEND 7/9] crypto: caam/jr - add support for XTS with 16B IV Andrei Botila
                   ` (2 subsequent siblings)
  8 siblings, 1 reply; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

The CAAM accelerator supports only XTS-AES-128 and XTS-AES-256, since
it adheres strictly to the standard. All other key lengths are
accepted and processed through a fallback, as long as they pass the
xts_verify_key() checks.

Fixes: 226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms")
Cc: <stable@vger.kernel.org> # v4.20+
Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
---
 drivers/crypto/caam/caamalg_qi2.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index a0b13bf6b528..f2f137f47972 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -19,6 +19,7 @@
 #include <linux/fsl/mc.h>
 #include <soc/fsl/dpaa2-io.h>
 #include <soc/fsl/dpaa2-fd.h>
+#include <crypto/xts.h>
 #include <asm/unaligned.h>
 
 #define CAAM_CRA_PRIORITY	2000
@@ -1060,9 +1061,10 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 	u32 *desc;
 	int err;
 
-	if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
+	err = xts_verify_key(skcipher, key, keylen);
+	if (err) {
 		dev_dbg(dev, "key size mismatch\n");
-		return -EINVAL;
+		return err;
 	}
 
 	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
@@ -1475,7 +1477,9 @@ static int skcipher_encrypt(struct skcipher_request *req)
 	if (!req->cryptlen)
 		return 0;
 
-	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
+			      (ctx->cdata.keylen != 2 * AES_KEYSIZE_128 &&
+			       ctx->cdata.keylen != 2 * AES_KEYSIZE_256))) {
 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
 		skcipher_request_set_callback(&caam_req->fallback_req,
 					      req->base.flags,
@@ -1518,7 +1522,9 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	if (!req->cryptlen)
 		return 0;
 
-	if (ctx->fallback && xts_skcipher_ivsize(req)) {
+	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
+			      (ctx->cdata.keylen != 2 * AES_KEYSIZE_128 &&
+			       ctx->cdata.keylen != 2 * AES_KEYSIZE_256))) {
 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
 		skcipher_request_set_callback(&caam_req->fallback_req,
 					      req->base.flags,
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH RESEND 7/9] crypto: caam/jr - add support for XTS with 16B IV
  2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
                   ` (5 preceding siblings ...)
  2020-08-06 16:35 ` [PATCH RESEND 6/9] crypto: caam/qi2 " Andrei Botila
@ 2020-08-06 16:35 ` Andrei Botila
  2020-08-06 16:35 ` [PATCH RESEND 8/9] crypto: caam/qi " Andrei Botila
  2020-08-06 16:35 ` [PATCH RESEND 9/9] crypto: caam/qi2 " Andrei Botila
  8 siblings, 0 replies; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

Newer CAAM versions (Era 9+) support 16B IVs. Since the HW limitation
is no longer present on these devices, they should process requests
containing 16B IVs directly in hardware, without using a fallback.
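
Concretely, the shared-descriptor change below stops discarding the
upper IV half: the 8-byte FIFO-load-and-skip is replaced with a second
SEQ LOAD, so both tweak halves are loaded into (and stored back from)
the class 1 context register:

	/* before: only the sector index (low 8B) reaches the context */
	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			(0x20 << LDST_OFFSET_SHIFT));
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* after: the full 16B tweak sits at context offsets 0x20/0x30 */
	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			(0x20 << LDST_OFFSET_SHIFT));
	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			(0x30 << LDST_OFFSET_SHIFT));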

Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
---
 drivers/crypto/caam/caamalg.c      | 13 +++++++++----
 drivers/crypto/caam/caamalg_desc.c | 27 ++++++++++++++++-----------
 2 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index a5447ae430b0..7e03854252b0 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -833,6 +833,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 {
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 	u32 *desc;
 	int err;
 
@@ -842,9 +843,12 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 		return err;
 	}
 
-	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
-	if (err)
-		return err;
+	if (ctrlpriv->era <= 8 || (keylen != 2 * AES_KEYSIZE_128 &&
+				   keylen != 2 * AES_KEYSIZE_256)) {
+		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+		if (err)
+			return err;
+	}
 
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
@@ -1786,13 +1790,14 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
 	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 	u32 *desc;
 	int ret = 0;
 
 	if (!req->cryptlen)
 		return 0;
 
-	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
+	if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
 			      (ctx->cdata.keylen != 2 * AES_KEYSIZE_128 &&
 			       ctx->cdata.keylen != 2 * AES_KEYSIZE_256))) {
 		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index d6c58184bb57..433d6d5cd582 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1550,13 +1550,14 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/*
-	 * create sequence for loading the sector index
-	 * Upper 8B of IV - will be used as sector index
-	 * Lower 8B of IV - will be discarded
+	 * create sequence for loading the sector index / 16B tweak value
+	 * Lower 8B of IV - sector index / tweak lower half
+	 * Upper 8B of IV - upper half of 16B tweak
 	 */
 	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
 			(0x20 << LDST_OFFSET_SHIFT));
-	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
+	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			(0x30 << LDST_OFFSET_SHIFT));
 
 	/* Load operation */
 	append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
@@ -1565,9 +1566,11 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
 
-	/* Store upper 8B of IV */
+	/* Store lower 8B and upper 8B of IV */
 	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
 			 (0x20 << LDST_OFFSET_SHIFT));
+	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			 (0x30 << LDST_OFFSET_SHIFT));
 
 	print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__)
 			     ": ", DUMP_PREFIX_ADDRESS, 16, 4,
@@ -1609,23 +1612,25 @@ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/*
-	 * create sequence for loading the sector index
-	 * Upper 8B of IV - will be used as sector index
-	 * Lower 8B of IV - will be discarded
+	 * create sequence for loading the sector index / 16B tweak value
+	 * Lower 8B of IV - sector index / tweak lower half
+	 * Upper 8B of IV - upper half of 16B tweak
 	 */
 	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
 			(0x20 << LDST_OFFSET_SHIFT));
-	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
-
+	append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			(0x30 << LDST_OFFSET_SHIFT));
 	/* Load operation */
 	append_dec_op1(desc, cdata->algtype);
 
 	/* Perform operation */
 	skcipher_append_src_dst(desc);
 
-	/* Store upper 8B of IV */
+	/* Store lower 8B and upper 8B of IV */
 	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
 			 (0x20 << LDST_OFFSET_SHIFT));
+	append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			 (0x30 << LDST_OFFSET_SHIFT));
 
 	print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__)
 			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH RESEND 8/9] crypto: caam/qi - add support for XTS with 16B IV
  2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
                   ` (6 preceding siblings ...)
  2020-08-06 16:35 ` [PATCH RESEND 7/9] crypto: caam/jr - add support for XTS with 16B IV Andrei Botila
@ 2020-08-06 16:35 ` Andrei Botila
  2020-08-06 16:35 ` [PATCH RESEND 9/9] crypto: caam/qi2 " Andrei Botila
  8 siblings, 0 replies; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

Newer CAAM versions (Era 9+) support 16B IVs. Since the HW limitation
is no longer present on these devices, they should process requests
containing 16B IVs directly in hardware, without using a fallback.

Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
---
 drivers/crypto/caam/caamalg_qi.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 1d775a55fcf5..df58a899e97d 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -732,6 +732,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 {
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *jrdev = ctx->jrdev;
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 	int ret = 0;
 	int err;
 
@@ -741,9 +742,12 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 		return err;
 	}
 
-	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
-	if (err)
-		return err;
+	if (ctrlpriv->era <= 8 || (keylen != 2 * AES_KEYSIZE_128 &&
+				   keylen != 2 * AES_KEYSIZE_256)) {
+		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+		if (err)
+			return err;
+	}
 
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
@@ -1405,12 +1409,13 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 	struct skcipher_edesc *edesc;
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
 	int ret;
 
 	if (!req->cryptlen)
 		return 0;
 
-	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
+	if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
 			      (ctx->cdata.keylen != 2 * AES_KEYSIZE_128 &&
 			       ctx->cdata.keylen != 2 * AES_KEYSIZE_256))) {
 		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* [PATCH RESEND 9/9] crypto: caam/qi2 - add support for XTS with 16B IV
  2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
                   ` (7 preceding siblings ...)
  2020-08-06 16:35 ` [PATCH RESEND 8/9] crypto: caam/qi " Andrei Botila
@ 2020-08-06 16:35 ` Andrei Botila
  8 siblings, 0 replies; 31+ messages in thread
From: Andrei Botila @ 2020-08-06 16:35 UTC (permalink / raw)
  To: Horia Geanta, Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

From: Andrei Botila <andrei.botila@nxp.com>

Newer CAAM versions (Era 9+) support 16B IVs. Since the HW limitation
is no longer present on these devices, they should process requests
containing 16B IVs directly in hardware, without using a fallback.

Signed-off-by: Andrei Botila <andrei.botila@nxp.com>
---
 drivers/crypto/caam/caamalg_qi2.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index f2f137f47972..99b1267b1a0b 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1057,6 +1057,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 {
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct device *dev = ctx->dev;
+	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
 	struct caam_flc *flc;
 	u32 *desc;
 	int err;
@@ -1067,9 +1068,12 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
 		return err;
 	}
 
-	err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
-	if (err)
-		return err;
+	if (priv->sec_attr.era <= 8 || (keylen != 2 * AES_KEYSIZE_128 &&
+					keylen != 2 * AES_KEYSIZE_256)) {
+		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+		if (err)
+			return err;
+	}
 
 	ctx->cdata.keylen = keylen;
 	ctx->cdata.key_virt = key;
@@ -1472,12 +1476,13 @@ static int skcipher_encrypt(struct skcipher_request *req)
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct caam_request *caam_req = skcipher_request_ctx(req);
+	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
 	int ret;
 
 	if (!req->cryptlen)
 		return 0;
 
-	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
+	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
 			      (ctx->cdata.keylen != 2 * AES_KEYSIZE_128 &&
 			       ctx->cdata.keylen != 2 * AES_KEYSIZE_256))) {
 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
@@ -1517,12 +1522,13 @@ static int skcipher_decrypt(struct skcipher_request *req)
 	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
 	struct caam_request *caam_req = skcipher_request_ctx(req);
+	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
 	int ret;
 
 	if (!req->cryptlen)
 		return 0;
 
-	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
+	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
 			      (ctx->cdata.keylen != 2 * AES_KEYSIZE_128 &&
 			       ctx->cdata.keylen != 2 * AES_KEYSIZE_256))) {
 		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
-- 
2.17.1


^ permalink raw reply related	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-08-06 16:35 ` [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV Andrei Botila
@ 2020-08-11 14:30   ` Horia Geantă
  2020-08-21  3:47     ` Herbert Xu
  2020-08-19 23:56   ` Sasha Levin
  2020-08-21  3:46   ` Herbert Xu
  2 siblings, 1 reply; 31+ messages in thread
From: Horia Geantă @ 2020-08-11 14:30 UTC (permalink / raw)
  To: Andrei Botila (OSS), Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

On 8/6/2020 7:36 PM, Andrei Botila (OSS) wrote:
> @@ -3344,12 +3382,30 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
>  	struct caam_skcipher_alg *caam_alg =
>  		container_of(alg, typeof(*caam_alg), skcipher);
>  	struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
> +	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
>  
>  	crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
This is being called twice in case of XTS.

>  
>  	ctx->enginectx.op.do_one_request = skcipher_do_one_req;
>  
> -	return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
> +	if (alg_aai == OP_ALG_AAI_XTS) {
> +		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
> +		struct crypto_skcipher *fallback;
> +
> +		fallback = crypto_alloc_skcipher(tfm_name, 0,
> +						 CRYPTO_ALG_NEED_FALLBACK);
Driver should select CRYPTO_XTS, such that at least the generic
xts implementation is available.
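
i.e. something along these lines in drivers/crypto/caam/Kconfig
(illustrative sketch; the exact config symbol depends on which driver
entry is touched):

	config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
		...
		select CRYPTO_XTS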

> +		if (IS_ERR(fallback)) {
> +			pr_err("Failed to allocate %s fallback: %ld\n",
> +			       tfm_name, PTR_ERR(fallback));
> +			return PTR_ERR(fallback);
Shouldn't error out so early. It might be that the fallback won't be needed.
Let's postpone this until we're sure fallback is required.

Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 4/9] crypto: caam/jr - add support for more XTS key lengths
  2020-08-06 16:35 ` [PATCH RESEND 4/9] crypto: caam/jr - add support for more XTS key lengths Andrei Botila
@ 2020-08-11 14:36   ` Horia Geantă
  2020-08-19 23:56   ` Sasha Levin
  1 sibling, 0 replies; 31+ messages in thread
From: Horia Geantă @ 2020-08-11 14:36 UTC (permalink / raw)
  To: Andrei Botila (OSS), Aymen Sghaier, Herbert Xu, David S. Miller
  Cc: linux-crypto, linux-kernel

On 8/6/2020 7:36 PM, Andrei Botila (OSS) wrote:
> @@ -1790,7 +1792,9 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
>  	if (!req->cryptlen)
>  		return 0;
>  
> -	if (ctx->fallback && xts_skcipher_ivsize(req)) {
> +	if (ctx->fallback && (xts_skcipher_ivsize(req) ||
> +			      (ctx->cdata.keylen != 2 * AES_KEYSIZE_128 &&
> +			       ctx->cdata.keylen != 2 * AES_KEYSIZE_256))) {
Let's avoid doing this check for every request.
This could be achieved by moving it into the .setkey callback and
setting a flag in the caam_ctx indicating whether the fallback is needed
for this tfm.
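
A minimal sketch of that suggestion (hypothetical field name, not part
of the posted patch):

	/* in struct caam_ctx (hypothetical): */
	bool xts_key_fallback;

	/* decided once, in xts_skcipher_setkey(): */
	ctx->xts_key_fallback = (keylen != 2 * AES_KEYSIZE_128 &&
				 keylen != 2 * AES_KEYSIZE_256);

	/* then skcipher_crypt() only checks the cached flag: */
	if (ctx->fallback && (xts_skcipher_ivsize(req) || ctx->xts_key_fallback))
		/* ... dispatch to ctx->fallback ... */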

Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 2/9] crypto: caam/qi - add fallback for XTS with more than 8B IV
  2020-08-06 16:35 ` [PATCH RESEND 2/9] crypto: caam/qi " Andrei Botila
@ 2020-08-19 23:56   ` Sasha Levin
  0 siblings, 0 replies; 31+ messages in thread
From: Sasha Levin @ 2020-08-19 23:56 UTC (permalink / raw)
  To: Sasha Levin, Andrei Botila, Andrei Botila, Horia Geanta
  Cc: linux-crypto, linux-kernel, stable

Hi

[This is an automated email]

This commit has been processed because it contains a "Fixes:" tag
fixing commit: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms").

The bot has tested the following trees: v5.8.1, v5.7.15, v5.4.58, v4.19.139, v4.14.193.

v5.8.1: Failed to apply! Possible dependencies:
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.7.15: Failed to apply! Possible dependencies:
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.4.58: Failed to apply! Possible dependencies:
    64db5e7439fb ("crypto: sparc/aes - convert to skcipher API")
    66d7fb94e4ff ("crypto: blake2s - generic C library implementation and selftest")
    674f368a952c ("crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN")
    746b2e024c67 ("crypto: lib - tidy up lib/crypto Kconfig and Makefile")
    7988fb2c03c8 ("crypto: s390/aes - convert to skcipher API")
    7f725f41f627 ("crypto: powerpc - convert SPE AES algorithms to skcipher API")
    7f9b0880925f ("crypto: blake2s - implement generic shash driver")
    91d689337fe8 ("crypto: blake2b - add blake2b generic implementation")
    b4d0c0aad57a ("crypto: arm - use Kconfig based compiler checks for crypto opcodes")
    b95bba5d0114 ("crypto: skcipher - rename the crypto_blkcipher module and kconfig option")
    d00c06398154 ("crypto: s390/paes - convert to skcipher API")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")
    ed0356eda153 ("crypto: blake2s - x86_64 SIMD implementation")

v4.19.139: Failed to apply! Possible dependencies:
    0a5dff9882e5 ("crypto: arm/ghash - provide a synchronous version")
    1ca1b917940c ("crypto: chacha20-generic - refactor to allow varying number of rounds")
    5ca7badb1f62 ("crypto: caam/jr - ablkcipher -> skcipher conversion")
    674f368a952c ("crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN")
    8a5a79d5556b ("crypto: x86/chacha20 - Add a 4-block AVX2 variant")
    99680c5e9182 ("crypto: arm - convert to use crypto_simd_usable()")
    9b17608f15b9 ("crypto: x86/chacha20 - Use larger block functions more aggressively")
    9dbe3072c6b1 ("crypto: caam/qi - ablkcipher -> skcipher conversion")
    a5dd97f86211 ("crypto: x86/chacha20 - Add a 2-block AVX2 variant")
    aec48adce85d ("crypto: caam/qi - remove ablkcipher IV generation")
    c3b734dd325d ("crypto: x86/chacha20 - Support partial lengths in 8-block AVX2 variant")
    cf5448b5c3d8 ("crypto: caam/jr - remove ablkcipher IV generation")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")
    db8e15a24957 ("crypto: x86/chacha20 - Support partial lengths in 4-block SSSE3 variant")
    e4e72063d3c0 ("crypto: x86/chacha20 - Support partial lengths in 1-block SSSE3 variant")

v4.14.193: Failed to apply! Possible dependencies:
    5ca7badb1f62 ("crypto: caam/jr - ablkcipher -> skcipher conversion")
    662f70ede597 ("crypto: caam - remove needless ablkcipher key copy")
    7e0880b9fbbe ("crypto: caam - add Derived Key Protocol (DKP) support")
    87ec3a0b1c2d ("crypto: caam - prepare for gcm(aes) support over QI interface")
    9dbe3072c6b1 ("crypto: caam/qi - ablkcipher -> skcipher conversion")
    cf5448b5c3d8 ("crypto: caam/jr - remove ablkcipher IV generation")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 3/9] crypto: caam/qi2 - add fallback for XTS with more than 8B IV
  2020-08-06 16:35 ` [PATCH RESEND 3/9] crypto: caam/qi2 " Andrei Botila
@ 2020-08-19 23:56   ` Sasha Levin
  0 siblings, 0 replies; 31+ messages in thread
From: Sasha Levin @ 2020-08-19 23:56 UTC (permalink / raw)
  To: Sasha Levin, Andrei Botila, Andrei Botila, Horia Geanta
  Cc: linux-crypto, linux-kernel, stable

Hi

[This is an automated email]

This commit has been processed because it contains a "Fixes:" tag
fixing commit: 226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms").

The bot has tested the following trees: v5.8.1, v5.7.15, v5.4.58.

v5.8.1: Failed to apply! Possible dependencies:
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.7.15: Failed to apply! Possible dependencies:
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.4.58: Failed to apply! Possible dependencies:
    64db5e7439fb ("crypto: sparc/aes - convert to skcipher API")
    66d7fb94e4ff ("crypto: blake2s - generic C library implementation and selftest")
    674f368a952c ("crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN")
    746b2e024c67 ("crypto: lib - tidy up lib/crypto Kconfig and Makefile")
    7988fb2c03c8 ("crypto: s390/aes - convert to skcipher API")
    7f725f41f627 ("crypto: powerpc - convert SPE AES algorithms to skcipher API")
    7f9b0880925f ("crypto: blake2s - implement generic shash driver")
    91d689337fe8 ("crypto: blake2b - add blake2b generic implementation")
    b4d0c0aad57a ("crypto: arm - use Kconfig based compiler checks for crypto opcodes")
    b95bba5d0114 ("crypto: skcipher - rename the crypto_blkcipher module and kconfig option")
    d00c06398154 ("crypto: s390/paes - convert to skcipher API")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")
    ed0356eda153 ("crypto: blake2s - x86_64 SIMD implementation")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-08-06 16:35 ` [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV Andrei Botila
  2020-08-11 14:30   ` Horia Geantă
@ 2020-08-19 23:56   ` Sasha Levin
  2020-08-21  3:46   ` Herbert Xu
  2 siblings, 0 replies; 31+ messages in thread
From: Sasha Levin @ 2020-08-19 23:56 UTC (permalink / raw)
  To: Sasha Levin, Andrei Botila, Andrei Botila, Horia Geanta
  Cc: linux-crypto, linux-kernel, stable

Hi

[This is an automated email]

This commit has been processed because it contains a "Fixes:" tag
fixing commit: c6415a6016bf ("crypto: caam - add support for acipher xts(aes)").

The bot has tested the following trees: v5.8.1, v5.7.15, v5.4.58, v4.19.139, v4.14.193, v4.9.232, v4.4.232.

v5.8.1: Failed to apply! Possible dependencies:
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.7.15: Failed to apply! Possible dependencies:
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.4.58: Failed to apply! Possible dependencies:
    1c2402266713 ("crypto: caam - add crypto_engine support for AEAD algorithms")
    4d370a103695 ("crypto: caam - change return code in caam_jr_enqueue function")
    b7f17fe28144 ("crypto: caam - refactor skcipher/aead/gcm/chachapoly {en,de}crypt functions")
    d53e44fe980b ("crypto: caam - refactor RSA private key _done callbacks")
    ee38767f152a ("crypto: caam - support crypto_engine framework for SKCIPHER algorithms")

v4.19.139: Failed to apply! Possible dependencies:
    0efa7579f3de ("crypto: caam - export ahash shared descriptor generation")
    1b46c90c8e00 ("crypto: caam - convert top level drivers to libraries")
    226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms")
    8d818c105501 ("crypto: caam/qi2 - add DPAA2-CAAM driver")
    94cebd9da42c ("crypto: caam - add Queue Interface v2 error codes")
    96808c596580 ("crypto: caam/qi2 - add CONFIG_NETDEVICES dependency")
    ee38767f152a ("crypto: caam - support crypto_engine framework for SKCIPHER algorithms")

v4.14.193: Failed to apply! Possible dependencies:
    0efa7579f3de ("crypto: caam - export ahash shared descriptor generation")
    1b46c90c8e00 ("crypto: caam - convert top level drivers to libraries")
    226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms")
    8d818c105501 ("crypto: caam/qi2 - add DPAA2-CAAM driver")
    94cebd9da42c ("crypto: caam - add Queue Interface v2 error codes")
    96808c596580 ("crypto: caam/qi2 - add CONFIG_NETDEVICES dependency")
    ee38767f152a ("crypto: caam - support crypto_engine framework for SKCIPHER algorithms")

v4.9.232: Failed to apply! Possible dependencies:
    1b008eedb0af ("crypto: caam - remove unused command from aead givencrypt")
    281669dfbabe ("crypto: caam - rewrite some generic inline append cmds")
    4cbe79ccb523 ("crypto: caam - improve key inlining")
    62ad8b5c0964 ("crypto: cavium - Enable CPT options crypto for build")
    64c9295b2320 ("crypto: caam - move append_key_aead() into init_sh_desc_key_aead()")
    8cea7b66b821 ("crypto: caam - refactor encryption descriptors generation")
    8d818c105501 ("crypto: caam/qi2 - add DPAA2-CAAM driver")
    db57656b0072 ("crypto: caam - group algorithm related params")
    ee38767f152a ("crypto: caam - support crypto_engine framework for SKCIPHER algorithms")

v4.4.232: Failed to apply! Possible dependencies:
    1b008eedb0af ("crypto: caam - remove unused command from aead givencrypt")
    4cbe79ccb523 ("crypto: caam - improve key inlining")
    5ba1c7b5ffc1 ("crypto: caam - fix rfc3686(ctr(aes)) IV load")
    64c9295b2320 ("crypto: caam - move append_key_aead() into init_sh_desc_key_aead()")
    8c419778ab57 ("crypto: caam - add support for RSA algorithm")
    8cea7b66b821 ("crypto: caam - refactor encryption descriptors generation")
    d6e7a7d0c2c5 ("crypto: caam - Rename jump labels in ahash_setkey()")
    db57656b0072 ("crypto: caam - group algorithm related params")
    e11793f5dad8 ("crypto: caam - ensure descriptor buffers are cacheline aligned")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 6/9] crypto: caam/qi2 - add support for more XTS key lengths
  2020-08-06 16:35 ` [PATCH RESEND 6/9] crypto: caam/qi2 " Andrei Botila
@ 2020-08-19 23:56   ` Sasha Levin
  0 siblings, 0 replies; 31+ messages in thread
From: Sasha Levin @ 2020-08-19 23:56 UTC (permalink / raw)
  To: Sasha Levin, Andrei Botila, Andrei Botila, Horia Geanta
  Cc: linux-crypto, linux-kernel, stable

Hi

[This is an automated email]

This commit has been processed because it contains a "Fixes:" tag
fixing commit: 226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms").

The bot has tested the following trees: v5.8.1, v5.7.15, v5.4.58.

v5.8.1: Failed to apply! Possible dependencies:
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")
    f9665aa69c99 ("crypto: caam/qi2 - add fallback for XTS with more than 8B IV")

v5.7.15: Failed to apply! Possible dependencies:
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")
    f9665aa69c99 ("crypto: caam/qi2 - add fallback for XTS with more than 8B IV")

v5.4.58: Failed to apply! Possible dependencies:
    64db5e7439fb ("crypto: sparc/aes - convert to skcipher API")
    66d7fb94e4ff ("crypto: blake2s - generic C library implementation and selftest")
    674f368a952c ("crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN")
    746b2e024c67 ("crypto: lib - tidy up lib/crypto Kconfig and Makefile")
    7988fb2c03c8 ("crypto: s390/aes - convert to skcipher API")
    7f725f41f627 ("crypto: powerpc - convert SPE AES algorithms to skcipher API")
    7f9b0880925f ("crypto: blake2s - implement generic shash driver")
    91d689337fe8 ("crypto: blake2b - add blake2b generic implementation")
    b4d0c0aad57a ("crypto: arm - use Kconfig based compiler checks for crypto opcodes")
    b95bba5d0114 ("crypto: skcipher - rename the crypto_blkcipher module and kconfig option")
    d00c06398154 ("crypto: s390/paes - convert to skcipher API")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")
    ed0356eda153 ("crypto: blake2s - x86_64 SIMD implementation")
    f9665aa69c99 ("crypto: caam/qi2 - add fallback for XTS with more than 8B IV")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 5/9] crypto: caam/qi - add support for more XTS key lengths
  2020-08-06 16:35 ` [PATCH RESEND 5/9] crypto: caam/qi " Andrei Botila
@ 2020-08-19 23:56   ` Sasha Levin
  0 siblings, 0 replies; 31+ messages in thread
From: Sasha Levin @ 2020-08-19 23:56 UTC (permalink / raw)
  To: Sasha Levin, Andrei Botila, Andrei Botila, Horia Geanta
  Cc: linux-crypto, linux-kernel, stable

Hi

[This is an automated email]

This commit has been processed because it contains a "Fixes:" tag
fixing commit: b189817cf789 ("crypto: caam/qi - add ablkcipher and authenc algorithms").

The bot has tested the following trees: v5.8.1, v5.7.15, v5.4.58, v4.19.139, v4.14.193.

v5.8.1: Failed to apply! Possible dependencies:
    297142490236 ("crypto: caam/qi - add fallback for XTS with more than 8B IV")
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.7.15: Failed to apply! Possible dependencies:
    297142490236 ("crypto: caam/qi - add fallback for XTS with more than 8B IV")
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.4.58: Failed to apply! Possible dependencies:
    297142490236 ("crypto: caam/qi - add fallback for XTS with more than 8B IV")
    64db5e7439fb ("crypto: sparc/aes - convert to skcipher API")
    66d7fb94e4ff ("crypto: blake2s - generic C library implementation and selftest")
    674f368a952c ("crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN")
    746b2e024c67 ("crypto: lib - tidy up lib/crypto Kconfig and Makefile")
    7988fb2c03c8 ("crypto: s390/aes - convert to skcipher API")
    7f725f41f627 ("crypto: powerpc - convert SPE AES algorithms to skcipher API")
    7f9b0880925f ("crypto: blake2s - implement generic shash driver")
    91d689337fe8 ("crypto: blake2b - add blake2b generic implementation")
    b4d0c0aad57a ("crypto: arm - use Kconfig based compiler checks for crypto opcodes")
    b95bba5d0114 ("crypto: skcipher - rename the crypto_blkcipher module and kconfig option")
    d00c06398154 ("crypto: s390/paes - convert to skcipher API")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")
    ed0356eda153 ("crypto: blake2s - x86_64 SIMD implementation")

v4.19.139: Failed to apply! Possible dependencies:
    0a5dff9882e5 ("crypto: arm/ghash - provide a synchronous version")
    1ca1b917940c ("crypto: chacha20-generic - refactor to allow varying number of rounds")
    297142490236 ("crypto: caam/qi - add fallback for XTS with more than 8B IV")
    5ca7badb1f62 ("crypto: caam/jr - ablkcipher -> skcipher conversion")
    674f368a952c ("crypto: remove CRYPTO_TFM_RES_BAD_KEY_LEN")
    8a5a79d5556b ("crypto: x86/chacha20 - Add a 4-block AVX2 variant")
    99680c5e9182 ("crypto: arm - convert to use crypto_simd_usable()")
    9b17608f15b9 ("crypto: x86/chacha20 - Use larger block functions more aggressively")
    9dbe3072c6b1 ("crypto: caam/qi - ablkcipher -> skcipher conversion")
    a5dd97f86211 ("crypto: x86/chacha20 - Add a 2-block AVX2 variant")
    aec48adce85d ("crypto: caam/qi - remove ablkcipher IV generation")
    c3b734dd325d ("crypto: x86/chacha20 - Support partial lengths in 8-block AVX2 variant")
    cf5448b5c3d8 ("crypto: caam/jr - remove ablkcipher IV generation")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")
    db8e15a24957 ("crypto: x86/chacha20 - Support partial lengths in 4-block SSSE3 variant")
    e4e72063d3c0 ("crypto: x86/chacha20 - Support partial lengths in 1-block SSSE3 variant")

v4.14.193: Failed to apply! Possible dependencies:
    297142490236 ("crypto: caam/qi - add fallback for XTS with more than 8B IV")
    5ca7badb1f62 ("crypto: caam/jr - ablkcipher -> skcipher conversion")
    662f70ede597 ("crypto: caam - remove needless ablkcipher key copy")
    7e0880b9fbbe ("crypto: caam - add Derived Key Protocol (DKP) support")
    9dbe3072c6b1 ("crypto: caam/qi - ablkcipher -> skcipher conversion")
    cf5448b5c3d8 ("crypto: caam/jr - remove ablkcipher IV generation")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 4/9] crypto: caam/jr - add support for more XTS key lengths
  2020-08-06 16:35 ` [PATCH RESEND 4/9] crypto: caam/jr - add support for more XTS key lengths Andrei Botila
  2020-08-11 14:36   ` Horia Geantă
@ 2020-08-19 23:56   ` Sasha Levin
  1 sibling, 0 replies; 31+ messages in thread
From: Sasha Levin @ 2020-08-19 23:56 UTC (permalink / raw)
  To: Sasha Levin, Andrei Botila, Andrei Botila, Horia Geanta
  Cc: linux-crypto, linux-kernel, stable

Hi

[This is an automated email]

This commit has been processed because it contains a "Fixes:" tag
fixing commit: c6415a6016bf ("crypto: caam - add support for acipher xts(aes)").

The bot has tested the following trees: v5.8.1, v5.7.15, v5.4.58, v4.19.139, v4.14.193, v4.9.232, v4.4.232.

v5.8.1: Failed to apply! Possible dependencies:
    2d4d8e196706 ("crypto: caam/jr - add fallback for XTS with more than 8B IV")
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.7.15: Failed to apply! Possible dependencies:
    2d4d8e196706 ("crypto: caam/jr - add fallback for XTS with more than 8B IV")
    528f776df67c ("crypto: qat - allow xts requests not multiple of block")
    a85211f36f3d ("crypto: qat - fallback for xts with 192 bit keys")
    b185a68710e0 ("crypto: qat - validate xts key")
    b8aa7dc5c753 ("crypto: drivers - set the flag CRYPTO_ALG_ALLOCATES_MEMORY")
    da6a66853a38 ("crypto: caam - silence .setkey in case of bad key length")

v5.4.58: Failed to apply! Possible dependencies:
    2d4d8e196706 ("crypto: caam/jr - add fallback for XTS with more than 8B IV")
    4d370a103695 ("crypto: caam - change return code in caam_jr_enqueue function")
    b7f17fe28144 ("crypto: caam - refactor skcipher/aead/gcm/chachapoly {en,de}crypt functions")
    d53e44fe980b ("crypto: caam - refactor RSA private key _done callbacks")
    ee38767f152a ("crypto: caam - support crypto_engine framework for SKCIPHER algorithms")

v4.19.139: Failed to apply! Possible dependencies:
    0efa7579f3de ("crypto: caam - export ahash shared descriptor generation")
    1b46c90c8e00 ("crypto: caam - convert top level drivers to libraries")
    226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms")
    8d818c105501 ("crypto: caam/qi2 - add DPAA2-CAAM driver")
    94cebd9da42c ("crypto: caam - add Queue Interface v2 error codes")
    96808c596580 ("crypto: caam/qi2 - add CONFIG_NETDEVICES dependency")
    ee38767f152a ("crypto: caam - support crypto_engine framework for SKCIPHER algorithms")

v4.14.193: Failed to apply! Possible dependencies:
    0efa7579f3de ("crypto: caam - export ahash shared descriptor generation")
    1b46c90c8e00 ("crypto: caam - convert top level drivers to libraries")
    226853ac3ebe ("crypto: caam/qi2 - add skcipher algorithms")
    8d818c105501 ("crypto: caam/qi2 - add DPAA2-CAAM driver")
    94cebd9da42c ("crypto: caam - add Queue Interface v2 error codes")
    96808c596580 ("crypto: caam/qi2 - add CONFIG_NETDEVICES dependency")
    ee38767f152a ("crypto: caam - support crypto_engine framework for SKCIPHER algorithms")

v4.9.232: Failed to apply! Possible dependencies:
    1b008eedb0af ("crypto: caam - remove unused command from aead givencrypt")
    281669dfbabe ("crypto: caam - rewrite some generic inline append cmds")
    4cbe79ccb523 ("crypto: caam - improve key inlining")
    62ad8b5c0964 ("crypto: cavium - Enable CPT options crypto for build")
    64c9295b2320 ("crypto: caam - move append_key_aead() into init_sh_desc_key_aead()")
    8cea7b66b821 ("crypto: caam - refactor encryption descriptors generation")
    8d818c105501 ("crypto: caam/qi2 - add DPAA2-CAAM driver")
    db57656b0072 ("crypto: caam - group algorithm related params")
    ee38767f152a ("crypto: caam - support crypto_engine framework for SKCIPHER algorithms")

v4.4.232: Failed to apply! Possible dependencies:
    1b008eedb0af ("crypto: caam - remove unused command from aead givencrypt")
    4cbe79ccb523 ("crypto: caam - improve key inlining")
    5ba1c7b5ffc1 ("crypto: caam - fix rfc3686(ctr(aes)) IV load")
    64c9295b2320 ("crypto: caam - move append_key_aead() into init_sh_desc_key_aead()")
    8c419778ab57 ("crypto: caam - add support for RSA algorithm")
    8cea7b66b821 ("crypto: caam - refactor encryption descriptors generation")
    d6e7a7d0c2c5 ("crypto: caam - Rename jump labels in ahash_setkey()")
    db57656b0072 ("crypto: caam - group algorithm related params")
    e11793f5dad8 ("crypto: caam - ensure descriptor buffers are cacheline aligned")


NOTE: The patch will not be queued to stable trees until it is upstream.

How should we proceed with this patch?

-- 
Thanks
Sasha

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-08-06 16:35 ` [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV Andrei Botila
  2020-08-11 14:30   ` Horia Geantă
  2020-08-19 23:56   ` Sasha Levin
@ 2020-08-21  3:46   ` Herbert Xu
  2020-09-08 10:35     ` Horia Geantă
  2 siblings, 1 reply; 31+ messages in thread
From: Herbert Xu @ 2020-08-21  3:46 UTC (permalink / raw)
  To: Andrei Botila
  Cc: Horia Geanta, Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On Thu, Aug 06, 2020 at 07:35:43PM +0300, Andrei Botila wrote:
>
> +static bool xts_skcipher_ivsize(struct skcipher_request *req)
> +{
> +	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
> +	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
> +	u64 size = 0;
> +
> +	if (IS_ALIGNED((unsigned long)req->iv, __alignof__(u64)))
> +		size = *(u64 *)(req->iv + (ivsize / 2));
> +	else
> +		size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
> +
> +	return !!size;
> +}

Just go with the get_unaligned unconditionally.
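
I.e. something like this (untested sketch; the rest of the helper
stays as it is):

	return !!get_unaligned((u64 *)(req->iv + ivsize / 2));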

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-08-11 14:30   ` Horia Geantă
@ 2020-08-21  3:47     ` Herbert Xu
  2020-08-21 12:02       ` Horia Geantă
  0 siblings, 1 reply; 31+ messages in thread
From: Herbert Xu @ 2020-08-21  3:47 UTC (permalink / raw)
  To: Horia Geantă
  Cc: Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On Tue, Aug 11, 2020 at 05:30:41PM +0300, Horia Geantă wrote:
>
> > +		if (IS_ERR(fallback)) {
> > +			pr_err("Failed to allocate %s fallback: %ld\n",
> > +			       tfm_name, PTR_ERR(fallback));
> > +			return PTR_ERR(fallback);
> Shouldn't error out so early. It might be that the fallback won't be needed.
> Let's postpone this until we're sure fallback is required.

Why? The generic should always be there as otherwise you won't
even pass the self-test.  If we're OOM then we should error out
ASAP.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-08-21  3:47     ` Herbert Xu
@ 2020-08-21 12:02       ` Horia Geantă
  0 siblings, 0 replies; 31+ messages in thread
From: Horia Geantă @ 2020-08-21 12:02 UTC (permalink / raw)
  To: Herbert Xu
  Cc: Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On 8/21/2020 6:47 AM, Herbert Xu wrote:
> On Tue, Aug 11, 2020 at 05:30:41PM +0300, Horia Geantă wrote:
>>
>>> +		if (IS_ERR(fallback)) {
>>> +			pr_err("Failed to allocate %s fallback: %ld\n",
>>> +			       tfm_name, PTR_ERR(fallback));
>>> +			return PTR_ERR(fallback);
>> Shouldn't error out so early. It might be that the fallback won't be needed.
>> Let's postpone this until we're sure fallback is required.
> 
> Why? The generic should always be there as otherwise you won't
> even pass the self-test.  If we're OOM then we should error out
> ASAP.
> 
self-tests don't check the cases where a fallback is needed,
so in theory the tests could still pass on a misconfigured kernel
(one where a fallback is not available).

But I agree, if driver is updated to select CRYPTO_XTS
then it's probably better to return an error here.
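
I.e. something along these lines in drivers/crypto/caam/Kconfig
(a sketch; the exact config symbol for the JR driver is assumed):

config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
	...
	select CRYPTO_XTS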

Thanks,
Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-08-21  3:46   ` Herbert Xu
@ 2020-09-08 10:35     ` Horia Geantă
  2020-09-08 22:10       ` Herbert Xu
  0 siblings, 1 reply; 31+ messages in thread
From: Horia Geantă @ 2020-09-08 10:35 UTC (permalink / raw)
  To: Herbert Xu, Andrei Botila (OSS)
  Cc: Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel,
	Ard Biesheuvel

On 8/21/2020 6:47 AM, Herbert Xu wrote:
> On Thu, Aug 06, 2020 at 07:35:43PM +0300, Andrei Botila wrote:
>>
>> +static bool xts_skcipher_ivsize(struct skcipher_request *req)
>> +{
>> +	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
>> +	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
>> +	u64 size = 0;
>> +
>> +	if (IS_ALIGNED((unsigned long)req->iv, __alignof__(u64)))
>> +		size = *(u64 *)(req->iv + (ivsize / 2));
>> +	else
>> +		size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
>> +
>> +	return !!size;
>> +}
> 
> Just go with the get_unaligned unconditionally.
> 
Won't this lead to sub-optimal code for ARMv7
in case the IV is aligned?

Thanks,
Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-09-08 10:35     ` Horia Geantă
@ 2020-09-08 22:10       ` Herbert Xu
  2020-09-14 16:23         ` Horia Geantă
  0 siblings, 1 reply; 31+ messages in thread
From: Herbert Xu @ 2020-09-08 22:10 UTC (permalink / raw)
  To: Horia Geantă
  Cc: Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel,
	Ard Biesheuvel

On Tue, Sep 08, 2020 at 01:35:04PM +0300, Horia Geantă wrote:
>
> > Just go with the get_unaligned unconditionally.
>
> Won't this lead to sub-optimal code for ARMv7
> in case the IV is aligned?

If this should be optimised in ARMv7 then that should be done
in get_unaligned itself and not open-coded.

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-09-08 22:10       ` Herbert Xu
@ 2020-09-14 16:23         ` Horia Geantă
  2020-09-14 16:28           ` Ard Biesheuvel
  0 siblings, 1 reply; 31+ messages in thread
From: Horia Geantă @ 2020-09-14 16:23 UTC (permalink / raw)
  To: Herbert Xu
  Cc: Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel,
	Ard Biesheuvel

On 9/9/2020 1:10 AM, Herbert Xu wrote:
> On Tue, Sep 08, 2020 at 01:35:04PM +0300, Horia Geantă wrote:
>>
>>> Just go with the get_unaligned unconditionally.
>>
>> Won't this lead to sub-optimal code for ARMv7
>> in case the IV is aligned?
> 
> If this should be optimised in ARMv7 then that should be done
> in get_unaligned itself and not open-coded.
> 
I am not sure what's wrong with avoiding using the unaligned accessors
in case data is aligned.

Documentation/core-api/unaligned-memory-access.rst clearly states:
These macros work for memory accesses of any length (not just 32 bits as
in the examples above). Be aware that when compared to standard access of
aligned memory, using these macros to access unaligned memory can be costly in
terms of performance.

So IMO it makes sense to use get_unaligned() only when needed.
There are several cases of users doing this, e.g. siphash.

Thanks,
Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-09-14 16:23         ` Horia Geantă
@ 2020-09-14 16:28           ` Ard Biesheuvel
  2020-09-14 17:12             ` Horia Geantă
  0 siblings, 1 reply; 31+ messages in thread
From: Ard Biesheuvel @ 2020-09-14 16:28 UTC (permalink / raw)
  To: Horia Geantă
  Cc: Herbert Xu, Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On Mon, 14 Sep 2020 at 19:24, Horia Geantă <horia.geanta@nxp.com> wrote:
>
> On 9/9/2020 1:10 AM, Herbert Xu wrote:
> > On Tue, Sep 08, 2020 at 01:35:04PM +0300, Horia Geantă wrote:
> >>
> >>> Just go with the get_unaligned unconditionally.
> >>
> >> Won't this lead to sub-optimal code for ARMv7
> >> in case the IV is aligned?
> >
> > If this should be optimised in ARMv7 then that should be done
> > in get_unaligned itself and not open-coded.
> >
> I am not sure what's wrong with avoiding using the unaligned accessors
> in case data is aligned.
>
> Documentation/core-api/unaligned-memory-access.rst clearly states:
> These macros work for memory accesses of any length (not just 32 bits as
> in the examples above). Be aware that when compared to standard access of
> aligned memory, using these macros to access unaligned memory can be costly in
> terms of performance.
>
> So IMO it makes sense to use get_unaligned() only when needed.
> There are several cases of users doing this, e.g. siphash.
>

For ARMv7 code, using the unaligned accessors unconditionally is fine,
and it will not affect performance.

In general, when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined,
you can use the unaligned accessors. If it is not, it helps to have
different code paths.
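
A minimal sketch of that split, for a possibly-unaligned u8 pointer p
(a generic pattern similar to what siphash does, not code from this
series):

	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
	    !IS_ALIGNED((unsigned long)p, __alignof__(u64)))
		v = get_unaligned((const u64 *)p);
	else
		v = *(const u64 *)p;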

This is a bit murky, and through the years, the interpretation of
unaligned-memory-access.rst has shifted a bit, but in this case, it
makes no sense to make the distinction.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-09-14 16:28           ` Ard Biesheuvel
@ 2020-09-14 17:12             ` Horia Geantă
  2020-09-14 18:20               ` Ard Biesheuvel
  0 siblings, 1 reply; 31+ messages in thread
From: Horia Geantă @ 2020-09-14 17:12 UTC (permalink / raw)
  To: Ard Biesheuvel
  Cc: Herbert Xu, Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On 9/14/2020 7:28 PM, Ard Biesheuvel wrote:
> On Mon, 14 Sep 2020 at 19:24, Horia Geantă <horia.geanta@nxp.com> wrote:
>>
>> On 9/9/2020 1:10 AM, Herbert Xu wrote:
>>> On Tue, Sep 08, 2020 at 01:35:04PM +0300, Horia Geantă wrote:
>>>>
>>>>> Just go with the get_unaligned unconditionally.
>>>>
>>>> Won't this lead to sub-optimal code for ARMv7
>>>> in case the IV is aligned?
>>>
>>> If this should be optimised in ARMv7 then that should be done
>>> in get_unaligned itself and not open-coded.
>>>
>> I am not sure what's wrong with avoiding using the unaligned accessors
>> in case data is aligned.
>>
>> Documentation/core-api/unaligned-memory-access.rst clearly states:
>> These macros work for memory accesses of any length (not just 32 bits as
>> in the examples above). Be aware that when compared to standard access of
>> aligned memory, using these macros to access unaligned memory can be costly in
>> terms of performance.
>>
>> So IMO it makes sense to use get_unaligned() only when needed.
>> There are several cases of users doing this, e.g. siphash.
>>
> 
> For ARMv7 code, using the unaligned accessors unconditionally is fine,
> and it will not affect performance.
> 
> In general, when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined,
> you can use the unaligned accessors. If it is not, it helps to have
> different code paths.
> 
arch/arm/include/asm/unaligned.h doesn't make use of
linux/unaligned/access_ok.h, even if CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
is set.

I understand the comment in the file, however using get_unaligned()
unconditionally takes away the opportunity to generate optimized code
(using ldrd/ldm) when data is aligned.

> This is a bit murky, and through the years, the interpretation of
> unaligned-memory-access.rst has shifted a bit, but in this case, it
> makes no sense to make the distinction.
> 

Thanks,
Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-09-14 17:12             ` Horia Geantă
@ 2020-09-14 18:20               ` Ard Biesheuvel
  2020-09-15 10:02                 ` Horia Geantă
  0 siblings, 1 reply; 31+ messages in thread
From: Ard Biesheuvel @ 2020-09-14 18:20 UTC (permalink / raw)
  To: Horia Geantă
  Cc: Herbert Xu, Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On Mon, 14 Sep 2020 at 20:12, Horia Geantă <horia.geanta@nxp.com> wrote:
>
> On 9/14/2020 7:28 PM, Ard Biesheuvel wrote:
> > On Mon, 14 Sep 2020 at 19:24, Horia Geantă <horia.geanta@nxp.com> wrote:
> >>
> >> On 9/9/2020 1:10 AM, Herbert Xu wrote:
> >>> On Tue, Sep 08, 2020 at 01:35:04PM +0300, Horia Geantă wrote:
> >>>>
> >>>>> Just go with the get_unaligned unconditionally.
> >>>>
> >>>> Won't this lead to sub-optimal code for ARMv7
> >>>> in case the IV is aligned?
> >>>
> >>> If this should be optimised in ARMv7 then that should be done
> >>> in get_unaligned itself and not open-coded.
> >>>
> >> I am not sure what's wrong with avoiding using the unaligned accessors
> >> in case data is aligned.
> >>
> >> Documentation/core-api/unaligned-memory-access.rst clearly states:
> >> These macros work for memory accesses of any length (not just 32 bits as
> >> in the examples above). Be aware that when compared to standard access of
> >> aligned memory, using these macros to access unaligned memory can be costly in
> >> terms of performance.
> >>
> >> So IMO it makes sense to use get_unaligned() only when needed.
> >> There are several cases of users doing this, e.g. siphash.
> >>
> >
> > For ARMv7 code, using the unaligned accessors unconditionally is fine,
> > and it will not affect performance.
> >
> > In general, when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined,
> > you can use the unaligned accessors. If it is not, it helps to have
> > different code paths.
> >
> arch/arm/include/asm/unaligned.h doesn't make use of
> linux/unaligned/access_ok.h, even if CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> is set.
>
> I understand the comment in the file, however using get_unaligned()
> unconditionally takes away the opportunity to generate optimized code
> (using ldrd/ldm) when data is aligned.
>

But the minimal optimization that is possible here (one ldrd/ldm
instruction vs two ldr instructions) is defeated by the fact that you
are using a conditional branch to select between the two. And this is
not even a hot path to begin with,
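
(Illustration only, assuming ARM mode: the aligned path is

	ldrd	r2, r3, [r0]		@ one load-pair

versus

	ldr	r2, [r0]
	ldr	r3, [r0, #4]		@ two single loads

so the saving is a single instruction, which the compare-and-branch
already costs.)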

> > This is a bit murky, and through the years, the interpretation of
> > unaligned-memory-access.rst has shifted a bit, but in this case, it
> > makes no sense to make the distinction.
> >
>
> Thanks,
> Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-09-14 18:20               ` Ard Biesheuvel
@ 2020-09-15 10:02                 ` Horia Geantă
  2020-09-15 10:26                   ` Ard Biesheuvel
  0 siblings, 1 reply; 31+ messages in thread
From: Horia Geantă @ 2020-09-15 10:02 UTC (permalink / raw)
  To: Ard Biesheuvel
  Cc: Herbert Xu, Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On 9/14/2020 9:20 PM, Ard Biesheuvel wrote:
> On Mon, 14 Sep 2020 at 20:12, Horia Geantă <horia.geanta@nxp.com> wrote:
>>
>> On 9/14/2020 7:28 PM, Ard Biesheuvel wrote:
>>> On Mon, 14 Sep 2020 at 19:24, Horia Geantă <horia.geanta@nxp.com> wrote:
>>>>
>>>> On 9/9/2020 1:10 AM, Herbert Xu wrote:
>>>>> On Tue, Sep 08, 2020 at 01:35:04PM +0300, Horia Geantă wrote:
>>>>>>
>>>>>>> Just go with the get_unaligned unconditionally.
>>>>>>
>>>>>> Won't this lead to sub-optimal code for ARMv7
>>>>>> in case the IV is aligned?
>>>>>
>>>>> If this should be optimised in ARMv7 then that should be done
>>>>> in get_unaligned itself and not open-coded.
>>>>>
>>>> I am not sure what's wrong with avoiding using the unaligned accessors
>>>> in case data is aligned.
>>>>
>>>> Documentation/core-api/unaligned-memory-access.rst clearly states:
>>>> These macros work for memory accesses of any length (not just 32 bits as
>>>> in the examples above). Be aware that when compared to standard access of
>>>> aligned memory, using these macros to access unaligned memory can be costly in
>>>> terms of performance.
>>>>
>>>> So IMO it makes sense to use get_unaligned() only when needed.
>>>> There are several cases of users doing this, e.g. siphash.
>>>>
>>>
>>> For ARMv7 code, using the unaligned accessors unconditionally is fine,
>>> and it will not affect performance.
>>>
>>> In general, when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined,
>>> you can use the unaligned accessors. If it is not, it helps to have
>>> different code paths.
>>>
>> arch/arm/include/asm/unaligned.h doesn't make use of
>> linux/unaligned/access_ok.h, even if CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
>> is set.
>>
>> I understand the comment in the file, however using get_unaligned()
>> unconditionally takes away the opportunity to generate optimized code
>> (using ldrd/ldm) when data is aligned.
>>
> 
> But the minimal optimization that is possible here (one ldrd/ldm
> instruction vs two ldr instructions) is defeated by the fact that you
> are using a conditional branch to select between the two. And this is
> not even a hot path to begin with,
> 
This is actually on the hot path (encrypt/decrypt callbacks),
but you're probably right that the conditional branching is going to offset
the optimized code.

To avoid branching, code could be rewritten as:

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	size = *(u64 *)(req->iv + (ivsize / 2));
#else
	size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
#endif

however in this case ARMv7 would suffer since
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y and
ldrd/ldm for accesses not word-aligned are inefficient - lead to traps.

Would it be ok to use:
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && !defined(CONFIG_ARM)
to workaround the ARMv7 inconsistency?

Thanks,
Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-09-15 10:02                 ` Horia Geantă
@ 2020-09-15 10:26                   ` Ard Biesheuvel
  2020-09-15 12:44                     ` Horia Geantă
  0 siblings, 1 reply; 31+ messages in thread
From: Ard Biesheuvel @ 2020-09-15 10:26 UTC (permalink / raw)
  To: Horia Geantă
  Cc: Herbert Xu, Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On Tue, 15 Sep 2020 at 13:02, Horia Geantă <horia.geanta@nxp.com> wrote:
>
> On 9/14/2020 9:20 PM, Ard Biesheuvel wrote:
> > On Mon, 14 Sep 2020 at 20:12, Horia Geantă <horia.geanta@nxp.com> wrote:
> >>
> >> On 9/14/2020 7:28 PM, Ard Biesheuvel wrote:
> >>> On Mon, 14 Sep 2020 at 19:24, Horia Geantă <horia.geanta@nxp.com> wrote:
> >>>>
> >>>> On 9/9/2020 1:10 AM, Herbert Xu wrote:
> >>>>> On Tue, Sep 08, 2020 at 01:35:04PM +0300, Horia Geantă wrote:
> >>>>>>
> >>>>>>> Just go with the get_unaligned unconditionally.
> >>>>>>
> >>>>>> Won't this lead to sub-optimal code for ARMv7
> >>>>>> in case the IV is aligned?
> >>>>>
> >>>>> If this should be optimised in ARMv7 then that should be done
> >>>>> in get_unaligned itself and not open-coded.
> >>>>>
> >>>> I am not sure what's wrong with avoiding using the unaligned accessors
> >>>> in case data is aligned.
> >>>>
> >>>> Documentation/core-api/unaligned-memory-access.rst clearly states:
> >>>> These macros work for memory accesses of any length (not just 32 bits as
> >>>> in the examples above). Be aware that when compared to standard access of
> >>>> aligned memory, using these macros to access unaligned memory can be costly in
> >>>> terms of performance.
> >>>>
> >>>> So IMO it makes sense to use get_unaligned() only when needed.
> >>>> There are several cases of users doing this, e.g. siphash.
> >>>>
> >>>
> >>> For ARMv7 code, using the unaligned accessors unconditionally is fine,
> >>> and it will not affect performance.
> >>>
> >>> In general, when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined,
> >>> you can use the unaligned accessors. If it is not, it helps to have
> >>> different code paths.
> >>>
> >> arch/arm/include/asm/unaligned.h doesn't make use of
> >> linux/unaligned/access_ok.h, even if CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> >> is set.
> >>
> >> I understand the comment in the file, however using get_unaligned()
> >> unconditionally takes away the opportunity to generate optimized code
> >> (using ldrd/ldm) when data is aligned.
> >>
> >
> > But the minimal optimization that is possible here (one ldrd/ldm
> > instruction vs two ldr instructions) is defeated by the fact that you
> > are using a conditional branch to select between the two. And this is
> > not even a hot path to begin with,
> >
> This is actually on the hot path (encrypt/decrypt callbacks),
> but you're probably right that the conditional branching is going to offset
> the optimized code.
>

This is called once per XTS request, right? And you are saying the
extra cycle makes a difference?

> To avoid branching, code could be rewritten as:
>
> #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
>         size = *(u64 *)(req->iv + (ivsize / 2));
> #else
>         size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
> #endif
>
> however in this case ARMv7 would suffer since
> CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y and
> ldrd/ldm for accesses not word-aligned are inefficient - lead to traps.
>

CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS means 'just use the unaligned
accessors as they are basically free'. Casting a potentially
misaligned u8* to a u64* is not permitted by the C standard.
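
For example (a sketch reusing the names from the patch above):

	u8 *p = req->iv + ivsize / 2;		/* no alignment guarantee */
	u64 bad  = *(u64 *)p;			/* UB when p is misaligned */
	u64 good = get_unaligned((u64 *)p);	/* well-defined either way */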

> Would it be ok to use:
> #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && !defined(CONFIG_ARM)
> to workaround the ARMv7 inconsistency?
>

No, please just use the get_unaligned() accessor.

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-09-15 10:26                   ` Ard Biesheuvel
@ 2020-09-15 12:44                     ` Horia Geantă
  2020-09-15 12:51                       ` Ard Biesheuvel
  0 siblings, 1 reply; 31+ messages in thread
From: Horia Geantă @ 2020-09-15 12:44 UTC (permalink / raw)
  To: Ard Biesheuvel
  Cc: Herbert Xu, Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On 9/15/2020 1:26 PM, Ard Biesheuvel wrote:
> On Tue, 15 Sep 2020 at 13:02, Horia Geantă <horia.geanta@nxp.com> wrote:
>>
>> On 9/14/2020 9:20 PM, Ard Biesheuvel wrote:
>>> On Mon, 14 Sep 2020 at 20:12, Horia Geantă <horia.geanta@nxp.com> wrote:
>>>>
>>>> On 9/14/2020 7:28 PM, Ard Biesheuvel wrote:
>>>>> On Mon, 14 Sep 2020 at 19:24, Horia Geantă <horia.geanta@nxp.com> wrote:
>>>>>>
>>>>>> On 9/9/2020 1:10 AM, Herbert Xu wrote:
>>>>>>> On Tue, Sep 08, 2020 at 01:35:04PM +0300, Horia Geantă wrote:
>>>>>>>>
>>>>>>>>> Just go with the get_unaligned unconditionally.
>>>>>>>>
>>>>>>>> Won't this lead to sub-optimal code for ARMv7
>>>>>>>> in case the IV is aligned?
>>>>>>>
>>>>>>> If this should be optimised in ARMv7 then that should be done
>>>>>>> in get_unaligned itself and not open-coded.
>>>>>>>
>>>>>> I am not sure what's wrong with avoiding using the unaligned accessors
>>>>>> in case data is aligned.
>>>>>>
>>>>>> Documentation/core-api/unaligned-memory-access.rst clearly states:
>>>>>> These macros work for memory accesses of any length (not just 32 bits as
>>>>>> in the examples above). Be aware that when compared to standard access of
>>>>>> aligned memory, using these macros to access unaligned memory can be costly in
>>>>>> terms of performance.
>>>>>>
>>>>>> So IMO it makes sense to use get_unaligned() only when needed.
>>>>>> There are several cases of users doing this, e.g. siphash.
>>>>>>
>>>>>
>>>>> For ARMv7 code, using the unaligned accessors unconditionally is fine,
>>>>> and it will not affect performance.
>>>>>
>>>>> In general, when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined,
>>>>> you can use the unaligned accessors. If it is not, it helps to have
>>>>> different code paths.
>>>>>
>>>> arch/arm/include/asm/unaligned.h doesn't make use of
>>>> linux/unaligned/access_ok.h, even if CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
>>>> is set.
>>>>
>>>> I understand the comment in the file, however using get_unaligned()
>>>> unconditionally takes away the opportunity to generate optimized code
>>>> (using ldrd/ldm) when data is aligned.
>>>>
>>>
>>> But the minimal optimization that is possible here (one ldrd/ldm
>>> instruction vs two ldr instructions) is defeated by the fact that you
>>> are using a conditional branch to select between the two. And this is
>>> not even a hot path to begin with,
>>>
>> This is actually on the hot path (encrypt/decrypt callbacks),
>> but you're probably right that the conditional branching is going to offset
>> the optimized code.
>>
> 
> This is called once per XTS request, right? And you are saying the
> extra cycle makes a difference?
> 
Yes, once per request and no, not super-important.

>> To avoid branching, code could be rewritten as:
>>
>> #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
>>         size = *(u64 *)(req->iv + (ivsize / 2));
>> #else
>>         size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
>> #endif
>>
>> however in this case ARMv7 would suffer since
>> CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y and
>> ldrd/ldm for accesses not word-aligned are inefficient - lead to traps.
>>
> 
> CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS means 'just use the unaligned
> accessors as they are basically free'. Casting a potentially
> misaligned u8* to a u64* is not permitted by the C standard.
> 
Seems that I misunderstood CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.

Looking at its usage, e.g. ether_addr_equal() or __crypto_memneq_*(),
I see similar casts of pointers possibly misaligned.

>> Would it be ok to use:
>> #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && !defined(CONFIG_ARM)
>> to workaround the ARMv7 inconsistency?
>>
> 
> No, please just use the get_unaligned() accessor.
> 
Ok.

Thanks,
Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

* Re: [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV
  2020-09-15 12:44                     ` Horia Geantă
@ 2020-09-15 12:51                       ` Ard Biesheuvel
  0 siblings, 0 replies; 31+ messages in thread
From: Ard Biesheuvel @ 2020-09-15 12:51 UTC (permalink / raw)
  To: Horia Geantă
  Cc: Herbert Xu, Andrei Botila (OSS),
	Aymen Sghaier, David S. Miller, linux-crypto, linux-kernel

On Tue, 15 Sep 2020 at 15:45, Horia Geantă <horia.geanta@nxp.com> wrote:
>
> On 9/15/2020 1:26 PM, Ard Biesheuvel wrote:
> > On Tue, 15 Sep 2020 at 13:02, Horia Geantă <horia.geanta@nxp.com> wrote:
> >>
> >> On 9/14/2020 9:20 PM, Ard Biesheuvel wrote:
> >>> On Mon, 14 Sep 2020 at 20:12, Horia Geantă <horia.geanta@nxp.com> wrote:
> >>>>
> >>>> On 9/14/2020 7:28 PM, Ard Biesheuvel wrote:
> >>>>> On Mon, 14 Sep 2020 at 19:24, Horia Geantă <horia.geanta@nxp.com> wrote:
> >>>>>>
> >>>>>> On 9/9/2020 1:10 AM, Herbert Xu wrote:
> >>>>>>> On Tue, Sep 08, 2020 at 01:35:04PM +0300, Horia Geantă wrote:
> >>>>>>>>
> >>>>>>>>> Just go with the get_unaligned unconditionally.
> >>>>>>>>
> >>>>>>>> Won't this lead to sub-optimal code for ARMv7
> >>>>>>>> in case the IV is aligned?
> >>>>>>>
> >>>>>>> If this should be optimised in ARMv7 then that should be done
> >>>>>>> in get_unaligned itself and not open-coded.
> >>>>>>>
> >>>>>> I am not sure what's wrong with avoiding using the unaligned accessors
> >>>>>> in case data is aligned.
> >>>>>>
> >>>>>> Documentation/core-api/unaligned-memory-access.rst clearly states:
> >>>>>> These macros work for memory accesses of any length (not just 32 bits as
> >>>>>> in the examples above). Be aware that when compared to standard access of
> >>>>>> aligned memory, using these macros to access unaligned memory can be costly in
> >>>>>> terms of performance.
> >>>>>>
> >>>>>> So IMO it makes sense to use get_unaligned() only when needed.
> >>>>>> There are several cases of users doing this, e.g. siphash.
> >>>>>>
> >>>>>
> >>>>> For ARMv7 code, using the unaligned accessors unconditionally is fine,
> >>>>> and it will not affect performance.
> >>>>>
> >>>>> In general, when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is defined,
> >>>>> you can use the unaligned accessors. If it is not, it helps to have
> >>>>> different code paths.
> >>>>>
> >>>> arch/arm/include/asm/unaligned.h doesn't make use of
> >>>> linux/unaligned/access_ok.h, even if CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> >>>> is set.
> >>>>
> >>>> I understand the comment in the file, however using get_unaligned()
> >>>> unconditionally takes away the opportunity to generate optimized code
> >>>> (using ldrd/ldm) when data is aligned.
> >>>>
> >>>
> >>> But the minimal optimization that is possible here (one ldrd/ldm
> >>> instruction vs two ldr instructions) is defeated by the fact that you
> >>> are using a conditional branch to select between the two. And this is
> >>> not even a hot path to begin with,
> >>>
> >> This is actually on the hot path (encrypt/decrypt callbacks),
> >> but you're probably right that the conditional branching is going to offset
> >> the optimized code.
> >>
> >
> > This is called once per XTS request, right? And you are saying the
> > extra cycle makes a difference?
> >
> Yes, once per request and no, not super-important.
>
> >> To avoid branching, code could be rewritten as:
> >>
> >> #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
> >>         size = *(u64 *)(req->iv + (ivsize / 2));
> >> #else
> >>         size = get_unaligned((u64 *)(req->iv + (ivsize / 2)));
> >> #endif
> >>
> >> however in this case ARMv7 would suffer since
> >> CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y and
> >> ldrd/ldm for accesses not word-aligned are inefficient - lead to traps.
> >>
> >
> > CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS means 'just use the unaligned
> > accessors as they are basically free'. Casting a potentially
> > misaligned u8* to a u64* is not permitted by the C standard.
> >
> Seems that I misunderstood CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
>

You're not the only one :-) I have been intending to get the
discussion going with the networking folks, who rely heavily on this
as well.

> Looking at its usage, e.g. ether_addr_equal() or __crypto_memneq_*(),
> I see similar casts of pointers possibly misaligned.
>

Yes, that is the confusion. CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
should indicate whether using the unaligned accessors is fine in all
cases, or whether you should find other ways to load the data more
efficiently (compare NET_IP_ALIGN, which shifts the entire IP header
so the 32-bit address fields appear aligned in memory).
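
For reference, the usual NET_IP_ALIGN pattern in a network driver
looks roughly like this (a generic sketch, not code from this series):

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	skb_reserve(skb, NET_IP_ALIGN);	/* the 14-byte Ethernet header now
					 * ends on a 4-byte boundary, so the
					 * IP header starts aligned */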

CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS does *not* mean that you can
simply cast any pointer to any type and dereference it, but the
meaning appears to have shifted this way over the years (and the
Documentation/ was even updated to this effect).

Pre-v6 ARM (and MIPS as well, IIRC) requires byte-sized accesses and
shift/or sequences to do unaligned accesses, whereas v6 and later
simply allow ldr from a misaligned address. So in the former case, you
could use cra_alignmask to align the data in memory, while the latter
case can ignore it. (arch/arm/crypto/aes-cipher-glue.c uses this as
well.)

> >> Would it be ok to use:
> >> #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && !defined(CONFIG_ARM)
> >> to workaround the ARMv7 inconsistency?
> >>
> >
> > No, please just use the get_unaligned() accessor.
> >
> Ok.
>
> Thanks,
> Horia

^ permalink raw reply	[flat|nested] 31+ messages in thread

end of thread

Thread overview: 31+ messages
2020-08-06 16:35 [PATCH RESEND 0/9] crypto: caam - xts(aes) updates Andrei Botila
2020-08-06 16:35 ` [PATCH RESEND 1/9] crypto: caam/jr - add fallback for XTS with more than 8B IV Andrei Botila
2020-08-11 14:30   ` Horia Geantă
2020-08-21  3:47     ` Herbert Xu
2020-08-21 12:02       ` Horia Geantă
2020-08-19 23:56   ` Sasha Levin
2020-08-21  3:46   ` Herbert Xu
2020-09-08 10:35     ` Horia Geantă
2020-09-08 22:10       ` Herbert Xu
2020-09-14 16:23         ` Horia Geantă
2020-09-14 16:28           ` Ard Biesheuvel
2020-09-14 17:12             ` Horia Geantă
2020-09-14 18:20               ` Ard Biesheuvel
2020-09-15 10:02                 ` Horia Geantă
2020-09-15 10:26                   ` Ard Biesheuvel
2020-09-15 12:44                     ` Horia Geantă
2020-09-15 12:51                       ` Ard Biesheuvel
2020-08-06 16:35 ` [PATCH RESEND 2/9] crypto: caam/qi " Andrei Botila
2020-08-19 23:56   ` Sasha Levin
2020-08-06 16:35 ` [PATCH RESEND 3/9] crypto: caam/qi2 " Andrei Botila
2020-08-19 23:56   ` Sasha Levin
2020-08-06 16:35 ` [PATCH RESEND 4/9] crypto: caam/jr - add support for more XTS key lengths Andrei Botila
2020-08-11 14:36   ` Horia Geantă
2020-08-19 23:56   ` Sasha Levin
2020-08-06 16:35 ` [PATCH RESEND 5/9] crypto: caam/qi " Andrei Botila
2020-08-19 23:56   ` Sasha Levin
2020-08-06 16:35 ` [PATCH RESEND 6/9] crypto: caam/qi2 " Andrei Botila
2020-08-19 23:56   ` Sasha Levin
2020-08-06 16:35 ` [PATCH RESEND 7/9] crypto: caam/jr - add support for XTS with 16B IV Andrei Botila
2020-08-06 16:35 ` [PATCH RESEND 8/9] crypto: caam/qi " Andrei Botila
2020-08-06 16:35 ` [PATCH RESEND 9/9] crypto: caam/qi2 " Andrei Botila
