linux-crypto.vger.kernel.org archive mirror
* [PATCH 01/15] crypto: skcipher - Add tailsize attribute
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
@ 2023-12-02  4:55 ` Herbert Xu
  2024-02-14 23:44   ` Eric Biggers
  2023-12-02  5:42 ` [PATCH 02/15] crypto: algif_skcipher - Add support for tailsize Herbert Xu
                   ` (14 subsequent siblings)
  15 siblings, 1 reply; 30+ messages in thread
From: Herbert Xu @ 2023-12-02  4:55 UTC (permalink / raw)
  To: Linux Crypto Mailing List

This patch adds a new tailsize attribute to skcipher and lskcipher
algorithms.  It will be used by algorithms such as CTS, which may
need to withhold a number of blocks until the end of the data has
been reached.

When issuing a NOTFINAL request, the user must ensure that at least
tailsize bytes will be supplied later in a final request.
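
As a rough illustration only (nothing below is part of this patch; the
helper name crypt_head and its arguments are made up), a caller driving
partial requests would query the new attribute and keep the advertised
tail back until the final submission:

  static int crypt_head(struct skcipher_request *req,
                        struct scatterlist *sg, unsigned int total,
                        u8 *iv)
  {
          struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
          unsigned int tail = crypto_skcipher_tailsize(tfm);
          unsigned int head = total > tail ? total - tail : 0;

          /* Process everything except the withheld tail now ... */
          req->base.flags |= CRYPTO_SKCIPHER_REQ_NOTFINAL;
          skcipher_request_set_crypt(req, sg, sg, head, iv);
          return crypto_skcipher_encrypt(req);
          /* ... the remaining (>= tailsize) bytes must be supplied
           * later in a final request with NOTFINAL cleared. */
  }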

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lskcipher.c                 |  1 +
 crypto/skcipher.c                  | 16 ++++++++++++++-
 include/crypto/internal/skcipher.h |  1 +
 include/crypto/skcipher.h          | 33 ++++++++++++++++++++++++++++++
 4 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 0b6dd8aa21f2..2a602911f4fc 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -300,6 +300,7 @@ static void __maybe_unused crypto_lskcipher_show(
 	seq_printf(m, "ivsize       : %u\n", skcipher->co.ivsize);
 	seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
 	seq_printf(m, "statesize    : %u\n", skcipher->co.statesize);
+	seq_printf(m, "tailsize     : %u\n", skcipher->co.tailsize);
 }
 
 static int __maybe_unused crypto_lskcipher_report(
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index bc70e159d27d..600ec5735ce0 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -368,10 +368,21 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
 			 SKCIPHER_WALK_DIFF);
 
 	n = walk->total;
-	bsize = min(walk->stride, max(n, walk->blocksize));
+
+	bsize = max(n, walk->blocksize);
+	if (n > walk->tailsize)
+		bsize = min(walk->stride, bsize);
+
 	n = scatterwalk_clamp(&walk->in, n);
 	n = scatterwalk_clamp(&walk->out, n);
 
+	/* Retain tail if necessary. */
+	if (walk->tailsize < walk->total && walk->total - n < walk->tailsize) {
+		/* Process at least one block. */
+		n = min(n, bsize);
+		n = max(n, walk->total - walk->tailsize);
+	}
+
 	if (unlikely(n < bsize)) {
 		if (unlikely(walk->total < walk->blocksize))
 			return skcipher_walk_done(walk, -EINVAL);
@@ -487,6 +498,7 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 	walk->blocksize = crypto_skcipher_blocksize(tfm);
 	walk->ivsize = crypto_skcipher_ivsize(tfm);
 	walk->alignmask = crypto_skcipher_alignmask(tfm);
+	walk->tailsize = crypto_skcipher_tailsize(tfm);
 
 	if (alg->co.base.cra_type != &crypto_skcipher_type)
 		walk->stride = alg->co.chunksize;
@@ -824,6 +836,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
 	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
 	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
 	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
+	seq_printf(m, "tailsize     : %u\n", skcipher->tailsize);
 }
 
 static int __maybe_unused crypto_skcipher_report(
@@ -939,6 +952,7 @@ int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
 	struct crypto_alg *base = &alg->base;
 
 	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
+	    alg->tailsize > PAGE_SIZE / 8 ||
 	    alg->statesize > PAGE_SIZE / 2 ||
 	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
 		return -EINVAL;
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 7ae42afdcf3e..1e35e7719b22 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -86,6 +86,7 @@ struct skcipher_walk {
 	int flags;
 	unsigned int blocksize;
 	unsigned int stride;
+	unsigned int tailsize;
 	unsigned int alignmask;
 };
 
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index c8857d7bdb37..6223d81fed76 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -103,6 +103,8 @@ struct crypto_istat_cipher {
  * @chunksize: Equal to the block size except for stream ciphers such as
  *	       CTR where it is set to the underlying block size.
  * @statesize: Size of the internal state for the algorithm.
+ * @tailsize: Minimum number of bytes to withhold until the end of operation.
+ *	      Used by algorithms such as CTS to support chaining.
  * @stat: Statistics for cipher algorithm
  * @base: Definition of a generic crypto algorithm.
  */
@@ -112,6 +114,7 @@ struct crypto_istat_cipher {
 	unsigned int ivsize;		\
 	unsigned int chunksize;		\
 	unsigned int statesize;		\
+	unsigned int tailsize;		\
 					\
 	SKCIPHER_ALG_COMMON_STAT	\
 					\
@@ -543,6 +546,36 @@ static inline unsigned int crypto_lskcipher_statesize(
 	return crypto_lskcipher_alg(tfm)->co.statesize;
 }
 
+/**
+ * crypto_skcipher_tailsize() - obtain tail size
+ * @tfm: cipher handle
+ *
+ * Some algorithms need to withhold a number of blocks until the end.
+ * The tail size specifies how many bytes to withhold.
+ *
+ * Return: tail size in bytes
+ */
+static inline unsigned int crypto_skcipher_tailsize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_common(tfm)->tailsize;
+}
+
+/**
+ * crypto_lskcipher_tailsize() - obtain tail size
+ * @tfm: cipher handle
+ *
+ * Some algorithms need to withhold a number of blocks until the end.
+ * The tail size specifies how many bytes to withhold.
+ *
+ * Return: tail size in bytes
+ */
+static inline unsigned int crypto_lskcipher_tailsize(
+	struct crypto_lskcipher *tfm)
+{
+	return crypto_lskcipher_alg(tfm)->co.tailsize;
+}
+
 static inline unsigned int crypto_sync_skcipher_blocksize(
 	struct crypto_sync_skcipher *tfm)
 {
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 02/15] crypto: algif_skcipher - Add support for tailsize
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
  2023-12-02  4:55 ` [PATCH 01/15] crypto: skcipher - Add tailsize attribute Herbert Xu
@ 2023-12-02  5:42 ` Herbert Xu
  2023-12-04 10:24 ` [PATCH 04/15] crypto: xts - Convert from skcipher to lskcipher Herbert Xu
                   ` (13 subsequent siblings)
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-02  5:42 UTC (permalink / raw)
  To: Linux Crypto Mailing List

This patch makes use of the new tailsize attribute so that algorithms
such as CTS can be supported properly when a request is too large to
be processed in one go.
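
As an illustrative example (numbers only, using xts(aes), which later
in this series advertises a tailsize of two blocks): with bs = 16 and
ts = 32, the socket now waits until at least bs + ts = 48 bytes are
queued before processing a nonfinal chunk, and a recvmsg issued while
64 bytes are queued with more to come hands only 64 - 32 = 32 bytes to
the cipher, keeping the last two blocks back for the final request.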

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/algif_skcipher.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 02cea2149504..e22516c3d285 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -103,13 +103,14 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	struct af_alg_ctx *ctx = ask->private;
 	struct crypto_skcipher *tfm = pask->private;
 	unsigned int bs = crypto_skcipher_chunksize(tfm);
+	unsigned int ts = crypto_skcipher_tailsize(tfm);
 	struct af_alg_async_req *areq;
 	unsigned cflags = 0;
 	int err = 0;
 	size_t len = 0;
 
-	if (!ctx->init || (ctx->more && ctx->used < bs)) {
-		err = af_alg_wait_for_data(sk, flags, bs);
+	if (!ctx->init || (ctx->more && ctx->used < bs + ts)) {
+		err = af_alg_wait_for_data(sk, flags, bs + ts);
 		if (err)
 			return err;
 	}
@@ -130,6 +131,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	 * full block size buffers.
 	 */
 	if (ctx->more || len < ctx->used) {
+		if (ctx->more && ctx->used - ts < len)
+			len = ctx->used - ts;
 		len -= len % bs;
 		cflags |= CRYPTO_SKCIPHER_REQ_NOTFINAL;
 	}
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 04/15] crypto: xts - Convert from skcipher to lskcipher
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
  2023-12-02  4:55 ` [PATCH 01/15] crypto: skcipher - Add tailsize attribute Herbert Xu
  2023-12-02  5:42 ` [PATCH 02/15] crypto: algif_skcipher - Add support for tailsize Herbert Xu
@ 2023-12-04 10:24 ` Herbert Xu
  2023-12-05  6:09 ` [PATCH 05/15] crypto: skcipher - Add twopass attribute Herbert Xu
                   ` (12 subsequent siblings)
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-04 10:24 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Replace skcipher implementation with lskcipher.
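
For reference, a minimal sketch of driving the converted template
through the lskcipher API (illustrative only, not from this patch;
key, src, dst and len are assumed to exist, and the 64-byte key
corresponds to the doubled AES-256 keysize):

  struct crypto_lskcipher *tfm;
  u8 iv[XTS_BLOCK_SIZE] = { 0 };	/* tweak / sector number */
  int err;

  tfm = crypto_alloc_lskcipher("xts(aes)", 0, 0);
  if (IS_ERR(tfm))
          return PTR_ERR(tfm);

  err = crypto_lskcipher_setkey(tfm, key, 64) ?:
        crypto_lskcipher_encrypt(tfm, src, dst, len, iv);

  crypto_free_lskcipher(tfm);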

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/xts.c         | 572 +++++++++++++++++--------------------------
 include/crypto/xts.h |  24 +-
 2 files changed, 244 insertions(+), 352 deletions(-)

diff --git a/crypto/xts.c b/crypto/xts.c
index 672e1a3f0b0c..4a7b1c75bd14 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -7,23 +7,21 @@
  * Based on ecb.c
  * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
  */
-#include <crypto/internal/cipher.h>
+
+#include <crypto/b128ops.h>
+#include <crypto/gf128mul.h>
 #include <crypto/internal/skcipher.h>
-#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
 #include <linux/slab.h>
-
-#include <crypto/xts.h>
-#include <crypto/b128ops.h>
-#include <crypto/gf128mul.h>
+#include <linux/string.h>
 
 struct xts_tfm_ctx {
-	struct crypto_skcipher *child;
-	struct crypto_cipher *tweak;
+	struct crypto_lskcipher *child;
+	struct crypto_lskcipher *tweak;
 };
 
 struct xts_instance_ctx {
@@ -31,26 +29,21 @@ struct xts_instance_ctx {
 	struct crypto_cipher_spawn tweak_spawn;
 };
 
-struct xts_request_ctx {
-	le128 t;
-	struct scatterlist *tail;
-	struct scatterlist sg[2];
-	struct skcipher_request subreq;
-};
-
-static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
+static int xts_setkey(struct crypto_lskcipher *parent, const u8 *key,
 		      unsigned int keylen)
 {
-	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
-	struct crypto_skcipher *child;
-	struct crypto_cipher *tweak;
+	struct xts_tfm_ctx *ctx = crypto_lskcipher_ctx(parent);
+	struct crypto_lskcipher *child;
+	struct crypto_lskcipher *tweak;
+	unsigned flags;
 	int err;
 
-	err = xts_verify_key(parent, key, keylen);
+	err = xts_verify_key_lskcipher(parent, key, keylen);
 	if (err)
 		return err;
 
 	keylen /= 2;
+	flags = crypto_lskcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK;
 
 	/* we need two cipher instances: one to compute the initial 'tweak'
 	 * by encrypting the IV (usually the 'plain' iv) and the other
@@ -58,19 +51,17 @@ static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
 
 	/* tweak cipher, uses Key2 i.e. the second half of *key */
 	tweak = ctx->tweak;
-	crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
-				       CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(tweak, key + keylen, keylen);
+	crypto_lskcipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
+	crypto_lskcipher_set_flags(tweak, flags);
+	err = crypto_lskcipher_setkey(tweak, key + keylen, keylen);
 	if (err)
 		return err;
 
 	/* data cipher, uses Key1 i.e. the first half of *key */
 	child = ctx->child;
-	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
-					 CRYPTO_TFM_REQ_MASK);
-	return crypto_skcipher_setkey(child, key, keylen);
+	crypto_lskcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_lskcipher_set_flags(child, flags);
+	return crypto_lskcipher_setkey(child, key, keylen);
 }
 
 /*
@@ -79,359 +70,247 @@ static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
  * mutliple calls to the 'ecb(..)' instance, which usually would be slower than
  * just doing the gf128mul_x_ble() calls again.
  */
-static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
-			 bool enc)
+static int xts_xor_tweak(struct crypto_lskcipher *tfm,
+			 const u8 *src, u8 *dst, unsigned len,
+			 le128 *t0, u32 flags, bool second_pass, bool enc)
 {
-	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
+	const bool cts = (flags & CRYPTO_LSKCIPHER_FLAG_FINAL) &&
+			 len % XTS_BLOCK_SIZE;
 	const int bs = XTS_BLOCK_SIZE;
-	struct skcipher_walk w;
-	le128 t = rctx->t;
+	unsigned int avail = len;
+	const le128 *wsrc;
+	le128 t = *t0;
+	le128 *wdst;
+
+	wsrc = (const le128 *)src;
+	wdst = (le128 *)dst;
+
+	do {
+		if (unlikely(cts) && avail < 2 * XTS_BLOCK_SIZE) {
+			if (!enc) {
+				if (second_pass)
+					*t0 = t;
+				gf128mul_x_ble(&t, &t);
+			}
+			le128_xor(wdst, &t, wsrc);
+			if (enc && second_pass)
+				gf128mul_x_ble(t0, &t);
+			return 0;
+		}
+
+		le128_xor(wdst++, &t, wsrc++);
+		gf128mul_x_ble(&t, &t);
+	} while ((avail -= bs) >= bs);
+
+	if (second_pass && !(flags & CRYPTO_LSKCIPHER_FLAG_FINAL))
+		*t0 = t;
+
+	return 0;
+}
+
+static int xts_xor_tweak_pre(struct crypto_lskcipher *tfm,
+			     const u8 *src, u8 *dst, unsigned len,
+			     le128 *t, u32 flags, bool enc)
+{
+	return xts_xor_tweak(tfm, src, dst, len, t, flags, false, enc);
+}
+
+static int xts_xor_tweak_post(struct crypto_lskcipher *tfm,
+			      const u8 *src, u8 *dst, unsigned len,
+			      le128 *t, u32 flags, bool enc)
+{
+	return xts_xor_tweak(tfm, src, dst, len, t, flags, true, enc);
+}
+
+static int xts_cts_pre(struct crypto_lskcipher *tfm, const u8 *src, u8 *dst,
+		       unsigned len, le128 *t0)
+{
+	int offset = (len & ~(XTS_BLOCK_SIZE - 1)) - XTS_BLOCK_SIZE;
+	int tail = len % XTS_BLOCK_SIZE;
+	le128 b[2];
+
+	b[0] = *(le128 *)(dst + offset);
+	b[1] = b[0];
+	memcpy(b, src + offset + XTS_BLOCK_SIZE, tail);
+
+	le128_xor(b, t0, b);
+
+	memcpy(dst + offset, b, XTS_BLOCK_SIZE + tail);
+
+	return 0;
+}
+
+static int xts_cts_post(struct crypto_lskcipher *tfm, u8 *dst,
+			unsigned len, le128 *t0)
+{
+	int offset = (len & ~(XTS_BLOCK_SIZE - 1)) - XTS_BLOCK_SIZE;
+	le128 *b = (le128 *)(dst + offset);
+
+	le128_xor(b, t0, b);
+
+	return 0;
+}
+
+static int xts_init_crypt(struct crypto_lskcipher *tfm, unsigned len, u8 *iv,
+			  u32 flags)
+{
+	const struct xts_tfm_ctx *ctx = crypto_lskcipher_ctx(tfm);
+
+	if (!len)
+		return -EINVAL;
+
+	if ((flags & CRYPTO_LSKCIPHER_FLAG_CONT))
+		return 0;
+
+	if (len < XTS_BLOCK_SIZE)
+		return -EINVAL;
+
+	/* calculate first value of T */
+	return crypto_lskcipher_encrypt(ctx->tweak, iv, iv,
+					XTS_BLOCK_SIZE, NULL);
+}
+
+static int xts_encrypt(struct crypto_lskcipher *tfm, const u8 *src, u8 *dst,
+		       unsigned len, u8 *iv, u32 flags)
+{
+	struct xts_tfm_ctx *ctx = crypto_lskcipher_ctx(tfm);
+	union {
+		le128 t128;
+		u8 t8[16];
+	} t = {
+		.t128 = *(le128 *)iv,
+	};
 	int err;
 
-	if (second_pass) {
-		req = &rctx->subreq;
-		/* set to our TFM to enforce correct alignment: */
-		skcipher_request_set_tfm(req, tfm);
+	err = xts_init_crypt(tfm, len, t.t8, flags) ?:
+	      xts_xor_tweak_pre(tfm, src, dst, len, &t.t128, flags, true) ?:
+	      crypto_lskcipher_encrypt(ctx->child, dst, dst,
+				       len & ~(XTS_BLOCK_SIZE - 1), NULL) ?:
+	      xts_xor_tweak_post(tfm, dst, dst, len, &t.t128, flags, true);
+
+	if (!err && unlikely(len % XTS_BLOCK_SIZE)) {
+		if ((flags & CRYPTO_LSKCIPHER_FLAG_FINAL)) {
+			int offset = (len & ~(XTS_BLOCK_SIZE - 1)) -
+				     XTS_BLOCK_SIZE;
+
+			err = xts_cts_pre(tfm, src, dst, len, &t.t128) ?:
+			      crypto_lskcipher_encrypt(ctx->child,
+						       dst + offset,
+						       dst + offset,
+						       XTS_BLOCK_SIZE,
+						       NULL) ?:
+			      xts_cts_post(tfm, dst, len, &t.t128);
+		} else
+			err = len % XTS_BLOCK_SIZE;
 	}
-	err = skcipher_walk_virt(&w, req, false);
 
-	while (w.nbytes) {
-		unsigned int avail = w.nbytes;
-		le128 *wsrc;
-		le128 *wdst;
-
-		wsrc = w.src.virt.addr;
-		wdst = w.dst.virt.addr;
-
-		do {
-			if (unlikely(cts) &&
-			    w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
-				if (!enc) {
-					if (second_pass)
-						rctx->t = t;
-					gf128mul_x_ble(&t, &t);
-				}
-				le128_xor(wdst, &t, wsrc);
-				if (enc && second_pass)
-					gf128mul_x_ble(&rctx->t, &t);
-				skcipher_walk_done(&w, avail - bs);
-				return 0;
-			}
-
-			le128_xor(wdst++, &t, wsrc++);
-			gf128mul_x_ble(&t, &t);
-		} while ((avail -= bs) >= bs);
-
-		err = skcipher_walk_done(&w, avail);
-	}
+	if (err < 0 || (flags & CRYPTO_LSKCIPHER_FLAG_FINAL))
+		memzero_explicit(&t, sizeof(t));
+	*(le128 *)iv = t.t128;
 
 	return err;
 }
 
-static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
+static int xts_decrypt(struct crypto_lskcipher *tfm, const u8 *src, u8 *dst,
+		       unsigned len, u8 *iv, u32 flags)
 {
-	return xts_xor_tweak(req, false, enc);
-}
-
-static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
-{
-	return xts_xor_tweak(req, true, enc);
-}
-
-static void xts_cts_done(void *data, int err)
-{
-	struct skcipher_request *req = data;
-	le128 b;
-
-	if (!err) {
-		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
-
-		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
-		le128_xor(&b, &rctx->t, &b);
-		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
-	}
-
-	skcipher_request_complete(req, err);
-}
-
-static int xts_cts_final(struct skcipher_request *req,
-			 int (*crypt)(struct skcipher_request *req))
-{
-	const struct xts_tfm_ctx *ctx =
-		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
-	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq = &rctx->subreq;
-	int tail = req->cryptlen % XTS_BLOCK_SIZE;
-	le128 b[2];
+	struct xts_tfm_ctx *ctx = crypto_lskcipher_ctx(tfm);
+	union {
+		le128 t128;
+		u8 t8[16];
+	} t = {
+		.t128 = *(le128 *)iv,
+	};
 	int err;
 
-	rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
-				      offset - XTS_BLOCK_SIZE);
+	err = xts_init_crypt(tfm, len, t.t8, flags) ?:
+	      xts_xor_tweak_pre(tfm, src, dst, len, &t.t128, flags, false) ?:
+	      crypto_lskcipher_decrypt(ctx->child, dst, dst,
+				       len & ~(XTS_BLOCK_SIZE - 1), NULL) ?:
+	      xts_xor_tweak_post(tfm, dst, dst, len, &t.t128, flags, false);
 
-	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
-	b[1] = b[0];
-	scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
+	if (!err && unlikely(len % XTS_BLOCK_SIZE)) {
+		if ((flags & CRYPTO_LSKCIPHER_FLAG_FINAL)) {
+			int offset = (len & ~(XTS_BLOCK_SIZE - 1)) -
+				     XTS_BLOCK_SIZE;
 
-	le128_xor(b, &rctx->t, b);
+			err = xts_cts_pre(tfm, src, dst, len, &t.t128) ?:
+			      crypto_lskcipher_decrypt(ctx->child,
+						       dst + offset,
+						       dst + offset,
+						       XTS_BLOCK_SIZE,
+						       NULL) ?:
+			      xts_cts_post(tfm, dst, len, &t.t128);
+		} else
+			err = len % XTS_BLOCK_SIZE;
+	}
 
-	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
+	if (err < 0 || (flags & CRYPTO_LSKCIPHER_FLAG_FINAL))
+		memzero_explicit(&t, sizeof(t));
+	*(le128 *)iv = t.t128;
 
-	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
-				      req);
-	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
-				   XTS_BLOCK_SIZE, NULL);
+	return err;
+}
 
-	err = crypt(subreq);
-	if (err)
-		return err;
+static int xts_init_tfm(struct crypto_lskcipher *tfm)
+{
+	struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
+	struct xts_tfm_ctx *ctx = crypto_lskcipher_ctx(tfm);
+	struct crypto_lskcipher_spawn *spawn;
+	struct crypto_lskcipher *cipher;
 
-	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
-	le128_xor(b, &rctx->t, b);
-	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
+	spawn = lskcipher_instance_ctx(inst);
+	cipher = crypto_spawn_lskcipher(spawn);
+	if (IS_ERR(cipher))
+		return PTR_ERR(cipher);
+
+	ctx->child = cipher;
+
+	cipher = crypto_spawn_lskcipher(spawn);
+	if (IS_ERR(cipher)) {
+		crypto_free_lskcipher(ctx->child);
+		return PTR_ERR(cipher);
+	}
+
+	ctx->tweak = cipher;
 
 	return 0;
 }
 
-static void xts_encrypt_done(void *data, int err)
+static void xts_exit_tfm(struct crypto_lskcipher *tfm)
 {
-	struct skcipher_request *req = data;
+	struct xts_tfm_ctx *ctx = crypto_lskcipher_ctx(tfm);
 
-	if (!err) {
-		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
-
-		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-		err = xts_xor_tweak_post(req, true);
-
-		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
-			err = xts_cts_final(req, crypto_skcipher_encrypt);
-			if (err == -EINPROGRESS || err == -EBUSY)
-				return;
-		}
-	}
-
-	skcipher_request_complete(req, err);
-}
-
-static void xts_decrypt_done(void *data, int err)
-{
-	struct skcipher_request *req = data;
-
-	if (!err) {
-		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
-
-		rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-		err = xts_xor_tweak_post(req, false);
-
-		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
-			err = xts_cts_final(req, crypto_skcipher_decrypt);
-			if (err == -EINPROGRESS || err == -EBUSY)
-				return;
-		}
-	}
-
-	skcipher_request_complete(req, err);
-}
-
-static int xts_init_crypt(struct skcipher_request *req,
-			  crypto_completion_t compl)
-{
-	const struct xts_tfm_ctx *ctx =
-		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq = &rctx->subreq;
-
-	if (req->cryptlen < XTS_BLOCK_SIZE)
-		return -EINVAL;
-
-	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_callback(subreq, req->base.flags, compl, req);
-	skcipher_request_set_crypt(subreq, req->dst, req->dst,
-				   req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
-
-	/* calculate first value of T */
-	crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
-
-	return 0;
-}
-
-static int xts_encrypt(struct skcipher_request *req)
-{
-	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq = &rctx->subreq;
-	int err;
-
-	err = xts_init_crypt(req, xts_encrypt_done) ?:
-	      xts_xor_tweak_pre(req, true) ?:
-	      crypto_skcipher_encrypt(subreq) ?:
-	      xts_xor_tweak_post(req, true);
-
-	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
-		return err;
-
-	return xts_cts_final(req, crypto_skcipher_encrypt);
-}
-
-static int xts_decrypt(struct skcipher_request *req)
-{
-	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
-	struct skcipher_request *subreq = &rctx->subreq;
-	int err;
-
-	err = xts_init_crypt(req, xts_decrypt_done) ?:
-	      xts_xor_tweak_pre(req, false) ?:
-	      crypto_skcipher_decrypt(subreq) ?:
-	      xts_xor_tweak_post(req, false);
-
-	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
-		return err;
-
-	return xts_cts_final(req, crypto_skcipher_decrypt);
-}
-
-static int xts_init_tfm(struct crypto_skcipher *tfm)
-{
-	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
-	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
-	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_skcipher *child;
-	struct crypto_cipher *tweak;
-
-	child = crypto_spawn_skcipher(&ictx->spawn);
-	if (IS_ERR(child))
-		return PTR_ERR(child);
-
-	ctx->child = child;
-
-	tweak = crypto_spawn_cipher(&ictx->tweak_spawn);
-	if (IS_ERR(tweak)) {
-		crypto_free_skcipher(ctx->child);
-		return PTR_ERR(tweak);
-	}
-
-	ctx->tweak = tweak;
-
-	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
-					 sizeof(struct xts_request_ctx));
-
-	return 0;
-}
-
-static void xts_exit_tfm(struct crypto_skcipher *tfm)
-{
-	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_free_skcipher(ctx->child);
-	crypto_free_cipher(ctx->tweak);
-}
-
-static void xts_free_instance(struct skcipher_instance *inst)
-{
-	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ictx->spawn);
-	crypto_drop_cipher(&ictx->tweak_spawn);
-	kfree(inst);
+	crypto_free_lskcipher(ctx->tweak);
+	crypto_free_lskcipher(ctx->child);
 }
 
 static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct skcipher_alg_common *alg;
-	char name[CRYPTO_MAX_ALG_NAME];
-	struct skcipher_instance *inst;
-	struct xts_instance_ctx *ctx;
-	const char *cipher_name;
-	u32 mask;
+	struct lskcipher_instance *inst;
 	int err;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
-	if (err)
-		return err;
-
-	cipher_name = crypto_attr_alg_name(tb[1]);
-	if (IS_ERR(cipher_name))
-		return PTR_ERR(cipher_name);
-
-	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
-	if (!inst)
-		return -ENOMEM;
-
-	ctx = skcipher_instance_ctx(inst);
-
-	err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
-				   cipher_name, 0, mask);
-	if (err == -ENOENT) {
-		err = -ENAMETOOLONG;
-		if (snprintf(name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
-			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
-			goto err_free_inst;
-
-		err = crypto_grab_skcipher(&ctx->spawn,
-					   skcipher_crypto_instance(inst),
-					   name, 0, mask);
-	}
-
-	if (err)
-		goto err_free_inst;
-
-	alg = crypto_spawn_skcipher_alg_common(&ctx->spawn);
+	inst = lskcipher_alloc_instance_simple(tmpl, tb);
+	if (IS_ERR(inst))
+		return PTR_ERR(inst);
 
 	err = -EINVAL;
-	if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
+	if (inst->alg.co.base.cra_blocksize != XTS_BLOCK_SIZE)
 		goto err_free_inst;
 
-	if (alg->ivsize)
+	if (inst->alg.co.ivsize)
 		goto err_free_inst;
 
-	err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
-				  &alg->base);
-	if (err)
-		goto err_free_inst;
+	inst->alg.co.base.cra_blocksize = 1;
+	inst->alg.co.base.cra_alignmask |= (__alignof__(le128) - 1);
 
-	err = -EINVAL;
-	cipher_name = alg->base.cra_name;
+	inst->alg.co.ivsize = XTS_BLOCK_SIZE;
+	inst->alg.co.chunksize = XTS_BLOCK_SIZE;
+	inst->alg.co.tailsize = XTS_BLOCK_SIZE * 2;
+	inst->alg.co.min_keysize *= 2;
+	inst->alg.co.max_keysize *= 2;
 
-	/* Alas we screwed up the naming so we have to mangle the
-	 * cipher name.
-	 */
-	if (!strncmp(cipher_name, "ecb(", 4)) {
-		int len;
-
-		len = strscpy(name, cipher_name + 4, sizeof(name));
-		if (len < 2)
-			goto err_free_inst;
-
-		if (name[len - 1] != ')')
-			goto err_free_inst;
-
-		name[len - 1] = 0;
-
-		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-			     "xts(%s)", name) >= CRYPTO_MAX_ALG_NAME) {
-			err = -ENAMETOOLONG;
-			goto err_free_inst;
-		}
-	} else
-		goto err_free_inst;
-
-	err = crypto_grab_cipher(&ctx->tweak_spawn,
-				 skcipher_crypto_instance(inst), name, 0, mask);
-	if (err)
-		goto err_free_inst;
-
-	inst->alg.base.cra_priority = alg->base.cra_priority;
-	inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
-	inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
-				       (__alignof__(u64) - 1);
-
-	inst->alg.ivsize = XTS_BLOCK_SIZE;
-	inst->alg.min_keysize = alg->min_keysize * 2;
-	inst->alg.max_keysize = alg->max_keysize * 2;
-
-	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
+	inst->alg.co.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
 
 	inst->alg.init = xts_init_tfm;
 	inst->alg.exit = xts_exit_tfm;
@@ -440,12 +319,10 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.encrypt = xts_encrypt;
 	inst->alg.decrypt = xts_decrypt;
 
-	inst->free = xts_free_instance;
-
-	err = skcipher_register_instance(tmpl, inst);
+	err = lskcipher_register_instance(tmpl, inst);
 	if (err) {
 err_free_inst:
-		xts_free_instance(inst);
+		inst->free(inst);
 	}
 	return err;
 }
@@ -472,5 +349,4 @@ module_exit(xts_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XTS block cipher mode");
 MODULE_ALIAS_CRYPTO("xts");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
 MODULE_SOFTDEP("pre: ecb");
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 15b16c4853d8..0287540e2ced 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -5,11 +5,12 @@
 #include <crypto/b128ops.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/fips.h>
+#include <linux/types.h>
 
 #define XTS_BLOCK_SIZE 16
 
-static inline int xts_verify_key(struct crypto_skcipher *tfm,
-				 const u8 *key, unsigned int keylen)
+static inline int xts_verify_key_common(bool forbid_weak_keys,
+					const u8 *key, unsigned int keylen)
 {
 	/*
 	 * key consists of keys of equal size concatenated, therefore
@@ -29,12 +30,27 @@ static inline int xts_verify_key(struct crypto_skcipher *tfm,
 	 * Ensure that the AES and tweak key are not identical when
 	 * in FIPS mode or the FORBID_WEAK_KEYS flag is set.
 	 */
-	if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
-			      CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
+	if ((fips_enabled || forbid_weak_keys) &&
 	    !crypto_memneq(key, key + (keylen / 2), keylen / 2))
 		return -EINVAL;
 
 	return 0;
 }
 
+static inline int xts_verify_key(struct crypto_skcipher *tfm,
+				 const u8 *key, unsigned int keylen)
+{
+	return xts_verify_key_common(crypto_skcipher_get_flags(tfm) &
+				     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS,
+				     key, keylen);
+}
+
+static inline int xts_verify_key_lskcipher(struct crypto_lskcipher *tfm,
+					   const u8 *key, unsigned int keylen)
+{
+	return xts_verify_key_common(crypto_lskcipher_get_flags(tfm) &
+				     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS,
+				     key, keylen);
+}
+
 #endif  /* _CRYPTO_XTS_H */
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 05/15] crypto: skcipher - Add twopass attribute
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (2 preceding siblings ...)
  2023-12-04 10:24 ` [PATCH 04/15] crypto: xts - Convert from skcipher to lskcipher Herbert Xu
@ 2023-12-05  6:09 ` Herbert Xu
  2023-12-05  6:13 ` [PATCH 06/15] crypto: algif_skcipher - Disallow nonincremental algorithms Herbert Xu
                   ` (11 subsequent siblings)
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-05  6:09 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Algorithms such as adiantum require two passes over the input and
therefore cannot support incremental processing.  Add a new
attribute to identify them.
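
As a quick sketch (illustrative only; adiantum itself only gains the
flag in a later patch of this series), the producer and consumer sides
of the new attribute look roughly like this:

  /* Algorithm side: a two-pass template marks itself as such. */
  inst->alg.co.twopass = true;

  /* Caller side: only feed the data piecewise when that is allowed. */
  if (!crypto_skcipher_isincremental(tfm))
          return -ENOSYS;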

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lskcipher.c        |  2 ++
 crypto/skcipher.c         |  2 ++
 include/crypto/skcipher.h | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 36 insertions(+)

diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 260666f34500..bc54cfc2734d 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -301,6 +301,8 @@ static void __maybe_unused crypto_lskcipher_show(
 	seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
 	seq_printf(m, "statesize    : %u\n", skcipher->co.statesize);
 	seq_printf(m, "tailsize     : %u\n", skcipher->co.tailsize);
+	seq_printf(m, "incremental  : %s\n", skcipher->co.twopass ?
+					     "no" : "yes");
 }
 
 static int __maybe_unused crypto_lskcipher_report(
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 600ec5735ce0..40e836be354e 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -837,6 +837,8 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
 	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
 	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
 	seq_printf(m, "tailsize     : %u\n", skcipher->tailsize);
+	seq_printf(m, "incremental  : %s\n", skcipher->twopass ?
+					     "no" : "yes");
 }
 
 static int __maybe_unused crypto_skcipher_report(
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 6223d81fed76..3833a2ab1951 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -105,6 +105,7 @@ struct crypto_istat_cipher {
  * @statesize: Size of the internal state for the algorithm.
  * @tailsize: Minimum number of bytes to withhold until the end of operation.
  *	      Used by algorithms such as CTS to support chaining.
+ * @twopass: The algorithm needs two passes over the input, e.g., adiantum.
  * @stat: Statistics for cipher algorithm
  * @base: Definition of a generic crypto algorithm.
  */
@@ -115,6 +116,7 @@ struct crypto_istat_cipher {
 	unsigned int chunksize;		\
 	unsigned int statesize;		\
 	unsigned int tailsize;		\
+	bool twopass;			\
 					\
 	SKCIPHER_ALG_COMMON_STAT	\
 					\
@@ -576,6 +578,36 @@ static inline unsigned int crypto_lskcipher_tailsize(
 	return crypto_lskcipher_alg(tfm)->co.tailsize;
 }
 
+/**
+ * crypto_skcipher_isincremental() - check incremental ability
+ * @tfm: cipher handle
+ *
+ * Most skcipher algorithms can accept data in an incremental fashion.
+ * However, some such as adiantum cannot as they need to pass through
+ * the data twice.
+ *
+ * Return: true if algorithm can accept data incrementally.
+ */
+static inline bool crypto_skcipher_isincremental(struct crypto_skcipher *tfm)
+{
+	return !crypto_skcipher_alg_common(tfm)->twopass;
+}
+
+/**
+ * crypto_lskcipher_isincremental() - check incremental ability
+ * @tfm: cipher handle
+ *
+ * Most lskcipher algorithms can accept data in an incremental fashion.
+ * However, some such as adiantum cannot as they need to pass through
+ * the data twice.
+ *
+ * Return: true if algorithm can accept data incrementally.
+ */
+static inline bool crypto_lskcipher_isincremental(struct crypto_lskcipher *tfm)
+{
+	return !crypto_lskcipher_alg(tfm)->co.twopass;
+}
+
 static inline unsigned int crypto_sync_skcipher_blocksize(
 	struct crypto_sync_skcipher *tfm)
 {
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 06/15] crypto: algif_skcipher - Disallow nonincremental algorithms
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (3 preceding siblings ...)
  2023-12-05  6:09 ` [PATCH 05/15] crypto: skcipher - Add twopass attribute Herbert Xu
@ 2023-12-05  6:13 ` Herbert Xu
  2024-02-14 22:56   ` Eric Biggers
  2023-12-05  9:52 ` [PATCH 07/15] crypto: adiantum - Use lskcipher instead of cipher Herbert Xu
                   ` (10 subsequent siblings)
  15 siblings, 1 reply; 30+ messages in thread
From: Herbert Xu @ 2023-12-05  6:13 UTC (permalink / raw)
  To: Linux Crypto Mailing List

As algif_skcipher does not support nonincremental algorithms, check
for them and return ENOSYS.  If necessary, support for them could be
added in the same way as for AEAD.
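
For illustration, a hypothetical userspace snippet (not from this
patch; error checking omitted) showing the new behaviour: reading
while more data is still promised takes the nonfinal path, which a
two-pass cipher such as adiantum cannot serve, so the read now fails
with ENOSYS:

  #include <linux/if_alg.h>
  #include <sys/socket.h>
  #include <unistd.h>

  int main(void)
  {
          struct sockaddr_alg sa = {
                  .salg_family = AF_ALG,
                  .salg_type   = "skcipher",
                  .salg_name   = "adiantum(xchacha12,aes)",
          };
          char key[32] = { 0 }, buf[4096] = { 0 };
          int tfmfd, opfd;

          tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
          bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
          setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
          opfd = accept(tfmfd, NULL, 0);

          send(opfd, buf, sizeof(buf), MSG_MORE);	/* more promised */
          return read(opfd, buf, sizeof(buf)) < 0;	/* -ENOSYS now */
  }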

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/algif_skcipher.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index e22516c3d285..ac59fd9ea4e4 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -131,6 +131,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
 	 * full block size buffers.
 	 */
 	if (ctx->more || len < ctx->used) {
+		err = -ENOSYS;
+		if (!crypto_skcipher_isincremental(tfm))
+			goto free;
+
 		if (ctx->more && ctx->used - ts < len)
 			len = ctx->used - ts;
 		len -= len % bs;
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 07/15] crypto: adiantum - Use lskcipher instead of cipher
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (4 preceding siblings ...)
  2023-12-05  6:13 ` [PATCH 06/15] crypto: algif_skcipher - Disallow nonincremental algorithms Herbert Xu
@ 2023-12-05  9:52 ` Herbert Xu
  2023-12-06  4:46 ` [PATCH 08/15] crypto: skcipher - Add incremental support to lskcipher wrapper Herbert Xu
                   ` (9 subsequent siblings)
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-05  9:52 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Use the new lskcipher interface for the simple block cipher.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/adiantum.c | 130 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 96 insertions(+), 34 deletions(-)

diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index 60f3883b736a..ee55b1f8565c 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -32,7 +32,6 @@
 
 #include <crypto/b128ops.h>
 #include <crypto/chacha.h>
-#include <crypto/internal/cipher.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/poly1305.h>
 #include <crypto/internal/skcipher.h>
@@ -63,13 +62,13 @@
 
 struct adiantum_instance_ctx {
 	struct crypto_skcipher_spawn streamcipher_spawn;
-	struct crypto_cipher_spawn blockcipher_spawn;
+	struct crypto_lskcipher_spawn blockcipher_spawn;
 	struct crypto_shash_spawn hash_spawn;
 };
 
 struct adiantum_tfm_ctx {
 	struct crypto_skcipher *streamcipher;
-	struct crypto_cipher *blockcipher;
+	struct crypto_lskcipher *blockcipher;
 	struct crypto_shash *hash;
 	struct poly1305_core_key header_hash_key;
 };
@@ -157,12 +156,12 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	keyp = data->derived_keys;
 
 	/* Set the block cipher key (K_E) */
-	crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
-	crypto_cipher_set_flags(tctx->blockcipher,
-				crypto_skcipher_get_flags(tfm) &
-				CRYPTO_TFM_REQ_MASK);
-	err = crypto_cipher_setkey(tctx->blockcipher, keyp,
-				   BLOCKCIPHER_KEY_SIZE);
+	crypto_lskcipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
+	crypto_lskcipher_set_flags(tctx->blockcipher,
+				   crypto_skcipher_get_flags(tfm) &
+				   CRYPTO_TFM_REQ_MASK);
+	err = crypto_lskcipher_setkey(tctx->blockcipher, keyp,
+				     BLOCKCIPHER_KEY_SIZE);
 	if (err)
 		goto out;
 	keyp += BLOCKCIPHER_KEY_SIZE;
@@ -287,9 +286,14 @@ static int adiantum_finish(struct skcipher_request *req)
 	int err;
 
 	/* If decrypting, decrypt C_M with the block cipher to get P_M */
-	if (!rctx->enc)
-		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
-					  rctx->rbuf.bytes);
+	if (!rctx->enc) {
+		err = crypto_lskcipher_decrypt(tctx->blockcipher,
+					       rctx->rbuf.bytes,
+					       rctx->rbuf.bytes,
+					       BLOCKCIPHER_BLOCK_SIZE, NULL);
+		if (err)
+			return err;
+	}
 
 	/*
 	 * Second hash step
@@ -379,9 +383,14 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
 	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
 
 	/* If encrypting, encrypt P_M with the block cipher to get C_M */
-	if (enc)
-		crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
-					  rctx->rbuf.bytes);
+	if (enc) {
+		err = crypto_lskcipher_encrypt(tctx->blockcipher,
+					       rctx->rbuf.bytes,
+					       rctx->rbuf.bytes,
+					       BLOCKCIPHER_BLOCK_SIZE, NULL);
+		if (err)
+			return err;
+	}
 
 	/* Initialize the rest of the XChaCha IV (first part is C_M) */
 	BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
@@ -430,7 +439,7 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm)
 	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
 	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 	struct crypto_skcipher *streamcipher;
-	struct crypto_cipher *blockcipher;
+	struct crypto_lskcipher *blockcipher;
 	struct crypto_shash *hash;
 	unsigned int subreq_size;
 	int err;
@@ -439,7 +448,7 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm)
 	if (IS_ERR(streamcipher))
 		return PTR_ERR(streamcipher);
 
-	blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
+	blockcipher = crypto_spawn_lskcipher(&ictx->blockcipher_spawn);
 	if (IS_ERR(blockcipher)) {
 		err = PTR_ERR(blockcipher);
 		goto err_free_streamcipher;
@@ -470,7 +479,7 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm)
 	return 0;
 
 err_free_blockcipher:
-	crypto_free_cipher(blockcipher);
+	crypto_free_lskcipher(blockcipher);
 err_free_streamcipher:
 	crypto_free_skcipher(streamcipher);
 	return err;
@@ -481,7 +490,7 @@ static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
 	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
 
 	crypto_free_skcipher(tctx->streamcipher);
-	crypto_free_cipher(tctx->blockcipher);
+	crypto_free_lskcipher(tctx->blockcipher);
 	crypto_free_shash(tctx->hash);
 }
 
@@ -490,7 +499,7 @@ static void adiantum_free_instance(struct skcipher_instance *inst)
 	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
 
 	crypto_drop_skcipher(&ictx->streamcipher_spawn);
-	crypto_drop_cipher(&ictx->blockcipher_spawn);
+	crypto_drop_lskcipher(&ictx->blockcipher_spawn);
 	crypto_drop_shash(&ictx->hash_spawn);
 	kfree(inst);
 }
@@ -500,17 +509,21 @@ static void adiantum_free_instance(struct skcipher_instance *inst)
  * See the comment at the beginning of this file.
  */
 static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
-					  struct crypto_alg *blockcipher_alg,
+					  struct lskcipher_alg *blockcipher_alg,
 					  struct shash_alg *hash_alg)
 {
 	if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
 	    strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
 		return false;
 
-	if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
-	    blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
+	if (blockcipher_alg->co.min_keysize > BLOCKCIPHER_KEY_SIZE ||
+	    blockcipher_alg->co.max_keysize < BLOCKCIPHER_KEY_SIZE)
 		return false;
-	if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
+	if (blockcipher_alg->co.base.cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
+		return false;
+	if (blockcipher_alg->co.ivsize)
+		return false;
+	if (blockcipher_alg->co.statesize)
 		return false;
 
 	if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
@@ -526,8 +539,12 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	struct skcipher_instance *inst;
 	struct adiantum_instance_ctx *ictx;
 	struct skcipher_alg_common *streamcipher_alg;
-	struct crypto_alg *blockcipher_alg;
+	char ecb_driver_name[CRYPTO_MAX_ALG_NAME];
+	struct lskcipher_alg *blockcipher_alg;
+	char ecb_name[CRYPTO_MAX_ALG_NAME];
+	const char *cipher_driver_name;
 	struct shash_alg *hash_alg;
+	const char *cipher_name;
 	int err;
 
 	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
@@ -548,12 +565,27 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);
 
 	/* Block cipher, e.g. "aes" */
-	err = crypto_grab_cipher(&ictx->blockcipher_spawn,
-				 skcipher_crypto_instance(inst),
-				 crypto_attr_alg_name(tb[2]), 0, mask);
+	cipher_name = crypto_attr_alg_name(tb[2]);
+	cipher_driver_name = cipher_name;
+	err = crypto_grab_lskcipher(&ictx->blockcipher_spawn,
+				    skcipher_crypto_instance(inst),
+				    cipher_name, 0, mask);
+
+	ecb_name[0] = 0;
+	if (err == -ENOENT) {
+		err = -ENAMETOOLONG;
+		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
+			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
+			goto err_free_inst;
+
+		err = crypto_grab_lskcipher(&ictx->blockcipher_spawn,
+					    skcipher_crypto_instance(inst),
+					    ecb_name, 0, mask);
+	}
+
 	if (err)
 		goto err_free_inst;
-	blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);
+	blockcipher_alg = crypto_spawn_lskcipher_alg(&ictx->blockcipher_spawn);
 
 	/* NHPoly1305 ε-∆U hash function */
 	nhpoly1305_name = crypto_attr_alg_name(tb[3]);
@@ -571,22 +603,52 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 					   hash_alg)) {
 		pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
 			streamcipher_alg->base.cra_name,
-			blockcipher_alg->cra_name, hash_alg->base.cra_name);
+			blockcipher_alg->co.base.cra_name,
+			hash_alg->base.cra_name);
 		err = -EINVAL;
 		goto err_free_inst;
 	}
 
 	/* Instance fields */
 
+	cipher_name = blockcipher_alg->co.base.cra_name;
+	cipher_driver_name = blockcipher_alg->co.base.cra_driver_name;
+	if (ecb_name[0]) {
+		int len;
+
+		err = -EINVAL;
+		len = strscpy(ecb_name, &blockcipher_alg->co.base.cra_name[4],
+			      sizeof(ecb_name));
+		if (len < 2)
+			goto err_free_inst;
+
+		if (ecb_name[len - 1] != ')')
+			goto err_free_inst;
+
+		ecb_name[len - 1] = 0;
+		cipher_name = ecb_name;
+
+		len = strscpy(ecb_driver_name, &blockcipher_alg->co.base.cra_driver_name[4],
+			      sizeof(ecb_driver_name));
+		if (len < 2)
+			goto err_free_inst;
+
+		if (ecb_driver_name[len - 1] != ')')
+			goto err_free_inst;
+
+		ecb_driver_name[len - 1] = 0;
+		cipher_driver_name = ecb_driver_name;
+	}
+
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 		     "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
-		     blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
+		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_free_inst;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "adiantum(%s,%s,%s)",
 		     streamcipher_alg->base.cra_driver_name,
-		     blockcipher_alg->cra_driver_name,
+		     cipher_driver_name,
 		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_free_inst;
 
@@ -601,7 +663,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	 */
 	inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
 				       2 * hash_alg->base.cra_priority +
-				       blockcipher_alg->cra_priority) / 7;
+				       blockcipher_alg->co.base.cra_priority) / 7;
 
 	inst->alg.setkey = adiantum_setkey;
 	inst->alg.encrypt = adiantum_encrypt;
@@ -611,6 +673,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.min_keysize = streamcipher_alg->min_keysize;
 	inst->alg.max_keysize = streamcipher_alg->max_keysize;
 	inst->alg.ivsize = TWEAK_SIZE;
+	inst->alg.co.twopass = true;
 
 	inst->free = adiantum_free_instance;
 
@@ -646,4 +709,3 @@ MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
 MODULE_ALIAS_CRYPTO("adiantum");
-MODULE_IMPORT_NS(CRYPTO_INTERNAL);
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 08/15] crypto: skcipher - Add incremental support to lskcipher wrapper
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (5 preceding siblings ...)
  2023-12-05  9:52 ` [PATCH 07/15] crypto: adiantum - Use lskcipher instead of cipher Herbert Xu
@ 2023-12-06  4:46 ` Herbert Xu
  2023-12-06  5:49 ` [PATCH 09/15] crypto: chacha-generic - Convert from skcipher to lskcipher Herbert Xu
                   ` (8 subsequent siblings)
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-06  4:46 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Execute a second pass for nonincremental lskcipher algorithms when
the skcipher request contains all the data and when the SG list
itself cannot be passed to the lskcipher in one go.

If the SG list can be processed in one go, there is no need for a
second pass.  If the skcipher request itself is incremental, then
the expectation is for the user to execute a second pass on the
skcipher request.
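
As a concrete (hypothetical) example: an adiantum request that carries
all of its data but is spread over scattered pages cannot be handed to
the lskcipher in one call, so the wrapper walks the scatterlist a
second time to complete the two-pass computation; the same data in a
single contiguous entry is handled in one call; and a request
submitted with CRYPTO_SKCIPHER_REQ_NOTFINAL leaves the second pass to
the caller, who must issue it as a further request.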

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lskcipher.c | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)

diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index bc54cfc2734d..10e082f3cde6 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -206,11 +206,15 @@ static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
 	u8 *ivs = skcipher_request_ctx(req);
 	struct crypto_lskcipher *tfm = *ctx;
 	struct skcipher_walk walk;
+	int secondpass = 0;
+	bool isincremental;
+	bool morethanone;
 	unsigned ivsize;
 	u32 flags;
 	int err;
 
 	ivsize = crypto_lskcipher_ivsize(tfm);
+	isincremental = crypto_lskcipher_isincremental(tfm);
 	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(skcipher) + 1);
 
 	flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -223,16 +227,23 @@ static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
 	if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
 		flags |= CRYPTO_LSKCIPHER_FLAG_FINAL;
 
-	err = skcipher_walk_virt(&walk, req, false);
+	do {
+		err = skcipher_walk_virt(&walk, req, false);
+		morethanone = walk.nbytes != walk.total;
 
-	while (walk.nbytes) {
-		err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
-			    walk.nbytes, ivs,
-			    flags & ~(walk.nbytes == walk.total ?
-			    0 : CRYPTO_LSKCIPHER_FLAG_FINAL));
-		err = skcipher_walk_done(&walk, err);
-		flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
-	}
+		while (walk.nbytes) {
+			err = crypt(tfm, walk.src.virt.addr,
+				    walk.dst.virt.addr,
+				    walk.nbytes, ivs,
+				    flags & ~(walk.nbytes == walk.total ?
+				    0 : CRYPTO_LSKCIPHER_FLAG_FINAL));
+			err = skcipher_walk_done(&walk, err);
+			flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
+		}
+
+		if (err)
+			return err;
+	} while (!secondpass++ && !isincremental && morethanone);
 
 	if (flags & CRYPTO_LSKCIPHER_FLAG_FINAL)
 		memcpy(req->iv, ivs, ivsize);
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 09/15] crypto: chacha-generic - Convert from skcipher to lskcipher
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (6 preceding siblings ...)
  2023-12-06  4:46 ` [PATCH 08/15] crypto: skcipher - Add incremental support to lskcipher wrapper Herbert Xu
@ 2023-12-06  5:49 ` Herbert Xu
  2024-02-14 23:41   ` Eric Biggers
  2023-12-06  6:05 ` [PATCH 10/15] crypto: skcipher - Move nesting check into ecb Herbert Xu
                   ` (7 subsequent siblings)
  15 siblings, 1 reply; 30+ messages in thread
From: Herbert Xu @ 2023-12-06  5:49 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Replace skcipher implementation with lskcipher.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/chacha_generic.c          | 161 ++++++++++++++++---------------
 include/crypto/internal/chacha.h |  22 ++++-
 2 files changed, 100 insertions(+), 83 deletions(-)

diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
index 8beea79ab117..6500fa570ddc 100644
--- a/crypto/chacha_generic.c
+++ b/crypto/chacha_generic.c
@@ -7,122 +7,127 @@
  */
 
 #include <asm/unaligned.h>
-#include <crypto/algapi.h>
 #include <crypto/internal/chacha.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
-static int chacha_stream_xor(struct skcipher_request *req,
-			     const struct chacha_ctx *ctx, const u8 *iv)
+static int chacha_stream_xor(const struct chacha_ctx *ctx, const u8 *src,
+			     u8 *dst, unsigned nbytes, u8 *siv, u32 flags)
 {
-	struct skcipher_walk walk;
-	u32 state[16];
-	int err;
+	u32 *state = (u32 *)(siv + CHACHA_IV_SIZE);
+	unsigned len = nbytes;
 
-	err = skcipher_walk_virt(&walk, req, false);
+	if (!(flags & CRYPTO_LSKCIPHER_FLAG_CONT))
+		chacha_init_generic(state, ctx->key, siv);
 
-	chacha_init_generic(state, ctx->key, iv);
+	if (!(flags & CRYPTO_LSKCIPHER_FLAG_FINAL))
+		len = round_down(len, CHACHA_BLOCK_SIZE);
 
-	while (walk.nbytes > 0) {
-		unsigned int nbytes = walk.nbytes;
+	chacha_crypt_generic(state, dst, src, len, ctx->nrounds);
 
-		if (nbytes < walk.total)
-			nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
-
-		chacha_crypt_generic(state, walk.dst.virt.addr,
-				     walk.src.virt.addr, nbytes, ctx->nrounds);
-		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-	}
-
-	return err;
+	return nbytes - len;
 }
 
-static int crypto_chacha_crypt(struct skcipher_request *req)
+static int crypto_chacha_crypt(struct crypto_lskcipher *tfm, const u8 *src,
+			       u8 *dst, unsigned nbytes, u8 *siv, u32 flags)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+	const struct chacha_ctx *ctx = crypto_lskcipher_ctx(tfm);
 
-	return chacha_stream_xor(req, ctx, req->iv);
+	return chacha_stream_xor(ctx, src, dst, nbytes, siv, flags);
 }
 
-static int crypto_xchacha_crypt(struct skcipher_request *req)
+static int crypto_xchacha_crypt(struct crypto_lskcipher *tfm, const u8 *src,
+				u8 *dst, unsigned nbytes, u8 *siv, u32 flags)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct chacha_ctx *ctx = crypto_lskcipher_ctx(tfm);
 	struct chacha_ctx subctx;
-	u32 state[16];
-	u8 real_iv[16];
+	u8 *real_iv;
+	u32 *state;
 
-	/* Compute the subkey given the original key and first 128 nonce bits */
-	chacha_init_generic(state, ctx->key, req->iv);
-	hchacha_block_generic(state, subctx.key, ctx->nrounds);
+	real_iv = siv + XCHACHA_IV_SIZE;
+	state = (u32 *)(real_iv + CHACHA_IV_SIZE);
 	subctx.nrounds = ctx->nrounds;
 
-	/* Build the real IV */
-	memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
-	memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
+	if (flags & CRYPTO_LSKCIPHER_FLAG_CONT)
+		goto out;
 
+	/* Compute the subkey given the original key and first 128 nonce bits */
+	chacha_init_generic(state, ctx->key, siv);
+	hchacha_block_generic(state, subctx.key, ctx->nrounds);
+
+	/* Build the real IV */
+	memcpy(&real_iv[0], siv + 24, 8); /* stream position */
+	memcpy(&real_iv[8], siv + 16, 8); /* remaining 64 nonce bits */
+
+out:
 	/* Generate the stream and XOR it with the data */
-	return chacha_stream_xor(req, &subctx, real_iv);
+	return chacha_stream_xor(&subctx, src, dst, nbytes, real_iv, flags);
 }
 
-static struct skcipher_alg algs[] = {
+static struct lskcipher_alg algs[] = {
 	{
-		.base.cra_name		= "chacha20",
-		.base.cra_driver_name	= "chacha20-generic",
-		.base.cra_priority	= 100,
-		.base.cra_blocksize	= 1,
-		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
-		.base.cra_module	= THIS_MODULE,
+		.co.base.cra_name		= "chacha20",
+		.co.base.cra_driver_name	= "chacha20-generic",
+		.co.base.cra_priority		= 100,
+		.co.base.cra_blocksize		= 1,
+		.co.base.cra_ctxsize		= sizeof(struct chacha_ctx),
+		.co.base.cra_alignmask		= __alignof__(u32) - 1,
+		.co.base.cra_module		= THIS_MODULE,
 
-		.min_keysize		= CHACHA_KEY_SIZE,
-		.max_keysize		= CHACHA_KEY_SIZE,
-		.ivsize			= CHACHA_IV_SIZE,
-		.chunksize		= CHACHA_BLOCK_SIZE,
-		.setkey			= chacha20_setkey,
-		.encrypt		= crypto_chacha_crypt,
-		.decrypt		= crypto_chacha_crypt,
+		.co.min_keysize			= CHACHA_KEY_SIZE,
+		.co.max_keysize			= CHACHA_KEY_SIZE,
+		.co.ivsize			= CHACHA_IV_SIZE,
+		.co.chunksize			= CHACHA_BLOCK_SIZE,
+		.co.statesize			= 64,
+		.setkey				= chacha20_lskcipher_setkey,
+		.encrypt			= crypto_chacha_crypt,
+		.decrypt			= crypto_chacha_crypt,
 	}, {
-		.base.cra_name		= "xchacha20",
-		.base.cra_driver_name	= "xchacha20-generic",
-		.base.cra_priority	= 100,
-		.base.cra_blocksize	= 1,
-		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
-		.base.cra_module	= THIS_MODULE,
+		.co.base.cra_name		= "xchacha20",
+		.co.base.cra_driver_name	= "xchacha20-generic",
+		.co.base.cra_priority		= 100,
+		.co.base.cra_blocksize		= 1,
+		.co.base.cra_ctxsize		= sizeof(struct chacha_ctx),
+		.co.base.cra_alignmask		= __alignof__(u32) - 1,
+		.co.base.cra_module		= THIS_MODULE,
 
-		.min_keysize		= CHACHA_KEY_SIZE,
-		.max_keysize		= CHACHA_KEY_SIZE,
-		.ivsize			= XCHACHA_IV_SIZE,
-		.chunksize		= CHACHA_BLOCK_SIZE,
-		.setkey			= chacha20_setkey,
-		.encrypt		= crypto_xchacha_crypt,
-		.decrypt		= crypto_xchacha_crypt,
+		.co.min_keysize			= CHACHA_KEY_SIZE,
+		.co.max_keysize			= CHACHA_KEY_SIZE,
+		.co.ivsize			= XCHACHA_IV_SIZE,
+		.co.chunksize			= CHACHA_BLOCK_SIZE,
+		.co.statesize			= 80,
+		.setkey				= chacha20_lskcipher_setkey,
+		.encrypt			= crypto_xchacha_crypt,
+		.decrypt			= crypto_xchacha_crypt,
 	}, {
-		.base.cra_name		= "xchacha12",
-		.base.cra_driver_name	= "xchacha12-generic",
-		.base.cra_priority	= 100,
-		.base.cra_blocksize	= 1,
-		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
-		.base.cra_module	= THIS_MODULE,
+		.co.base.cra_name		= "xchacha12",
+		.co.base.cra_driver_name	= "xchacha12-generic",
+		.co.base.cra_priority		= 100,
+		.co.base.cra_blocksize		= 1,
+		.co.base.cra_ctxsize		= sizeof(struct chacha_ctx),
+		.co.base.cra_alignmask		= __alignof__(u32) - 1,
+		.co.base.cra_module		= THIS_MODULE,
 
-		.min_keysize		= CHACHA_KEY_SIZE,
-		.max_keysize		= CHACHA_KEY_SIZE,
-		.ivsize			= XCHACHA_IV_SIZE,
-		.chunksize		= CHACHA_BLOCK_SIZE,
-		.setkey			= chacha12_setkey,
-		.encrypt		= crypto_xchacha_crypt,
-		.decrypt		= crypto_xchacha_crypt,
+		.co.min_keysize			= CHACHA_KEY_SIZE,
+		.co.max_keysize			= CHACHA_KEY_SIZE,
+		.co.ivsize			= XCHACHA_IV_SIZE,
+		.co.chunksize			= CHACHA_BLOCK_SIZE,
+		.co.statesize			= 80,
+		.setkey				= chacha12_lskcipher_setkey,
+		.encrypt			= crypto_xchacha_crypt,
+		.decrypt			= crypto_xchacha_crypt,
 	}
 };
 
 static int __init chacha_generic_mod_init(void)
 {
-	return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+	return crypto_register_lskciphers(algs, ARRAY_SIZE(algs));
 }
 
 static void __exit chacha_generic_mod_fini(void)
 {
-	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+	crypto_unregister_lskciphers(algs, ARRAY_SIZE(algs));
 }
 
 subsys_initcall(chacha_generic_mod_init);
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
index b085dc1ac151..568c7c7f042f 100644
--- a/include/crypto/internal/chacha.h
+++ b/include/crypto/internal/chacha.h
@@ -5,17 +5,15 @@
 
 #include <crypto/chacha.h>
 #include <crypto/internal/skcipher.h>
-#include <linux/crypto.h>
 
 struct chacha_ctx {
 	u32 key[8];
 	int nrounds;
 };
 
-static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
+static inline int chacha_setkey(struct chacha_ctx *ctx, const u8 *key,
 				unsigned int keysize, int nrounds)
 {
-	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int i;
 
 	if (keysize != CHACHA_KEY_SIZE)
@@ -31,13 +29,27 @@ static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
 static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
 				  unsigned int keysize)
 {
-	return chacha_setkey(tfm, key, keysize, 20);
+	return chacha_setkey(crypto_skcipher_ctx(tfm), key, keysize, 20);
+}
+
+static inline int chacha20_lskcipher_setkey(struct crypto_lskcipher *tfm,
+					    const u8 *key,
+					    unsigned int keysize)
+{
+	return chacha_setkey(crypto_lskcipher_ctx(tfm), key, keysize, 20);
 }
 
 static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
 				  unsigned int keysize)
 {
-	return chacha_setkey(tfm, key, keysize, 12);
+	return chacha_setkey(crypto_skcipher_ctx(tfm), key, keysize, 12);
+}
+
+static inline int chacha12_lskcipher_setkey(struct crypto_lskcipher *tfm,
+					    const u8 *key,
+					    unsigned int keysize)
+{
+	return chacha_setkey(crypto_lskcipher_ctx(tfm), key, keysize, 12);
 }
 
 #endif /* _CRYPTO_CHACHA_H */
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 10/15] crypto: skcipher - Move nesting check into ecb
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (7 preceding siblings ...)
  2023-12-06  5:49 ` [PATCH 09/15] crypto: chacha-generic - Convert from skcipher to lskcipher Herbert Xu
@ 2023-12-06  6:05 ` Herbert Xu
  2023-12-06  8:55 ` [PATCH 11/15] crypto: skcipher - Propagate zero-length requests to lskcipher Herbert Xu
                   ` (6 subsequent siblings)
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-06  6:05 UTC (permalink / raw)
  To: Linux Crypto Mailing List

The lskcipher simple template does not allow nesting.  The intention
is to prevent instances such as ecb(ecb(aes)).  However, as the
simple template itself can obviously be nested (e.g., xts(ecb(aes))),
move the check into ecb instead.
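
As a rough illustration (not part of the patch), the user-visible
effect is that stacking ecb directly on another template instance is
refused by ecb's own create callback, while templates built on top of
ecb keep working:

	/* Hypothetical sketch of the behaviour after this change. */
	struct crypto_lskcipher *tfm;

	tfm = crypto_alloc_lskcipher("ecb(ecb(aes))", 0, 0);
	if (IS_ERR(tfm))
		/* expected: ecb's create callback sees CRYPTO_ALG_INSTANCE
		 * on the inner ecb(aes) instance and fails with -ELOOP
		 */
		return PTR_ERR(tfm);

	/* Instances such as xts(ecb(aes)), where another template wraps
	 * ecb, remain possible because the generic simple-template code
	 * no longer performs the nesting check.
	 */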

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/ecb.c       | 4 ++++
 crypto/lskcipher.c | 5 -----
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/crypto/ecb.c b/crypto/ecb.c
index e3a67789050e..2b61c557e307 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -189,6 +189,10 @@ static int crypto_ecb_create(struct crypto_template *tmpl, struct rtattr **tb)
 	if (cipher_alg->co.ivsize)
 		return -EINVAL;
 
+	/* Don't allow nesting. */
+	if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
+		return -ELOOP;
+
 	inst->alg.co.base.cra_ctxsize = cipher_alg->co.base.cra_ctxsize;
 	inst->alg.setkey = cipher_alg->setkey;
 	inst->alg.encrypt = cipher_alg->encrypt;
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 10e082f3cde6..8660d6e3ccce 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -636,11 +636,6 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple(
 			     "%s(%s)", tmpl->name, cipher_name) >=
 		    CRYPTO_MAX_ALG_NAME)
 			goto err_free_inst;
-	} else {
-		/* Don't allow nesting. */
-		err = -ELOOP;
-		if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
-			goto err_free_inst;
 	}
 
 	inst->free = lskcipher_free_instance_simple;
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 11/15] crypto: skcipher - Propagate zero-length requests to lskcipher
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (8 preceding siblings ...)
  2023-12-06  6:05 ` [PATCH 10/15] crypto: skcipher - Move nesting check into ecb Herbert Xu
@ 2023-12-06  8:55 ` Herbert Xu
  2023-12-07 10:03 ` [PATCH 03/15] crypto: skcipher - Remove ivsize check for lskcipher simple templates Herbert Xu
                   ` (5 subsequent siblings)
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-06  8:55 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Propagate zero-length requests down to the lskcipher algorithm, as
otherwise the return value seen by the caller could differ from what
the lskcipher itself would produce, e.g., zero instead of -EINVAL for
xts.
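
For illustration only (not part of the patch), the caller-visible
difference for an empty request on xts looks roughly like this:

	/* Sketch: a zero-length skcipher request on top of an xts
	 * lskcipher.  Before this patch the wrapper short-circuited and
	 * returned 0; now the request is forwarded and xts's own error
	 * is reported.
	 */
	skcipher_request_set_crypt(req, src, dst, 0, iv);
	err = crypto_skcipher_encrypt(req);	/* now -EINVAL for xts */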

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lskcipher.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 8660d6e3ccce..00ea963a2d2d 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -227,6 +227,11 @@ static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
 	if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
 		flags |= CRYPTO_LSKCIPHER_FLAG_FINAL;
 
+	if (unlikely(!req->cryptlen)) {
+		err = crypt(tfm, NULL, NULL, 0, ivs, flags);
+		goto out;
+	}
+
 	do {
 		err = skcipher_walk_virt(&walk, req, false);
 		morethanone = walk.nbytes != walk.total;
@@ -245,6 +250,7 @@ static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
 			return err;
 	} while (!secondpass++ && !isincremental && morethanone);
 
+out:
 	if (flags & CRYPTO_LSKCIPHER_FLAG_FINAL)
 		memcpy(req->iv, ivs, ivsize);
 
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 03/15] crypto: skcipher - Remove ivsize check for lskcipher simple templates
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (9 preceding siblings ...)
  2023-12-06  8:55 ` [PATCH 11/15] crypto: skcipher - Propagate zero-length requests to lskcipher Herbert Xu
@ 2023-12-07 10:03 ` Herbert Xu
  2023-12-07 10:13 ` [PATCH 12/15] crypto: cts - Convert from skcipher to lskcipher Herbert Xu
                   ` (4 subsequent siblings)
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-07 10:03 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Remove the ivsize check from the lskcipher simple template helper so
that it can also be used for cts.  Check the ivsize in users such as
cbc instead.
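
For context, a rough sketch of the new division of responsibility
(mirroring the hunks below rather than adding anything new):

	/* cbc: the underlying cipher must not have an IV of its own;
	 * cbc then supplies a block-sized IV itself.
	 */
	if (inst->alg.co.ivsize)
		goto out_free_inst;
	inst->alg.co.ivsize = inst->alg.co.base.cra_blocksize;

	/* The simple template helper now simply inherits whatever ivsize
	 * the wrapped algorithm advertises, which is what cts needs.
	 */
	inst->alg.co.ivsize = cipher_alg->co.ivsize;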

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/cbc.c       | 5 +++++
 crypto/lskcipher.c | 6 +-----
 2 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/crypto/cbc.c b/crypto/cbc.c
index eedddef9ce40..173e47aecb1f 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -148,6 +148,11 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
 	if (!is_power_of_2(inst->alg.co.base.cra_blocksize))
 		goto out_free_inst;
 
+	if (inst->alg.co.ivsize)
+		goto out_free_inst;
+
+	inst->alg.co.ivsize = inst->alg.co.base.cra_blocksize;
+
 	inst->alg.encrypt = crypto_cbc_encrypt;
 	inst->alg.decrypt = crypto_cbc_decrypt;
 
diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 2a602911f4fc..260666f34500 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -630,10 +630,6 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple(
 			goto err_free_inst;
 	}
 
-	err = -EINVAL;
-	if (cipher_alg->co.ivsize)
-		goto err_free_inst;
-
 	inst->free = lskcipher_free_instance_simple;
 
 	/* Default algorithm properties, can be overridden */
@@ -642,7 +638,7 @@ struct lskcipher_instance *lskcipher_alloc_instance_simple(
 	inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority;
 	inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
 	inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
-	inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;
+	inst->alg.co.ivsize = cipher_alg->co.ivsize;
 	inst->alg.co.statesize = cipher_alg->co.statesize;
 
 	/* Use struct crypto_lskcipher * by default, can be overridden */
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 12/15] crypto: cts - Convert from skcipher to lskcipher
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (10 preceding siblings ...)
  2023-12-07 10:03 ` [PATCH 03/15] crypto: skcipher - Remove ivsize check for lskcipher simple templates Herbert Xu
@ 2023-12-07 10:13 ` Herbert Xu
  2023-12-29 10:47 ` [PATCH 13/15] crypto: cts,xts - Update parameters blocksize/chunksize/tailsize Herbert Xu
                   ` (3 subsequent siblings)
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-07 10:13 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Replace the cts skcipher implementation with an lskcipher one.
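
For orientation, a simplified sketch of the new flow (the real code
below also handles the single-block case and reports leftover bytes):
each call does a plain CBC pass over the bulk of the data, and only
the final call performs the ciphertext-stealing step on the last two
blocks.

	/* Simplified sketch of the encrypt path after the conversion. */
	if (flags & CRYPTO_LSKCIPHER_FLAG_FINAL)
		len = rounddown(nbytes - 1, bsize); /* withhold the tail */
	else
		len = rounddown(nbytes, bsize);	    /* plain CBC for now */

	err = crypto_lskcipher_encrypt(child, src, dst, len, iv);
	if (!err && (flags & CRYPTO_LSKCIPHER_FLAG_FINAL) && nbytes > len)
		err = cts_cbc_encrypt(child, src, dst, len, nbytes - len, iv);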

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/cts.c | 355 +++++++++++++--------------------------------------
 1 file changed, 89 insertions(+), 266 deletions(-)

diff --git a/crypto/cts.c b/crypto/cts.c
index f5b42156b6c7..4ead59de59c8 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -40,166 +40,81 @@
  * rfc3962 includes errata information in its Appendix A.
  */
 
-#include <crypto/algapi.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/log2.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <crypto/scatterwalk.h>
-#include <linux/slab.h>
-#include <linux/compiler.h>
+#include <linux/string.h>
 
 struct crypto_cts_ctx {
-	struct crypto_skcipher *child;
+	struct crypto_lskcipher *child;
 };
 
-struct crypto_cts_reqctx {
-	struct scatterlist sg[2];
-	unsigned offset;
-	struct skcipher_request subreq;
-};
-
-static inline u8 *crypto_cts_reqctx_space(struct skcipher_request *req)
+static int cts_cbc_encrypt(struct crypto_lskcipher *tfm,
+			   const u8 *src, u8 *dst,
+			   unsigned offset, unsigned lastn, u8 *iv)
 {
-	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_skcipher *child = ctx->child;
+	const unsigned bsize = crypto_lskcipher_blocksize(tfm);
+	u8 d[MAX_CIPHER_BLOCKSIZE * 2];
 
-	return PTR_ALIGN((u8 *)(rctx + 1) + crypto_skcipher_reqsize(child),
-			 crypto_skcipher_alignmask(tfm) + 1);
-}
-
-static int crypto_cts_setkey(struct crypto_skcipher *parent, const u8 *key,
-			     unsigned int keylen)
-{
-	struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(parent);
-	struct crypto_skcipher *child = ctx->child;
-
-	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
-					 CRYPTO_TFM_REQ_MASK);
-	return crypto_skcipher_setkey(child, key, keylen);
-}
-
-static void cts_cbc_crypt_done(void *data, int err)
-{
-	struct skcipher_request *req = data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	skcipher_request_complete(req, err);
-}
-
-static int cts_cbc_encrypt(struct skcipher_request *req)
-{
-	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct skcipher_request *subreq = &rctx->subreq;
-	int bsize = crypto_skcipher_blocksize(tfm);
-	u8 d[MAX_CIPHER_BLOCKSIZE * 2] __aligned(__alignof__(u32));
-	struct scatterlist *sg;
-	unsigned int offset;
-	int lastn;
-
-	offset = rctx->offset;
-	lastn = req->cryptlen - offset;
-
-	sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
-	scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
+	memcpy(d + bsize, dst + offset - bsize, bsize);
 
 	memset(d, 0, bsize);
-	scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
+	memcpy(d, src + offset, lastn);
 
-	scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
+	memcpy(dst + offset - bsize, d, bsize + lastn);
 	memzero_explicit(d, sizeof(d));
 
-	skcipher_request_set_callback(subreq, req->base.flags &
-					      CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      cts_cbc_crypt_done, req);
-	skcipher_request_set_crypt(subreq, sg, sg, bsize, req->iv);
-	return crypto_skcipher_encrypt(subreq);
+	return crypto_lskcipher_encrypt(tfm, dst + offset - bsize,
+					dst + offset - bsize, bsize, iv);
 }
 
-static void crypto_cts_encrypt_done(void *data, int err)
+static int crypto_cts_encrypt(struct crypto_lskcipher *tfm,
+			      const u8 *src, u8 *dst, unsigned nbytes,
+			      u8 *iv, u32 flags)
 {
-	struct skcipher_request *req = data;
+	struct crypto_cts_ctx *ctx = crypto_lskcipher_ctx(tfm);
+	struct crypto_lskcipher *child = ctx->child;
+	unsigned bsize;
+	unsigned len;
+	int err;
 
-	if (err)
-		goto out;
-
-	err = cts_cbc_encrypt(req);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return;
-
-out:
-	skcipher_request_complete(req, err);
-}
-
-static int crypto_cts_encrypt(struct skcipher_request *req)
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
-	struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_request *subreq = &rctx->subreq;
-	int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = req->cryptlen;
-	unsigned int offset;
-
-	skcipher_request_set_tfm(subreq, ctx->child);
+	bsize = crypto_lskcipher_blocksize(child);
 
 	if (nbytes < bsize)
 		return -EINVAL;
 
-	if (nbytes == bsize) {
-		skcipher_request_set_callback(subreq, req->base.flags,
-					      req->base.complete,
-					      req->base.data);
-		skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
-					   req->iv);
-		return crypto_skcipher_encrypt(subreq);
-	}
+	if (nbytes == bsize)
+		len = nbytes;
+	else if ((flags & CRYPTO_LSKCIPHER_FLAG_FINAL))
+		len = rounddown(nbytes - 1, bsize);
+	else
+		len = rounddown(nbytes, bsize);
 
-	offset = rounddown(nbytes - 1, bsize);
-	rctx->offset = offset;
+	nbytes -= len;
 
-	skcipher_request_set_callback(subreq, req->base.flags,
-				      crypto_cts_encrypt_done, req);
-	skcipher_request_set_crypt(subreq, req->src, req->dst,
-				   offset, req->iv);
+	err = crypto_lskcipher_encrypt(child, src, dst, len, iv);
+	if (err)
+		return err;
 
-	return crypto_skcipher_encrypt(subreq) ?:
-	       cts_cbc_encrypt(req);
+	if (!(flags & CRYPTO_LSKCIPHER_FLAG_FINAL) || !nbytes)
+		return nbytes;
+
+	return cts_cbc_encrypt(child, src, dst, len, nbytes, iv);
 }
 
-static int cts_cbc_decrypt(struct skcipher_request *req)
+static int cts_cbc_decrypt(struct crypto_lskcipher *tfm,
+			   const u8 *src, u8 *dst,
+			   unsigned offset, unsigned lastn, u8 *iv)
 {
-	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct skcipher_request *subreq = &rctx->subreq;
-	int bsize = crypto_skcipher_blocksize(tfm);
-	u8 d[MAX_CIPHER_BLOCKSIZE * 2] __aligned(__alignof__(u32));
-	struct scatterlist *sg;
-	unsigned int offset;
-	u8 *space;
-	int lastn;
-
-	offset = rctx->offset;
-	lastn = req->cryptlen - offset;
-
-	sg = scatterwalk_ffwd(rctx->sg, req->dst, offset - bsize);
+	const unsigned bsize = crypto_lskcipher_blocksize(tfm);
+	u8 d[MAX_CIPHER_BLOCKSIZE * 2];
 
 	/* 1. Decrypt Cn-1 (s) to create Dn */
-	scatterwalk_map_and_copy(d + bsize, sg, 0, bsize, 0);
-	space = crypto_cts_reqctx_space(req);
-	crypto_xor(d + bsize, space, bsize);
+	crypto_xor_cpy(d + bsize, dst + offset - bsize, iv, bsize);
 	/* 2. Pad Cn with zeros at the end to create C of length BB */
 	memset(d, 0, bsize);
-	scatterwalk_map_and_copy(d, req->src, offset, lastn, 0);
+	memcpy(d, src + offset, lastn);
 	/* 3. Exclusive-or Dn with C to create Xn */
 	/* 4. Select the first Ln bytes of Xn to create Pn */
 	crypto_xor(d + bsize, d, lastn);
@@ -208,180 +123,88 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
 	memcpy(d + lastn, d + bsize + lastn, bsize - lastn);
 	/* 6. Decrypt En to create Pn-1 */
 
-	scatterwalk_map_and_copy(d, sg, 0, bsize + lastn, 1);
+	memcpy(dst + offset - bsize, d, bsize + lastn);
 	memzero_explicit(d, sizeof(d));
 
-	skcipher_request_set_callback(subreq, req->base.flags &
-					      CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      cts_cbc_crypt_done, req);
-
-	skcipher_request_set_crypt(subreq, sg, sg, bsize, space);
-	return crypto_skcipher_decrypt(subreq);
+	return crypto_lskcipher_decrypt(tfm, dst + offset - bsize,
+					dst + offset - bsize, bsize, iv);
 }
 
-static void crypto_cts_decrypt_done(void *data, int err)
+static int crypto_cts_decrypt(struct crypto_lskcipher *tfm,
+			      const u8 *src, u8 *dst, unsigned nbytes,
+			      u8 *iv, u32 flags)
 {
-	struct skcipher_request *req = data;
 
-	if (err)
-		goto out;
+	struct crypto_cts_ctx *ctx = crypto_lskcipher_ctx(tfm);
+	struct crypto_lskcipher *child = ctx->child;
+	u8 d[MAX_CIPHER_BLOCKSIZE * 2];
+	unsigned bsize;
+	unsigned len;
+	int err;
 
-	err = cts_cbc_decrypt(req);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return;
-
-out:
-	skcipher_request_complete(req, err);
-}
-
-static int crypto_cts_decrypt(struct skcipher_request *req)
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_cts_reqctx *rctx = skcipher_request_ctx(req);
-	struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct skcipher_request *subreq = &rctx->subreq;
-	int bsize = crypto_skcipher_blocksize(tfm);
-	unsigned int nbytes = req->cryptlen;
-	unsigned int offset;
-	u8 *space;
-
-	skcipher_request_set_tfm(subreq, ctx->child);
+	bsize = crypto_lskcipher_blocksize(child);
 
 	if (nbytes < bsize)
 		return -EINVAL;
 
-	if (nbytes == bsize) {
-		skcipher_request_set_callback(subreq, req->base.flags,
-					      req->base.complete,
-					      req->base.data);
-		skcipher_request_set_crypt(subreq, req->src, req->dst, nbytes,
-					   req->iv);
-		return crypto_skcipher_decrypt(subreq);
-	}
+	if (nbytes == bsize)
+		len = nbytes;
+	else if ((flags & CRYPTO_LSKCIPHER_FLAG_FINAL)) {
+		len = rounddown(nbytes - 1, bsize);
 
-	skcipher_request_set_callback(subreq, req->base.flags,
-				      crypto_cts_decrypt_done, req);
+		if (len <= bsize)
+			memcpy(d, iv, bsize);
+		else
+			memcpy(d, src + len - 2 * bsize, bsize);
+	} else
+		len = rounddown(nbytes, bsize);
 
-	space = crypto_cts_reqctx_space(req);
+	nbytes -= len;
 
-	offset = rounddown(nbytes - 1, bsize);
-	rctx->offset = offset;
+	err = crypto_lskcipher_decrypt(child, src, dst, len, iv);
+	if (err)
+		return err;
 
-	if (offset <= bsize)
-		memcpy(space, req->iv, bsize);
-	else
-		scatterwalk_map_and_copy(space, req->src, offset - 2 * bsize,
-					 bsize, 0);
+	if (!(flags & CRYPTO_LSKCIPHER_FLAG_FINAL) || !nbytes)
+		return nbytes;
 
-	skcipher_request_set_crypt(subreq, req->src, req->dst,
-				   offset, req->iv);
+	memcpy(iv, d, bsize);
+	memzero_explicit(d, sizeof(d));
 
-	return crypto_skcipher_decrypt(subreq) ?:
-	       cts_cbc_decrypt(req);
-}
-
-static int crypto_cts_init_tfm(struct crypto_skcipher *tfm)
-{
-	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
-	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
-	struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
-	struct crypto_skcipher *cipher;
-	unsigned reqsize;
-	unsigned bsize;
-	unsigned align;
-
-	cipher = crypto_spawn_skcipher(spawn);
-	if (IS_ERR(cipher))
-		return PTR_ERR(cipher);
-
-	ctx->child = cipher;
-
-	align = crypto_skcipher_alignmask(tfm);
-	bsize = crypto_skcipher_blocksize(cipher);
-	reqsize = ALIGN(sizeof(struct crypto_cts_reqctx) +
-			crypto_skcipher_reqsize(cipher),
-			crypto_tfm_ctx_alignment()) +
-		  (align & ~(crypto_tfm_ctx_alignment() - 1)) + bsize;
-
-	crypto_skcipher_set_reqsize(tfm, reqsize);
-
-	return 0;
-}
-
-static void crypto_cts_exit_tfm(struct crypto_skcipher *tfm)
-{
-	struct crypto_cts_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-	crypto_free_skcipher(ctx->child);
-}
-
-static void crypto_cts_free(struct skcipher_instance *inst)
-{
-	crypto_drop_skcipher(skcipher_instance_ctx(inst));
-	kfree(inst);
+	return cts_cbc_decrypt(child, src, dst, len, nbytes, iv);
 }
 
 static int crypto_cts_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct crypto_skcipher_spawn *spawn;
-	struct skcipher_alg_common *alg;
-	struct skcipher_instance *inst;
-	u32 mask;
+	struct lskcipher_instance *inst;
+	struct lskcipher_alg *alg;
 	int err;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
-	if (err)
-		return err;
+	inst = lskcipher_alloc_instance_simple(tmpl, tb);
+	if (IS_ERR(inst))
+		return PTR_ERR(inst);
 
-	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
-	if (!inst)
-		return -ENOMEM;
-
-	spawn = skcipher_instance_ctx(inst);
-
-	err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst),
-				   crypto_attr_alg_name(tb[1]), 0, mask);
-	if (err)
-		goto err_free_inst;
-
-	alg = crypto_spawn_skcipher_alg_common(spawn);
+	alg = &inst->alg;
 
 	err = -EINVAL;
-	if (alg->ivsize != alg->base.cra_blocksize)
+	if (alg->co.ivsize != alg->co.base.cra_blocksize)
 		goto err_free_inst;
 
-	if (strncmp(alg->base.cra_name, "cbc(", 4))
+	if (strncmp(alg->co.base.cra_name, "cts(cbc(", 8))
 		goto err_free_inst;
 
-	err = crypto_inst_setname(skcipher_crypto_instance(inst), "cts",
-				  &alg->base);
-	if (err)
-		goto err_free_inst;
+	alg->co.base.cra_blocksize = 1;
 
-	inst->alg.base.cra_priority = alg->base.cra_priority;
-	inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
-	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
+	alg->co.chunksize = alg->co.ivsize;
+	alg->co.tailsize = alg->co.ivsize * 2;
 
-	inst->alg.ivsize = alg->base.cra_blocksize;
-	inst->alg.chunksize = alg->chunksize;
-	inst->alg.min_keysize = alg->min_keysize;
-	inst->alg.max_keysize = alg->max_keysize;
+	alg->encrypt = crypto_cts_encrypt;
+	alg->decrypt = crypto_cts_decrypt;
 
-	inst->alg.base.cra_ctxsize = sizeof(struct crypto_cts_ctx);
-
-	inst->alg.init = crypto_cts_init_tfm;
-	inst->alg.exit = crypto_cts_exit_tfm;
-
-	inst->alg.setkey = crypto_cts_setkey;
-	inst->alg.encrypt = crypto_cts_encrypt;
-	inst->alg.decrypt = crypto_cts_decrypt;
-
-	inst->free = crypto_cts_free;
-
-	err = skcipher_register_instance(tmpl, inst);
+	err = lskcipher_register_instance(tmpl, inst);
 	if (err) {
 err_free_inst:
-		crypto_cts_free(inst);
+		inst->free(inst);
 	}
 	return err;
 }
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt



* [PATCH 13/15] crypto: cts,xts - Update parameters blocksize/chunksize/tailsize
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (11 preceding siblings ...)
  2023-12-07 10:13 ` [PATCH 12/15] crypto: cts - Convert from skcipher to lskcipher Herbert Xu
@ 2023-12-29 10:47 ` Herbert Xu
  2024-02-14 23:00   ` Eric Biggers
  2023-12-30  7:16 ` [PATCH 14/15] crypto: lskcipher - Export incremental interface internally Herbert Xu
                   ` (2 subsequent siblings)
  15 siblings, 1 reply; 30+ messages in thread
From: Herbert Xu @ 2023-12-29 10:47 UTC (permalink / raw)
  To: Linux Crypto Mailing List

As all implementations of a given algorithm must advertise the same
parameters, change every cts and xts implementation to use the correct
block size, chunk size and tail size.
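
Concretely, the geometry every xts(aes) and cts(cbc(aes)) provider now
advertises looks like this (values taken from the hunks below):

	.base.cra_blocksize	= 1,			/* partial final block allowed */
	.chunksize		= AES_BLOCK_SIZE,	/* data still processed 16 bytes at a time */
	.tailsize		= 2 * AES_BLOCK_SIZE,	/* last two blocks withheld for stealing */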

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 arch/arm/crypto/aes-ce-glue.c                 |  8 +++++--
 arch/arm/crypto/aes-neonbs-glue.c             |  4 +++-
 arch/arm64/crypto/aes-glue.c                  |  8 +++++--
 arch/arm64/crypto/aes-neonbs-glue.c           |  4 +++-
 arch/arm64/crypto/sm4-ce-glue.c               |  8 +++++--
 arch/powerpc/crypto/aes-spe-glue.c            |  4 +++-
 arch/powerpc/crypto/aes_xts.c                 |  4 +++-
 arch/s390/crypto/aes_s390.c                   |  4 +++-
 arch/s390/crypto/paes_s390.c                  |  4 +++-
 arch/x86/crypto/aesni-intel_glue.c            |  8 +++++--
 drivers/crypto/atmel-aes.c                    |  4 +++-
 drivers/crypto/axis/artpec6_crypto.c          |  2 ++
 drivers/crypto/bcm/cipher.c                   |  4 +++-
 drivers/crypto/caam/caamalg.c                 |  4 +++-
 drivers/crypto/caam/caamalg_qi.c              |  4 +++-
 drivers/crypto/caam/caamalg_qi2.c             |  4 +++-
 drivers/crypto/cavium/cpt/cptvf_algs.c        |  4 +++-
 .../crypto/cavium/nitrox/nitrox_skcipher.c    |  8 +++++--
 drivers/crypto/ccp/ccp-crypto-aes-xts.c       |  4 +++-
 drivers/crypto/ccree/cc_cipher.c              | 12 ++++++++--
 drivers/crypto/chelsio/chcr_algo.c            |  4 +++-
 drivers/crypto/hisilicon/sec/sec_algs.c       |  4 +++-
 drivers/crypto/hisilicon/sec2/sec_crypto.c    | 23 +++++++++++--------
 .../crypto/inside-secure/safexcel_cipher.c    |  4 +++-
 .../intel/keembay/keembay-ocs-aes-core.c      | 11 ++++++---
 .../crypto/intel/qat/qat_common/qat_algs.c    |  4 +++-
 .../crypto/marvell/octeontx/otx_cptvf_algs.c  |  4 +++-
 .../marvell/octeontx2/otx2_cptvf_algs.c       |  4 +++-
 drivers/crypto/qce/skcipher.c                 |  6 ++++-
 29 files changed, 125 insertions(+), 45 deletions(-)

diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b668c97663ec..3bfa8accf2c2 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -619,13 +619,15 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_driver_name	= "__cts-cbc-aes-ce",
 	.base.cra_priority	= 300,
 	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
-	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_blocksize	= 1,
 	.base.cra_ctxsize	= sizeof(struct crypto_aes_ctx),
 	.base.cra_module	= THIS_MODULE,
 
 	.min_keysize		= AES_MIN_KEY_SIZE,
 	.max_keysize		= AES_MAX_KEY_SIZE,
 	.ivsize			= AES_BLOCK_SIZE,
+	.chunksize		= AES_BLOCK_SIZE,
+	.tailsize		= 2 * AES_BLOCK_SIZE,
 	.walksize		= 2 * AES_BLOCK_SIZE,
 	.setkey			= ce_aes_setkey,
 	.encrypt		= cts_cbc_encrypt,
@@ -666,13 +668,15 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_driver_name	= "__xts-aes-ce",
 	.base.cra_priority	= 300,
 	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
-	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_blocksize	= 1,
 	.base.cra_ctxsize	= sizeof(struct crypto_aes_xts_ctx),
 	.base.cra_module	= THIS_MODULE,
 
 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
 	.ivsize			= AES_BLOCK_SIZE,
+	.chunksize		= AES_BLOCK_SIZE,
+	.tailsize		= 2 * AES_BLOCK_SIZE,
 	.walksize		= 2 * AES_BLOCK_SIZE,
 	.setkey			= xts_set_key,
 	.encrypt		= xts_encrypt,
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index f00f042ef357..d2a032cbc5ac 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -487,13 +487,15 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_name		= "__xts(aes)",
 	.base.cra_driver_name	= "__xts-aes-neonbs",
 	.base.cra_priority	= 250,
-	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_blocksize	= 1,
 	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
 
 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
+	.chunksize		= AES_BLOCK_SIZE,
+	.tailsize		= 2 * AES_BLOCK_SIZE,
 	.walksize		= 8 * AES_BLOCK_SIZE,
 	.ivsize			= AES_BLOCK_SIZE,
 	.setkey			= aesbs_xts_setkey,
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index a147e847a5a1..733e40213445 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -750,13 +750,15 @@ static struct skcipher_alg aes_algs[] = { {
 		.cra_name		= "xts(aes)",
 		.cra_driver_name	= "xts-aes-" MODE,
 		.cra_priority		= PRIO,
-		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_blocksize		= 1,
 		.cra_ctxsize		= sizeof(struct crypto_aes_xts_ctx),
 		.cra_module		= THIS_MODULE,
 	},
 	.min_keysize	= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize	= 2 * AES_MAX_KEY_SIZE,
 	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
+	.tailsize	= 2 * AES_BLOCK_SIZE,
 	.walksize	= 2 * AES_BLOCK_SIZE,
 	.setkey		= xts_set_key,
 	.encrypt	= xts_encrypt,
@@ -767,13 +769,15 @@ static struct skcipher_alg aes_algs[] = { {
 		.cra_name		= "cts(cbc(aes))",
 		.cra_driver_name	= "cts-cbc-aes-" MODE,
 		.cra_priority		= PRIO,
-		.cra_blocksize		= AES_BLOCK_SIZE,
+		.cra_blocksize		= 1,
 		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
 		.cra_module		= THIS_MODULE,
 	},
 	.min_keysize	= AES_MIN_KEY_SIZE,
 	.max_keysize	= AES_MAX_KEY_SIZE,
 	.ivsize		= AES_BLOCK_SIZE,
+	.chunksize	= AES_BLOCK_SIZE,
+	.tailsize	= 2 * AES_BLOCK_SIZE,
 	.walksize	= 2 * AES_BLOCK_SIZE,
 	.setkey		= skcipher_aes_setkey,
 	.encrypt	= cts_cbc_encrypt,
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index bac4cabef607..f29770c3c063 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -427,12 +427,14 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_name		= "xts(aes)",
 	.base.cra_driver_name	= "xts-aes-neonbs",
 	.base.cra_priority	= 250,
-	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_blocksize	= 1,
 	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
 	.base.cra_module	= THIS_MODULE,
 
 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
+	.chunksize		= AES_BLOCK_SIZE,
+	.tailsize		= 2 * AES_BLOCK_SIZE,
 	.walksize		= 8 * AES_BLOCK_SIZE,
 	.ivsize			= AES_BLOCK_SIZE,
 	.setkey			= aesbs_xts_setkey,
diff --git a/arch/arm64/crypto/sm4-ce-glue.c b/arch/arm64/crypto/sm4-ce-glue.c
index 43741bed874e..650049d51d99 100644
--- a/arch/arm64/crypto/sm4-ce-glue.c
+++ b/arch/arm64/crypto/sm4-ce-glue.c
@@ -474,13 +474,15 @@ static struct skcipher_alg sm4_algs[] = {
 			.cra_name		= "cts(cbc(sm4))",
 			.cra_driver_name	= "cts-cbc-sm4-ce",
 			.cra_priority		= 400,
-			.cra_blocksize		= SM4_BLOCK_SIZE,
+			.cra_blocksize		= 1,
 			.cra_ctxsize		= sizeof(struct sm4_ctx),
 			.cra_module		= THIS_MODULE,
 		},
 		.min_keysize	= SM4_KEY_SIZE,
 		.max_keysize	= SM4_KEY_SIZE,
 		.ivsize		= SM4_BLOCK_SIZE,
+		.chunksize	= SM4_BLOCK_SIZE,
+		.tailsize	= SM4_BLOCK_SIZE * 2,
 		.walksize	= SM4_BLOCK_SIZE * 2,
 		.setkey		= sm4_setkey,
 		.encrypt	= sm4_cbc_cts_encrypt,
@@ -490,13 +492,15 @@ static struct skcipher_alg sm4_algs[] = {
 			.cra_name		= "xts(sm4)",
 			.cra_driver_name	= "xts-sm4-ce",
 			.cra_priority		= 400,
-			.cra_blocksize		= SM4_BLOCK_SIZE,
+			.cra_blocksize		= 1,
 			.cra_ctxsize		= sizeof(struct sm4_xts_ctx),
 			.cra_module		= THIS_MODULE,
 		},
 		.min_keysize	= SM4_KEY_SIZE * 2,
 		.max_keysize	= SM4_KEY_SIZE * 2,
 		.ivsize		= SM4_BLOCK_SIZE,
+		.chunksize	= SM4_BLOCK_SIZE,
+		.tailsize	= SM4_BLOCK_SIZE * 2,
 		.walksize	= SM4_BLOCK_SIZE * 2,
 		.setkey		= sm4_xts_setkey,
 		.encrypt	= sm4_xts_encrypt,
diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index efab78a3a8f6..b0c6cb44da94 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -474,12 +474,14 @@ static struct skcipher_alg aes_skcipher_algs[] = {
 		.base.cra_name		=	"xts(aes)",
 		.base.cra_driver_name	=	"xts-ppc-spe",
 		.base.cra_priority	=	300,
-		.base.cra_blocksize	=	AES_BLOCK_SIZE,
+		.base.cra_blocksize	=	1,
 		.base.cra_ctxsize	=	sizeof(struct ppc_xts_ctx),
 		.base.cra_module	=	THIS_MODULE,
 		.min_keysize		=	AES_MIN_KEY_SIZE * 2,
 		.max_keysize		=	AES_MAX_KEY_SIZE * 2,
 		.ivsize			=	AES_BLOCK_SIZE,
+		.chunksize		=	AES_BLOCK_SIZE,
+		.tailsize		=	AES_BLOCK_SIZE * 2,
 		.setkey			=	ppc_xts_setkey,
 		.encrypt		=	ppc_xts_encrypt,
 		.decrypt		=	ppc_xts_decrypt,
diff --git a/arch/powerpc/crypto/aes_xts.c b/arch/powerpc/crypto/aes_xts.c
index dabbccb41550..44828127156f 100644
--- a/arch/powerpc/crypto/aes_xts.c
+++ b/arch/powerpc/crypto/aes_xts.c
@@ -149,7 +149,7 @@ struct skcipher_alg p8_aes_xts_alg = {
 	.base.cra_module = THIS_MODULE,
 	.base.cra_priority = 2000,
 	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
-	.base.cra_blocksize = AES_BLOCK_SIZE,
+	.base.cra_blocksize = 1,
 	.base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx),
 	.setkey = p8_aes_xts_setkey,
 	.encrypt = p8_aes_xts_encrypt,
@@ -159,4 +159,6 @@ struct skcipher_alg p8_aes_xts_alg = {
 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
 	.ivsize = AES_BLOCK_SIZE,
+	.chunksize = AES_BLOCK_SIZE,
+	.tailsize = 2 * AES_BLOCK_SIZE,
 };
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c6fe5405de4a..774c8f4e7a89 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -513,7 +513,7 @@ static struct skcipher_alg xts_aes_alg = {
 	.base.cra_driver_name	=	"xts-aes-s390",
 	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
-	.base.cra_blocksize	=	AES_BLOCK_SIZE,
+	.base.cra_blocksize	=	1,
 	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
 	.base.cra_module	=	THIS_MODULE,
 	.init			=	xts_fallback_init,
@@ -521,6 +521,8 @@ static struct skcipher_alg xts_aes_alg = {
 	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
 	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
 	.ivsize			=	AES_BLOCK_SIZE,
+	.chunksize		=	AES_BLOCK_SIZE,
+	.tailsize		=	2 * AES_BLOCK_SIZE,
 	.setkey			=	xts_aes_set_key,
 	.encrypt		=	xts_aes_encrypt,
 	.decrypt		=	xts_aes_decrypt,
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 55ee5567a5ea..0bc03c999a6d 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -559,7 +559,7 @@ static struct skcipher_alg xts_paes_alg = {
 	.base.cra_name		=	"xts(paes)",
 	.base.cra_driver_name	=	"xts-paes-s390",
 	.base.cra_priority	=	402,	/* ecb-paes-s390 + 1 */
-	.base.cra_blocksize	=	AES_BLOCK_SIZE,
+	.base.cra_blocksize	=	1,
 	.base.cra_ctxsize	=	sizeof(struct s390_pxts_ctx),
 	.base.cra_module	=	THIS_MODULE,
 	.base.cra_list		=	LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
@@ -568,6 +568,8 @@ static struct skcipher_alg xts_paes_alg = {
 	.min_keysize		=	2 * PAES_MIN_KEYSIZE,
 	.max_keysize		=	2 * PAES_MAX_KEYSIZE,
 	.ivsize			=	AES_BLOCK_SIZE,
+	.chunksize		=	AES_BLOCK_SIZE,
+	.tailsize		=	2 * AES_BLOCK_SIZE,
 	.setkey			=	xts_paes_set_key,
 	.encrypt		=	xts_paes_encrypt,
 	.decrypt		=	xts_paes_decrypt,
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index b1d90c25975a..a40f9a9c3978 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1059,13 +1059,15 @@ static struct skcipher_alg aesni_skciphers[] = {
 			.cra_driver_name	= "__cts-cbc-aes-aesni",
 			.cra_priority		= 400,
 			.cra_flags		= CRYPTO_ALG_INTERNAL,
-			.cra_blocksize		= AES_BLOCK_SIZE,
+			.cra_blocksize		= 1,
 			.cra_ctxsize		= CRYPTO_AES_CTX_SIZE,
 			.cra_module		= THIS_MODULE,
 		},
 		.min_keysize	= AES_MIN_KEY_SIZE,
 		.max_keysize	= AES_MAX_KEY_SIZE,
 		.ivsize		= AES_BLOCK_SIZE,
+		.chunksize	= AES_BLOCK_SIZE,
+		.tailsize	= 2 * AES_BLOCK_SIZE,
 		.walksize	= 2 * AES_BLOCK_SIZE,
 		.setkey		= aesni_skcipher_setkey,
 		.encrypt	= cts_cbc_encrypt,
@@ -1095,13 +1097,15 @@ static struct skcipher_alg aesni_skciphers[] = {
 			.cra_driver_name	= "__xts-aes-aesni",
 			.cra_priority		= 401,
 			.cra_flags		= CRYPTO_ALG_INTERNAL,
-			.cra_blocksize		= AES_BLOCK_SIZE,
+			.cra_blocksize		= 1,
 			.cra_ctxsize		= XTS_AES_CTX_SIZE,
 			.cra_module		= THIS_MODULE,
 		},
 		.min_keysize	= 2 * AES_MIN_KEY_SIZE,
 		.max_keysize	= 2 * AES_MAX_KEY_SIZE,
 		.ivsize		= AES_BLOCK_SIZE,
+		.chunksize	= AES_BLOCK_SIZE,
+		.tailsize	= 2 * AES_BLOCK_SIZE,
 		.walksize	= 2 * AES_BLOCK_SIZE,
 		.setkey		= xts_aesni_setkey,
 		.encrypt	= xts_encrypt,
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 8bd64fc37e75..4820f1c7fe09 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1741,13 +1741,15 @@ static void atmel_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
 static struct skcipher_alg aes_xts_alg = {
 	.base.cra_name		= "xts(aes)",
 	.base.cra_driver_name	= "atmel-xts-aes",
-	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_blocksize	= 1,
 	.base.cra_ctxsize	= sizeof(struct atmel_aes_xts_ctx),
 	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
 
 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
 	.ivsize			= AES_BLOCK_SIZE,
+	.chunksize		= AES_BLOCK_SIZE,
+	.tailsize		= 2 * AES_BLOCK_SIZE,
 	.setkey			= atmel_aes_xts_setkey,
 	.encrypt		= atmel_aes_xts_encrypt,
 	.decrypt		= atmel_aes_xts_decrypt,
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index dbc1d483f2af..f2f19467d8e1 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2777,6 +2777,8 @@ static struct skcipher_alg crypto_algos[] = {
 		.min_keysize = 2*AES_MIN_KEY_SIZE,
 		.max_keysize = 2*AES_MAX_KEY_SIZE,
 		.ivsize = 16,
+		.chunksize = 16,
+		.tailsize = 32,
 		.setkey = artpec6_crypto_xts_set_key,
 		.encrypt = artpec6_crypto_encrypt,
 		.decrypt = artpec6_crypto_decrypt,
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 1a3ecd44cbaf..06b2a5cc9084 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3652,10 +3652,12 @@ static struct iproc_alg_s driver_algs[] = {
 	 .alg.skcipher = {
 			.base.cra_name = "xts(aes)",
 			.base.cra_driver_name = "xts-aes-iproc",
-			.base.cra_blocksize = AES_BLOCK_SIZE,
+			.base.cra_blocksize = 1,
 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.tailsize = 2 * AES_BLOCK_SIZE,
 			},
 	 .cipher_info = {
 			 .alg = CIPHER_ALG_AES,
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 066f08a3a040..b62aea2aa65e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1995,7 +1995,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 				.cra_name = "xts(aes)",
 				.cra_driver_name = "xts-aes-caam",
 				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
-				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_blocksize = 1,
 			},
 			.setkey = xts_skcipher_setkey,
 			.encrypt = skcipher_encrypt,
@@ -2003,6 +2003,8 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.tailsize = 2 * AES_BLOCK_SIZE,
 		},
 		.skcipher.op = {
 			.do_one_request = skcipher_do_one_req,
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 743ce50c14f2..7658ffc70e1d 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -1574,7 +1574,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 				.cra_name = "xts(aes)",
 				.cra_driver_name = "xts-aes-caam-qi",
 				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
-				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_blocksize = 1,
 			},
 			.setkey = xts_skcipher_setkey,
 			.encrypt = skcipher_encrypt,
@@ -1582,6 +1582,8 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.tailsize = 2 * AES_BLOCK_SIZE,
 		},
 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
 	},
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index a4f6884416a0..b724671c04be 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -1767,7 +1767,7 @@ static struct caam_skcipher_alg driver_algs[] = {
 				.cra_name = "xts(aes)",
 				.cra_driver_name = "xts-aes-caam-qi2",
 				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
-				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_blocksize = 1,
 			},
 			.setkey = xts_skcipher_setkey,
 			.encrypt = skcipher_encrypt,
@@ -1775,6 +1775,8 @@ static struct caam_skcipher_alg driver_algs[] = {
 			.min_keysize = 2 * AES_MIN_KEY_SIZE,
 			.max_keysize = 2 * AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.tailsize = 2 * AES_BLOCK_SIZE,
 		},
 		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
 	},
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 219fe9be7606..30f8f1dec5cd 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -335,7 +335,7 @@ static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
 static struct skcipher_alg algs[] = { {
 	.base.cra_flags		= CRYPTO_ALG_ASYNC |
 				  CRYPTO_ALG_ALLOCATES_MEMORY,
-	.base.cra_blocksize	= AES_BLOCK_SIZE,
+	.base.cra_blocksize	= 1,
 	.base.cra_ctxsize	= sizeof(struct cvm_enc_ctx),
 	.base.cra_alignmask	= 7,
 	.base.cra_priority	= 4001,
@@ -344,6 +344,8 @@ static struct skcipher_alg algs[] = { {
 	.base.cra_module	= THIS_MODULE,
 
 	.ivsize			= AES_BLOCK_SIZE,
+	.chunksize		= AES_BLOCK_SIZE,
+	.tailsize		= 2 * AES_BLOCK_SIZE,
 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
 	.setkey			= cvm_xts_setkey,
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 6e5e667bab75..2f2c9a1170a0 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -425,7 +425,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
 		.cra_driver_name = "n5_xts(aes)",
 		.cra_priority = PRIO,
 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
 		.cra_alignmask = 0,
 		.cra_module = THIS_MODULE,
@@ -433,6 +433,8 @@ static struct skcipher_alg nitrox_skciphers[] = { {
 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
 	.ivsize = AES_BLOCK_SIZE,
+	.chunksize = AES_BLOCK_SIZE,
+	.tailsize = 2 * AES_BLOCK_SIZE,
 	.setkey = nitrox_aes_xts_setkey,
 	.encrypt = nitrox_aes_encrypt,
 	.decrypt = nitrox_aes_decrypt,
@@ -463,7 +465,7 @@ static struct skcipher_alg nitrox_skciphers[] = { {
 		.cra_driver_name = "n5_cts(cbc(aes))",
 		.cra_priority = PRIO,
 		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_blocksize = 1,
 		.cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
 		.cra_alignmask = 0,
 		.cra_module = THIS_MODULE,
@@ -471,6 +473,8 @@ static struct skcipher_alg nitrox_skciphers[] = { {
 	.min_keysize = AES_MIN_KEY_SIZE,
 	.max_keysize = AES_MAX_KEY_SIZE,
 	.ivsize = AES_BLOCK_SIZE,
+	.chunksize = AES_BLOCK_SIZE,
+	.tailsize = 2 * AES_BLOCK_SIZE,
 	.setkey = nitrox_aes_setkey,
 	.encrypt = nitrox_aes_encrypt,
 	.decrypt = nitrox_aes_decrypt,
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 93f735d6b02b..247025918861 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -246,7 +246,7 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
 				  CRYPTO_ALG_ALLOCATES_MEMORY |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
 				  CRYPTO_ALG_NEED_FALLBACK;
-	alg->base.cra_blocksize	= AES_BLOCK_SIZE;
+	alg->base.cra_blocksize	= 1;
 	alg->base.cra_ctxsize	= sizeof(struct ccp_ctx) +
 				  crypto_dma_padding();
 	alg->base.cra_priority	= CCP_CRA_PRIORITY;
@@ -258,6 +258,8 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
 	alg->min_keysize	= AES_MIN_KEY_SIZE * 2;
 	alg->max_keysize	= AES_MAX_KEY_SIZE * 2;
 	alg->ivsize		= AES_BLOCK_SIZE;
+	alg->chunksize		= AES_BLOCK_SIZE;
+	alg->tailsize		= 2 * AES_BLOCK_SIZE;
 	alg->init		= ccp_aes_xts_init_tfm;
 	alg->exit		= ccp_aes_xts_exit_tfm;
 
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index cd66a580e8b6..18ea3e90d039 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1018,6 +1018,8 @@ static const struct cc_alg_template skcipher_algs[] = {
 			.min_keysize = CC_HW_KEY_SIZE,
 			.max_keysize = CC_HW_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.tailsize = 2 * AES_BLOCK_SIZE,
 			},
 		.cipher_mode = DRV_CIPHER_XTS,
 		.flow_mode = S_DIN_to_AES,
@@ -1082,7 +1084,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "cts(cbc(paes))",
 		.driver_name = "cts-cbc-paes-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_sethkey,
 			.encrypt = cc_cipher_encrypt,
@@ -1090,6 +1092,8 @@ static const struct cc_alg_template skcipher_algs[] = {
 			.min_keysize = CC_HW_KEY_SIZE,
 			.max_keysize = CC_HW_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.tailsize = 2 * AES_BLOCK_SIZE,
 			},
 		.cipher_mode = DRV_CIPHER_CBC_CTS,
 		.flow_mode = S_DIN_to_AES,
@@ -1130,6 +1134,8 @@ static const struct cc_alg_template skcipher_algs[] = {
 			.min_keysize = AES_MIN_KEY_SIZE * 2,
 			.max_keysize = AES_MAX_KEY_SIZE * 2,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.tailsize = 2 * AES_BLOCK_SIZE,
 			},
 		.cipher_mode = DRV_CIPHER_XTS,
 		.flow_mode = S_DIN_to_AES,
@@ -1190,7 +1196,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "cts(cbc(aes))",
 		.driver_name = "cts-cbc-aes-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_setkey,
 			.encrypt = cc_cipher_encrypt,
@@ -1198,6 +1204,8 @@ static const struct cc_alg_template skcipher_algs[] = {
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.tailsize = 2 * AES_BLOCK_SIZE,
 			},
 		.cipher_mode = DRV_CIPHER_CBC_CTS,
 		.flow_mode = S_DIN_to_AES,
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 177428480c7d..cdad84a26a58 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -3882,13 +3882,15 @@ static struct chcr_alg_template driver_algs[] = {
 		.alg.skcipher = {
 			.base.cra_name		= "xts(aes)",
 			.base.cra_driver_name	= "xts-aes-chcr",
-			.base.cra_blocksize	= AES_BLOCK_SIZE,
+			.base.cra_blocksize	= 1,
 
 			.init			= chcr_init_tfm,
 			.exit			= chcr_exit_tfm,
 			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
 			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
 			.ivsize			= AES_BLOCK_SIZE,
+			.chunksize		= AES_BLOCK_SIZE,
+			.tailsize		= 2 * AES_BLOCK_SIZE,
 			.setkey			= chcr_aes_xts_setkey,
 			.encrypt		= chcr_aes_encrypt,
 			.decrypt		= chcr_aes_decrypt,
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 1189effcdad0..7dcbeb824d23 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -996,7 +996,7 @@ static struct skcipher_alg sec_algs[] = {
 			.cra_priority = 4001,
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_ALLOCATES_MEMORY,
-			.cra_blocksize = AES_BLOCK_SIZE,
+			.cra_blocksize = 1,
 			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
 			.cra_alignmask = 0,
 			.cra_module = THIS_MODULE,
@@ -1009,6 +1009,8 @@ static struct skcipher_alg sec_algs[] = {
 		.min_keysize = 2 * AES_MIN_KEY_SIZE,
 		.max_keysize = 2 * AES_MAX_KEY_SIZE,
 		.ivsize = AES_BLOCK_SIZE,
+		.chunksize = AES_BLOCK_SIZE,
+		.tailsize = 2 * AES_BLOCK_SIZE,
 	}, {
 	/* Unable to find any test vectors so untested */
 		.base = {
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 93a972fcbf63..2fe673da569e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -2142,7 +2142,8 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
 }
 
 #define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
-	sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
+	sec_min_key_size, sec_max_key_size, blk_size, iv_size, \
+	chunk_size, tail_size) \
 {\
 	.base = {\
 		.cra_name = sec_cra_name,\
@@ -2162,54 +2163,56 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
 	.min_keysize = sec_min_key_size,\
 	.max_keysize = sec_max_key_size,\
 	.ivsize = iv_size,\
+	.chunksize = chunk_size,\
+	.tailsize = tail_size,\
 }
 
 static struct sec_skcipher sec_skciphers[] = {
 	{
 		.alg_msk = BIT(0),
 		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
-					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
+					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0, 0, 0),
 	},
 	{
 		.alg_msk = BIT(1),
 		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
-					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0, 0),
 	},
 	{
 		.alg_msk = BIT(2),
 		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr,	AES_MIN_KEY_SIZE,
-					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, 0, 0),
 	},
 	{
 		.alg_msk = BIT(3),
 		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,	SEC_XTS_MIN_KEY_SIZE,
-					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+					SEC_XTS_MAX_KEY_SIZE, 1, AES_BLOCK_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE * 2),
 	},
 	{
 		.alg_msk = BIT(12),
 		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,	AES_MIN_KEY_SIZE,
-					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0, 0),
 	},
 	{
 		.alg_msk = BIT(13),
 		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
-					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
+					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, 0, 0),
 	},
 	{
 		.alg_msk = BIT(14),
 		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,	SEC_XTS_MIN_KEY_SIZE,
-					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
+					SEC_XTS_MIN_KEY_SIZE, 1, AES_BLOCK_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE * 2),
 	},
 	{
 		.alg_msk = BIT(23),
 		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
-					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
+					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0, 0, 0),
 	},
 	{
 		.alg_msk = BIT(24),
 		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
 					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
-					DES3_EDE_BLOCK_SIZE),
+					DES3_EDE_BLOCK_SIZE, 0, 0),
 	},
 };
 
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 42677f7458b7..a7e8f99924c4 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -2484,6 +2484,8 @@ struct safexcel_alg_template safexcel_alg_xts_aes = {
 		.min_keysize = AES_MIN_KEY_SIZE * 2,
 		.max_keysize = AES_MAX_KEY_SIZE * 2,
 		.ivsize = XTS_BLOCK_SIZE,
+		.chunksize = XTS_BLOCK_SIZE,
+		.tailsize = XTS_BLOCK_SIZE * 2,
 		.base = {
 			.cra_name = "xts(aes)",
 			.cra_driver_name = "safexcel-xts-aes",
@@ -2491,7 +2493,7 @@ struct safexcel_alg_template safexcel_alg_xts_aes = {
 			.cra_flags = CRYPTO_ALG_ASYNC |
 				     CRYPTO_ALG_ALLOCATES_MEMORY |
 				     CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = XTS_BLOCK_SIZE,
+			.cra_blocksize = 1,
 			.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
 			.cra_alignmask = 0,
 			.cra_init = safexcel_skcipher_aes_xts_cra_init,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
index 9b2d098e5eb2..6cde89563e1d 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -11,6 +11,7 @@
 #include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/sm4.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/dma-mapping.h>
@@ -1331,7 +1332,7 @@ static struct skcipher_engine_alg algs[] = {
 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
 				       CRYPTO_ALG_KERN_DRIVER_ONLY |
 				       CRYPTO_ALG_NEED_FALLBACK,
-		.base.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.base.cra_blocksize = 1,
 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
 		.base.base.cra_module = THIS_MODULE,
 		.base.base.cra_alignmask = 0,
@@ -1339,6 +1340,8 @@ static struct skcipher_engine_alg algs[] = {
 		.base.min_keysize = OCS_AES_MIN_KEY_SIZE,
 		.base.max_keysize = OCS_AES_MAX_KEY_SIZE,
 		.base.ivsize = AES_BLOCK_SIZE,
+		.base.chunksize = AES_BLOCK_SIZE,
+		.base.tailsize = 2 * AES_BLOCK_SIZE,
 		.base.setkey = kmb_ocs_aes_set_key,
 		.base.encrypt = kmb_ocs_aes_cts_encrypt,
 		.base.decrypt = kmb_ocs_aes_cts_decrypt,
@@ -1418,14 +1421,16 @@ static struct skcipher_engine_alg algs[] = {
 		.base.base.cra_priority = KMB_OCS_PRIORITY,
 		.base.base.cra_flags = CRYPTO_ALG_ASYNC |
 				       CRYPTO_ALG_KERN_DRIVER_ONLY,
-		.base.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.base.cra_blocksize = 1,
 		.base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
 		.base.base.cra_module = THIS_MODULE,
 		.base.base.cra_alignmask = 0,
 
 		.base.min_keysize = OCS_SM4_KEY_SIZE,
 		.base.max_keysize = OCS_SM4_KEY_SIZE,
-		.base.ivsize = AES_BLOCK_SIZE,
+		.base.ivsize = SM4_BLOCK_SIZE,
+		.base.chunksize = SM4_BLOCK_SIZE,
+		.base.tailsize = 2 * SM4_BLOCK_SIZE,
 		.base.setkey = kmb_ocs_sm4_set_key,
 		.base.encrypt = kmb_ocs_sm4_cts_encrypt,
 		.base.decrypt = kmb_ocs_sm4_cts_decrypt,
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 3c4bba4a8779..945f245f7640 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -1368,7 +1368,7 @@ static struct skcipher_alg qat_skciphers[] = { {
 	.base.cra_priority = 4001,
 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
 			  CRYPTO_ALG_ALLOCATES_MEMORY,
-	.base.cra_blocksize = AES_BLOCK_SIZE,
+	.base.cra_blocksize = 1,
 	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
 	.base.cra_alignmask = 0,
 	.base.cra_module = THIS_MODULE,
@@ -1381,6 +1381,8 @@ static struct skcipher_alg qat_skciphers[] = { {
 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
 	.ivsize = AES_BLOCK_SIZE,
+	.chunksize = AES_BLOCK_SIZE,
+	.tailsize = 2 * AES_BLOCK_SIZE,
 } };
 
 int qat_algs_register(void)
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 3c5d577d8f0d..67e90b79e0ad 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -1298,7 +1298,7 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
 	.base.cra_name = "xts(aes)",
 	.base.cra_driver_name = "cpt_xts_aes",
 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
-	.base.cra_blocksize = AES_BLOCK_SIZE,
+	.base.cra_blocksize = 1,
 	.base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
 	.base.cra_alignmask = 7,
 	.base.cra_priority = 4001,
@@ -1306,6 +1306,8 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
 
 	.init = otx_cpt_enc_dec_init,
 	.ivsize = AES_BLOCK_SIZE,
+	.chunksize = AES_BLOCK_SIZE,
+	.tailsize = 2 * AES_BLOCK_SIZE,
 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
 	.setkey = otx_cpt_skcipher_xts_setkey,
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 1604fc58dc13..13b9662c5f85 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -1396,7 +1396,7 @@ static struct skcipher_alg otx2_cpt_skciphers[] = { {
 	.base.cra_name = "xts(aes)",
 	.base.cra_driver_name = "cpt_xts_aes",
 	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
-	.base.cra_blocksize = AES_BLOCK_SIZE,
+	.base.cra_blocksize = 1,
 	.base.cra_ctxsize = sizeof(struct otx2_cpt_enc_ctx),
 	.base.cra_alignmask = 7,
 	.base.cra_priority = 4001,
@@ -1405,6 +1405,8 @@ static struct skcipher_alg otx2_cpt_skciphers[] = { {
 	.init = otx2_cpt_enc_dec_init,
 	.exit = otx2_cpt_skcipher_exit,
 	.ivsize = AES_BLOCK_SIZE,
+	.chunksize = AES_BLOCK_SIZE,
+	.tailsize = 2 * AES_BLOCK_SIZE,
 	.min_keysize = 2 * AES_MIN_KEY_SIZE,
 	.max_keysize = 2 * AES_MAX_KEY_SIZE,
 	.setkey = otx2_cpt_skcipher_xts_setkey,
diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c
index 5b493fdc1e74..015a02ccdb7b 100644
--- a/drivers/crypto/qce/skcipher.c
+++ b/drivers/crypto/qce/skcipher.c
@@ -353,6 +353,7 @@ struct qce_skcipher_def {
 	unsigned int blocksize;
 	unsigned int chunksize;
 	unsigned int ivsize;
+	unsigned int tailsize;
 	unsigned int min_keysize;
 	unsigned int max_keysize;
 };
@@ -390,8 +391,10 @@ static const struct qce_skcipher_def skcipher_def[] = {
 		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
 		.name		= "xts(aes)",
 		.drv_name	= "xts-aes-qce",
-		.blocksize	= AES_BLOCK_SIZE,
+		.blocksize	= 1,
 		.ivsize		= AES_BLOCK_SIZE,
+		.chunksize	= AES_BLOCK_SIZE,
+		.tailsize	= AES_BLOCK_SIZE * 2,
 		.min_keysize	= AES_MIN_KEY_SIZE * 2,
 		.max_keysize	= AES_MAX_KEY_SIZE * 2,
 	},
@@ -453,6 +456,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
 	alg->base.cra_blocksize		= def->blocksize;
 	alg->chunksize			= def->chunksize;
 	alg->ivsize			= def->ivsize;
+	alg->tailsize			= def->tailsize;
 	alg->min_keysize		= def->min_keysize;
 	alg->max_keysize		= def->max_keysize;
 	alg->setkey			= IS_3DES(def->flags) ? qce_des3_setkey :
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 14/15] crypto: lskcipher - Export incremental interface internally
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (12 preceding siblings ...)
  2023-12-29 10:47 ` [PATCH 13/15] crypto: cts,xts - Update parameters blocksize/chunksize/tailsize Herbert Xu
@ 2023-12-30  7:16 ` Herbert Xu
  2024-02-13  8:48 ` [PATCH 15/15] crypto: adiantum - Convert from skcipher to lskcipher Herbert Xu
  2024-02-14 23:35 ` [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Eric Biggers
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2023-12-30  7:16 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Export the incremental interface internally so that composite
algorithms such as adiantum can use it.
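
Roughly, a caller that feeds the data in two pieces is expected to do
something like the following (only a sketch: the helper and its names
are made up, and handling of a positive "bytes left unprocessed"
return is elided):

#include <crypto/internal/skcipher.h>

/*
 * Encrypt a message supplied as two pieces.  @siv must hold ivsize +
 * statesize bytes; the state part is filled in by the first call.
 */
static int example_encrypt_in_two_steps(struct crypto_lskcipher *tfm,
					const u8 *src, u8 *dst,
					unsigned int n1, unsigned int n2,
					u8 *siv)
{
	int rem;

	/* First piece: neither CONT nor FINAL is set. */
	rem = crypto_lskcipher_encrypt_ext(tfm, src, dst, n1, siv, 0);
	if (rem < 0)
		return rem;

	/*
	 * A positive rem reports bytes left unprocessed (e.g. a withheld
	 * tail); a real caller must supply them again with the final
	 * request.
	 */

	/* Last piece: continuation of the previous call, and final. */
	return crypto_lskcipher_encrypt_ext(tfm, src + n1, dst + n1, n2, siv,
					    CRYPTO_LSKCIPHER_FLAG_CONT |
					    CRYPTO_LSKCIPHER_FLAG_FINAL);
}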

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/lskcipher.c                 | 45 +++++++++++++++++++++---------
 include/crypto/internal/skcipher.h | 42 ++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+), 13 deletions(-)

diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
index 00ea963a2d2d..e8b97e4fd579 100644
--- a/crypto/lskcipher.c
+++ b/crypto/lskcipher.c
@@ -87,8 +87,9 @@ EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);
 
 static int crypto_lskcipher_crypt_unaligned(
 	struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
-	u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
-			     u8 *dst, unsigned len, u8 *iv, u32 flags))
+	u8 *iv, u32 flags,
+	int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
+		     u8 *dst, unsigned len, u8 *iv, u32 flags))
 {
 	unsigned statesize = crypto_lskcipher_statesize(tfm);
 	unsigned ivsize = crypto_lskcipher_ivsize(tfm);
@@ -120,7 +121,7 @@ static int crypto_lskcipher_crypt_unaligned(
 			chunk &= ~(cs - 1);
 
 		memcpy(p, src, chunk);
-		err = crypt(tfm, p, p, chunk, tiv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+		err = crypt(tfm, p, p, chunk, tiv, flags);
 		if (err)
 			goto out;
 
@@ -140,7 +141,7 @@ static int crypto_lskcipher_crypt_unaligned(
 }
 
 static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
-				  u8 *dst, unsigned len, u8 *iv,
+				  u8 *dst, unsigned len, u8 *iv, u32 flags,
 				  int (*crypt)(struct crypto_lskcipher *tfm,
 					       const u8 *src, u8 *dst,
 					       unsigned len, u8 *iv,
@@ -153,18 +154,18 @@ static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
 	if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
 	    alignmask) {
 		ret = crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
-						       crypt);
+						       flags, crypt);
 		goto out;
 	}
 
-	ret = crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
+	ret = crypt(tfm, src, dst, len, iv, flags);
 
 out:
 	return crypto_lskcipher_errstat(alg, ret);
 }
 
-int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
-			     u8 *dst, unsigned len, u8 *iv)
+int crypto_lskcipher_encrypt_ext(struct crypto_lskcipher *tfm, const u8 *src,
+				 u8 *dst, unsigned len, u8 *iv, u32 flags)
 {
 	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
 
@@ -175,12 +176,13 @@ int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
 		atomic64_add(len, &istat->encrypt_tlen);
 	}
 
-	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
+	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, flags,
+				      alg->encrypt);
 }
-EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
+EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt_ext);
 
-int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
-			     u8 *dst, unsigned len, u8 *iv)
+int crypto_lskcipher_decrypt_ext(struct crypto_lskcipher *tfm, const u8 *src,
+				 u8 *dst, unsigned len, u8 *iv, u32 flags)
 {
 	struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
 
@@ -191,7 +193,24 @@ int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
 		atomic64_add(len, &istat->decrypt_tlen);
 	}
 
-	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
+	return crypto_lskcipher_crypt(tfm, src, dst, len, iv, flags,
+				      alg->decrypt);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt_ext);
+
+int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+			     u8 *dst, unsigned len, u8 *iv)
+{
+	return crypto_lskcipher_encrypt_ext(tfm, src, dst, len, iv,
+					    CRYPTO_LSKCIPHER_FLAG_FINAL);
+}
+EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
+
+int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+			     u8 *dst, unsigned len, u8 *iv)
+{
+	return crypto_lskcipher_decrypt_ext(tfm, src, dst, len, iv,
+					    CRYPTO_LSKCIPHER_FLAG_FINAL);
 }
 EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
 
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 1e35e7719b22..0d43153f3cd2 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -90,6 +90,48 @@ struct skcipher_walk {
 	unsigned int alignmask;
 };
 
+/**
+ * crypto_lskcipher_encrypt_ext() - encrypt plaintext with continuation
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation.  The length of the IV must
+ *	 comply with the IV size defined by crypto_lskcipher_ivsize.  The
+ *	 IV is then followed with a buffer with the length as specified by
+ *	 crypto_lskcipher_statesize.
+ * @flags: Indicates whether this is a continuation and/or final operation.
+ *
+ * Encrypt plaintext data using the lskcipher handle with continuation.
+ *
+ * Return: >=0 if the cipher operation was successful, if positive
+ *	   then this many bytes have been left unprocessed;
+ *	   < 0 if an error occurred
+ */
+int crypto_lskcipher_encrypt_ext(struct crypto_lskcipher *tfm, const u8 *src,
+				 u8 *dst, unsigned len, u8 *siv, u32 flags);
+
+/**
+ * crypto_lskcipher_decrypt_ext() - decrypt ciphertext with continuation
+ * @tfm: lskcipher handle
+ * @src: source buffer
+ * @dst: destination buffer
+ * @len: number of bytes to process
+ * @siv: IV + state for the cipher operation.  The length of the IV must
+ *	 comply with the IV size defined by crypto_lskcipher_ivsize.  The
+ *	 IV is then followed with a buffer with the length as specified by
+ *	 crypto_lskcipher_statesize.
+ * @flags: Indicates whether this is a continuation and/or final operation.
+ *
+ * Decrypt ciphertext data using the lskcipher handle with continuation.
+ *
+ * Return: >=0 if the cipher operation was successful, if positive
+ *	   then this many bytes have been left unprocessed;
+ *	   < 0 if an error occurred
+ */
+int crypto_lskcipher_decrypt_ext(struct crypto_lskcipher *tfm, const u8 *src,
+				 u8 *dst, unsigned len, u8 *siv, u32 flags);
+
 static inline struct crypto_instance *skcipher_crypto_instance(
 	struct skcipher_instance *inst)
 {
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 15/15] crypto: adiantum - Convert from skcipher to lskcipher
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (13 preceding siblings ...)
  2023-12-30  7:16 ` [PATCH 14/15] crypto: lskcipher - Export incremental interface internally Herbert Xu
@ 2024-02-13  8:48 ` Herbert Xu
  2024-02-14 23:35 ` [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Eric Biggers
  15 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2024-02-13  8:48 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Replace the skcipher implementation with an lskcipher one.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/adiantum.c | 471 ++++++++++++++++++++++------------------------
 1 file changed, 222 insertions(+), 249 deletions(-)

diff --git a/crypto/adiantum.c b/crypto/adiantum.c
index ee55b1f8565c..8ee48393c5c5 100644
--- a/crypto/adiantum.c
+++ b/crypto/adiantum.c
@@ -61,47 +61,45 @@
 #define TWEAK_SIZE		32
 
 struct adiantum_instance_ctx {
-	struct crypto_skcipher_spawn streamcipher_spawn;
+	struct crypto_lskcipher_spawn streamcipher_spawn;
 	struct crypto_lskcipher_spawn blockcipher_spawn;
 	struct crypto_shash_spawn hash_spawn;
 };
 
 struct adiantum_tfm_ctx {
-	struct crypto_skcipher *streamcipher;
+	struct crypto_lskcipher *streamcipher;
 	struct crypto_lskcipher *blockcipher;
 	struct crypto_shash *hash;
 	struct poly1305_core_key header_hash_key;
 };
 
-struct adiantum_request_ctx {
+/*
+ * Buffer for right-hand part of data, i.e.
+ *
+ *    P_L => P_M => C_M => C_R when encrypting, or
+ *    C_R => C_M => P_M => P_L when decrypting.
+ *
+ * Also used to build the IV for the stream cipher.
+ */
+union adiantum_rbuf {
+	u8 bytes[XCHACHA_IV_SIZE];
+	__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
+	le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
+};
 
+struct adiantum_state {
 	/*
-	 * Buffer for right-hand part of data, i.e.
-	 *
-	 *    P_L => P_M => C_M => C_R when encrypting, or
-	 *    C_R => C_M => P_M => P_L when decrypting.
-	 *
-	 * Also used to build the IV for the stream cipher.
-	 */
-	union {
-		u8 bytes[XCHACHA_IV_SIZE];
-		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
-		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
-	} rbuf;
-
-	bool enc; /* true if encrypting, false if decrypting */
-
-	/*
-	 * The result of the Poly1305 ε-∆U hash function applied to
+	 * The result of the Poly1305 ε-∆U hash function applied to
 	 * (bulk length, tweak)
 	 */
 	le128 header_hash;
 
+	unsigned int bulk_len;
+	bool secondpass;
+	bool secondinit;
+
 	/* Sub-requests, must be last */
-	union {
-		struct shash_desc hash_desc;
-		struct skcipher_request streamcipher_req;
-	} u;
+	struct shash_desc hash_desc;
 };
 
 /*
@@ -113,44 +111,34 @@ struct adiantum_request_ctx {
  * Note that this denotes using bits from the XChaCha keystream, which here we
  * get indirectly by encrypting a buffer containing all 0's.
  */
-static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
+static int adiantum_setkey(struct crypto_lskcipher *tfm, const u8 *key,
 			   unsigned int keylen)
 {
-	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+	struct adiantum_tfm_ctx *tctx = crypto_lskcipher_ctx(tfm);
 	struct {
-		u8 iv[XCHACHA_IV_SIZE];
 		u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
-		struct scatterlist sg;
-		struct crypto_wait wait;
-		struct skcipher_request req; /* must be last */
+		u8 iv[XCHACHA_IV_SIZE];
 	} *data;
 	u8 *keyp;
 	int err;
 
 	/* Set the stream cipher key (K_S) */
-	crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(tctx->streamcipher,
-				  crypto_skcipher_get_flags(tfm) &
-				  CRYPTO_TFM_REQ_MASK);
-	err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
+	crypto_lskcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
+	crypto_lskcipher_set_flags(tctx->streamcipher,
+				   crypto_lskcipher_get_flags(tfm) &
+				   CRYPTO_TFM_REQ_MASK);
+	err = crypto_lskcipher_setkey(tctx->streamcipher, key, keylen);
 	if (err)
 		return err;
 
 	/* Derive the subkeys */
-	data = kzalloc(sizeof(*data) +
-		       crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
+	data = kzalloc(sizeof(*data), GFP_ATOMIC);
 	if (!data)
 		return -ENOMEM;
 	data->iv[0] = 1;
-	sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
-	crypto_init_wait(&data->wait);
-	skcipher_request_set_tfm(&data->req, tctx->streamcipher);
-	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
-						  CRYPTO_TFM_REQ_MAY_BACKLOG,
-				      crypto_req_done, &data->wait);
-	skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
-				   sizeof(data->derived_keys), data->iv);
-	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
+	err = crypto_lskcipher_encrypt(tctx->streamcipher, data->derived_keys,
+				       data->derived_keys,
+				       sizeof(data->derived_keys), data->iv);
 	if (err)
 		goto out;
 	keyp = data->derived_keys;
@@ -158,7 +146,7 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	/* Set the block cipher key (K_E) */
 	crypto_lskcipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
 	crypto_lskcipher_set_flags(tctx->blockcipher,
-				   crypto_skcipher_get_flags(tfm) &
+				   crypto_lskcipher_get_flags(tfm) &
 				   CRYPTO_TFM_REQ_MASK);
 	err = crypto_lskcipher_setkey(tctx->blockcipher, keyp,
 				     BLOCKCIPHER_KEY_SIZE);
@@ -171,7 +159,7 @@ static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	keyp += POLY1305_BLOCK_SIZE;
 
 	crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
-	crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
+	crypto_shash_set_flags(tctx->hash, crypto_lskcipher_get_flags(tfm) &
 					   CRYPTO_TFM_REQ_MASK);
 	err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
 	keyp += NHPOLY1305_KEY_SIZE;
@@ -205,7 +193,7 @@ static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
 
 /*
  * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
- * result to rctx->header_hash.  This is the calculation
+ * result to state->header_hash.  This is the calculation
  *
  *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
  *
@@ -215,12 +203,11 @@ static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
  * inputs only) taken over the left-hand part (the "bulk") of the message, to
  * give the overall Adiantum hash of the (tweak, left-hand part) pair.
  */
-static void adiantum_hash_header(struct skcipher_request *req)
+static void adiantum_hash_header(struct crypto_lskcipher *tfm,
+				 struct adiantum_state *astate, u8 *iv)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
-	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
-	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+	const struct adiantum_tfm_ctx *tctx = crypto_lskcipher_ctx(tfm);
+	const unsigned int bulk_len = astate->bulk_len;
 	struct {
 		__le64 message_bits;
 		__le64 padding;
@@ -236,157 +223,98 @@ static void adiantum_hash_header(struct skcipher_request *req)
 			     &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);
 
 	BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
-	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
+	poly1305_core_blocks(&state, &tctx->header_hash_key, iv,
 			     TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);
 
-	poly1305_core_emit(&state, NULL, &rctx->header_hash);
-}
-
-/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
-static int adiantum_hash_message(struct skcipher_request *req,
-				 struct scatterlist *sgl, unsigned int nents,
-				 le128 *digest)
-{
-	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
-	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
-	struct shash_desc *hash_desc = &rctx->u.hash_desc;
-	struct sg_mapping_iter miter;
-	unsigned int i, n;
-	int err;
-
-	err = crypto_shash_init(hash_desc);
-	if (err)
-		return err;
-
-	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
-	for (i = 0; i < bulk_len; i += n) {
-		sg_miter_next(&miter);
-		n = min_t(unsigned int, miter.length, bulk_len - i);
-		err = crypto_shash_update(hash_desc, miter.addr, n);
-		if (err)
-			break;
-	}
-	sg_miter_stop(&miter);
-	if (err)
-		return err;
-
-	return crypto_shash_final(hash_desc, (u8 *)digest);
+	poly1305_core_emit(&state, NULL, &astate->header_hash);
 }
 
 /* Continue Adiantum encryption/decryption after the stream cipher step */
-static int adiantum_finish(struct skcipher_request *req)
+static int adiantum_finish(struct adiantum_state *state,
+			   union adiantum_rbuf *subsiv, le128 *digest,
+			   u8 *dst)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
-	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
-	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
-	struct scatterlist *dst = req->dst;
-	const unsigned int dst_nents = sg_nents(dst);
-	le128 digest;
-	int err;
-
-	/* If decrypting, decrypt C_M with the block cipher to get P_M */
-	if (!rctx->enc) {
-		err = crypto_lskcipher_decrypt(tctx->blockcipher,
-					       rctx->rbuf.bytes,
-					       rctx->rbuf.bytes,
-					       BLOCKCIPHER_BLOCK_SIZE, NULL);
-		if (err)
-			return err;
-	}
-
 	/*
 	 * Second hash step
 	 *	enc: C_R = C_M - H_{K_H}(T, C_L)
 	 *	dec: P_R = P_M - H_{K_H}(T, P_L)
 	 */
-	rctx->u.hash_desc.tfm = tctx->hash;
-	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
-	if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
-		/* Fast path for single-page destination */
-		struct page *page = sg_page(dst);
-		void *virt = kmap_local_page(page) + dst->offset;
-
-		err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
-					  (u8 *)&digest);
-		if (err) {
-			kunmap_local(virt);
-			return err;
-		}
-		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
-		memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
-		flush_dcache_page(page);
-		kunmap_local(virt);
-	} else {
-		/* Slow path that works for any destination scatterlist */
-		err = adiantum_hash_message(req, dst, dst_nents, &digest);
-		if (err)
-			return err;
-		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
-		scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
-					 bulk_len, sizeof(le128), 1);
-	}
+	le128_sub(&subsiv->bignum, &subsiv->bignum, &state->header_hash);
+	le128_sub(&subsiv->bignum, &subsiv->bignum, digest);
+	memcpy(dst, &subsiv->bignum, sizeof(le128));
 	return 0;
 }
 
-static void adiantum_streamcipher_done(void *data, int err)
+static int adiantum_crypt(struct crypto_lskcipher *tfm, const u8 *src,
+			  u8 *dst, unsigned nbytes, u8 *siv, u32 flags,
+			  bool enc)
 {
-	struct skcipher_request *req = data;
-
-	if (!err)
-		err = adiantum_finish(req);
-
-	skcipher_request_complete(req, err);
-}
-
-static int adiantum_crypt(struct skcipher_request *req, bool enc)
-{
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
-	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
-	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
-	struct scatterlist *src = req->src;
-	const unsigned int src_nents = sg_nents(src);
-	unsigned int stream_len;
+	const struct adiantum_tfm_ctx *tctx = crypto_lskcipher_ctx(tfm);
+	struct adiantum_state *state = (void *)(siv + TWEAK_SIZE);
+	union adiantum_rbuf *subsiv;
+	unsigned int bulk_len;
 	le128 digest;
 	int err;
 
-	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
+	state = PTR_ALIGN(state, __alignof__(*state));
+	subsiv = (union adiantum_rbuf *)
+		 ((u8 *)shash_desc_ctx(&state->hash_desc) +
+		  crypto_shash_descsize(tctx->hash));
+
+	if (nbytes < BLOCKCIPHER_BLOCK_SIZE)
 		return -EINVAL;
 
-	rctx->enc = enc;
+	bulk_len = nbytes;
+	if ((flags & CRYPTO_LSKCIPHER_FLAG_FINAL))
+		bulk_len -= BLOCKCIPHER_BLOCK_SIZE;
+
+	if ((flags & CRYPTO_LSKCIPHER_FLAG_CONT)) {
+		if (state->secondpass)
+			goto secondpass;
+
+		if (state->bulk_len + bulk_len < state->bulk_len)
+			return -EOVERFLOW;
+
+		state->bulk_len += bulk_len;
+	} else {
+		state->bulk_len = bulk_len;
+		state->secondpass = false;
+		state->hash_desc.tfm = tctx->hash;
+
+		if (!(flags & CRYPTO_LSKCIPHER_FLAG_FINAL)) {
+			err = crypto_shash_init(&state->hash_desc);
+			if (err)
+				return err;
+		}
+	}
+
+	if (!(flags & CRYPTO_LSKCIPHER_FLAG_FINAL))
+		return crypto_shash_update(&state->hash_desc, src, bulk_len);
+
+	if ((flags & CRYPTO_LSKCIPHER_FLAG_CONT))
+		err = crypto_shash_finup(&state->hash_desc, src,
+					 bulk_len, (u8 *)&digest);
+	else
+		err = crypto_shash_digest(&state->hash_desc, src,
+					  bulk_len, (u8 *)&digest);
+	if (err)
+		return err;
 
 	/*
 	 * First hash step
 	 *	enc: P_M = P_R + H_{K_H}(T, P_L)
 	 *	dec: C_M = C_R + H_{K_H}(T, C_L)
 	 */
-	adiantum_hash_header(req);
-	rctx->u.hash_desc.tfm = tctx->hash;
-	if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
-		/* Fast path for single-page source */
-		void *virt = kmap_local_page(sg_page(src)) + src->offset;
-
-		err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
-					  (u8 *)&digest);
-		memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
-		kunmap_local(virt);
-	} else {
-		/* Slow path that works for any source scatterlist */
-		err = adiantum_hash_message(req, src, src_nents, &digest);
-		scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
-					 bulk_len, sizeof(le128), 0);
-	}
-	if (err)
-		return err;
-	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
-	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+	memcpy(&subsiv->bignum, src + bulk_len, sizeof(le128));
+	adiantum_hash_header(tfm, state, siv);
+	le128_add(&subsiv->bignum, &subsiv->bignum, &state->header_hash);
+	le128_add(&subsiv->bignum, &subsiv->bignum, &digest);
 
 	/* If encrypting, encrypt P_M with the block cipher to get C_M */
 	if (enc) {
 		err = crypto_lskcipher_encrypt(tctx->blockcipher,
-					       rctx->rbuf.bytes,
-					       rctx->rbuf.bytes,
+					       subsiv->bytes,
+					       subsiv->bytes,
 					       BLOCKCIPHER_BLOCK_SIZE, NULL);
 		if (err)
 			return err;
@@ -395,10 +323,22 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
 	/* Initialize the rest of the XChaCha IV (first part is C_M) */
 	BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
 	BUILD_BUG_ON(XCHACHA_IV_SIZE != 32);	/* nonce || stream position */
-	rctx->rbuf.words[4] = cpu_to_le32(1);
-	rctx->rbuf.words[5] = 0;
-	rctx->rbuf.words[6] = 0;
-	rctx->rbuf.words[7] = 0;
+	subsiv->words[4] = cpu_to_le32(1);
+	subsiv->words[5] = 0;
+	subsiv->words[6] = 0;
+	subsiv->words[7] = 0;
+
+	state->secondpass = true;
+	state->secondinit = true;
+
+	if ((flags & CRYPTO_LSKCIPHER_FLAG_CONT))
+		return 0;
+
+secondpass:
+	if (state->secondinit) {
+		state->secondinit = false;
+		flags &= ~CRYPTO_LSKCIPHER_FLAG_CONT;
+	}
 
 	/*
 	 * XChaCha needs to be done on all the data except the last 16 bytes;
@@ -409,42 +349,69 @@ static int adiantum_crypt(struct skcipher_request *req, bool enc)
 	 * as the second hash step will overwrite them.  Thus, round the XChaCha
 	 * length up to the next 64-byte boundary if possible.
 	 */
-	stream_len = bulk_len;
-	if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
-		stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);
+	err = crypto_lskcipher_encrypt_ext(tctx->streamcipher, src, dst,
+					   nbytes, subsiv->bytes, flags);
+	if (err < 0)
+		return err;
 
-	skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
-	skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
-				   req->dst, stream_len, &rctx->rbuf);
-	skcipher_request_set_callback(&rctx->u.streamcipher_req,
-				      req->base.flags,
-				      adiantum_streamcipher_done, req);
-	return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
-		adiantum_finish(req);
+	if (!(flags & CRYPTO_LSKCIPHER_FLAG_FINAL)) {
+		bulk_len -= err;
+
+		if (!(flags & CRYPTO_LSKCIPHER_FLAG_CONT)) {
+			err = crypto_shash_init(&state->hash_desc);
+			if (err)
+				return err;
+		}
+		return crypto_shash_update(&state->hash_desc, dst, bulk_len) ?:
+		       nbytes - bulk_len;
+	}
+
+	if ((flags & CRYPTO_LSKCIPHER_FLAG_CONT))
+		err = crypto_shash_finup(&state->hash_desc, dst,
+					  bulk_len, (u8 *)&digest);
+	else
+		err = crypto_shash_digest(&state->hash_desc, dst,
+					  bulk_len, (u8 *)&digest);
+
+	if (err)
+		return err;
+
+	/* If decrypting, decrypt C_M with the block cipher to get P_M */
+	if (!enc) {
+		err = crypto_lskcipher_decrypt(tctx->blockcipher,
+					       subsiv->bytes,
+					       subsiv->bytes,
+					       BLOCKCIPHER_BLOCK_SIZE, NULL);
+		if (err)
+			return err;
+	}
+
+	return adiantum_finish(state, subsiv, &digest, dst + bulk_len);
 }
 
-static int adiantum_encrypt(struct skcipher_request *req)
+static int adiantum_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+			    u8 *dst, unsigned nbytes, u8 *siv, u32 flags)
 {
-	return adiantum_crypt(req, true);
+	return adiantum_crypt(tfm, src, dst, nbytes, siv, flags, true);
 }
 
-static int adiantum_decrypt(struct skcipher_request *req)
+static int adiantum_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+			    u8 *dst, unsigned nbytes, u8 *siv, u32 flags)
 {
-	return adiantum_crypt(req, false);
+	return adiantum_crypt(tfm, src, dst, nbytes, siv, flags, false);
 }
 
-static int adiantum_init_tfm(struct crypto_skcipher *tfm)
+static int adiantum_init_tfm(struct crypto_lskcipher *tfm)
 {
-	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
-	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
-	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
-	struct crypto_skcipher *streamcipher;
+	struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
+	struct adiantum_instance_ctx *ictx = lskcipher_instance_ctx(inst);
+	struct adiantum_tfm_ctx *tctx = crypto_lskcipher_ctx(tfm);
+	struct crypto_lskcipher *streamcipher;
 	struct crypto_lskcipher *blockcipher;
 	struct crypto_shash *hash;
-	unsigned int subreq_size;
 	int err;
 
-	streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
+	streamcipher = crypto_spawn_lskcipher(&ictx->streamcipher_spawn);
 	if (IS_ERR(streamcipher))
 		return PTR_ERR(streamcipher);
 
@@ -460,45 +427,39 @@ static int adiantum_init_tfm(struct crypto_skcipher *tfm)
 		goto err_free_blockcipher;
 	}
 
+	err = -EINVAL;
+	if (crypto_shash_descsize(hash) > crypto_shash_alg(hash)->descsize)
+		goto err_free_hash;
+
 	tctx->streamcipher = streamcipher;
 	tctx->blockcipher = blockcipher;
 	tctx->hash = hash;
 
-	BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
-		     sizeof(struct adiantum_request_ctx));
-	subreq_size = max(sizeof_field(struct adiantum_request_ctx,
-				       u.hash_desc) +
-			  crypto_shash_descsize(hash),
-			  sizeof_field(struct adiantum_request_ctx,
-				       u.streamcipher_req) +
-			  crypto_skcipher_reqsize(streamcipher));
-
-	crypto_skcipher_set_reqsize(tfm,
-				    offsetof(struct adiantum_request_ctx, u) +
-				    subreq_size);
 	return 0;
 
+err_free_hash:
+	crypto_free_shash(hash);
 err_free_blockcipher:
 	crypto_free_lskcipher(blockcipher);
 err_free_streamcipher:
-	crypto_free_skcipher(streamcipher);
+	crypto_free_lskcipher(streamcipher);
 	return err;
 }
 
-static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
+static void adiantum_exit_tfm(struct crypto_lskcipher *tfm)
 {
-	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+	struct adiantum_tfm_ctx *tctx = crypto_lskcipher_ctx(tfm);
 
-	crypto_free_skcipher(tctx->streamcipher);
+	crypto_free_lskcipher(tctx->streamcipher);
 	crypto_free_lskcipher(tctx->blockcipher);
 	crypto_free_shash(tctx->hash);
 }
 
-static void adiantum_free_instance(struct skcipher_instance *inst)
+static void adiantum_free_instance(struct lskcipher_instance *inst)
 {
-	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
+	struct adiantum_instance_ctx *ictx = lskcipher_instance_ctx(inst);
 
-	crypto_drop_skcipher(&ictx->streamcipher_spawn);
+	crypto_drop_lskcipher(&ictx->streamcipher_spawn);
 	crypto_drop_lskcipher(&ictx->blockcipher_spawn);
 	crypto_drop_shash(&ictx->hash_spawn);
 	kfree(inst);
@@ -508,12 +469,12 @@ static void adiantum_free_instance(struct skcipher_instance *inst)
  * Check for a supported set of inner algorithms.
  * See the comment at the beginning of this file.
  */
-static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
+static bool adiantum_supported_algorithms(struct lskcipher_alg *streamcipher_alg,
 					  struct lskcipher_alg *blockcipher_alg,
 					  struct shash_alg *hash_alg)
 {
-	if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
-	    strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
+	if (strcmp(streamcipher_alg->co.base.cra_name, "xchacha12") != 0 &&
+	    strcmp(streamcipher_alg->co.base.cra_name, "xchacha20") != 0)
 		return false;
 
 	if (blockcipher_alg->co.min_keysize > BLOCKCIPHER_KEY_SIZE ||
@@ -536,9 +497,9 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	u32 mask;
 	const char *nhpoly1305_name;
-	struct skcipher_instance *inst;
+	struct lskcipher_instance *inst;
 	struct adiantum_instance_ctx *ictx;
-	struct skcipher_alg_common *streamcipher_alg;
+	struct lskcipher_alg *streamcipher_alg;
 	char ecb_driver_name[CRYPTO_MAX_ALG_NAME];
 	struct lskcipher_alg *blockcipher_alg;
 	char ecb_name[CRYPTO_MAX_ALG_NAME];
@@ -547,28 +508,28 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	const char *cipher_name;
 	int err;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
 	if (err)
 		return err;
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
 	if (!inst)
 		return -ENOMEM;
-	ictx = skcipher_instance_ctx(inst);
+	ictx = lskcipher_instance_ctx(inst);
 
 	/* Stream cipher, e.g. "xchacha12" */
-	err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
-				   skcipher_crypto_instance(inst),
-				   crypto_attr_alg_name(tb[1]), 0, mask);
+	err = crypto_grab_lskcipher(&ictx->streamcipher_spawn,
+				    lskcipher_crypto_instance(inst),
+				    crypto_attr_alg_name(tb[1]), 0, mask);
 	if (err)
 		goto err_free_inst;
-	streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);
+	streamcipher_alg = crypto_spawn_lskcipher_alg(&ictx->streamcipher_spawn);
 
 	/* Block cipher, e.g. "aes" */
 	cipher_name = crypto_attr_alg_name(tb[2]);
 	cipher_driver_name = cipher_name;
 	err = crypto_grab_lskcipher(&ictx->blockcipher_spawn,
-				    skcipher_crypto_instance(inst),
+				    lskcipher_crypto_instance(inst),
 				    cipher_name, 0, mask);
 
 	ecb_name[0] = 0;
@@ -579,7 +540,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 			goto err_free_inst;
 
 		err = crypto_grab_lskcipher(&ictx->blockcipher_spawn,
-					    skcipher_crypto_instance(inst),
+					    lskcipher_crypto_instance(inst),
 					    ecb_name, 0, mask);
 	}
 
@@ -592,7 +553,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	if (nhpoly1305_name == ERR_PTR(-ENOENT))
 		nhpoly1305_name = "nhpoly1305";
 	err = crypto_grab_shash(&ictx->hash_spawn,
-				skcipher_crypto_instance(inst),
+				lskcipher_crypto_instance(inst),
 				nhpoly1305_name, 0, mask);
 	if (err)
 		goto err_free_inst;
@@ -602,7 +563,7 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
 					   hash_alg)) {
 		pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
-			streamcipher_alg->base.cra_name,
+			streamcipher_alg->co.base.cra_name,
 			blockcipher_alg->co.base.cra_name,
 			hash_alg->base.cra_name);
 		err = -EINVAL;
@@ -641,43 +602,55 @@ static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
 	}
 
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
+	if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "adiantum(%s,%s)", streamcipher_alg->co.base.cra_name,
 		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_free_inst;
-	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.co.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "adiantum(%s,%s,%s)",
-		     streamcipher_alg->base.cra_driver_name,
+		     streamcipher_alg->co.base.cra_driver_name,
 		     cipher_driver_name,
 		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_free_inst;
 
-	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
-	inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
-	inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask;
+	inst->alg.co.base.cra_blocksize = 1;
+	inst->alg.co.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
+	inst->alg.co.base.cra_alignmask = streamcipher_alg->co.base.cra_alignmask;
 	/*
 	 * The block cipher is only invoked once per message, so for long
 	 * messages (e.g. sectors for disk encryption) its performance doesn't
 	 * matter as much as that of the stream cipher and hash function.  Thus,
 	 * weigh the block cipher's ->cra_priority less.
 	 */
-	inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
-				       2 * hash_alg->base.cra_priority +
-				       blockcipher_alg->co.base.cra_priority) / 7;
+	inst->alg.co.base.cra_priority = (4 * streamcipher_alg->co.base.cra_priority +
+					 2 * hash_alg->base.cra_priority +
+					 blockcipher_alg->co.base.cra_priority) / 7;
 
 	inst->alg.setkey = adiantum_setkey;
 	inst->alg.encrypt = adiantum_encrypt;
 	inst->alg.decrypt = adiantum_decrypt;
 	inst->alg.init = adiantum_init_tfm;
 	inst->alg.exit = adiantum_exit_tfm;
-	inst->alg.min_keysize = streamcipher_alg->min_keysize;
-	inst->alg.max_keysize = streamcipher_alg->max_keysize;
-	inst->alg.ivsize = TWEAK_SIZE;
+	inst->alg.co.min_keysize = streamcipher_alg->co.min_keysize;
+	inst->alg.co.max_keysize = streamcipher_alg->co.max_keysize;
+	inst->alg.co.ivsize = TWEAK_SIZE;
+	inst->alg.co.chunksize = streamcipher_alg->co.chunksize;
+	inst->alg.co.tailsize = streamcipher_alg->co.chunksize * 2;
+
+	BUILD_BUG_ON(offsetofend(struct adiantum_state, hash_desc) !=
+		     sizeof(struct adiantum_state));
+
+	inst->alg.co.statesize = sizeof(struct adiantum_state) +
+				 hash_alg->descsize +
+				 streamcipher_alg->co.ivsize +
+				 streamcipher_alg->co.statesize +
+				 ((__alignof__(struct adiantum_state) - 1) &
+				  ~streamcipher_alg->co.base.cra_alignmask);
 	inst->alg.co.twopass = true;
 
 	inst->free = adiantum_free_instance;
 
-	err = skcipher_register_instance(tmpl, inst);
+	err = lskcipher_register_instance(tmpl, inst);
 	if (err) {
 err_free_inst:
 		adiantum_free_instance(inst);
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


^ permalink raw reply related	[flat|nested] 30+ messages in thread

* [PATCH 00/15] crypto: Add twopass lskcipher for adiantum
@ 2024-02-13  9:04 Herbert Xu
  2023-12-02  4:55 ` [PATCH 01/15] crypto: skcipher - Add tailsize attribute Herbert Xu
                   ` (15 more replies)
  0 siblings, 16 replies; 30+ messages in thread
From: Herbert Xu @ 2024-02-13  9:04 UTC (permalink / raw)
  To: Linux Crypto Mailing List

In order to process the data incrementally, adiantum needs to see the
source data twice.  This patch series adds support for such algorithms
in lskcipher, including adaptations to algif_skcipher.

For now this capability isn't actually exported completely through
algif_skcipher.  That is, if the source data is too large to be fed
at once through an SG list the operation will fail with ENOSYS.

As a future extension, the incremental processing could be exposed
through algif_skcipher (and perhaps even algif_aead).  However,
I'd like to see some real uses for it before adding this complexity.
For example, one valid use-case would be some hardware that directly
supported such incremental processing.

In addition to converting adiantum, the underlying chacha algorithm
is also converted over to lskcipher.

The algorithms cts + xts have been converted too, to ensure that the
tailsize mechanism works properly for them.  While doing this, the
parameters for cts + xts have been modified so that the blocksize is
now 1.  This entails changing the parameters of all drivers that
support cts and/or xts.
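
For illustration, the per-driver change boils down to a pattern along
these lines (a sketch only, not lifted verbatim from any one driver):

	.base.cra_blocksize	= 1,			/* any length >= one block */
	.ivsize			= AES_BLOCK_SIZE,
	.chunksize		= AES_BLOCK_SIZE,	/* walk granularity */
	.tailsize		= 2 * AES_BLOCK_SIZE,	/* withheld until final */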

Herbert Xu (15):
  crypto: skcipher - Add tailsize attribute
  crypto: algif_skcipher - Add support for tailsize
  crypto: skcipher - Remove ivsize check for lskcipher simple templates
  crypto: xts - Convert from skcipher to lskcipher
  crypto: skcipher - Add twopass attribute
  crypto: algif_skcipher - Disallow nonincremental algorithms
  crypto: adiantum - Use lskcipher instead of cipher
  crypto: skcipher - Add incremental support to lskcipher wrapper
  crypto: chacha-generic - Convert from skcipher to lskcipher
  crypto: skcipher - Move nesting check into ecb
  crypto: skcipher - Propagate zero-length requests to lskcipher
  crypto: cts - Convert from skcipher to lskcipher
  crypto: cts,xts - Update parameters blocksize/chunksize/tailsize
  crypto: lskcipher - Export incremental interface internally
  crypto: adiantum - Convert from skcipher to lskcipher

 arch/arm/crypto/aes-ce-glue.c                 |   8 +-
 arch/arm/crypto/aes-neonbs-glue.c             |   4 +-
 arch/arm64/crypto/aes-glue.c                  |   8 +-
 arch/arm64/crypto/aes-neonbs-glue.c           |   4 +-
 arch/arm64/crypto/sm4-ce-glue.c               |   8 +-
 arch/powerpc/crypto/aes-spe-glue.c            |   4 +-
 arch/powerpc/crypto/aes_xts.c                 |   4 +-
 arch/s390/crypto/aes_s390.c                   |   4 +-
 arch/s390/crypto/paes_s390.c                  |   4 +-
 arch/x86/crypto/aesni-intel_glue.c            |   8 +-
 crypto/adiantum.c                             | 573 ++++++++++--------
 crypto/algif_skcipher.c                       |  11 +-
 crypto/cbc.c                                  |   5 +
 crypto/chacha_generic.c                       | 161 ++---
 crypto/cts.c                                  | 355 +++--------
 crypto/ecb.c                                  |   4 +
 crypto/lskcipher.c                            |  94 ++-
 crypto/skcipher.c                             |  18 +-
 crypto/xts.c                                  | 572 +++++++----------
 drivers/crypto/atmel-aes.c                    |   4 +-
 drivers/crypto/axis/artpec6_crypto.c          |   2 +
 drivers/crypto/bcm/cipher.c                   |   4 +-
 drivers/crypto/caam/caamalg.c                 |   4 +-
 drivers/crypto/caam/caamalg_qi.c              |   4 +-
 drivers/crypto/caam/caamalg_qi2.c             |   4 +-
 drivers/crypto/cavium/cpt/cptvf_algs.c        |   4 +-
 .../crypto/cavium/nitrox/nitrox_skcipher.c    |   8 +-
 drivers/crypto/ccp/ccp-crypto-aes-xts.c       |   4 +-
 drivers/crypto/ccree/cc_cipher.c              |  12 +-
 drivers/crypto/chelsio/chcr_algo.c            |   4 +-
 drivers/crypto/hisilicon/sec/sec_algs.c       |   4 +-
 drivers/crypto/hisilicon/sec2/sec_crypto.c    |  23 +-
 .../crypto/inside-secure/safexcel_cipher.c    |   4 +-
 .../intel/keembay/keembay-ocs-aes-core.c      |  11 +-
 .../crypto/intel/qat/qat_common/qat_algs.c    |   4 +-
 .../crypto/marvell/octeontx/otx_cptvf_algs.c  |   4 +-
 .../marvell/octeontx2/otx2_cptvf_algs.c       |   4 +-
 drivers/crypto/qce/skcipher.c                 |   6 +-
 include/crypto/internal/chacha.h              |  22 +-
 include/crypto/internal/skcipher.h            |  43 ++
 include/crypto/skcipher.h                     |  65 ++
 include/crypto/xts.h                          |  24 +-
 42 files changed, 1067 insertions(+), 1050 deletions(-)

-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 06/15] crypto: algif_skcipher - Disallow nonincremental algorithms
  2023-12-05  6:13 ` [PATCH 06/15] crypto: algif_skcipher - Disallow nonincremental algorithms Herbert Xu
@ 2024-02-14 22:56   ` Eric Biggers
  2024-02-15  6:47     ` Herbert Xu
  0 siblings, 1 reply; 30+ messages in thread
From: Eric Biggers @ 2024-02-14 22:56 UTC (permalink / raw)
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Tue, Dec 05, 2023 at 02:13:26PM +0800, Herbert Xu wrote:
> As algif_skcipher does not support nonincremental algorithms, check
> for them and return ENOSYS

Shouldn't they still be supported if the data is being read/written all at once?

Also, ENOSYS isn't really an appropriate error code.  ENOSYS normally means that
the system call isn't supported at all.  Maybe use EOPNOTSUPP?

- Eric

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 13/15] crypto: cts,xts - Update parameters blocksize/chunksize/tailsize
  2023-12-29 10:47 ` [PATCH 13/15] crypto: cts,xts - Update parameters blocksize/chunksize/tailsize Herbert Xu
@ 2024-02-14 23:00   ` Eric Biggers
  2024-02-15  7:57     ` Herbert Xu
  0 siblings, 1 reply; 30+ messages in thread
From: Eric Biggers @ 2024-02-14 23:00 UTC (permalink / raw)
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Fri, Dec 29, 2023 at 06:47:00PM +0800, Herbert Xu wrote:
> diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
> index b668c97663ec..3bfa8accf2c2 100644
> --- a/arch/arm/crypto/aes-ce-glue.c
> +++ b/arch/arm/crypto/aes-ce-glue.c
> @@ -619,13 +619,15 @@ static struct skcipher_alg aes_algs[] = { {
>  	.base.cra_driver_name	= "__cts-cbc-aes-ce",
>  	.base.cra_priority	= 300,
>  	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
> -	.base.cra_blocksize	= AES_BLOCK_SIZE,
> +	.base.cra_blocksize	= 1,

Before messing around with cra_blocksize, it needs to be decided what it
actually means, and documented appropriately.  According to the current
specification, AES_BLOCK_SIZE is correct here, not 1:

 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of HASH transformation, it is possible for a smaller
 *		   block than @cra_blocksize to be passed to the crypto API for
 *		   transformation, in case of any other transformation type, an
 * 		   error will be returned upon any attempt to transform smaller
 *		   than @cra_blocksize chunks.

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 00/15] crypto: Add twopass lskcipher for adiantum
  2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
                   ` (14 preceding siblings ...)
  2024-02-13  8:48 ` [PATCH 15/15] crypto: adiantum - Convert from skcipher to lskcipher Herbert Xu
@ 2024-02-14 23:35 ` Eric Biggers
  2024-02-15  8:20   ` Herbert Xu
  15 siblings, 1 reply; 30+ messages in thread
From: Eric Biggers @ 2024-02-14 23:35 UTC (permalink / raw)
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Tue, Feb 13, 2024 at 05:04:25PM +0800, Herbert Xu wrote:
> [PATCH 00/15] crypto: Add twopass lskcipher for adiantum

Thanks.  Can you include an explanation of the high-level context and goals for
this work?  It's still not clear to me.  I'm guessing that the main goal is to
get rid of the vaddr => scatterlist => vaddr round trip for software
encryption/decryption, which hopefully will improve performance and make the API
easier to use?  And to do that, all software algorithms need to be converted to
"lskcipher"?  Will skcipher API users actually be able to convert to lskcipher,
or will they be blocked by people expecting to be able to use hardware crypto
accelerators?  Would you accept lskcipher being used alongside skcipher?
Previously you had said you don't want shash being used alongside ahash.

I'd prefer there was a clear plan before merging a bunch of patches that leave
everything in a half-finished state.

By the way, note that hctr2 requires two passes too, as it's an SPRP like
Adiantum.  Also note that SPRPs in general may require more than two passes,
though Adiantum and HCTR2 were designed to only need two (technically they have
three passes, but two are combinable).  It's fine to support only two passes if
that's what's needed now; I just thought I'd mention that there's no guarantee
that two passes will be enough forever.

> In addition to converting adiantum, the underlying chacha algorithm
> is also converted over to lskcipher.
> 
> The algorithms cts + xts have been converted too, to ensure that the
> tailsize mechanism works properly for them.  While doing this, the
> parameters for cts + xts have been modified so that the blocksize is
> now 1.  This entails changing the parameters of all drivers that
> support cts and/or xts.

cts and xts have nothing to do with adiantum.  So this further indicates that
the scope of this work is broader than just "crypto: Add twopass lskcipher for
adiantum" as suggested by the title.

It would be good to have a sense for the direction of this work.  What will be
coming next?

- Eric

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 09/15] crypto: chacha-generic - Convert from skcipher to lskcipher
  2023-12-06  5:49 ` [PATCH 09/15] crypto: chacha-generic - Convert from skcipher to lskcipher Herbert Xu
@ 2024-02-14 23:41   ` Eric Biggers
  2024-02-15  6:52     ` Herbert Xu
  0 siblings, 1 reply; 30+ messages in thread
From: Eric Biggers @ 2024-02-14 23:41 UTC (permalink / raw)
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Wed, Dec 06, 2023 at 01:49:32PM +0800, Herbert Xu wrote:
> +static int chacha_stream_xor(const struct chacha_ctx *ctx, const u8 *src,
> +			     u8 *dst, unsigned nbytes, u8 *siv, u32 flags)

In cryptography, siv normally stands for Synthetic Initialization Vector.  I
*think* that here you're having it stand for "state and IV", or something like
that.  Is there a better name for it?  Maybe it should just be state?

> -static int crypto_xchacha_crypt(struct skcipher_request *req)
> +static int crypto_xchacha_crypt(struct crypto_lskcipher *tfm, const u8 *src,
> +				u8 *dst, unsigned nbytes, u8 *siv, u32 flags)
>  {
> -	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
> -	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
> +	struct chacha_ctx *ctx = crypto_lskcipher_ctx(tfm);
>  	struct chacha_ctx subctx;
> -	u32 state[16];
> -	u8 real_iv[16];
> +	u8 *real_iv;
> +	u32 *state;
>  
> -	/* Compute the subkey given the original key and first 128 nonce bits */
> -	chacha_init_generic(state, ctx->key, req->iv);
> -	hchacha_block_generic(state, subctx.key, ctx->nrounds);
> +	real_iv = siv + XCHACHA_IV_SIZE;
> +	state = (u32 *)(real_iv + CHACHA_IV_SIZE);

So the "siv" contains xchacha_iv || real_iv || state?  That's 112 bytes, which
is more than the 80 that's allocated for it.

Isn't the state the only thing that actually needs to be carried forward?

- Eric

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 01/15] crypto: skcipher - Add tailsize attribute
  2023-12-02  4:55 ` [PATCH 01/15] crypto: skcipher - Add tailsize attribute Herbert Xu
@ 2024-02-14 23:44   ` Eric Biggers
  2024-02-15  6:40     ` Herbert Xu
  0 siblings, 1 reply; 30+ messages in thread
From: Eric Biggers @ 2024-02-14 23:44 UTC (permalink / raw)
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Sat, Dec 02, 2023 at 12:55:02PM +0800, Herbert Xu wrote:
> This patch adds a new tailsize attribute to skcipher and lskcipher
> algorithms.  This will be used by algorithms such as CTS which may
> need to withhold a number of blocks until the end has been reached.
> 
> When issuing a NOTFINAL request, the user must ensure that at least
> tailsize bytes will be supplied later on a final request.
> 
> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
> ---
>  crypto/lskcipher.c                 |  1 +
>  crypto/skcipher.c                  | 16 ++++++++++++++-
>  include/crypto/internal/skcipher.h |  1 +
>  include/crypto/skcipher.h          | 33 ++++++++++++++++++++++++++++++
>  4 files changed, 50 insertions(+), 1 deletion(-)
> 
> diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
> index 0b6dd8aa21f2..2a602911f4fc 100644
> --- a/crypto/lskcipher.c
> +++ b/crypto/lskcipher.c
> @@ -300,6 +300,7 @@ static void __maybe_unused crypto_lskcipher_show(
>  	seq_printf(m, "ivsize       : %u\n", skcipher->co.ivsize);
>  	seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
>  	seq_printf(m, "statesize    : %u\n", skcipher->co.statesize);
> +	seq_printf(m, "tailsize     : %u\n", skcipher->co.tailsize);

Do we really want to add new attributes like this to /proc/crypto?

I worry about userspace starting to depend on these algorithm attributes in a
weird way.

What is the use case for exposing them to userspace?

- Eric

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 01/15] crypto: skcipher - Add tailsize attribute
  2024-02-14 23:44   ` Eric Biggers
@ 2024-02-15  6:40     ` Herbert Xu
  2024-02-23  6:01       ` Eric Biggers
  0 siblings, 1 reply; 30+ messages in thread
From: Herbert Xu @ 2024-02-15  6:40 UTC (permalink / raw)
  To: Eric Biggers; +Cc: Linux Crypto Mailing List

On Wed, Feb 14, 2024 at 03:44:13PM -0800, Eric Biggers wrote:
>
> > diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
> > index 0b6dd8aa21f2..2a602911f4fc 100644
> > --- a/crypto/lskcipher.c
> > +++ b/crypto/lskcipher.c
> > @@ -300,6 +300,7 @@ static void __maybe_unused crypto_lskcipher_show(
> >  	seq_printf(m, "ivsize       : %u\n", skcipher->co.ivsize);
> >  	seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
> >  	seq_printf(m, "statesize    : %u\n", skcipher->co.statesize);
> > +	seq_printf(m, "tailsize     : %u\n", skcipher->co.tailsize);
> 
> Do we really want to add new attributes like this to /proc/crypto?
>
> I worry about userspace starting to depend on these algorithm attributes in a
> weird way.
> 
> What is the use case for exposing them to userspace?

Well this particular parameter is needed for user-space apps to know
whether their next read will block or not.
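
Purely as a sketch of that use (a hypothetical helper, not an existing
API), a user-space program could pull the value out of /proc/crypto
along these lines:

#include <stdio.h>
#include <string.h>

/* Return the tailsize advertised for @driver, or -1 if not found. */
static int crypto_proc_tailsize(const char *driver)
{
	char line[256], cur[128] = "";
	int tailsize = -1;
	FILE *f = fopen("/proc/crypto", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		sscanf(line, "driver : %127s", cur);
		if (!strcmp(cur, driver))
			sscanf(line, "tailsize : %d", &tailsize);
	}
	fclose(f);
	return tailsize;
}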

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 06/15] crypto: algif_skcipher - Disallow nonincremental algorithms
  2024-02-14 22:56   ` Eric Biggers
@ 2024-02-15  6:47     ` Herbert Xu
  2024-02-23  6:00       ` Eric Biggers
  0 siblings, 1 reply; 30+ messages in thread
From: Herbert Xu @ 2024-02-15  6:47 UTC (permalink / raw)
  To: Eric Biggers; +Cc: Linux Crypto Mailing List

On Wed, Feb 14, 2024 at 02:56:38PM -0800, Eric Biggers wrote:
>
> Shouldn't they still be supported if the data is being read/written all at once?

It is supported, or at least it worked for my libkcapi tests on
adiantum.  This error only triggers if we enter the code-path that
splits the operation into two or more requests (because the user
didn't write all the data in one go).

> Also, ENOSYS isn't really an appropriate error code.  ENOSYS normally means that
> the system call isn't supported at all.  Maybe use EOPNOTSUPP?

Within the crypto subsystem ENOSYS means that a particular
functionality is not supported.  I'm happy to change that but
that should go into a different patch as there are existing uses
which are similar (e.g., cloning).

Thanks,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 09/15] crypto: chacha-generic - Convert from skcipher to lskcipher
  2024-02-14 23:41   ` Eric Biggers
@ 2024-02-15  6:52     ` Herbert Xu
  0 siblings, 0 replies; 30+ messages in thread
From: Herbert Xu @ 2024-02-15  6:52 UTC (permalink / raw)
  To: Eric Biggers; +Cc: Linux Crypto Mailing List

On Wed, Feb 14, 2024 at 03:41:51PM -0800, Eric Biggers wrote:
> On Wed, Dec 06, 2023 at 01:49:32PM +0800, Herbert Xu wrote:
> > +static int chacha_stream_xor(const struct chacha_ctx *ctx, const u8 *src,
> > +			     u8 *dst, unsigned nbytes, u8 *siv, u32 flags)
> 
> In cryptography, siv normally stands for Synthetic Initialization Vector.  I
> *think* that here you're having it stand for "state and IV", or something like
> that.  Is there a better name for it?  Maybe it should just be state?

Thanks, I'll change this to ivst.

> So the "siv" contains xchacha_iv || real_iv || state?  That's 112 bytes, which
> is more than the 80 that's allocated for it.

Correct, it's 112 bytes.  The caller is meant to allocate enough
space for the IV and state: 32(ivsize) + 80(statesize).

> Isn't the state the only thing that actually needs to be carried forward?

Some algorithms (statesize == 0) will carry all their state in
the IV, e.g., cbc.
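
Purely as a sketch of what that means for a caller (helper name made
up, error handling trimmed):

#include <crypto/internal/skcipher.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_first_chunk(struct crypto_lskcipher *tfm, const u8 *iv,
			       const u8 *src, u8 *dst, unsigned int len,
			       u8 **sivp)
{
	/* siv = IV (ivsize bytes, 32 for xchacha) followed by the opaque
	 * continuation state (statesize bytes, 80 here). */
	u8 *siv = kzalloc(crypto_lskcipher_ivsize(tfm) +
			  crypto_lskcipher_statesize(tfm), GFP_KERNEL);

	if (!siv)
		return -ENOMEM;
	memcpy(siv, iv, crypto_lskcipher_ivsize(tfm));

	*sivp = siv;
	/* First, non-final chunk; later calls add CRYPTO_LSKCIPHER_FLAG_CONT
	 * and the last one also CRYPTO_LSKCIPHER_FLAG_FINAL. */
	return crypto_lskcipher_encrypt_ext(tfm, src, dst, len, siv, 0);
}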

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH 13/15] crypto: cts,xts - Update parameters blocksize/chunksize/tailsize
  2024-02-14 23:00   ` Eric Biggers
@ 2024-02-15  7:57     ` Herbert Xu
  2024-02-23  6:09       ` Eric Biggers
  0 siblings, 1 reply; 30+ messages in thread
From: Herbert Xu @ 2024-02-15  7:57 UTC (permalink / raw)
  To: Eric Biggers; +Cc: Linux Crypto Mailing List

On Wed, Feb 14, 2024 at 03:00:21PM -0800, Eric Biggers wrote:
>
> Before messing around with cra_blocksize, it needs to be decided what it
> actually means, and documented appropriately.  According to the current
> specification, AES_BLOCK_SIZE is correct here, not 1:

The block size should always be set to 1 unless the algorithm is only
capable of handling input whose length is a multiple of the block size.

>  * @cra_blocksize: Minimum block size of this transformation. The size in bytes
>  *		   of the smallest possible unit which can be transformed with
>  *		   this algorithm. The users must respect this value.
>  *		   In case of HASH transformation, it is possible for a smaller
>  *		   block than @cra_blocksize to be passed to the crypto API for
>  *		   transformation, in case of any other transformation type, an
>  * 		   error will be returned upon any attempt to transform smaller
>  *		   than @cra_blocksize chunks.

OK this is wrong.  We should fix it.  For skciphers, the input
length must be a multiple of blocksize.

We should probably replace this comment with one that refers to
each algorithm type and then document the meaning there.
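
For cts that would come out as something like the following (values
for illustration only, not necessarily the exact hunk in patch 13):

	/* cts(cbc(aes)): any length >= AES_BLOCK_SIZE is acceptable,
	 * so the blocksize no longer encodes the minimum message size.
	 */
	inst->alg.co.base.cra_blocksize = 1;
	inst->alg.co.chunksize = AES_BLOCK_SIZE;
	inst->alg.co.tailsize = 2 * AES_BLOCK_SIZE;	/* withheld tail */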

Thanks,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

* Re: [PATCH 00/15] crypto: Add twopass lskcipher for adiantum
  2024-02-14 23:35 ` [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Eric Biggers
@ 2024-02-15  8:20   ` Herbert Xu
  2024-02-23  6:39     ` Eric Biggers
  0 siblings, 1 reply; 30+ messages in thread
From: Herbert Xu @ 2024-02-15  8:20 UTC (permalink / raw)
  To: Eric Biggers; +Cc: Linux Crypto Mailing List

On Wed, Feb 14, 2024 at 03:35:17PM -0800, Eric Biggers wrote:
> 
> Thanks.  Can you include an explanation of the high-level context and goals for
> this work?  It's still not clear to me.  I'm guessing that the main goal is to
> get rid of the vaddr => scatterlist => vaddr round trip for software
> encryption/decryption, which hopefully will improve performance and make the API
> easier to use?  And to do that, all software algorithms need to be converted to

The main goal is to remove the legacy cipher type and replace it
with lskcipher.  The vaddr interface is simply a bonus.  In fact,
this particular series is basically my response to your questions
about adiantum in that thread:

https://lore.kernel.org/linux-crypto/20230914082828.895403-1-herbert@gondor.apana.org.au/

But yes, I will update the cover letter.

> "lskcipher"?  Will skcipher API users actually be able to convert to lskcipher,
> or will they be blocked by people expecting to be able to use hardware crypto
> accelerators?  Would you accept lskcipher being used alongside skcipher?

That's a question for each user to decide.

> Previously you had said you don't want shash being used alongside ahash.

In general, if the amount of data being processed is large, then
I would expect the use of hardware accelerators to be a possibility
and therefore choose the SG-based interface.

I wouldn't consider 4K to be large though.  So it's really only when
you feed hundreds of kilobytes of data through the algorithm that I
would recommend against using shash.

> By the way, note that hctr2 requires two passes too, as it's an SPRP like
> Adiantum.  Also note that SPRPs in general may require more than two passes,
> though Adiantum and HCTR2 were designed to only need two (technically they have
> three passes, but two are combinable).  It's fine to support only two passes if
> that's what's needed now; I just thought I'd mention that there's no guarantee
> that two passes will be enough forever.

Right, there is no reason why we couldn't extend this to more than
two passes when the need arises.  The CCM algorithm could also be
implemented in this manner with three passes (although the first
pass is a bit of a waste since it simply tallies up the length of
the input).

Cheers,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

* Re: [PATCH 06/15] crypto: algif_skcipher - Disallow nonincremental algorithms
  2024-02-15  6:47     ` Herbert Xu
@ 2024-02-23  6:00       ` Eric Biggers
  0 siblings, 0 replies; 30+ messages in thread
From: Eric Biggers @ 2024-02-23  6:00 UTC (permalink / raw)
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Thu, Feb 15, 2024 at 02:47:07PM +0800, Herbert Xu wrote:
> On Wed, Feb 14, 2024 at 02:56:38PM -0800, Eric Biggers wrote:
> >
> > Shouldn't they still be supported if the data is being read/written all at once?
> 
> It is supported, or at least it worked in my libkcapi tests on
> adiantum.  This error only triggers if we enter the code path that
> splits the operation into two or more requests (because the user
> didn't write all the data in one go).

Great, that isn't what the commit message says though.

> 
> > Also, ENOSYS isn't really an appropriate error code.  ENOSYS normally means that
> > the system call isn't supported at all.  Maybe use EOPNOTSUPP?
> 
> Within the crypto subsystem, ENOSYS means that a particular piece
> of functionality is not supported.  I'm happy to change that, but
> it should go into a different patch, as there are existing uses
> which are similar (e.g., cloning).

This is a user API; it's not "within the crypto subsystem".  The usual
conventions for system calls apply.

- Eric

* Re: [PATCH 01/15] crypto: skcipher - Add tailsize attribute
  2024-02-15  6:40     ` Herbert Xu
@ 2024-02-23  6:01       ` Eric Biggers
  0 siblings, 0 replies; 30+ messages in thread
From: Eric Biggers @ 2024-02-23  6:01 UTC (permalink / raw)
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Thu, Feb 15, 2024 at 02:40:05PM +0800, Herbert Xu wrote:
> On Wed, Feb 14, 2024 at 03:44:13PM -0800, Eric Biggers wrote:
> >
> > > diff --git a/crypto/lskcipher.c b/crypto/lskcipher.c
> > > index 0b6dd8aa21f2..2a602911f4fc 100644
> > > --- a/crypto/lskcipher.c
> > > +++ b/crypto/lskcipher.c
> > > @@ -300,6 +300,7 @@ static void __maybe_unused crypto_lskcipher_show(
> > >  	seq_printf(m, "ivsize       : %u\n", skcipher->co.ivsize);
> > >  	seq_printf(m, "chunksize    : %u\n", skcipher->co.chunksize);
> > >  	seq_printf(m, "statesize    : %u\n", skcipher->co.statesize);
> > > +	seq_printf(m, "tailsize     : %u\n", skcipher->co.tailsize);
> > 
> > Do we really want to add new attributes like this to /proc/crypto?
> >
> > I worry about userspace starting to depend on these algorithm attributes in a
> > weird way.
> > 
> > What is the use case for exposing them to userspace?
> 
> Well this particular parameter is needed for user-space apps to know
> whether their next read will block or not.
> 

Can you give a specific example of how this would be useful?

- Eric

* Re: [PATCH 13/15] crypto: cts,xts - Update parameters blocksize/chunksize/tailsize
  2024-02-15  7:57     ` Herbert Xu
@ 2024-02-23  6:09       ` Eric Biggers
  0 siblings, 0 replies; 30+ messages in thread
From: Eric Biggers @ 2024-02-23  6:09 UTC (permalink / raw)
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Thu, Feb 15, 2024 at 03:57:38PM +0800, Herbert Xu wrote:
> On Wed, Feb 14, 2024 at 03:00:21PM -0800, Eric Biggers wrote:
> >
> > Before messing around with cra_blocksize, it needs to be decided what it
> > actually means, and document it appropriately.  According to the current
> > specification, AES_BLOCK_SIZE is correct here, not 1:
> 
> The block size should always be set to 1 unless the algorithm is
> only capable of handling input data whose length is a multiple of
> the block size.
> 
> >  * @cra_blocksize: Minimum block size of this transformation. The size in bytes
> >  *		   of the smallest possible unit which can be transformed with
> >  *		   this algorithm. The users must respect this value.
> >  *		   In case of HASH transformation, it is possible for a smaller
> >  *		   block than @cra_blocksize to be passed to the crypto API for
> >  *		   transformation, in case of any other transformation type, an
> >  * 		   error will be returned upon any attempt to transform smaller
> >  *		   than @cra_blocksize chunks.
> 
> OK this is wrong.  We should fix it.  For skciphers, the input
> length must be a multiple of blocksize.

That seems logical, but everything needs to be updated to be consistent.  Note
that adiantum and hctr2 also follow the currently documented convention, i.e.
they support byte-aligned messages but set cra_blocksize to 16 because that is
their minimum message size (see the snippet below).  Also, beyond the proposed
definition seeming logical, do you have any more specific rationale for
wanting to make this change?
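
(For reference, mainline adiantum today has roughly

	#define BLOCKCIPHER_BLOCK_SIZE		16
	...
	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;

even though any message of at least 16 bytes is accepted.)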

- Eric

* Re: [PATCH 00/15] crypto: Add twopass lskcipher for adiantum
  2024-02-15  8:20   ` Herbert Xu
@ 2024-02-23  6:39     ` Eric Biggers
  0 siblings, 0 replies; 30+ messages in thread
From: Eric Biggers @ 2024-02-23  6:39 UTC (permalink / raw)
  To: Herbert Xu; +Cc: Linux Crypto Mailing List

On Thu, Feb 15, 2024 at 04:20:34PM +0800, Herbert Xu wrote:
> On Wed, Feb 14, 2024 at 03:35:17PM -0800, Eric Biggers wrote:
> > 
> > Thanks.  Can you include an explanation of the high-level context and goals for
> > this work?  It's still not clear to me.  I'm guessing that the main goal is to
> > get rid of the vaddr => scatterlist => vaddr round trip for software
> > encryption/decryption, which hopefully will improve performance and make the API
> > easier to use?  And to do that, all software algorithms need to be converted to
> 
> The main goal is to remove the legacy cipher type and replace it
> with lskcipher.

What is the benefit of that change?

This series also goes way beyond that, so it seems like there's more going on
here.  I do like the support for vaddr; the scatterlist-based APIs have always
been one of the main pain points with the crypto API.  But you're claiming
that fixing that isn't actually the goal.  So I'm confused.
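
To be concrete, the round trip I mean is the difference between

	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_skcipher_encrypt(req);

and

	err = crypto_lskcipher_encrypt(tfm, buf, buf, len, iv);

for data that already has a virtual address (request allocation and
error handling omitted in both cases).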

> > "lskcipher"?  Will skcipher API users actually be able to convert to lskcipher,
> > or will they be blocked by people expecting to be able to use hardware crypto
> > accelerators?  Would you accept lskcipher being used alongside skcipher?
> 
> That's a question for each user to decide.
> 
> > Previously you had said you don't want shash being used alongside ahash.
> 
> In general, if the amount of data being processed is large, then
> I would expect the use of hardware accelerators to be a possibility
> and therefore choose the SG-based interface.
> 
> I wouldn't consider 4K to be large though.  So it's really only when
> you feed hundreds of kilobytes of data through the algorithm that I
> would recommend against using shash.

dm-verity usually hashes 4K at a time, but that was enough for people to want it
to support hardware accelerators, so it had to be switched to ahash.  But you
objected to my patch that added shash support to dm-verity alongside ahash
(https://lore.kernel.org/dm-devel/20231030023351.6041-1-ebiggers@kernel.org).

That suggests that adding lskcipher support to dm-crypt and fscrypt alongside
skcipher would similarly not be "allowed".  Most users don't use off-CPU
hardware accelerators with those, but some do.

I did get away with (so far) switching fs/verity/ to shash.  I'm not sure I
could similarly get away with switching fs/crypto/ to lskcipher.  There are
people using the CAAM AES-CBC hardware accelerator with fscrypt.

Before we go through a big effort to convert all these algorithms to lskcipher,
or (more likely based on history) leave everything in a half-finished state, I'd
like to get a good sense that lskcipher will be useful.

- Eric

end of thread

Thread overview: 30+ messages
2024-02-13  9:04 [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Herbert Xu
2023-12-02  4:55 ` [PATCH 01/15] crypto: skcipher - Add tailsize attribute Herbert Xu
2024-02-14 23:44   ` Eric Biggers
2024-02-15  6:40     ` Herbert Xu
2024-02-23  6:01       ` Eric Biggers
2023-12-02  5:42 ` [PATCH 02/15] crypto: algif_skcipher - Add support for tailsize Herbert Xu
2023-12-04 10:24 ` [PATCH 04/15] crypto: xts - Convert from skcipher to lskcipher Herbert Xu
2023-12-05  6:09 ` [PATCH 05/15] crypto: skcipher - Add twopass attribute Herbert Xu
2023-12-05  6:13 ` [PATCH 06/15] crypto: algif_skcipher - Disallow nonincremental algorithms Herbert Xu
2024-02-14 22:56   ` Eric Biggers
2024-02-15  6:47     ` Herbert Xu
2024-02-23  6:00       ` Eric Biggers
2023-12-05  9:52 ` [PATCH 07/15] crypto: adiantum - Use lskcipher instead of cipher Herbert Xu
2023-12-06  4:46 ` [PATCH 08/15] crypto: skcipher - Add incremental support to lskcipher wrapper Herbert Xu
2023-12-06  5:49 ` [PATCH 09/15] crypto: chacha-generic - Convert from skcipher to lskcipher Herbert Xu
2024-02-14 23:41   ` Eric Biggers
2024-02-15  6:52     ` Herbert Xu
2023-12-06  6:05 ` [PATCH 10/15] crypto: skcipher - Move nesting check into ecb Herbert Xu
2023-12-06  8:55 ` [PATCH 11/15] crypto: skcipher - Propagate zero-length requests to lskcipher Herbert Xu
2023-12-07 10:03 ` [PATCH 03/15] crypto: skcipher - Remove ivsize check for lskcipher simple templates Herbert Xu
2023-12-07 10:13 ` [PATCH 12/15] crypto: cts - Convert from skcipher to lskcipher Herbert Xu
2023-12-29 10:47 ` [PATCH 13/15] crypto: cts,xts - Update parameters blocksize/chunksize/tailsize Herbert Xu
2024-02-14 23:00   ` Eric Biggers
2024-02-15  7:57     ` Herbert Xu
2024-02-23  6:09       ` Eric Biggers
2023-12-30  7:16 ` [PATCH 14/15] crypto: lskcipher - Export incremental interface internally Herbert Xu
2024-02-13  8:48 ` [PATCH 15/15] crypto: adiantum - Convert from skcipher to lskcipher Herbert Xu
2024-02-14 23:35 ` [PATCH 00/15] crypto: Add twopass lskcipher for adiantum Eric Biggers
2024-02-15  8:20   ` Herbert Xu
2024-02-23  6:39     ` Eric Biggers
