* [RFC/RFT PATCH 01/15] crypto: aegis - fix handling chunked inputs
       [not found] <20190123224926.250525-1-ebiggers@kernel.org>
@ 2019-01-23 22:49 ` Eric Biggers
  2019-01-23 22:49 ` [RFC/RFT PATCH 02/15] crypto: morus " Eric Biggers
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Eric Biggers @ 2019-01-23 22:49 UTC (permalink / raw)
  To: linux-crypto, Herbert Xu
  Cc: linux-kernel, Jason A. Donenfeld, stable, Ondrej Mosnacek

From: Eric Biggers <ebiggers@google.com>

The generic AEGIS implementations all fail the improved AEAD tests
because they produce the wrong result with some data layouts.  Fix them.

Fixes: f606a88e5823 ("crypto: aegis - Add generic AEGIS AEAD implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/aegis128.c  | 14 +++++++-------
 crypto/aegis128l.c | 14 +++++++-------
 crypto/aegis256.c  | 14 +++++++-------
 3 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/crypto/aegis128.c b/crypto/aegis128.c
index c22f4414856d..789716f92e4c 100644
--- a/crypto/aegis128.c
+++ b/crypto/aegis128.c
@@ -290,19 +290,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
 					  const struct aegis128_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, chunksize);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
 
diff --git a/crypto/aegis128l.c b/crypto/aegis128l.c
index b6fb21ebdc3e..73811448cb6b 100644
--- a/crypto/aegis128l.c
+++ b/crypto/aegis128l.c
@@ -353,19 +353,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
 					   const struct aegis128l_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, chunksize);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
 
diff --git a/crypto/aegis256.c b/crypto/aegis256.c
index 11f0f8ec9c7c..8a71e9c06193 100644
--- a/crypto/aegis256.c
+++ b/crypto/aegis256.c
@@ -303,19 +303,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
 					  const struct aegis256_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, chunksize);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
 
-- 
2.20.1.321.g9e740568ce-goog



* [RFC/RFT PATCH 02/15] crypto: morus - fix handling chunked inputs
       [not found] <20190123224926.250525-1-ebiggers@kernel.org>
  2019-01-23 22:49 ` [RFC/RFT PATCH 01/15] crypto: aegis - fix handling chunked inputs Eric Biggers
@ 2019-01-23 22:49 ` Eric Biggers
  2019-01-31  9:05   ` Ondrej Mosnacek
  2019-01-23 22:49 ` [RFC/RFT PATCH 03/15] crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP Eric Biggers
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 10+ messages in thread
From: Eric Biggers @ 2019-01-23 22:49 UTC (permalink / raw)
  To: linux-crypto, Herbert Xu
  Cc: linux-kernel, Jason A. Donenfeld, stable, Ondrej Mosnacek

From: Eric Biggers <ebiggers@google.com>

The generic MORUS implementations all fail the improved AEAD tests
because they produce the wrong result with some data layouts.  Fix them.

Fixes: 396be41f16fd ("crypto: morus - Add generic MORUS AEAD implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/morus1280.c | 13 +++++++------
 crypto/morus640.c  | 13 +++++++------
 2 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 3889c188f266..b83576b4eb55 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
 					   const struct morus1280_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *dst;
-	const u8 *src;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, walk.nbytes);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
 
diff --git a/crypto/morus640.c b/crypto/morus640.c
index da06ec2f6a80..b6a477444f6d 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
 					  const struct morus640_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *dst;
-	const u8 *src;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, walk.nbytes);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
 
-- 
2.20.1.321.g9e740568ce-goog



* [RFC/RFT PATCH 03/15] crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP
       [not found] <20190123224926.250525-1-ebiggers@kernel.org>
  2019-01-23 22:49 ` [RFC/RFT PATCH 01/15] crypto: aegis - fix handling chunked inputs Eric Biggers
  2019-01-23 22:49 ` [RFC/RFT PATCH 02/15] crypto: morus " Eric Biggers
@ 2019-01-23 22:49 ` Eric Biggers
  2019-01-23 22:49 ` [RFC/RFT PATCH 04/15] crypto: x86/morus " Eric Biggers
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Eric Biggers @ 2019-01-23 22:49 UTC (permalink / raw)
  To: linux-crypto, Herbert Xu
  Cc: linux-kernel, Jason A. Donenfeld, stable, Ondrej Mosnacek

From: Eric Biggers <ebiggers@google.com>

The x86 AEGIS implementations all fail the improved AEAD tests because
they produce the wrong result with some data layouts.  Also, when the
MAY_SLEEP flag is given, they can sleep in the skcipher_walk_*()
functions while preemption is disabled by kernel_fpu_begin().

Fix these bugs.
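
In condensed form, the MAY_SLEEP half of the fix is an ordering change:
the walk is now set up (with atomic=true) before kernel_fpu_begin(), so
any allocation or page fault in the walk setup happens while sleeping
is still allowed.  A sketch, using the aegis128 names from the diff
below:

	struct skcipher_walk walk;

	/* May allocate or fault: must run before kernel_fpu_begin(). */
	ops->skcipher_walk_init(&walk, req, true);

	kernel_fpu_begin();
	/* ... init and associated data processing ... */
	crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
	/* ... finalization ... */
	kernel_fpu_end();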

Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 arch/x86/crypto/aegis128-aesni-glue.c  | 38 ++++++++++----------------
 arch/x86/crypto/aegis128l-aesni-glue.c | 38 ++++++++++----------------
 arch/x86/crypto/aegis256-aesni-glue.c  | 38 ++++++++++----------------
 3 files changed, 45 insertions(+), 69 deletions(-)

diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index 2a356b948720..3ea71b871813 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_process_ad(
 }
 
 static void crypto_aegis128_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
 	crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index dbe8bb980da1..1b1b39c66c5e 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_process_ad(
 }
 
 static void crypto_aegis128l_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS128L_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
+		ops->crypt_blocks(state, round_down(walk->nbytes,
+						    AEGIS128L_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
 	crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis128l_aesni_process_crypt(&state, req, ops);
+	crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index 8bebda2de92f..6227ca3220a0 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_process_ad(
 }
 
 static void crypto_aegis256_aesni_process_crypt(
-		struct aegis_state *state, struct aead_request *req,
+		struct aegis_state *state, struct skcipher_walk *walk,
 		const struct aegis_crypt_ops *ops)
 {
-	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize, base;
-
-	ops->skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops->crypt_blocks(state, chunksize, src, dst);
-
-		base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
-		src += base;
-		dst += base;
-		chunksize &= AEGIS256_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops->crypt_tail(state, chunksize, src, dst);
+	while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
+		ops->crypt_blocks(state,
+				  round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
+				  walk->src.virt.addr, walk->dst.virt.addr);
+		skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+				walk->dst.virt.addr);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(struct aead_request *req,
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
+	struct skcipher_walk walk;
 	struct aegis_state state;
 
+	ops->skcipher_walk_init(&walk, req, true);
+
 	kernel_fpu_begin();
 
 	crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
 	crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
-	crypto_aegis256_aesni_process_crypt(&state, req, ops);
+	crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
 	crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
-- 
2.20.1.321.g9e740568ce-goog



* [RFC/RFT PATCH 04/15] crypto: x86/morus - fix handling chunked inputs and MAY_SLEEP
       [not found] <20190123224926.250525-1-ebiggers@kernel.org>
                   ` (2 preceding siblings ...)
  2019-01-23 22:49 ` [RFC/RFT PATCH 03/15] crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP Eric Biggers
@ 2019-01-23 22:49 ` Eric Biggers
  2019-01-23 22:49 ` [RFC/RFT PATCH 05/15] crypto: x86/aesni-gcm - fix crash on empty plaintext Eric Biggers
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 10+ messages in thread
From: Eric Biggers @ 2019-01-23 22:49 UTC (permalink / raw)
  To: linux-crypto, Herbert Xu
  Cc: linux-kernel, Jason A. Donenfeld, stable, Ondrej Mosnacek

From: Eric Biggers <ebiggers@google.com>

The x86 MORUS implementations all fail the improved AEAD tests because
they produce the wrong result with some data layouts.  Also, when the
MAY_SLEEP flag is given, they can sleep in the skcipher_walk_*()
functions while preemption is disabled by kernel_fpu_begin().

Fix these bugs.
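
The chunked-input half of the fix follows the same pattern as the
generic code: process only whole blocks until the final step, then hand
any partial tail to crypt_tail().  A sketch with the morus1280 names
from the diff below:

	while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
		/* Whole blocks only; return the remainder to the walk. */
		ops.crypt_blocks(state, walk->src.virt.addr,
				 walk->dst.virt.addr,
				 round_down(walk->nbytes,
					    MORUS1280_BLOCK_SIZE));
		skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
	}

	if (walk->nbytes) {
		/* Final, possibly partial, block. */
		ops.crypt_tail(state, walk->src.virt.addr,
			       walk->dst.virt.addr, walk->nbytes);
		skcipher_walk_done(walk, 0);
	}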

Fixes: 56e8e57fc3a7 ("crypto: morus - Add common SIMD glue code for MORUS")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 arch/x86/crypto/morus1280_glue.c | 40 +++++++++++++-------------------
 arch/x86/crypto/morus640_glue.c  | 39 ++++++++++++-------------------
 2 files changed, 31 insertions(+), 48 deletions(-)

diff --git a/arch/x86/crypto/morus1280_glue.c b/arch/x86/crypto/morus1280_glue.c
index 0dccdda1eb3a..7e600f8bcdad 100644
--- a/arch/x86/crypto/morus1280_glue.c
+++ b/arch/x86/crypto/morus1280_glue.c
@@ -85,31 +85,20 @@ static void crypto_morus1280_glue_process_ad(
 
 static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
 						struct morus1280_ops ops,
-						struct aead_request *req)
+						struct skcipher_walk *walk)
 {
-	struct skcipher_walk walk;
-	u8 *cursor_src, *cursor_dst;
-	unsigned int chunksize, base;
-
-	ops.skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		cursor_src = walk.src.virt.addr;
-		cursor_dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
-
-		base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
-		cursor_src += base;
-		cursor_dst += base;
-		chunksize &= MORUS1280_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops.crypt_tail(state, cursor_src, cursor_dst,
-				       chunksize);
+	while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
+		ops.crypt_blocks(state, walk->src.virt.addr,
+				 walk->dst.virt.addr,
+				 round_down(walk->nbytes,
+					    MORUS1280_BLOCK_SIZE));
+		skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
+			       walk->nbytes);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(struct aead_request *req,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
 	struct morus1280_state state;
+	struct skcipher_walk walk;
+
+	ops.skcipher_walk_init(&walk, req, true);
 
 	kernel_fpu_begin();
 
 	ctx->ops->init(&state, &ctx->key, req->iv);
 	crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
-	crypto_morus1280_glue_process_crypt(&state, ops, req);
+	crypto_morus1280_glue_process_crypt(&state, ops, &walk);
 	ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
diff --git a/arch/x86/crypto/morus640_glue.c b/arch/x86/crypto/morus640_glue.c
index 7b58fe4d9bd1..cb3a81732016 100644
--- a/arch/x86/crypto/morus640_glue.c
+++ b/arch/x86/crypto/morus640_glue.c
@@ -85,31 +85,19 @@ static void crypto_morus640_glue_process_ad(
 
 static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
 					       struct morus640_ops ops,
-					       struct aead_request *req)
+					       struct skcipher_walk *walk)
 {
-	struct skcipher_walk walk;
-	u8 *cursor_src, *cursor_dst;
-	unsigned int chunksize, base;
-
-	ops.skcipher_walk_init(&walk, req, false);
-
-	while (walk.nbytes) {
-		cursor_src = walk.src.virt.addr;
-		cursor_dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
-
-		ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
-
-		base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
-		cursor_src += base;
-		cursor_dst += base;
-		chunksize &= MORUS640_BLOCK_SIZE - 1;
-
-		if (chunksize > 0)
-			ops.crypt_tail(state, cursor_src, cursor_dst,
-				       chunksize);
+	while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
+		ops.crypt_blocks(state, walk->src.virt.addr,
+				 walk->dst.virt.addr,
+				 round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
+		skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
+	}
 
-		skcipher_walk_done(&walk, 0);
+	if (walk->nbytes) {
+		ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
+			       walk->nbytes);
+		skcipher_walk_done(walk, 0);
 	}
 }
 
@@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(struct aead_request *req,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
 	struct morus640_state state;
+	struct skcipher_walk walk;
+
+	ops.skcipher_walk_init(&walk, req, true);
 
 	kernel_fpu_begin();
 
 	ctx->ops->init(&state, &ctx->key, req->iv);
 	crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
-	crypto_morus640_glue_process_crypt(&state, ops, req);
+	crypto_morus640_glue_process_crypt(&state, ops, &walk);
 	ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
 
 	kernel_fpu_end();
-- 
2.20.1.321.g9e740568ce-goog



* [RFC/RFT PATCH 05/15] crypto: x86/aesni-gcm - fix crash on empty plaintext
       [not found] <20190123224926.250525-1-ebiggers@kernel.org>
                   ` (3 preceding siblings ...)
  2019-01-23 22:49 ` [RFC/RFT PATCH 04/15] crypto: x86/morus " Eric Biggers
@ 2019-01-23 22:49 ` Eric Biggers
  2019-01-23 22:49 ` [RFC/RFT PATCH 06/15] crypto: ahash - fix another early termination in hash walk Eric Biggers
  2019-01-23 22:49 ` [RFC/RFT PATCH 07/15] crypto: arm64/aes-neonbs - fix returning final keystream block Eric Biggers
  6 siblings, 0 replies; 10+ messages in thread
From: Eric Biggers @ 2019-01-23 22:49 UTC (permalink / raw)
  To: linux-crypto, Herbert Xu
  Cc: linux-kernel, Jason A. Donenfeld, stable, Dave Watson

From: Eric Biggers <ebiggers@google.com>

gcmaes_crypt_by_sg() dereferences the NULL pointer returned by
scatterwalk_ffwd() when encrypting an empty plaintext and the source
scatterlist ends immediately after the associated data.

Fix it by only fast-forwarding to the src/dst data scatterlists if the
data length is nonzero.

This bug is reproduced by the "rfc4543(gcm(aes))" test vectors when run
with the new AEAD test manager.
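
The guard itself is small: 'left', the en/decryption payload length
(computed earlier in the function, outside this hunk), now gates the
fast-forward so the data scatterlists are only set up when there is
data to process:

	if (left) {
		src_sg = scatterwalk_ffwd(src_start, req->src,
					  req->assoclen);
		scatterwalk_start(&src_sg_walk, src_sg);
		/* likewise for dst when req->src != req->dst */
	}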

Fixes: e845520707f8 ("crypto: aesni - Update aesni-intel_glue to use scatter/gather")
Cc: <stable@vger.kernel.org> # v4.17+
Cc: Dave Watson <davejwatson@fb.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 arch/x86/crypto/aesni-intel_glue.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 9b5ccde3ef31..1e3d2102033a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -813,11 +813,14 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
 	}
 
-	src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
-	scatterwalk_start(&src_sg_walk, src_sg);
-	if (req->src != req->dst) {
-		dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
-		scatterwalk_start(&dst_sg_walk, dst_sg);
+	if (left) {
+		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
+		scatterwalk_start(&src_sg_walk, src_sg);
+		if (req->src != req->dst) {
+			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
+						  req->assoclen);
+			scatterwalk_start(&dst_sg_walk, dst_sg);
+		}
 	}
 
 	kernel_fpu_begin();
-- 
2.20.1.321.g9e740568ce-goog



* [RFC/RFT PATCH 06/15] crypto: ahash - fix another early termination in hash walk
       [not found] <20190123224926.250525-1-ebiggers@kernel.org>
                   ` (4 preceding siblings ...)
  2019-01-23 22:49 ` [RFC/RFT PATCH 05/15] crypto: x86/aesni-gcm - fix crash on empty plaintext Eric Biggers
@ 2019-01-23 22:49 ` Eric Biggers
  2019-01-23 22:49 ` [RFC/RFT PATCH 07/15] crypto: arm64/aes-neonbs - fix returning final keystream block Eric Biggers
  6 siblings, 0 replies; 10+ messages in thread
From: Eric Biggers @ 2019-01-23 22:49 UTC (permalink / raw)
  To: linux-crypto, Herbert Xu; +Cc: linux-kernel, Jason A. Donenfeld, stable

From: Eric Biggers <ebiggers@google.com>

Hash algorithms with an alignmask set, e.g. "xcbc(aes-aesni)" and
"michael_mic", fail the improved hash tests because they sometimes
produce the wrong digest.  The bug is that in the case where a
scatterlist element crosses pages, not all the data is actually hashed
because the scatterlist walk terminates too early.  This happens because
the 'nbytes' variable in crypto_hash_walk_done() is assigned the number
of bytes remaining in the page, then later interpreted as the number of
bytes remaining in the scatterlist element.  Fix it.
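
Condensed, the fix keeps the per-element count in walk->entrylen and
uses a local 'nbytes' only for the per-page clamp (sketch of the fixed
branch; see the diff below):

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		/* Clamp to the current page; walk->entrylen still
		 * tracks what remains of the scatterlist element. */
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}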

Fixes: 900a081f6912 ("crypto: ahash - Fix early termination in hash walk")
Cc: stable@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/ahash.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/crypto/ahash.c b/crypto/ahash.c
index ca0d3e281fef..81e2767e2164 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -86,17 +86,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
 	unsigned int alignmask = walk->alignmask;
-	unsigned int nbytes = walk->entrylen;
 
 	walk->data -= walk->offset;
 
-	if (nbytes && walk->offset & alignmask && !err) {
-		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		nbytes = min(nbytes,
-			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
-		walk->entrylen -= nbytes;
+	if (walk->entrylen && (walk->offset & alignmask) && !err) {
+		unsigned int nbytes;
 
+		walk->offset = ALIGN(walk->offset, alignmask + 1);
+		nbytes = min(walk->entrylen,
+			     (unsigned int)(PAGE_SIZE - walk->offset));
 		if (nbytes) {
+			walk->entrylen -= nbytes;
 			walk->data += walk->offset;
 			return nbytes;
 		}
@@ -116,7 +116,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 	if (err)
 		return err;
 
-	if (nbytes) {
+	if (walk->entrylen) {
 		walk->offset = 0;
 		walk->pg++;
 		return hash_walk_next(walk);
-- 
2.20.1.321.g9e740568ce-goog



* [RFC/RFT PATCH 07/15] crypto: arm64/aes-neonbs - fix returning final keystream block
       [not found] <20190123224926.250525-1-ebiggers@kernel.org>
                   ` (5 preceding siblings ...)
  2019-01-23 22:49 ` [RFC/RFT PATCH 06/15] crypto: ahash - fix another early termination in hash walk Eric Biggers
@ 2019-01-23 22:49 ` Eric Biggers
  2019-01-24 12:11   ` Ard Biesheuvel
  6 siblings, 1 reply; 10+ messages in thread
From: Eric Biggers @ 2019-01-23 22:49 UTC (permalink / raw)
  To: linux-crypto, Herbert Xu
  Cc: linux-kernel, Jason A. Donenfeld, stable, Ard Biesheuvel

From: Eric Biggers <ebiggers@google.com>

The arm64 NEON bit-sliced implementation of AES-CTR fails the improved
skcipher tests because it sometimes produces the wrong ciphertext.  The
bug is that the final keystream block isn't returned from the assembly
code when the number of non-final blocks is zero.  This can happen if
the input data ends a few bytes after a page boundary.  In this case the
last bytes get "encrypted" by XOR'ing them with uninitialized memory.

Fix the assembly code to return the final keystream block when needed.

Fixes: 88a3f582bea9 ("crypto: arm64/aes - don't use IV buffer to return final keystream block")
Cc: <stable@vger.kernel.org> # v4.11+
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 arch/arm64/crypto/aes-neonbs-core.S | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
index e613a87f8b53..8432c8d0dea6 100644
--- a/arch/arm64/crypto/aes-neonbs-core.S
+++ b/arch/arm64/crypto/aes-neonbs-core.S
@@ -971,18 +971,22 @@ CPU_LE(	rev		x8, x8		)
 
 8:	next_ctr	v0
 	st1		{v0.16b}, [x24]
-	cbz		x23, 0f
+	cbz		x23, .Lctr_done
 
 	cond_yield_neon	98b
 	b		99b
 
-0:	frame_pop
+.Lctr_done:
+	frame_pop
 	ret
 
 	/*
 	 * If we are handling the tail of the input (x6 != NULL), return the
 	 * final keystream block back to the caller.
 	 */
+0:	cbz		x25, 8b
+	st1		{v0.16b}, [x25]
+	b		8b
 1:	cbz		x25, 8b
 	st1		{v1.16b}, [x25]
 	b		8b
-- 
2.20.1.321.g9e740568ce-goog



* Re: [RFC/RFT PATCH 07/15] crypto: arm64/aes-neonbs - fix returning final keystream block
  2019-01-23 22:49 ` [RFC/RFT PATCH 07/15] crypto: arm64/aes-neonbs - fix returning final keystream block Eric Biggers
@ 2019-01-24 12:11   ` Ard Biesheuvel
  0 siblings, 0 replies; 10+ messages in thread
From: Ard Biesheuvel @ 2019-01-24 12:11 UTC (permalink / raw)
  To: Eric Biggers
  Cc: open list:HARDWARE RANDOM NUMBER GENERATOR CORE, Herbert Xu,
	Linux Kernel Mailing List, Jason A. Donenfeld, stable

On Wed, 23 Jan 2019 at 23:52, Eric Biggers <ebiggers@kernel.org> wrote:
>
> From: Eric Biggers <ebiggers@google.com>
>
> The arm64 NEON bit-sliced implementation of AES-CTR fails the improved
> skcipher tests because it sometimes produces the wrong ciphertext.  The
> bug is that the final keystream block isn't returned from the assembly
> code when the number of non-final blocks is zero.  This can happen if
> the input data ends a few bytes after a page boundary.  In this case the
> last bytes get "encrypted" by XOR'ing them with uninitialized memory.
>
> Fix the assembly code to return the final keystream block when needed.
>
> Fixes: 88a3f582bea9 ("crypto: arm64/aes - don't use IV buffer to return final keystream block")
> Cc: <stable@vger.kernel.org> # v4.11+
> Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> Signed-off-by: Eric Biggers <ebiggers@google.com>

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>

> ---
>  arch/arm64/crypto/aes-neonbs-core.S | 8 ++++++--
>  1 file changed, 6 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/crypto/aes-neonbs-core.S b/arch/arm64/crypto/aes-neonbs-core.S
> index e613a87f8b53..8432c8d0dea6 100644
> --- a/arch/arm64/crypto/aes-neonbs-core.S
> +++ b/arch/arm64/crypto/aes-neonbs-core.S
> @@ -971,18 +971,22 @@ CPU_LE(   rev             x8, x8          )
>
>  8:     next_ctr        v0
>         st1             {v0.16b}, [x24]
> -       cbz             x23, 0f
> +       cbz             x23, .Lctr_done
>
>         cond_yield_neon 98b
>         b               99b
>
> -0:     frame_pop
> +.Lctr_done:
> +       frame_pop
>         ret
>
>         /*
>          * If we are handling the tail of the input (x6 != NULL), return the
>          * final keystream block back to the caller.
>          */
> +0:     cbz             x25, 8b
> +       st1             {v0.16b}, [x25]
> +       b               8b
>  1:     cbz             x25, 8b
>         st1             {v1.16b}, [x25]
>         b               8b
> --
> 2.20.1.321.g9e740568ce-goog
>


* Re: [RFC/RFT PATCH 02/15] crypto: morus - fix handling chunked inputs
  2019-01-23 22:49 ` [RFC/RFT PATCH 02/15] crypto: morus " Eric Biggers
@ 2019-01-31  9:05   ` Ondrej Mosnacek
  2019-02-01  5:25     ` Eric Biggers
  0 siblings, 1 reply; 10+ messages in thread
From: Ondrej Mosnacek @ 2019-01-31  9:05 UTC (permalink / raw)
  To: Eric Biggers
  Cc: linux-crypto, Herbert Xu, Linux kernel mailing list,
	Jason A. Donenfeld, stable

Hi Eric,

On Wed, Jan 23, 2019 at 11:52 PM Eric Biggers <ebiggers@kernel.org> wrote:
> From: Eric Biggers <ebiggers@google.com>
>
> The generic MORUS implementations all fail the improved AEAD tests
> because they produce the wrong result with some data layouts.  Fix them.
>
> Fixes: 396be41f16fd ("crypto: morus - Add generic MORUS AEAD implementations")
> Cc: <stable@vger.kernel.org> # v4.18+
> Cc: Ondrej Mosnacek <omosnace@redhat.com>
> Signed-off-by: Eric Biggers <ebiggers@google.com>
> ---
>  crypto/morus1280.c | 13 +++++++------
>  crypto/morus640.c  | 13 +++++++------
>  2 files changed, 14 insertions(+), 12 deletions(-)
>
> diff --git a/crypto/morus1280.c b/crypto/morus1280.c
> index 3889c188f266..b83576b4eb55 100644
> --- a/crypto/morus1280.c
> +++ b/crypto/morus1280.c
> @@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
>                                            const struct morus1280_ops *ops)
>  {
>         struct skcipher_walk walk;
> -       u8 *dst;
> -       const u8 *src;
>
>         ops->skcipher_walk_init(&walk, req, false);
>
>         while (walk.nbytes) {
> -               src = walk.src.virt.addr;
> -               dst = walk.dst.virt.addr;
> +               unsigned int nbytes = walk.nbytes;
>
> -               ops->crypt_chunk(state, dst, src, walk.nbytes);
> +               if (nbytes < walk.total)
> +                       nbytes = round_down(nbytes, walk.stride);
>
> -               skcipher_walk_done(&walk, 0);
> +               ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
> +                                nbytes);
> +
> +               skcipher_walk_done(&walk, walk.nbytes - nbytes);
>         }

Hm... I assume the problem was that skcipher_walk may give you an
nbytes that is not a multiple of the algorithm's walksize (a.k.a.
walk.stride) even in the middle of the stream, right? I must have
misread the code in skcipher.c and assumed that it automatically makes
every step a multiple of the walk size, with the exception of the very
last one, which may include a final partial block. Thinking about it
now, it makes sense to me that this isn't the case, since for stream
ciphers it would mean some unnecessary memcpy'ing in the skcipher_walk
code...

Maybe you could explain the problem in one or two sentences in the
commit message(s), since the contract of skcipher_walk doesn't seem to
be documented anywhere and so it is not obvious why the change is
necessary.

Thank you for all your work on this - the improved testmgr tests were
long needed!

>  }
>
> diff --git a/crypto/morus640.c b/crypto/morus640.c
> index da06ec2f6a80..b6a477444f6d 100644
> --- a/crypto/morus640.c
> +++ b/crypto/morus640.c
> @@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
>                                           const struct morus640_ops *ops)
>  {
>         struct skcipher_walk walk;
> -       u8 *dst;
> -       const u8 *src;
>
>         ops->skcipher_walk_init(&walk, req, false);
>
>         while (walk.nbytes) {
> -               src = walk.src.virt.addr;
> -               dst = walk.dst.virt.addr;
> +               unsigned int nbytes = walk.nbytes;
>
> -               ops->crypt_chunk(state, dst, src, walk.nbytes);
> +               if (nbytes < walk.total)
> +                       nbytes = round_down(nbytes, walk.stride);
>
> -               skcipher_walk_done(&walk, 0);
> +               ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
> +                                nbytes);
> +
> +               skcipher_walk_done(&walk, walk.nbytes - nbytes);
>         }
>  }
>
> --
> 2.20.1.321.g9e740568ce-goog
>

--
Ondrej Mosnacek <omosnace at redhat dot com>
Associate Software Engineer, Security Technologies
Red Hat, Inc.


* Re: [RFC/RFT PATCH 02/15] crypto: morus - fix handling chunked inputs
  2019-01-31  9:05   ` Ondrej Mosnacek
@ 2019-02-01  5:25     ` Eric Biggers
  0 siblings, 0 replies; 10+ messages in thread
From: Eric Biggers @ 2019-02-01  5:25 UTC (permalink / raw)
  To: Ondrej Mosnacek
  Cc: linux-crypto, Herbert Xu, Linux kernel mailing list,
	Jason A. Donenfeld, stable

On Thu, Jan 31, 2019 at 10:05:17AM +0100, Ondrej Mosnacek wrote:
> Hi Eric,
> 
> On Wed, Jan 23, 2019 at 11:52 PM Eric Biggers <ebiggers@kernel.org> wrote:
> > From: Eric Biggers <ebiggers@google.com>
> >
> > The generic MORUS implementations all fail the improved AEAD tests
> > because they produce the wrong result with some data layouts.  Fix them.
> >
> > Fixes: 396be41f16fd ("crypto: morus - Add generic MORUS AEAD implementations")
> > Cc: <stable@vger.kernel.org> # v4.18+
> > Cc: Ondrej Mosnacek <omosnace@redhat.com>
> > Signed-off-by: Eric Biggers <ebiggers@google.com>
> > ---
> >  crypto/morus1280.c | 13 +++++++------
> >  crypto/morus640.c  | 13 +++++++------
> >  2 files changed, 14 insertions(+), 12 deletions(-)
> >
> > diff --git a/crypto/morus1280.c b/crypto/morus1280.c
> > index 3889c188f266..b83576b4eb55 100644
> > --- a/crypto/morus1280.c
> > +++ b/crypto/morus1280.c
> > @@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
> >                                            const struct morus1280_ops *ops)
> >  {
> >         struct skcipher_walk walk;
> > -       u8 *dst;
> > -       const u8 *src;
> >
> >         ops->skcipher_walk_init(&walk, req, false);
> >
> >         while (walk.nbytes) {
> > -               src = walk.src.virt.addr;
> > -               dst = walk.dst.virt.addr;
> > +               unsigned int nbytes = walk.nbytes;
> >
> > -               ops->crypt_chunk(state, dst, src, walk.nbytes);
> > +               if (nbytes < walk.total)
> > +                       nbytes = round_down(nbytes, walk.stride);
> >
> > -               skcipher_walk_done(&walk, 0);
> > +               ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
> > +                                nbytes);
> > +
> > +               skcipher_walk_done(&walk, walk.nbytes - nbytes);
> >         }
> 
> Hm... I assume the problem was that skcipher_walk may give you nbytes
> that is not rounded to the algorithm's walksize (aka walk.stride) even
> in the middle of the stream, right? I must have misread the code in
> skcipher.c and assumed that it automatically makes every step of a
> round size, with the exception of the very last one, which may include
> a final partial block. Thinking about it now it makes sense to me that
> this isn't the case, since for stream ciphers it would mean some
> unnecessary memcpy'ing in the skcipher_walk code...
> 
> Maybe you could explain the problem in one or two sentences in the
> commit message(s), since the contract of skcipher_walk doesn't seem to
> be documented anywhere and so it is not obvious why the change is
> necessary.
> 
> Thank you for all your work on this - the improved testmgr tests were
> long needed!
> 

Yes, that's correct.  I'll add an explanation to the commit messages.  I'm
actually not convinced that this behavior should be the default (vs.
allowing callers to opt in to it if needed), but this is how it works now...
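
So with the current behavior, a caller that needs whole blocks
mid-stream has to do what these patches do, i.e. something like the
following (crypt_chunk() standing in for the algorithm's actual
per-chunk function):

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/* Steps before the last one can be shorter than
		 * walk.stride; process only whole strides and give
		 * the rest back to the walk. */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		crypt_chunk(state, walk.dst.virt.addr,
			    walk.src.virt.addr, nbytes);

		skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}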

A while back I also started work on a documentation patch for the
skcipher_walk functions, but it's still pretty incomplete.  I'll see if I
can get it into good enough shape to send out.

- Eric

