* [PATCH 0/4] crypto: gcm-aes-ni cleanups
@ 2020-12-12  9:16 Ard Biesheuvel
  2020-12-12  9:16 ` [PATCH 1/4] crypto: x86/gcm-aes-ni - prevent misaligned IV buffers on the stack Ard Biesheuvel
                   ` (4 more replies)
  0 siblings, 5 replies; 7+ messages in thread
From: Ard Biesheuvel @ 2020-12-12  9:16 UTC (permalink / raw)
  To: linux-crypto; +Cc: Ard Biesheuvel, Eric Biggers, Herbert Xu

Clean up some issues and peculiarities in the gcm(aes-ni) driver.

Cc: Eric Biggers <ebiggers@google.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>

Ard Biesheuvel (4):
  crypto: x86/gcm-aes-ni - prevent misaligned IV buffers on the stack
  crypto: x86/gcm-aes-ni - drop unused asm prototypes
  crypto: x86/gcm-aes-ni - clean up mapping of associated data
  crypto: x86/gcm-aes-ni - refactor scatterlist processing

 arch/x86/crypto/aesni-intel_glue.c | 238 ++++++--------------
 1 file changed, 74 insertions(+), 164 deletions(-)

-- 
2.17.1


* [PATCH 1/4] crypto: x86/gcm-aes-ni - prevent misaligned IV buffers on the stack
  2020-12-12  9:16 [PATCH 0/4] crypto: gcm-aes-ni cleanups Ard Biesheuvel
@ 2020-12-12  9:16 ` Ard Biesheuvel
  2020-12-12  9:16 ` [PATCH 2/4] crypto: x86/gcm-aes-ni - drop unused asm prototypes Ard Biesheuvel
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Ard Biesheuvel @ 2020-12-12  9:16 UTC (permalink / raw)
  To: linux-crypto; +Cc: Ard Biesheuvel, Eric Biggers, Herbert Xu, stable

The GCM mode driver uses 16-byte aligned buffers on the stack to pass
the IV and other data to the asm helpers, but unfortunately, the x86
port does not guarantee that the stack pointer is 16-byte aligned upon
entry in the first place. Since the compiler is not aware of this, it
will not emit the additional stack realignment sequence that is needed,
and so the alignment is not guaranteed to be more than 8 bytes.

So instead, allocate some padding on the stack, and realign the IV and
data pointers by hand.
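
For illustration, a minimal user-space sketch of the realignment idiom
applied below (PTR_ALIGN is redefined here to mirror the kernel macro,
and the example() wrapper is purely illustrative; AESNI_ALIGN is 16 in
this driver):

  #include <stdint.h>

  #define AESNI_ALIGN 16
  /* round p up to the next multiple of a, like the kernel's PTR_ALIGN() */
  #define PTR_ALIGN(p, a) \
          ((void *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

  void example(void)
  {
          /*
           * The stack guarantees only 8-byte alignment, so the buffer may
           * start 8 bytes past a 16-byte boundary at worst. Over-allocating
           * by AESNI_ALIGN - 8 bytes leaves room to round the pointer up
           * while still keeping 16 usable bytes behind it.
           */
          uint8_t ivbuf[16 + (AESNI_ALIGN - 8)] __attribute__((aligned(8)));
          uint8_t *iv = PTR_ALIGN(ivbuf, AESNI_ALIGN);

          (void)iv;       /* iv is now 16-byte aligned, inside ivbuf */
  }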

Cc: <stable@vger.kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/crypto/aesni-intel_glue.c | 28 +++++++++++---------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 8bfc2bbc877d..223670feaffa 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -803,7 +803,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
-	struct gcm_context_data data AESNI_ALIGN_ATTR;
+	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
+	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
 	struct scatter_walk dst_sg_walk = {};
 	unsigned long left = req->cryptlen;
 	unsigned long len, srclen, dstlen;
@@ -852,8 +853,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 	}
 
 	kernel_fpu_begin();
-	gcm_tfm->init(aes_ctx, &data, iv,
-		hash_subkey, assoc, assoclen);
+	gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
 	if (req->src != req->dst) {
 		while (left) {
 			src = scatterwalk_map(&src_sg_walk);
@@ -863,10 +863,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 			len = min(srclen, dstlen);
 			if (len) {
 				if (enc)
-					gcm_tfm->enc_update(aes_ctx, &data,
+					gcm_tfm->enc_update(aes_ctx, data,
 							     dst, src, len);
 				else
-					gcm_tfm->dec_update(aes_ctx, &data,
+					gcm_tfm->dec_update(aes_ctx, data,
 							     dst, src, len);
 			}
 			left -= len;
@@ -884,10 +884,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 			len = scatterwalk_clamp(&src_sg_walk, left);
 			if (len) {
 				if (enc)
-					gcm_tfm->enc_update(aes_ctx, &data,
+					gcm_tfm->enc_update(aes_ctx, data,
 							     src, src, len);
 				else
-					gcm_tfm->dec_update(aes_ctx, &data,
+					gcm_tfm->dec_update(aes_ctx, data,
 							     src, src, len);
 			}
 			left -= len;
@@ -896,7 +896,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 			scatterwalk_done(&src_sg_walk, 1, left);
 		}
 	}
-	gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
+	gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
 	kernel_fpu_end();
 
 	if (!assocmem)
@@ -945,7 +945,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 	void *aes_ctx = &(ctx->aes_key_expanded);
-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 	unsigned int i;
 	__be32 counter = cpu_to_be32(1);
 
@@ -972,7 +973,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
 	void *aes_ctx = &(ctx->aes_key_expanded);
-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 	unsigned int i;
 
 	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
@@ -1119,7 +1121,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
 	void *aes_ctx = &(ctx->aes_key_expanded);
-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 	__be32 counter = cpu_to_be32(1);
 
 	memcpy(iv, req->iv, 12);
@@ -1135,7 +1138,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
 	void *aes_ctx = &(ctx->aes_key_expanded);
-	u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
+	u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
+	u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
 
 	memcpy(iv, req->iv, 12);
 	*((__be32 *)(iv+12)) = counter;
-- 
2.17.1


* [PATCH 2/4] crypto: x86/gcm-aes-ni - drop unused asm prototypes
  2020-12-12  9:16 [PATCH 0/4] crypto: gcm-aes-ni cleanups Ard Biesheuvel
  2020-12-12  9:16 ` [PATCH 1/4] crypto: x86/gcm-aes-ni - prevent misaligned IV buffers on the stack Ard Biesheuvel
@ 2020-12-12  9:16 ` Ard Biesheuvel
  2020-12-12  9:16 ` [PATCH 3/4] crypto: x86/gcm-aes-ni - clean up mapping of associated data Ard Biesheuvel
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 7+ messages in thread
From: Ard Biesheuvel @ 2020-12-12  9:16 UTC (permalink / raw)
  To: linux-crypto; +Cc: Ard Biesheuvel, Eric Biggers, Herbert Xu

Drop the prototypes of some asm routines that are declared but never
called from the C code.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/crypto/aesni-intel_glue.c | 67 --------------------
 1 file changed, 67 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 223670feaffa..063cf579a8fc 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -111,49 +111,6 @@ asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
 				 const u8 *in, bool enc, le128 *iv);
 
-/* asmlinkage void aesni_gcm_enc()
- * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
- * struct gcm_context_data.  May be uninitialized.
- * u8 *out, Ciphertext output. Encrypt in-place is allowed.
- * const u8 *in, Plaintext input
- * unsigned long plaintext_len, Length of data in bytes for encryption.
- * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
- *         16-byte aligned pointer.
- * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
- * const u8 *aad, Additional Authentication Data (AAD)
- * unsigned long aad_len, Length of AAD in bytes.
- * u8 *auth_tag, Authenticated Tag output.
- * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
- *          Valid values are 16 (most likely), 12 or 8.
- */
-asmlinkage void aesni_gcm_enc(void *ctx,
-			struct gcm_context_data *gdata, u8 *out,
-			const u8 *in, unsigned long plaintext_len, u8 *iv,
-			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len);
-
-/* asmlinkage void aesni_gcm_dec()
- * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
- * struct gcm_context_data.  May be uninitialized.
- * u8 *out, Plaintext output. Decrypt in-place is allowed.
- * const u8 *in, Ciphertext input
- * unsigned long ciphertext_len, Length of data in bytes for decryption.
- * u8 *iv, Pre-counter block j0: 12 byte IV concatenated with 0x00000001.
- *         16-byte aligned pointer.
- * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
- * const u8 *aad, Additional Authentication Data (AAD)
- * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
- * to be 8 or 12 bytes
- * u8 *auth_tag, Authenticated Tag output.
- * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
- * Valid values are 16 (most likely), 12 or 8.
- */
-asmlinkage void aesni_gcm_dec(void *ctx,
-			struct gcm_context_data *gdata, u8 *out,
-			const u8 *in, unsigned long ciphertext_len, u8 *iv,
-			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len);
-
 /* Scatter / Gather routines, with args similar to above */
 asmlinkage void aesni_gcm_init(void *ctx,
 			       struct gcm_context_data *gdata,
@@ -218,18 +175,6 @@ asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
 				   struct gcm_context_data *gdata,
 				   u8 *auth_tag, unsigned long auth_tag_len);
 
-asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
-				struct gcm_context_data *gdata, u8 *out,
-			const u8 *in, unsigned long plaintext_len, u8 *iv,
-			const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len);
-
-asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
-				struct gcm_context_data *gdata, u8 *out,
-			const u8 *in, unsigned long ciphertext_len, u8 *iv,
-			const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len);
-
 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
 	.init = &aesni_gcm_init_avx_gen2,
 	.enc_update = &aesni_gcm_enc_update_avx_gen2,
@@ -260,18 +205,6 @@ asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
 				   struct gcm_context_data *gdata,
 				   u8 *auth_tag, unsigned long auth_tag_len);
 
-asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
-				struct gcm_context_data *gdata, u8 *out,
-			const u8 *in, unsigned long plaintext_len, u8 *iv,
-			const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len);
-
-asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
-				struct gcm_context_data *gdata, u8 *out,
-			const u8 *in, unsigned long ciphertext_len, u8 *iv,
-			const u8 *aad, unsigned long aad_len,
-			u8 *auth_tag, unsigned long auth_tag_len);
-
 static const struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
 	.init = &aesni_gcm_init_avx_gen4,
 	.enc_update = &aesni_gcm_enc_update_avx_gen4,
-- 
2.17.1


* [PATCH 3/4] crypto: x86/gcm-aes-ni - clean up mapping of associated data
  2020-12-12  9:16 [PATCH 0/4] crypto: gcm-aes-ni cleanups Ard Biesheuvel
  2020-12-12  9:16 ` [PATCH 1/4] crypto: x86/gcm-aes-ni - prevent misaligned IV buffers on the stack Ard Biesheuvel
  2020-12-12  9:16 ` [PATCH 2/4] crypto: x86/gcm-aes-ni - drop unused asm prototypes Ard Biesheuvel
@ 2020-12-12  9:16 ` Ard Biesheuvel
  2020-12-12  9:17 ` [PATCH 4/4] crypto: x86/gcm-aes-ni - refactor scatterlist processing Ard Biesheuvel
  2020-12-21 22:01 ` [PATCH 0/4] crypto: gcm-aes-ni cleanups Eric Biggers
  4 siblings, 0 replies; 7+ messages in thread
From: Ard Biesheuvel @ 2020-12-12  9:16 UTC (permalink / raw)
  To: linux-crypto; +Cc: Ard Biesheuvel, Eric Biggers, Herbert Xu

The gcm(aes-ni) driver is only built for x86_64, which does not make
use of highmem. So testing for PageHighMem is pointless and can be
omitted.

While at it, replace the unconditional GFP_ATOMIC with a GFP mask
decided at runtime, based on whether the request is allowed to sleep.
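
As a sketch, the resulting allocation logic (the alloc_assoc() wrapper
is hypothetical, for illustration only; the actual change is in the
hunk below):

  #include <linux/slab.h>
  #include <crypto/aead.h>

  /* Sleep in kmalloc() only if the caller told us we may. */
  static void *alloc_assoc(struct aead_request *req, unsigned int assoclen)
  {
          gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                        ? GFP_KERNEL : GFP_ATOMIC;

          return kmalloc(assoclen, flags);
  }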

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/crypto/aesni-intel_glue.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 063cf579a8fc..e5c4d0cce828 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -760,14 +760,15 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 		gcm_tfm = &aesni_gcm_tfm_sse;
 
 	/* Linearize assoc, if not already linear */
-	if (req->src->length >= assoclen && req->src->length &&
-		(!PageHighMem(sg_page(req->src)) ||
-			req->src->offset + req->src->length <= PAGE_SIZE)) {
+	if (req->src->length >= assoclen && req->src->length) {
 		scatterwalk_start(&assoc_sg_walk, req->src);
 		assoc = scatterwalk_map(&assoc_sg_walk);
 	} else {
+		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+			      ? GFP_KERNEL : GFP_ATOMIC;
+
 		/* assoc can be any length, so must be on heap */
-		assocmem = kmalloc(assoclen, GFP_ATOMIC);
+		assocmem = kmalloc(assoclen, flags);
 		if (unlikely(!assocmem))
 			return -ENOMEM;
 		assoc = assocmem;
-- 
2.17.1


* [PATCH 4/4] crypto: x86/gcm-aes-ni - refactor scatterlist processing
  2020-12-12  9:16 [PATCH 0/4] crypto: gcm-aes-ni cleanups Ard Biesheuvel
                   ` (2 preceding siblings ...)
  2020-12-12  9:16 ` [PATCH 3/4] crypto: x86/gcm-aes-ni - clean up mapping of associated data Ard Biesheuvel
@ 2020-12-12  9:17 ` Ard Biesheuvel
  2020-12-21 22:01 ` [PATCH 0/4] crypto: gcm-aes-ni cleanups Eric Biggers
  4 siblings, 0 replies; 7+ messages in thread
From: Ard Biesheuvel @ 2020-12-12  9:17 UTC (permalink / raw)
  To: linux-crypto; +Cc: Ard Biesheuvel, Eric Biggers, Herbert Xu

Currently, the gcm(aes-ni) driver open-codes the scatterlist handling
that is already encapsulated by the skcipher walk API. So let's switch
to that instead.

Also, move the tag handling at the end of gcmaes_crypt_by_sg(), which
depends on whether we are encrypting or decrypting, into the callers,
each of which only ever does one or the other.
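
For reference, the shape of the resulting walk-based loop, condensed
from the hunk below (process() stands in for the enc_update/dec_update
calls; the kernel_fpu_begin/end dance and error unwinding are elided):

  err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
            : skcipher_walk_aead_decrypt(&walk, req, false);

  while (!err && walk.nbytes > 0) {
          /* the walk API maps, clamps and advances the buffers for us */
          process(walk.dst.virt.addr, walk.src.virt.addr, walk.nbytes);
          err = skcipher_walk_done(&walk, 0);
  }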

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/x86/crypto/aesni-intel_glue.c | 144 ++++++++------------
 1 file changed, 58 insertions(+), 86 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index e5c4d0cce828..b0a13ab2f70b 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -731,25 +731,18 @@ static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
 
 static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 			      unsigned int assoclen, u8 *hash_subkey,
-			      u8 *iv, void *aes_ctx)
+			      u8 *iv, void *aes_ctx, u8 *auth_tag,
+			      unsigned long auth_tag_len)
 {
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
 	const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
 	u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
 	struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
-	struct scatter_walk dst_sg_walk = {};
 	unsigned long left = req->cryptlen;
-	unsigned long len, srclen, dstlen;
 	struct scatter_walk assoc_sg_walk;
-	struct scatter_walk src_sg_walk;
-	struct scatterlist src_start[2];
-	struct scatterlist dst_start[2];
-	struct scatterlist *src_sg;
-	struct scatterlist *dst_sg;
-	u8 *src, *dst, *assoc;
+	struct skcipher_walk walk;
 	u8 *assocmem = NULL;
-	u8 authTag[16];
+	u8 *assoc;
+	int err;
 
 	if (!enc)
 		left -= auth_tag_len;
@@ -776,61 +769,27 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
 	}
 
-	if (left) {
-		src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
-		scatterwalk_start(&src_sg_walk, src_sg);
-		if (req->src != req->dst) {
-			dst_sg = scatterwalk_ffwd(dst_start, req->dst,
-						  req->assoclen);
-			scatterwalk_start(&dst_sg_walk, dst_sg);
-		}
-	}
+	err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
+		  : skcipher_walk_aead_decrypt(&walk, req, false);
+	if (err)
+		return err;
 
 	kernel_fpu_begin();
 	gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
-	if (req->src != req->dst) {
-		while (left) {
-			src = scatterwalk_map(&src_sg_walk);
-			dst = scatterwalk_map(&dst_sg_walk);
-			srclen = scatterwalk_clamp(&src_sg_walk, left);
-			dstlen = scatterwalk_clamp(&dst_sg_walk, left);
-			len = min(srclen, dstlen);
-			if (len) {
-				if (enc)
-					gcm_tfm->enc_update(aes_ctx, data,
-							     dst, src, len);
-				else
-					gcm_tfm->dec_update(aes_ctx, data,
-							     dst, src, len);
-			}
-			left -= len;
-
-			scatterwalk_unmap(src);
-			scatterwalk_unmap(dst);
-			scatterwalk_advance(&src_sg_walk, len);
-			scatterwalk_advance(&dst_sg_walk, len);
-			scatterwalk_done(&src_sg_walk, 0, left);
-			scatterwalk_done(&dst_sg_walk, 1, left);
-		}
-	} else {
-		while (left) {
-			dst = src = scatterwalk_map(&src_sg_walk);
-			len = scatterwalk_clamp(&src_sg_walk, left);
-			if (len) {
-				if (enc)
-					gcm_tfm->enc_update(aes_ctx, data,
-							     src, src, len);
-				else
-					gcm_tfm->dec_update(aes_ctx, data,
-							     src, src, len);
-			}
-			left -= len;
-			scatterwalk_unmap(src);
-			scatterwalk_advance(&src_sg_walk, len);
-			scatterwalk_done(&src_sg_walk, 1, left);
-		}
+
+	while (walk.nbytes > 0) {
+		(enc ? gcm_tfm->enc_update
+		     : gcm_tfm->dec_update)(aes_ctx, data, walk.dst.virt.addr,
+					    walk.src.virt.addr, walk.nbytes);
+		kernel_fpu_end();
+
+		err = skcipher_walk_done(&walk, 0);
+		if (err)
+			return err;
+
+		kernel_fpu_begin();
 	}
-	gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
+	gcm_tfm->finalize(aes_ctx, data, auth_tag, auth_tag_len);
 	kernel_fpu_end();
 
 	if (!assocmem)
@@ -838,40 +797,53 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
 	else
 		kfree(assocmem);
 
-	if (!enc) {
-		u8 authTagMsg[16];
-
-		/* Copy out original authTag */
-		scatterwalk_map_and_copy(authTagMsg, req->src,
-					 req->assoclen + req->cryptlen -
-					 auth_tag_len,
-					 auth_tag_len, 0);
-
-		/* Compare generated tag with passed in tag. */
-		return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
-			-EBADMSG : 0;
-	}
-
-	/* Copy in the authTag */
-	scatterwalk_map_and_copy(authTag, req->dst,
-				 req->assoclen + req->cryptlen,
-				 auth_tag_len, 1);
-
 	return 0;
 }
 
 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 {
-	return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
-				aes_ctx);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 auth_tag[16];
+	int err;
+
+	err = gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv, aes_ctx,
+				 auth_tag, auth_tag_len);
+	if (err)
+		return err;
+
+	scatterwalk_map_and_copy(auth_tag, req->dst,
+				 req->assoclen + req->cryptlen,
+				 auth_tag_len, 1);
+	return 0;
 }
 
 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
 			  u8 *hash_subkey, u8 *iv, void *aes_ctx)
 {
-	return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
-				aes_ctx);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 auth_tag_msg[16];
+	u8 auth_tag[16];
+	int err;
+
+	err = gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv, aes_ctx,
+				 auth_tag, auth_tag_len);
+	if (err)
+		return err;
+
+	/* Copy out original auth_tag */
+	scatterwalk_map_and_copy(auth_tag_msg, req->src,
+				 req->assoclen + req->cryptlen - auth_tag_len,
+				 auth_tag_len, 0);
+
+	/* Compare generated tag with passed in tag. */
+	if (crypto_memneq(auth_tag_msg, auth_tag, auth_tag_len)) {
+		memzero_explicit(auth_tag, sizeof(auth_tag));
+		return -EBADMSG;
+	}
+	return 0;
 }
 
 static int helper_rfc4106_encrypt(struct aead_request *req)
-- 
2.17.1


* Re: [PATCH 0/4] crypto: gcm-aes-ni cleanups
  2020-12-12  9:16 [PATCH 0/4] crypto: gcm-aes-ni cleanups Ard Biesheuvel
                   ` (3 preceding siblings ...)
  2020-12-12  9:17 ` [PATCH 4/4] crypto: x86/gcm-aes-ni - refactor scatterlist processing Ard Biesheuvel
@ 2020-12-21 22:01 ` Eric Biggers
  2020-12-22  8:00   ` Ard Biesheuvel
  4 siblings, 1 reply; 7+ messages in thread
From: Eric Biggers @ 2020-12-21 22:01 UTC (permalink / raw)
  To: Ard Biesheuvel; +Cc: linux-crypto, Herbert Xu

Hi Ard,

On Sat, Dec 12, 2020 at 10:16:56AM +0100, Ard Biesheuvel wrote:
> Clean up some issues and peculiarities in the gcm(aes-ni) driver.
> 
> Cc: Eric Biggers <ebiggers@google.com>
> Cc: Herbert Xu <herbert@gondor.apana.org.au>
> 
> Ard Biesheuvel (4):
>   crypto: x86/gcm-aes-ni - prevent misaligned IV buffers on the stack
>   crypto: x86/gcm-aes-ni - drop unused asm prototypes
>   crypto: x86/gcm-aes-ni - clean up mapping of associated data
>   crypto: x86/gcm-aes-ni - refactor scatterlist processing
> 
>  arch/x86/crypto/aesni-intel_glue.c | 238 ++++++--------------
>  1 file changed, 74 insertions(+), 164 deletions(-)
> 

I get the following with this series applied:

BUG: sleeping function called from invalid context at mm/page_alloc.c:4934
in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 426, name: cryptomgr_test
no locks held by cryptomgr_test/426.
CPU: 0 PID: 426 Comm: cryptomgr_test Not tainted 5.10.0-12509-g8cc543a98aac #2
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ArchLinux 1.14.0-1 04/01/2014
Call Trace:
 show_stack+0x3d/0x3f arch/x86/kernel/dumpstack.c:318
 __dump_stack lib/dump_stack.c:79 [inline]
 dump_stack+0xa4/0xd9 lib/dump_stack.c:120
 ___might_sleep.cold+0x186/0x1b5 kernel/sched/core.c:7911
 __might_sleep+0xa1/0x1a0 kernel/sched/core.c:7865
 prepare_alloc_pages mm/page_alloc.c:4934 [inline]
 __alloc_pages_nodemask+0x46f/0x6b0 mm/page_alloc.c:4978
 alloc_pages_current+0x139/0x240 mm/mempolicy.c:2267
 alloc_pages include/linux/gfp.h:547 [inline]
 __get_free_pages+0x10/0xa0 mm/page_alloc.c:5031
 skcipher_walk_next+0x736/0xd30 crypto/skcipher.c:370
 skcipher_walk_first+0xc5/0x110 crypto/skcipher.c:445
 skcipher_walk_aead_common+0x7f2/0xbe0 crypto/skcipher.c:544
 skcipher_walk_aead_encrypt+0x6d/0xa0 crypto/skcipher.c:557
 gcmaes_crypt_by_sg+0x3e2/0x740 arch/x86/crypto/aesni-intel_glue.c:655
 gcmaes_encrypt+0xd2/0x260 arch/x86/crypto/aesni-intel_glue.c:694
 helper_rfc4106_encrypt+0x213/0x4d0 arch/x86/crypto/aesni-intel_glue.c:755
 crypto_aead_encrypt+0xf1/0x160 crypto/aead.c:94
 simd_aead_encrypt+0x186/0x270 crypto/simd.c:328
 crypto_aead_encrypt+0xf1/0x160 crypto/aead.c:94
 test_aead_vec_cfg+0x9e0/0x1980 crypto/testmgr.c:2012
 test_aead_vec+0x1e8/0x280 crypto/testmgr.c:2134
 test_aead crypto/testmgr.c:2539 [inline]
 alg_test_aead+0x1f7/0x410 crypto/testmgr.c:2585
 alg_test+0x3fc/0x840 crypto/testmgr.c:5660
 cryptomgr_test+0x5a/0x80 crypto/algboss.c:206
 kthread+0x366/0x440 kernel/kthread.c:292
 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:296

* Re: [PATCH 0/4] crypto: gcm-aes-ni cleanups
  2020-12-21 22:01 ` [PATCH 0/4] crypto: gcm-aes-ni cleanups Eric Biggers
@ 2020-12-22  8:00   ` Ard Biesheuvel
  0 siblings, 0 replies; 7+ messages in thread
From: Ard Biesheuvel @ 2020-12-22  8:00 UTC (permalink / raw)
  To: Eric Biggers; +Cc: Linux Crypto Mailing List, Herbert Xu

On Mon, 21 Dec 2020 at 23:01, Eric Biggers <ebiggers@kernel.org> wrote:
>
> Hi Ard,
>
> On Sat, Dec 12, 2020 at 10:16:56AM +0100, Ard Biesheuvel wrote:
> > Clean up some issues and peculiarities in the gcm(aes-ni) driver.
> >
> > Cc: Eric Biggers <ebiggers@google.com>
> > Cc: Herbert Xu <herbert@gondor.apana.org.au>
> >
> > Ard Biesheuvel (4):
> >   crypto: x86/gcm-aes-ni - prevent misaligned IV buffers on the stack
> >   crypto: x86/gcm-aes-ni - drop unused asm prototypes
> >   crypto: x86/gcm-aes-ni - clean up mapping of associated data
> >   crypto: x86/gcm-aes-ni - refactor scatterlist processing
> >
> >  arch/x86/crypto/aesni-intel_glue.c | 238 ++++++--------------
> >  1 file changed, 74 insertions(+), 164 deletions(-)
> >
>
> I get the following with this series applied:
>
> BUG: sleeping function called from invalid context at mm/page_alloc.c:4934
> in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 426, name: cryptomgr_test
> no locks held by cryptomgr_test/426.
> CPU: 0 PID: 426 Comm: cryptomgr_test Not tainted 5.10.0-12509-g8cc543a98aac #2
> Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ArchLinux 1.14.0-1 04/01/2014
> Call Trace:
>  show_stack+0x3d/0x3f arch/x86/kernel/dumpstack.c:318
>  __dump_stack lib/dump_stack.c:79 [inline]
>  dump_stack+0xa4/0xd9 lib/dump_stack.c:120
>  ___might_sleep.cold+0x186/0x1b5 kernel/sched/core.c:7911
>  __might_sleep+0xa1/0x1a0 kernel/sched/core.c:7865
>  prepare_alloc_pages mm/page_alloc.c:4934 [inline]
>  __alloc_pages_nodemask+0x46f/0x6b0 mm/page_alloc.c:4978
>  alloc_pages_current+0x139/0x240 mm/mempolicy.c:2267
>  alloc_pages include/linux/gfp.h:547 [inline]
>  __get_free_pages+0x10/0xa0 mm/page_alloc.c:5031
>  skcipher_walk_next+0x736/0xd30 crypto/skcipher.c:370
>  skcipher_walk_first+0xc5/0x110 crypto/skcipher.c:445
>  skcipher_walk_aead_common+0x7f2/0xbe0 crypto/skcipher.c:544
>  skcipher_walk_aead_encrypt+0x6d/0xa0 crypto/skcipher.c:557
>  gcmaes_crypt_by_sg+0x3e2/0x740 arch/x86/crypto/aesni-intel_glue.c:655
>  gcmaes_encrypt+0xd2/0x260 arch/x86/crypto/aesni-intel_glue.c:694
>  helper_rfc4106_encrypt+0x213/0x4d0 arch/x86/crypto/aesni-intel_glue.c:755
>  crypto_aead_encrypt+0xf1/0x160 crypto/aead.c:94
...


Thanks for spotting that. It turns out the scatterwalk map/unmap of the
assoc data was keeping preemption disabled across the start of the
skcipher walk. I'll fix this in v2.
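
For context, a condensed sketch of the offending sequence (the sketch()
function is hypothetical; on this kernel, scatterwalk_map() is
kmap_atomic() based, so preemption stays disabled until the matching
scatterwalk_unmap()):

  #include <crypto/scatterwalk.h>
  #include <crypto/internal/skcipher.h>

  static int sketch(struct aead_request *req,
                    struct scatter_walk *assoc_sg_walk)
  {
          struct skcipher_walk walk;
          u8 *assoc;
          int err;

          assoc = scatterwalk_map(assoc_sg_walk);  /* preemption goes off */
          /* atomic=false: the walk may kmalloc with GFP_KERNEL and sleep */
          err = skcipher_walk_aead_encrypt(&walk, req, false);
          scatterwalk_unmap(assoc);                /* preemption back on */
          return err;
  }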
