* [BUGFIX -v2 for .32] crypto, gcm, fix another complete call in complete function
@ 2009-11-09  7:24 Huang Ying
From: Huang Ying @ 2009-11-09  7:24 UTC
  To: Herbert Xu; +Cc: linux-kernel, linux-crypto

The flow of the complete function (xxx_done) in gcm.c is as follows:

void complete(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;

	if (!err) {
		err = async_next_step();
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
	}

	complete_for_next_step(areq, err); /* BUG: *areq may already be freed */
}

But *areq may be destroyed in async_next_step(), which prevents
complete_for_next_step() from working properly. To fix this, one of
the following methods is used for each complete function:

- Add a __complete() for each complete(), which accepts a struct
  aead_request *req instead of areq, so that areq is never used after
  it may have been destroyed.

- Expand complete_for_next_step() inline at the call site.

The fix is based on an idea from Herbert Xu.
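
With the first method, the flow becomes the following (a sketch using
the same generic names as above; note that areq is never dereferenced
once async_next_step() may have destroyed it):

static void __complete(struct aead_request *req, int err)
{
	if (!err) {
		err = async_next_step();
		if (err == -EINPROGRESS || err == -EBUSY)
			return;
	}

	/* works on req only, so it is safe even if *areq is gone */
	__complete_for_next_step(req, err);
}

static void complete(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;

	__complete(req, err);
}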

Signed-off-by: Huang Ying <ying.huang@intel.com>
---
 crypto/gcm.c |  120 ++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 86 insertions(+), 34 deletions(-)

--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -40,7 +40,7 @@ struct crypto_rfc4106_ctx {
 struct crypto_gcm_ghash_ctx {
 	unsigned int cryptlen;
 	struct scatterlist *src;
-	crypto_completion_t complete;
+	void (*complete)(struct aead_request *req, int err);
 };
 
 struct crypto_gcm_req_priv_ctx {
@@ -267,54 +267,73 @@ static int gcm_hash_final(struct aead_re
 	return crypto_ahash_final(ahreq);
 }
 
-static void gcm_hash_final_done(struct crypto_async_request *areq,
-				int err)
+static void __gcm_hash_final_done(struct aead_request *req,
+				  struct crypto_gcm_req_priv_ctx *pctx,
+				  int err)
 {
-	struct aead_request *req = areq->data;
-	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 
 	if (!err)
 		crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
 
-	gctx->complete(areq, err);
+	gctx->complete(req, err);
 }
 
-static void gcm_hash_len_done(struct crypto_async_request *areq,
-			      int err)
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
+	__gcm_hash_final_done(req, pctx, err);
+}
+
+static void __gcm_hash_len_done(struct aead_request *req,
+				struct crypto_gcm_req_priv_ctx *pctx,
+				int err)
+{
 	if (!err) {
 		err = gcm_hash_final(req, pctx);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
 	}
 
-	gcm_hash_final_done(areq, err);
+	__gcm_hash_final_done(req, pctx, err);
 }
 
-static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
-				       int err)
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
+	__gcm_hash_len_done(req, pctx, err);
+}
+
+static void __gcm_hash_crypt_remain_done(struct aead_request *req,
+					 struct crypto_gcm_req_priv_ctx *pctx,
+					 int err)
+{
 	if (!err) {
 		err = gcm_hash_len(req, pctx);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
 	}
 
-	gcm_hash_len_done(areq, err);
+	__gcm_hash_len_done(req, pctx, err);
 }
 
-static void gcm_hash_crypt_done(struct crypto_async_request *areq,
-				int err)
+static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
+				       int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+	__gcm_hash_crypt_remain_done(req, pctx, err);
+}
+
+static void __gcm_hash_crypt_done(struct aead_request *req,
+				  struct crypto_gcm_req_priv_ctx *pctx,
+				  int err)
+{
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 	unsigned int remain;
 
@@ -327,14 +346,21 @@ static void gcm_hash_crypt_done(struct c
 			return;
 	}
 
-	gcm_hash_crypt_remain_done(areq, err);
+	__gcm_hash_crypt_remain_done(req, pctx, err);
 }
 
-static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
-					   int err)
+static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+	__gcm_hash_crypt_done(req, pctx, err);
+}
+
+static void __gcm_hash_assoc_remain_done(struct aead_request *req,
+					 struct crypto_gcm_req_priv_ctx *pctx,
+					 int err)
+{
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 	crypto_completion_t complete;
 	unsigned int remain = 0;
@@ -350,16 +376,24 @@ static void gcm_hash_assoc_remain_done(s
 	}
 
 	if (remain)
-		gcm_hash_crypt_done(areq, err);
+		__gcm_hash_crypt_done(req, pctx, err);
 	else
-		gcm_hash_crypt_remain_done(areq, err);
+		__gcm_hash_crypt_remain_done(req, pctx, err);
 }
 
-static void gcm_hash_assoc_done(struct crypto_async_request *areq,
-				int err)
+static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
+				       int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+	__gcm_hash_assoc_remain_done(req, pctx, err);
+}
+
+static void __gcm_hash_assoc_done(struct aead_request *req,
+				  struct crypto_gcm_req_priv_ctx *pctx,
+				  int err)
+{
 	unsigned int remain;
 
 	if (!err) {
@@ -371,14 +405,21 @@ static void gcm_hash_assoc_done(struct c
 			return;
 	}
 
-	gcm_hash_assoc_remain_done(areq, err);
+	__gcm_hash_assoc_remain_done(req, pctx, err);
 }
 
-static void gcm_hash_init_done(struct crypto_async_request *areq,
-			       int err)
+static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+	__gcm_hash_assoc_done(req, pctx, err);
+}
+
+static void __gcm_hash_init_done(struct aead_request *req,
+				 struct crypto_gcm_req_priv_ctx *pctx,
+				 int err)
+{
 	crypto_completion_t complete;
 	unsigned int remain = 0;
 
@@ -393,9 +434,17 @@ static void gcm_hash_init_done(struct cr
 	}
 
 	if (remain)
-		gcm_hash_assoc_done(areq, err);
+		__gcm_hash_assoc_done(req, pctx, err);
 	else
-		gcm_hash_assoc_remain_done(areq, err);
+		__gcm_hash_assoc_remain_done(req, pctx, err);
+}
+
+static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
+{
+	struct aead_request *req = areq->data;
+	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
+
+	__gcm_hash_init_done(req, pctx, err);
 }
 
 static int gcm_hash(struct aead_request *req,
@@ -457,10 +506,8 @@ static void gcm_enc_copy_hash(struct aea
 				 crypto_aead_authsize(aead), 1);
 }
 
-static void gcm_enc_hash_done(struct crypto_async_request *areq,
-				     int err)
+static void gcm_enc_hash_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
 	if (!err)
@@ -470,7 +517,7 @@ static void gcm_enc_hash_done(struct cry
 }
 
 static void gcm_encrypt_done(struct crypto_async_request *areq,
-				     int err)
+			     int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
@@ -479,9 +526,13 @@ static void gcm_encrypt_done(struct cryp
 		err = gcm_hash(req, pctx);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
+		else if (!err) {
+			crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+			gcm_enc_copy_hash(req, pctx);
+		}
 	}
 
-	gcm_enc_hash_done(areq, err);
+	aead_request_complete(req, err);
 }
 
 static int crypto_gcm_encrypt(struct aead_request *req)
@@ -538,9 +589,8 @@ static void gcm_decrypt_done(struct cryp
 	aead_request_complete(req, err);
 }
 
-static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
+static void gcm_dec_hash_done(struct aead_request *req, int err)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 	struct ablkcipher_request *abreq = &pctx->u.abreq;
 	struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
@@ -552,9 +602,11 @@ static void gcm_dec_hash_done(struct cry
 		err = crypto_ablkcipher_decrypt(abreq);
 		if (err == -EINPROGRESS || err == -EBUSY)
 			return;
+		else if (!err)
+			err = crypto_gcm_verify(req, pctx);
 	}
 
-	gcm_decrypt_done(areq, err);
+	aead_request_complete(req, err);
 }
 
 static int crypto_gcm_decrypt(struct aead_request *req)




* Re: [BUGFIX -v2 for .32] crypto, gcm, fix another complete call in complete function
@ 2009-11-09 19:02 Herbert Xu
From: Herbert Xu @ 2009-11-09 19:02 UTC
  To: Huang Ying; +Cc: linux-kernel, linux-crypto

On Mon, Nov 09, 2009 at 03:24:14PM +0800, Huang Ying wrote:
> The flow of the complete function (xxx_done) in gcm.c is as follows:

Thanks, the patch looks pretty good overall.

> -static void gcm_hash_final_done(struct crypto_async_request *areq,
> -				int err)
> +static void __gcm_hash_final_done(struct aead_request *req,
> +				  struct crypto_gcm_req_priv_ctx *pctx,
> +				  int err)

Just one nit though, do we really need to carry this pctx around
everywhere? It seems to me that it's always crypto_gcm_reqctx(req),
no?

Cheers,
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


* Re: [BUGFIX -v2 for .32] crypto, gcm, fix another complete call in complete function
@ 2009-11-10  2:49 Huang Ying
From: Huang Ying @ 2009-11-10  2:49 UTC
  To: Herbert Xu; +Cc: linux-kernel, linux-crypto

On Tue, 2009-11-10 at 03:02 +0800, Herbert Xu wrote: 
> On Mon, Nov 09, 2009 at 03:24:14PM +0800, Huang Ying wrote:
> > The flow of the complete function (xxx_done) in gcm.c is as follows:
> 
> Thanks, the patch looks pretty good overall.
> 
> > -static void gcm_hash_final_done(struct crypto_async_request *areq,
> > -				int err)
> > +static void __gcm_hash_final_done(struct aead_request *req,
> > +				  struct crypto_gcm_req_priv_ctx *pctx,
> > +				  int err)
> 
> Just one nit though, do we really need to carry this pctx around
> everywhere? It seems to me that it's always crypto_gcm_reqctx(req),
> no?

Yes, this is for performance only. crypto_gcm_reqctx(req) is not
trivial (it needs to access the tfm) and is used by every xxx_done
function, so I think it is better to call crypto_gcm_reqctx() once and
pass the result down. What do you think?
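
For reference, crypto_gcm_reqctx() looks roughly like this (a
paraphrase, not the exact source); the alignmask lookup through the
tfm is what makes it non-trivial:

static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
	struct aead_request *req)
{
	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));

	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}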

Best Regards,
Huang Ying


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [BUGFIX -v2 for .32] crypto, gcm, fix another complete call in complete function
@ 2009-11-10  3:10 Herbert Xu
From: Herbert Xu @ 2009-11-10  3:10 UTC
  To: Huang Ying; +Cc: linux-kernel, linux-crypto

On Tue, Nov 10, 2009 at 10:49:59AM +0800, Huang Ying wrote:
>
> Yes, this is for performance only. crypto_gcm_reqctx(req) is not
> trivial (it needs to access the tfm) and is used by every xxx_done
> function, so I think it is better to call crypto_gcm_reqctx() once and
> pass the result down. What do you think?

Since we only support blocksize == 16, that means the alignment
cannot exceed 16 bytes.  So just always align to 16 bytes and that
should make crypto_gcm_reqctx trivial once optimised by the
compiler.
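
In other words (a sketch of the idea, not a tested patch): with the
alignment fixed at 16 bytes, the helper no longer has to read the
alignmask from the tfm, and the compiler can fold it into constant
pointer arithmetic:

static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
	struct aead_request *req)
{
	/* alignmask never exceeds 15, so 16-byte alignment is enough */
	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), 16);
}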

Cheers,
-- 
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt


* Re: [BUGFIX -v2 for .32] crypto, gcm, fix another complete call in complete function
@ 2009-11-10  4:20 Huang Ying
From: Huang Ying @ 2009-11-10  4:20 UTC
  To: Herbert Xu; +Cc: linux-kernel, linux-crypto

On Tue, 2009-11-10 at 11:10 +0800, Herbert Xu wrote: 
> On Tue, Nov 10, 2009 at 10:49:59AM +0800, Huang Ying wrote:
> >
> > Yes, this is for performance only. crypto_gcm_reqctx(req) is not
> > trivial (it needs to access the tfm) and is used by every xxx_done
> > function, so I think it is better to call crypto_gcm_reqctx() once and
> > pass the result down. What do you think?
> 
> Since we only support blocksize == 16, that means the alignment
> cannot exceed 16 bytes.  So just always align to 16 bytes and that
> should make crypto_gcm_reqctx trivial once optimised by the
> compiler.

Doesn't that seem a little tricky?

Anyway, I will prepare a pure bug-fix patch and leave the performance
optimization for later.

Best Regards,
Huang Ying



