From: Eric Biggers <ebiggers@kernel.org>
To: linux-crypto@vger.kernel.org
Cc: Arnd Bergmann <arnd@arndb.de>
Subject: [PATCH] crypto: xts - prefix function and struct names with "xts"
Date: Fri, 10 Jul 2020 20:34:28 -0700
Message-ID: <20200711033428.134567-1-ebiggers@kernel.org>

From: Eric Biggers <ebiggers@google.com>

Overly-generic names can cause problems like naming collisions,
confusing crash reports, and reduced grep-ability.  E.g. see
commit d099ea6e6fde ("crypto - Avoid free() namespace collision").
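
For illustration (a contrived sketch, not code from the tree), the sort
of collision and ambiguity a generic name invites:

	struct inst;

	/*
	 * Generic name: GCC treats free() as a built-in and warns along
	 * the lines of "conflicting types for built-in function 'free'"
	 * [-Wbuiltin-declaration-mismatch], and a bare "free" in a stack
	 * trace or in grep output says nothing about where it lives.
	 */
	static void free(struct inst *inst);

	/* Prefixed name: unique, greppable, self-identifying in oopses. */
	static void xts_free(struct inst *inst);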

Clean this up for the xts template by prefixing the names with "xts_".

(I didn't use "crypto_xts_" instead because that seems overkill.)

Also constify the tfm context in a couple of places, and make
xts_free_instance() use the instance context structure so that it
doesn't just assume the crypto_skcipher_spawn is at the beginning.
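
Concretely (restating the relevant hunk below in isolation, with full
function bodies for clarity):

	/*
	 * Before: the raw instance context pointer is passed to
	 * crypto_drop_skcipher(), which is only correct as long as
	 * 'spawn' happens to be the first member of
	 * struct xts_instance_ctx.
	 */
	static void crypto_xts_free(struct skcipher_instance *inst)
	{
		crypto_drop_skcipher(skcipher_instance_ctx(inst));
		kfree(inst);
	}

	/*
	 * After: explicit member access, so the structure layout no
	 * longer matters.
	 */
	static void xts_free_instance(struct skcipher_instance *inst)
	{
		struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);

		crypto_drop_skcipher(&ictx->spawn);
		kfree(inst);
	}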

Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/xts.c | 137 +++++++++++++++++++++++++++------------------------
 1 file changed, 72 insertions(+), 65 deletions(-)

diff --git a/crypto/xts.c b/crypto/xts.c
index 9a7adab6c3e1..3c3ed02c7663 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -20,7 +20,7 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 
-struct priv {
+struct xts_tfm_ctx {
 	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 };
@@ -30,17 +30,17 @@ struct xts_instance_ctx {
 	char name[CRYPTO_MAX_ALG_NAME];
 };
 
-struct rctx {
+struct xts_request_ctx {
 	le128 t;
 	struct scatterlist *tail;
 	struct scatterlist sg[2];
 	struct skcipher_request subreq;
 };
 
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
-		  unsigned int keylen)
+static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
+		      unsigned int keylen)
 {
-	struct priv *ctx = crypto_skcipher_ctx(parent);
+	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
 	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 	int err;
@@ -78,9 +78,10 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
 * multiple calls to the 'ecb(..)' instance, which usually would be slower than
  * just doing the gf128mul_x_ble() calls again.
  */
-static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
+static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
+			 bool enc)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
 	const int bs = XTS_BLOCK_SIZE;
@@ -128,23 +129,23 @@ static int xor_tweak(struct skcipher_request *req, bool second_pass, bool enc)
 	return err;
 }
 
-static int xor_tweak_pre(struct skcipher_request *req, bool enc)
+static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, false, enc);
+	return xts_xor_tweak(req, false, enc);
 }
 
-static int xor_tweak_post(struct skcipher_request *req, bool enc)
+static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
 {
-	return xor_tweak(req, true, enc);
+	return xts_xor_tweak(req, true, enc);
 }
 
-static void cts_done(struct crypto_async_request *areq, int err)
+static void xts_cts_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 	le128 b;
 
 	if (!err) {
-		struct rctx *rctx = skcipher_request_ctx(req);
+		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 
 		scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
 		le128_xor(&b, &rctx->t, &b);
@@ -154,12 +155,13 @@ static void cts_done(struct crypto_async_request *areq, int err)
 	skcipher_request_complete(req, err);
 }
 
-static int cts_final(struct skcipher_request *req,
-		     int (*crypt)(struct skcipher_request *req))
+static int xts_cts_final(struct skcipher_request *req,
+			 int (*crypt)(struct skcipher_request *req))
 {
-	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	const struct xts_tfm_ctx *ctx =
+		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 	int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 	int tail = req->cryptlen % XTS_BLOCK_SIZE;
 	le128 b[2];
@@ -177,7 +179,8 @@ static int cts_final(struct skcipher_request *req,
 	scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
 
 	skcipher_request_set_tfm(subreq, ctx->child);
-	skcipher_request_set_callback(subreq, req->base.flags, cts_done, req);
+	skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
+				      req);
 	skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
 				   XTS_BLOCK_SIZE, NULL);
 
@@ -192,18 +195,18 @@ static int cts_final(struct skcipher_request *req,
 	return 0;
 }
 
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void xts_encrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
 	if (!err) {
-		struct rctx *rctx = skcipher_request_ctx(req);
+		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 
 		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-		err = xor_tweak_post(req, true);
+		err = xts_xor_tweak_post(req, true);
 
 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
-			err = cts_final(req, crypto_skcipher_encrypt);
+			err = xts_cts_final(req, crypto_skcipher_encrypt);
 			if (err == -EINPROGRESS)
 				return;
 		}
@@ -212,18 +215,18 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	skcipher_request_complete(req, err);
 }
 
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static void xts_decrypt_done(struct crypto_async_request *areq, int err)
 {
 	struct skcipher_request *req = areq->data;
 
 	if (!err) {
-		struct rctx *rctx = skcipher_request_ctx(req);
+		struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 
 		rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-		err = xor_tweak_post(req, false);
+		err = xts_xor_tweak_post(req, false);
 
 		if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
-			err = cts_final(req, crypto_skcipher_decrypt);
+			err = xts_cts_final(req, crypto_skcipher_decrypt);
 			if (err == -EINPROGRESS)
 				return;
 		}
@@ -232,10 +235,12 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	skcipher_request_complete(req, err);
 }
 
-static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
+static int xts_init_crypt(struct skcipher_request *req,
+			  crypto_completion_t compl)
 {
-	struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
-	struct rctx *rctx = skcipher_request_ctx(req);
+	const struct xts_tfm_ctx *ctx =
+		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 
 	if (req->cryptlen < XTS_BLOCK_SIZE)
@@ -252,45 +257,45 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t compl)
 	return 0;
 }
 
-static int encrypt(struct skcipher_request *req)
+static int xts_encrypt(struct skcipher_request *req)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 	int err;
 
-	err = init_crypt(req, encrypt_done) ?:
-	      xor_tweak_pre(req, true) ?:
+	err = xts_init_crypt(req, xts_encrypt_done) ?:
+	      xts_xor_tweak_pre(req, true) ?:
 	      crypto_skcipher_encrypt(subreq) ?:
-	      xor_tweak_post(req, true);
+	      xts_xor_tweak_post(req, true);
 
 	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
 		return err;
 
-	return cts_final(req, crypto_skcipher_encrypt);
+	return xts_cts_final(req, crypto_skcipher_encrypt);
 }
 
-static int decrypt(struct skcipher_request *req)
+static int xts_decrypt(struct skcipher_request *req)
 {
-	struct rctx *rctx = skcipher_request_ctx(req);
+	struct xts_request_ctx *rctx = skcipher_request_ctx(req);
 	struct skcipher_request *subreq = &rctx->subreq;
 	int err;
 
-	err = init_crypt(req, decrypt_done) ?:
-	      xor_tweak_pre(req, false) ?:
+	err = xts_init_crypt(req, xts_decrypt_done) ?:
+	      xts_xor_tweak_pre(req, false) ?:
 	      crypto_skcipher_decrypt(subreq) ?:
-	      xor_tweak_post(req, false);
+	      xts_xor_tweak_post(req, false);
 
 	if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
 		return err;
 
-	return cts_final(req, crypto_skcipher_decrypt);
+	return xts_cts_final(req, crypto_skcipher_decrypt);
 }
 
-static int init_tfm(struct crypto_skcipher *tfm)
+static int xts_init_tfm(struct crypto_skcipher *tfm)
 {
 	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
 	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
-	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 	struct crypto_skcipher *child;
 	struct crypto_cipher *tweak;
 
@@ -309,26 +314,28 @@ static int init_tfm(struct crypto_skcipher *tfm)
 	ctx->tweak = tweak;
 
 	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
-					 sizeof(struct rctx));
+					 sizeof(struct xts_request_ctx));
 
 	return 0;
 }
 
-static void exit_tfm(struct crypto_skcipher *tfm)
+static void xts_exit_tfm(struct crypto_skcipher *tfm)
 {
-	struct priv *ctx = crypto_skcipher_ctx(tfm);
+	struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	crypto_free_skcipher(ctx->child);
 	crypto_free_cipher(ctx->tweak);
 }
 
-static void crypto_xts_free(struct skcipher_instance *inst)
+static void xts_free_instance(struct skcipher_instance *inst)
 {
-	crypto_drop_skcipher(skcipher_instance_ctx(inst));
+	struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ictx->spawn);
 	kfree(inst);
 }
 
-static int create(struct crypto_template *tmpl, struct rtattr **tb)
+static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	struct skcipher_instance *inst;
 	struct xts_instance_ctx *ctx;
@@ -416,43 +423,43 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
 	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
 
-	inst->alg.base.cra_ctxsize = sizeof(struct priv);
+	inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
 
-	inst->alg.init = init_tfm;
-	inst->alg.exit = exit_tfm;
+	inst->alg.init = xts_init_tfm;
+	inst->alg.exit = xts_exit_tfm;
 
-	inst->alg.setkey = setkey;
-	inst->alg.encrypt = encrypt;
-	inst->alg.decrypt = decrypt;
+	inst->alg.setkey = xts_setkey;
+	inst->alg.encrypt = xts_encrypt;
+	inst->alg.decrypt = xts_decrypt;
 
-	inst->free = crypto_xts_free;
+	inst->free = xts_free_instance;
 
 	err = skcipher_register_instance(tmpl, inst);
 	if (err) {
 err_free_inst:
-		crypto_xts_free(inst);
+		xts_free_instance(inst);
 	}
 	return err;
 }
 
-static struct crypto_template crypto_tmpl = {
+static struct crypto_template xts_tmpl = {
 	.name = "xts",
-	.create = create,
+	.create = xts_create,
 	.module = THIS_MODULE,
 };
 
-static int __init crypto_module_init(void)
+static int __init xts_module_init(void)
 {
-	return crypto_register_template(&crypto_tmpl);
+	return crypto_register_template(&xts_tmpl);
 }
 
-static void __exit crypto_module_exit(void)
+static void __exit xts_module_exit(void)
 {
-	crypto_unregister_template(&crypto_tmpl);
+	crypto_unregister_template(&xts_tmpl);
 }
 
-subsys_initcall(crypto_module_init);
-module_exit(crypto_module_exit);
+subsys_initcall(xts_module_init);
+module_exit(xts_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XTS block cipher mode");
-- 
2.27.0

