From: "Herbert Xu" <herbert@gondor.apana.org.au>
To: Ard Biesheuvel <ardb@kernel.org>,
	Stephan Mueller <smueller@chronox.de>,
	Linux Crypto Mailing List <linux-crypto@vger.kernel.org>,
	Eric Biggers <ebiggers@kernel.org>
Subject: [v3 PATCH 6/31] crypto: ccree - Add support for chaining CTS
Date: Tue, 28 Jul 2020 17:18:51 +1000
Message-ID: <E1k0Jsx-0006Jt-2z@fornost.hmeau.com>
In-Reply-To: <20200728071746.GA22352@gondor.apana.org.au>

As it stands, cts cannot do chaining.  That is, it always performs
the cipher-text stealing at the end of a request.  This patch adds
support for chaining when the CRYPTO_TFM_REQ_MORE flag is set: such
requests are processed as plain CBC, with the stealing deferred until
the final request of the chain.

It also sets the final_chunksize so that the caller can withhold
enough data (two AES blocks) for correct processing at the true end
of a request.
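
For illustration only (not part of the patch itself), a caller might
drive a chained cts(cbc(aes)) encryption as in the sketch below.  The
helper name and the 64/36-byte split are invented for the example; it
assumes the CRYPTO_TFM_REQ_MORE flag and final_chunksize semantics
introduced earlier in this series, with middle chunks a multiple of
the chunksize and at least final_chunksize bytes withheld for the
final request:

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>

	static int cts_encrypt_chained(struct crypto_skcipher *tfm,
				       struct scatterlist *mid_sg,
				       struct scatterlist *fin_sg,
				       u8 *iv)
	{
		const unsigned int mid_len = 64; /* multiple of chunksize (16) */
		const unsigned int fin_len = 36; /* >= final_chunksize (32) */
		DECLARE_CRYPTO_WAIT(wait);
		struct skcipher_request *req;
		int err;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		/*
		 * Middle chunk: with CRYPTO_TFM_REQ_MORE set this is
		 * processed as plain CBC, and the next chaining IV is
		 * returned in iv as for any CBC request.
		 */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MORE |
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, mid_sg, mid_sg, mid_len, iv);
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
		if (err)
			goto out;

		/*
		 * Final chunk: MORE is clear, so the cipher-text
		 * stealing is performed here.
		 */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, fin_sg, fin_sg, fin_len, iv);
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	out:
		skcipher_request_free(req);
		return err;
	}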

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

 drivers/crypto/ccree/cc_cipher.c |   72 +++++++++++++++++++++++++--------------
 1 file changed, 47 insertions(+), 25 deletions(-)

diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index beeb283c3c949..83567b60d6908 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -61,9 +61,9 @@ struct cc_cipher_ctx {
 
 static void cc_cipher_complete(struct device *dev, void *cc_req, int err);
 
-static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm)
+static inline enum cc_key_type cc_key_type(struct crypto_skcipher *tfm)
 {
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct cc_cipher_ctx *ctx_p = crypto_skcipher_ctx(tfm);
 
 	return ctx_p->key_type;
 }
@@ -105,12 +105,26 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
 	return -EINVAL;
 }
 
-static int validate_data_size(struct cc_cipher_ctx *ctx_p,
+static inline int req_cipher_mode(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cc_cipher_ctx *ctx_p = crypto_skcipher_ctx(tfm);
+	int cipher_mode = ctx_p->cipher_mode;
+
+	if (cipher_mode == DRV_CIPHER_CBC_CTS &&
+	    req->base.flags & CRYPTO_TFM_REQ_MORE)
+		cipher_mode = DRV_CIPHER_CBC;
+
+	return cipher_mode;
+}
+
+static int validate_data_size(struct skcipher_request *req,
+			      struct cc_cipher_ctx *ctx_p,
 			      unsigned int size)
 {
 	switch (ctx_p->flow_mode) {
 	case S_DIN_to_AES:
-		switch (ctx_p->cipher_mode) {
+		switch (req_cipher_mode(req)) {
 		case DRV_CIPHER_XTS:
 		case DRV_CIPHER_CBC_CTS:
 			if (size >= AES_BLOCK_SIZE)
@@ -508,17 +522,18 @@ static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p)
 	}
 }
 
-static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
+static void cc_setup_readiv_desc(struct skcipher_request *req,
 				 struct cipher_req_ctx *req_ctx,
 				 unsigned int ivsize, struct cc_hw_desc desc[],
 				 unsigned int *seq_size)
 {
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cc_cipher_ctx *ctx_p = crypto_skcipher_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
-	int cipher_mode = ctx_p->cipher_mode;
 	int flow_mode = cc_out_setup_mode(ctx_p);
 	int direction = req_ctx->gen_ctx.op_type;
 	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
+	int cipher_mode = req_cipher_mode(req);
 
 	if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY)
 		return;
@@ -565,15 +580,16 @@ static void cc_setup_readiv_desc(struct crypto_tfm *tfm,
 }
 
 
-static void cc_setup_state_desc(struct crypto_tfm *tfm,
+static void cc_setup_state_desc(struct skcipher_request *req,
 				 struct cipher_req_ctx *req_ctx,
 				 unsigned int ivsize, unsigned int nbytes,
 				 struct cc_hw_desc desc[],
 				 unsigned int *seq_size)
 {
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cc_cipher_ctx *ctx_p = crypto_skcipher_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
-	int cipher_mode = ctx_p->cipher_mode;
+	int cipher_mode = req_cipher_mode(req);
 	int flow_mode = ctx_p->flow_mode;
 	int direction = req_ctx->gen_ctx.op_type;
 	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
@@ -610,15 +626,16 @@ static void cc_setup_state_desc(struct crypto_tfm *tfm,
 }
 
 
-static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
+static void cc_setup_xex_state_desc(struct skcipher_request *req,
 				 struct cipher_req_ctx *req_ctx,
 				 unsigned int ivsize, unsigned int nbytes,
 				 struct cc_hw_desc desc[],
 				 unsigned int *seq_size)
 {
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cc_cipher_ctx *ctx_p = crypto_skcipher_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
-	int cipher_mode = ctx_p->cipher_mode;
+	int cipher_mode = req_cipher_mode(req);
 	int flow_mode = ctx_p->flow_mode;
 	int direction = req_ctx->gen_ctx.op_type;
 	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
@@ -628,8 +645,8 @@ static void cc_setup_xex_state_desc(struct crypto_tfm *tfm,
 	unsigned int key_offset = key_len;
 
 	struct cc_crypto_alg *cc_alg =
-		container_of(tfm->__crt_alg, struct cc_crypto_alg,
-			     skcipher_alg.base);
+		container_of(crypto_skcipher_alg(tfm), struct cc_crypto_alg,
+			     skcipher_alg);
 
 	if (cc_alg->data_unit)
 		du_size = cc_alg->data_unit;
@@ -697,14 +714,15 @@ static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p)
 	}
 }
 
-static void cc_setup_key_desc(struct crypto_tfm *tfm,
+static void cc_setup_key_desc(struct skcipher_request *req,
 			      struct cipher_req_ctx *req_ctx,
 			      unsigned int nbytes, struct cc_hw_desc desc[],
 			      unsigned int *seq_size)
 {
-	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct cc_cipher_ctx *ctx_p = crypto_skcipher_ctx(tfm);
 	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
-	int cipher_mode = ctx_p->cipher_mode;
+	int cipher_mode = req_cipher_mode(req);
 	int flow_mode = ctx_p->flow_mode;
 	int direction = req_ctx->gen_ctx.op_type;
 	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
@@ -912,7 +930,7 @@ static int cc_cipher_process(struct skcipher_request *req,
 
 	/* STAT_PHASE_0: Init and sanity checks */
 
-	if (validate_data_size(ctx_p, nbytes)) {
+	if (validate_data_size(req, ctx_p, nbytes)) {
 		dev_dbg(dev, "Unsupported data size %d.\n", nbytes);
 		rc = -EINVAL;
 		goto exit_process;
@@ -969,17 +987,17 @@ static int cc_cipher_process(struct skcipher_request *req,
 	/* STAT_PHASE_2: Create sequence */
 
 	/* Setup state (IV)  */
-	cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+	cc_setup_state_desc(req, req_ctx, ivsize, nbytes, desc, &seq_len);
 	/* Setup MLLI line, if needed */
 	cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len);
 	/* Setup key */
-	cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len);
+	cc_setup_key_desc(req, req_ctx, nbytes, desc, &seq_len);
 	/* Setup state (IV and XEX key)  */
-	cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
+	cc_setup_xex_state_desc(req, req_ctx, ivsize, nbytes, desc, &seq_len);
 	/* Data processing */
 	cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len);
 	/* Read next IV */
-	cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len);
+	cc_setup_readiv_desc(req, req_ctx, ivsize, desc, &seq_len);
 
 	/* STAT_PHASE_3: Lock HW and push sequence */
 
@@ -1113,7 +1131,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "cts(cbc(paes))",
 		.driver_name = "cts-cbc-paes-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_sethkey,
 			.encrypt = cc_cipher_encrypt,
@@ -1121,6 +1139,8 @@ static const struct cc_alg_template skcipher_algs[] = {
 			.min_keysize = CC_HW_KEY_SIZE,
 			.max_keysize = CC_HW_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.final_chunksize = 2 * AES_BLOCK_SIZE,
 			},
 		.cipher_mode = DRV_CIPHER_CBC_CTS,
 		.flow_mode = S_DIN_to_AES,
@@ -1238,7 +1258,7 @@ static const struct cc_alg_template skcipher_algs[] = {
 	{
 		.name = "cts(cbc(aes))",
 		.driver_name = "cts-cbc-aes-ccree",
-		.blocksize = AES_BLOCK_SIZE,
+		.blocksize = 1,
 		.template_skcipher = {
 			.setkey = cc_cipher_setkey,
 			.encrypt = cc_cipher_encrypt,
@@ -1246,6 +1266,8 @@ static const struct cc_alg_template skcipher_algs[] = {
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
+			.chunksize = AES_BLOCK_SIZE,
+			.final_chunksize = 2 * AES_BLOCK_SIZE,
 			},
 		.cipher_mode = DRV_CIPHER_CBC_CTS,
 		.flow_mode = S_DIN_to_AES,
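
(Illustrative note: with .chunksize = 16 and .final_chunksize = 32 as
registered above, a caller encrypting a 100-byte message could submit
64 bytes with CRYPTO_TFM_REQ_MORE set, a multiple of the chunksize,
and the remaining 36 bytes, at least two AES blocks, in the final
request with the flag clear.)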


Thread overview: 58+ messages
2020-07-28  7:17 [v3 PATCH 0/31] crypto: skcipher - Add support for no chaining and partial chaining Herbert Xu
2020-07-28  7:18 ` [v3 PATCH 1/31] crypto: skcipher - Add final chunk size field for chaining Herbert Xu
2020-07-28 17:15   ` Eric Biggers
2020-07-28 17:22     ` Herbert Xu
2020-07-28 17:26       ` Ard Biesheuvel
2020-07-28 17:30         ` Herbert Xu
2020-07-28 17:46           ` Ard Biesheuvel
2020-07-28 22:12             ` Herbert Xu
2020-07-28  7:18 ` [v3 PATCH 2/31] crypto: algif_skcipher - Add support for final_chunksize Herbert Xu
2020-07-28  7:18 ` [v3 PATCH 3/31] crypto: cts - Add support for chaining Herbert Xu
2020-07-28 11:05   ` Ard Biesheuvel
2020-07-28 11:53     ` Herbert Xu
2020-07-28 11:59       ` Ard Biesheuvel
2020-07-28 12:03         ` Herbert Xu
2020-07-28 12:08           ` Ard Biesheuvel
2020-07-28 12:19             ` Herbert Xu
2020-07-28  7:18 ` [v3 PATCH 4/31] crypto: arm64/aes-glue - Add support for chaining CTS Herbert Xu
2020-07-28  7:18 ` [v3 PATCH 5/31] crypto: nitrox " Herbert Xu
2020-07-28  7:18 ` [v3 PATCH 6/31] crypto: ccree - Add support for chaining CTS Herbert Xu [this message]
2020-07-28  7:18 ` [v3 PATCH 7/31] crypto: skcipher - Add alg reqsize field Herbert Xu
2020-07-28  7:18 ` [v3 PATCH 8/31] crypto: skcipher - Initialise requests to zero Herbert Xu
2020-07-28 17:10   ` Eric Biggers
2020-07-29  3:38     ` Herbert Xu
2020-07-28  7:18 ` [v3 PATCH 9/31] crypto: cryptd - Add support for chaining Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 10/31] crypto: chacha-generic " Herbert Xu
2020-08-10 15:20   ` Horia Geantă
2020-08-11  0:57     ` Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 11/31] crypto: arm/chacha " Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 12/31] crypto: arm64/chacha " Herbert Xu
2020-07-29  6:16   ` Ard Biesheuvel
2020-07-29  6:28     ` Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 13/31] crypto: mips/chacha " Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 14/31] crypto: x86/chacha " Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 15/31] crypto: inside-secure - Set final_chunksize on chacha Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 16/31] crypto: caam/qi2 " Herbert Xu
2020-08-10 15:24   ` Horia Geantă
2020-07-28  7:19 ` [v3 PATCH 17/31] crypto: ctr - Allow rfc3686 to be chained Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 18/31] crypto: crypto4xx - Remove rfc3686 implementation Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 19/31] crypto: caam - Remove rfc3686 implementations Herbert Xu
2020-08-10 16:47   ` Horia Geantă
2020-08-11  0:59     ` Herbert Xu
2020-08-11  7:32       ` Horia Geantă
2020-08-11  7:34         ` Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 20/31] crypto: nitrox - Set final_chunksize on rfc3686 Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 21/31] crypto: ccp - Remove rfc3686 implementation Herbert Xu
2020-08-06 19:16   ` John Allen
2020-07-28  7:19 ` [v3 PATCH 22/31] crypto: chelsio " Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 23/31] crypto: inside-secure - Set final_chunksize on rfc3686 Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 24/31] crypto: ixp4xx - Remove rfc3686 implementation Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 25/31] crypto: nx - Set final_chunksize on rfc3686 Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 26/31] crypto: essiv - Set final_chunksize Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 27/31] crypto: simd - Add support for chaining Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 28/31] crypto: arm64/essiv - Set final_chunksize Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 29/31] crypto: ccree - Set final_chunksize on essiv Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 30/31] crypto: kw - Set final_chunksize Herbert Xu
2020-07-28  7:19 ` [v3 PATCH 31/31] crypto: salsa20-generic - Add support for chaining Herbert Xu
2020-07-28 17:19 ` [v3 PATCH 0/31] crypto: skcipher - Add support for no chaining and partial chaining Eric Biggers
2020-07-29  3:40   ` Herbert Xu
