linux-arm-kernel.lists.infradead.org archive mirror
From: Shawn Guo <shawnguo@kernel.org>
To: Ard Biesheuvel <ard.biesheuvel@linaro.org>, Marek Vasut <marex@denx.de>
Cc: Herbert Xu <herbert@gondor.apana.org.au>,
	Eric Biggers <ebiggers@google.com>,
	Sascha Hauer <s.hauer@pengutronix.de>,
	linux-crypto@vger.kernel.org,
	Pengutronix Kernel Team <kernel@pengutronix.de>,
	Fabio Estevam <festevam@gmail.com>,
	"David S. Miller" <davem@davemloft.net>,
	linux-arm-kernel@lists.infradead.org,
	NXP Linux Team <linux-imx@nxp.com>
Subject: Re: [PATCH 16/25] crypto: mxs - switch to skcipher API
Date: Mon, 14 Oct 2019 21:01:28 +0800	[thread overview]
Message-ID: <20191014130124.GT12262@dragon> (raw)
In-Reply-To: <20191014121910.7264-17-ard.biesheuvel@linaro.org>

Copy Marek, who is the author of the driver.

Shawn

On Mon, Oct 14, 2019 at 02:19:01PM +0200, Ard Biesheuvel wrote:
> Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface")
> dated 20 August 2015 introduced the new skcipher API, which is supposed to
> replace both blkcipher and ablkcipher. While all consumers of the API have
> long been converted, some producers of the ablkcipher interface remain,
> forcing us to keep the ablkcipher support routines alive, along with the
> matching code to expose [a]blkciphers via the skcipher API.
> 
> So switch this driver to the skcipher API, allowing us to finally drop the
> blkcipher code in the near future.
> 
> Cc: Shawn Guo <shawnguo@kernel.org>
> Cc: Sascha Hauer <s.hauer@pengutronix.de>
> Cc: Pengutronix Kernel Team <kernel@pengutronix.de>
> Cc: Fabio Estevam <festevam@gmail.com>
> Cc: NXP Linux Team <linux-imx@nxp.com>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
>  drivers/crypto/mxs-dcp.c | 140 +++++++++-----------
>  1 file changed, 65 insertions(+), 75 deletions(-)
> 
> diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
> index bf8d2197bc11..f438b425c655 100644
> --- a/drivers/crypto/mxs-dcp.c
> +++ b/drivers/crypto/mxs-dcp.c
> @@ -211,11 +211,11 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
>   * Encryption (AES128)
>   */
>  static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
> -			   struct ablkcipher_request *req, int init)
> +			   struct skcipher_request *req, int init)
>  {
>  	struct dcp *sdcp = global_sdcp;
>  	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
> -	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
> +	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
>  	int ret;
>  
>  	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
> @@ -274,9 +274,9 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
>  {
>  	struct dcp *sdcp = global_sdcp;
>  
> -	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
> +	struct skcipher_request *req = skcipher_request_cast(arq);
>  	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
> -	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
> +	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
>  
>  	struct scatterlist *dst = req->dst;
>  	struct scatterlist *src = req->src;
> @@ -305,7 +305,7 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
>  
>  	if (!rctx->ecb) {
>  		/* Copy the CBC IV just past the key. */
> -		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
> +		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
>  		/* CBC needs the INIT set. */
>  		init = 1;
>  	} else {
> @@ -316,10 +316,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
>  		src_buf = sg_virt(src);
>  		len = sg_dma_len(src);
>  		tlen += len;
> -		limit_hit = tlen > req->nbytes;
> +		limit_hit = tlen > req->cryptlen;
>  
>  		if (limit_hit)
> -			len = req->nbytes - (tlen - len);
> +			len = req->cryptlen - (tlen - len);
>  
>  		do {
>  			if (actx->fill + len > out_off)
> @@ -375,10 +375,10 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
>  	/* Copy the IV for CBC for chaining */
>  	if (!rctx->ecb) {
>  		if (rctx->enc)
> -			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
> +			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
>  				AES_BLOCK_SIZE);
>  		else
> -			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
> +			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
>  				AES_BLOCK_SIZE);
>  	}
>  
> @@ -422,17 +422,17 @@ static int dcp_chan_thread_aes(void *data)
>  	return 0;
>  }
>  
> -static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
> +static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
>  {
> -	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
> -	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
> +	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
> +	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
>  	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
>  	int ret;
>  
>  	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
>  	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
>  	skcipher_request_set_crypt(subreq, req->src, req->dst,
> -				   req->nbytes, req->info);
> +				   req->cryptlen, req->iv);
>  
>  	if (enc)
>  		ret = crypto_skcipher_encrypt(subreq);
> @@ -444,12 +444,12 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
>  	return ret;
>  }
>  
> -static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
> +static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
>  {
>  	struct dcp *sdcp = global_sdcp;
>  	struct crypto_async_request *arq = &req->base;
>  	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
> -	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
> +	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
>  	int ret;
>  
>  	if (unlikely(actx->key_len != AES_KEYSIZE_128))
> @@ -468,30 +468,30 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
>  	return ret;
>  }
>  
> -static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
> +static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
>  {
>  	return mxs_dcp_aes_enqueue(req, 0, 1);
>  }
>  
> -static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
> +static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
>  {
>  	return mxs_dcp_aes_enqueue(req, 1, 1);
>  }
>  
> -static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
> +static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
>  {
>  	return mxs_dcp_aes_enqueue(req, 0, 0);
>  }
>  
> -static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
> +static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
>  {
>  	return mxs_dcp_aes_enqueue(req, 1, 0);
>  }
>  
> -static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
> +static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
>  			      unsigned int len)
>  {
> -	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
> +	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
>  	unsigned int ret;
>  
>  	/*
> @@ -525,10 +525,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
>  	return ret;
>  }
>  
> -static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
> +static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
>  {
> -	const char *name = crypto_tfm_alg_name(tfm);
> -	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
> +	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
> +	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
>  	struct crypto_sync_skcipher *blk;
>  
>  	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
> @@ -536,13 +536,13 @@ static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
>  		return PTR_ERR(blk);
>  
>  	actx->fallback = blk;
> -	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
> +	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx));
>  	return 0;
>  }
>  
> -static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
> +static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
>  {
> -	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
> +	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
>  
>  	crypto_free_sync_skcipher(actx->fallback);
>  }
> @@ -854,54 +854,44 @@ static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
>  }
>  
>  /* AES 128 ECB and AES 128 CBC */
> -static struct crypto_alg dcp_aes_algs[] = {
> +static struct skcipher_alg dcp_aes_algs[] = {
>  	{
> -		.cra_name		= "ecb(aes)",
> -		.cra_driver_name	= "ecb-aes-dcp",
> -		.cra_priority		= 400,
> -		.cra_alignmask		= 15,
> -		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
> -					  CRYPTO_ALG_ASYNC |
> +		.base.cra_name		= "ecb(aes)",
> +		.base.cra_driver_name	= "ecb-aes-dcp",
> +		.base.cra_priority	= 400,
> +		.base.cra_alignmask	= 15,
> +		.base.cra_flags		= CRYPTO_ALG_ASYNC |
>  					  CRYPTO_ALG_NEED_FALLBACK,
> -		.cra_init		= mxs_dcp_aes_fallback_init,
> -		.cra_exit		= mxs_dcp_aes_fallback_exit,
> -		.cra_blocksize		= AES_BLOCK_SIZE,
> -		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
> -		.cra_type		= &crypto_ablkcipher_type,
> -		.cra_module		= THIS_MODULE,
> -		.cra_u	= {
> -			.ablkcipher = {
> -				.min_keysize	= AES_MIN_KEY_SIZE,
> -				.max_keysize	= AES_MAX_KEY_SIZE,
> -				.setkey		= mxs_dcp_aes_setkey,
> -				.encrypt	= mxs_dcp_aes_ecb_encrypt,
> -				.decrypt	= mxs_dcp_aes_ecb_decrypt
> -			},
> -		},
> +		.base.cra_blocksize	= AES_BLOCK_SIZE,
> +		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
> +		.base.cra_module	= THIS_MODULE,
> +
> +		.min_keysize		= AES_MIN_KEY_SIZE,
> +		.max_keysize		= AES_MAX_KEY_SIZE,
> +		.setkey			= mxs_dcp_aes_setkey,
> +		.encrypt		= mxs_dcp_aes_ecb_encrypt,
> +		.decrypt		= mxs_dcp_aes_ecb_decrypt,
> +		.init			= mxs_dcp_aes_fallback_init_tfm,
> +		.exit			= mxs_dcp_aes_fallback_exit_tfm,
>  	}, {
> -		.cra_name		= "cbc(aes)",
> -		.cra_driver_name	= "cbc-aes-dcp",
> -		.cra_priority		= 400,
> -		.cra_alignmask		= 15,
> -		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
> -					  CRYPTO_ALG_ASYNC |
> +		.base.cra_name		= "cbc(aes)",
> +		.base.cra_driver_name	= "cbc-aes-dcp",
> +		.base.cra_priority	= 400,
> +		.base.cra_alignmask	= 15,
> +		.base.cra_flags		= CRYPTO_ALG_ASYNC |
>  					  CRYPTO_ALG_NEED_FALLBACK,
> -		.cra_init		= mxs_dcp_aes_fallback_init,
> -		.cra_exit		= mxs_dcp_aes_fallback_exit,
> -		.cra_blocksize		= AES_BLOCK_SIZE,
> -		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
> -		.cra_type		= &crypto_ablkcipher_type,
> -		.cra_module		= THIS_MODULE,
> -		.cra_u = {
> -			.ablkcipher = {
> -				.min_keysize	= AES_MIN_KEY_SIZE,
> -				.max_keysize	= AES_MAX_KEY_SIZE,
> -				.setkey		= mxs_dcp_aes_setkey,
> -				.encrypt	= mxs_dcp_aes_cbc_encrypt,
> -				.decrypt	= mxs_dcp_aes_cbc_decrypt,
> -				.ivsize		= AES_BLOCK_SIZE,
> -			},
> -		},
> +		.base.cra_blocksize	= AES_BLOCK_SIZE,
> +		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
> +		.base.cra_module	= THIS_MODULE,
> +
> +		.min_keysize		= AES_MIN_KEY_SIZE,
> +		.max_keysize		= AES_MAX_KEY_SIZE,
> +		.setkey			= mxs_dcp_aes_setkey,
> +		.encrypt		= mxs_dcp_aes_cbc_encrypt,
> +		.decrypt		= mxs_dcp_aes_cbc_decrypt,
> +		.ivsize			= AES_BLOCK_SIZE,
> +		.init			= mxs_dcp_aes_fallback_init_tfm,
> +		.exit			= mxs_dcp_aes_fallback_exit_tfm,
>  	},
>  };
>  
> @@ -1104,8 +1094,8 @@ static int mxs_dcp_probe(struct platform_device *pdev)
>  	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
>  
>  	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
> -		ret = crypto_register_algs(dcp_aes_algs,
> -					   ARRAY_SIZE(dcp_aes_algs));
> +		ret = crypto_register_skciphers(dcp_aes_algs,
> +						ARRAY_SIZE(dcp_aes_algs));
>  		if (ret) {
>  			/* Failed to register algorithm. */
>  			dev_err(dev, "Failed to register AES crypto!\n");
> @@ -1139,7 +1129,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
>  
>  err_unregister_aes:
>  	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
> -		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
> +		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
>  
>  err_destroy_aes_thread:
>  	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
> @@ -1164,7 +1154,7 @@ static int mxs_dcp_remove(struct platform_device *pdev)
>  		crypto_unregister_ahash(&dcp_sha1_alg);
>  
>  	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
> -		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
> +		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
>  
>  	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
>  	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
> -- 
> 2.20.1
> 
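
For context, below is a minimal, hypothetical sketch of how a kernel caller
drives a "cbc(aes)" transform through the skcipher API that this series
converts the driver to. It is not part of the patch: the function name
dcp_skcipher_demo, the buffer size and the all-zero key/IV are placeholders,
and whether "cbc(aes)" actually resolves to "cbc-aes-dcp" depends on which
implementations are registered and their priorities.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int dcp_skcipher_demo(void)
{
        u8 key[AES_KEYSIZE_128] = { 0 };        /* placeholder key */
        u8 iv[AES_BLOCK_SIZE] = { 0 };          /* placeholder CBC IV */
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        u8 *buf;
        int err;

        /* Pick the highest-priority "cbc(aes)" implementation. */
        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, sizeof(key));
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        buf = kzalloc(4 * AES_BLOCK_SIZE, GFP_KERNEL); /* DMA-safe data buffer */
        if (!req || !buf) {
                err = -ENOMEM;
                goto out_free;
        }

        /* Encrypt the buffer in place. */
        sg_init_one(&sg, buf, 4 * AES_BLOCK_SIZE);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
                                           CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        /* cryptlen and iv replace the old ablkcipher nbytes/info fields. */
        skcipher_request_set_crypt(req, &sg, &sg, 4 * AES_BLOCK_SIZE, iv);

        /*
         * The DCP hardware completes asynchronously via the driver's
         * kthread; crypto_wait_req() makes this call synchronous here.
         */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_free:
        kfree(buf);
        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}

The cryptlen and iv passed to skcipher_request_set_crypt() are what the
converted driver reads back as req->cryptlen and req->iv, replacing the old
ablkcipher req->nbytes and req->info fields; that is essentially the
mechanical substitution the diff above performs on the provider side.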

Thread overview: 45+ messages
2019-10-14 12:18 [PATCH 00/25] crypto: convert h/w accelerator driver to skcipher API Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 01/25] crypto: virtio - implement missing support for output IVs Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 02/25] crypto: virtio - deal with unsupported input sizes Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 03/25] crypto: virtio - switch to skcipher API Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 04/25] crypto: ccp - switch from ablkcipher to skcipher Ard Biesheuvel
2019-10-18 14:15   ` Hook, Gary
2019-10-21 11:54     ` Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 05/25] crypto: omap - switch to skcipher API Ard Biesheuvel
2019-10-15 17:28   ` Tony Lindgren
2019-10-17 10:25     ` Tero Kristo
2019-10-17 10:45       ` Ard Biesheuvel
2019-10-17 11:25         ` Tero Kristo
2019-10-17 11:56           ` Ard Biesheuvel
2019-10-17 12:07             ` Tero Kristo
2019-10-14 12:18 ` [PATCH 06/25] crypto: ux500 " Ard Biesheuvel
2019-10-16 13:01   ` Linus Walleij
2019-10-14 12:18 ` [PATCH 07/25] crypto: s5p " Ard Biesheuvel
2019-10-17 15:18   ` Kamil Konieczny
2019-10-17 15:28     ` Ard Biesheuvel
2019-10-21 10:05   ` Krzysztof Kozlowski
2019-10-21 12:00     ` Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 08/25] crypto: atmel-aes " Ard Biesheuvel
2019-10-15 10:17   ` Tudor.Ambarus
2019-10-15 10:31     ` Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 09/25] crypto: atmel-tdes " Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 10/25] crypto: bcm-spu " Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 11/25] crypto: nitrox - remove cra_type reference to ablkcipher Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 12/25] crypto: cavium/cpt - switch to skcipher API Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 13/25] crypto: chelsio " Ard Biesheuvel
2019-10-14 12:18 ` [PATCH 14/25] crypto: hifn " Ard Biesheuvel
2019-10-14 12:19 ` [PATCH 15/25] crypto: ixp4xx " Ard Biesheuvel
2019-10-16 13:02   ` Linus Walleij
2019-10-14 12:19 ` [PATCH 16/25] crypto: mxs " Ard Biesheuvel
2019-10-14 13:01   ` Shawn Guo [this message]
2019-10-16 15:59   ` Horia Geanta
2019-10-14 12:19 ` [PATCH 17/25] crypto: mediatek " Ard Biesheuvel
2019-10-14 12:19 ` [PATCH 18/25] crypto: picoxcell " Ard Biesheuvel
2019-10-14 12:19 ` [PATCH 19/25] crypto: sahara " Ard Biesheuvel
2019-10-14 12:19 ` [PATCH 20/25] crypto: stm32 " Ard Biesheuvel
2019-10-14 12:19 ` [PATCH 21/25] crypto: rockchip " Ard Biesheuvel
2019-10-14 12:19 ` [PATCH 22/25] crypto: qce " Ard Biesheuvel
2019-10-14 12:19 ` [PATCH 23/25] crypto: niagara2 " Ard Biesheuvel
2019-10-14 18:08   ` David Miller
2019-10-14 12:19 ` [PATCH 24/25] crypto: talitos " Ard Biesheuvel
2019-10-14 12:19 ` [PATCH 25/25] crypto: qat " Ard Biesheuvel
