From: Sven Auhagen <sven.auhagen@voleatech.de>
To: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Linux Crypto Mailing List <linux-crypto@vger.kernel.org>,
Boris Brezillon <bbrezillon@kernel.org>,
Arnaud Ebalard <arno@natisbad.org>,
Srujana Challa <schalla@marvell.com>
Subject: Re: [PATCH] crypto: marvell/cesa - Fix use of sg_pcopy on iomem pointer
Date: Fri, 22 Jan 2021 11:02:03 +0100 [thread overview]
Message-ID: <20210122100203.uyurjtxjiier6257@SvensMacbookPro.hq.voleatech.com> (raw)
In-Reply-To: <20210121051646.GA24114@gondor.apana.org.au>
On Thu, Jan 21, 2021 at 04:16:46PM +1100, Herbert Xu wrote:
> The cesa driver mixes use of iomem pointers and normal kernel
> pointers. Sometimes it uses memcpy_toio/memcpy_fromio on both
> while other times it would use straight memcpy on both, through
> the sg_pcopy_* helpers.
>
> This patch fixes this by adding a new field sram_pool to the engine
> for the normal pointer case which then allows us to use the right
> interface depending on the value of engine->pool.
Hi Herbert,
sorry, no luck on my armhf system with this patch:
[169999.310405] INFO: task cryptomgr_test:7698 blocked for more than 120 seconds.
[169999.317669] Tainted: G OE 5.10.0-1-vtair-armmp #1 Debian 5.10.3-3~vtair
[169999.326139] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
Best
Sven
>
> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
>
> diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
> index 06211858bf2e..f14aac532f53 100644
> --- a/drivers/crypto/marvell/cesa/cesa.c
> +++ b/drivers/crypto/marvell/cesa/cesa.c
> @@ -381,10 +381,10 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
> engine->pool = of_gen_pool_get(cesa->dev->of_node,
> "marvell,crypto-srams", idx);
> if (engine->pool) {
> - engine->sram = gen_pool_dma_alloc(engine->pool,
> - cesa->sram_size,
> - &engine->sram_dma);
> - if (engine->sram)
> + engine->sram_pool = gen_pool_dma_alloc(engine->pool,
> + cesa->sram_size,
> + &engine->sram_dma);
> + if (engine->sram_pool)
> return 0;
>
> engine->pool = NULL;
> @@ -422,7 +422,7 @@ static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
> struct mv_cesa_engine *engine = &cesa->engines[idx];
>
> if (engine->pool)
> - gen_pool_free(engine->pool, (unsigned long)engine->sram,
> + gen_pool_free(engine->pool, (unsigned long)engine->sram_pool,
> cesa->sram_size);
> else
> dma_unmap_resource(cesa->dev, engine->sram_dma,
> diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
> index fabfaaccca87..5c5eff0adbcc 100644
> --- a/drivers/crypto/marvell/cesa/cesa.h
> +++ b/drivers/crypto/marvell/cesa/cesa.h
> @@ -428,6 +428,7 @@ struct mv_cesa_dev {
> * @id: engine id
> * @regs: engine registers
> * @sram: SRAM memory region
> + * @sram_pool: SRAM memory region from pool
> * @sram_dma: DMA address of the SRAM memory region
> * @lock: engine lock
> * @req: current crypto request
> @@ -448,7 +449,10 @@ struct mv_cesa_dev {
> struct mv_cesa_engine {
> int id;
> void __iomem *regs;
> - void __iomem *sram;
> + union {
> + void __iomem *sram;
> + void *sram_pool;
> + };
> dma_addr_t sram_dma;
> spinlock_t lock;
> struct crypto_async_request *req;
> @@ -867,6 +871,31 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
> struct mv_cesa_sg_dma_iter *sgiter,
> gfp_t gfp_flags);
>
> +size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
> + struct scatterlist *sgl, unsigned int nents,
> + unsigned int sram_off, size_t buflen, off_t skip,
> + bool to_sram);
> +
> +static inline size_t mv_cesa_sg_copy_to_sram(struct mv_cesa_engine *engine,
> + struct scatterlist *sgl,
> + unsigned int nents,
> + unsigned int sram_off,
> + size_t buflen, off_t skip)
> +{
> + return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
> + true);
> +}
> +
> +static inline size_t mv_cesa_sg_copy_from_sram(struct mv_cesa_engine *engine,
> + struct scatterlist *sgl,
> + unsigned int nents,
> + unsigned int sram_off,
> + size_t buflen, off_t skip)
> +{
> + return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
> + false);
> +}
> +
> /* Algorithm definitions */
>
> extern struct ahash_alg mv_md5_alg;
> diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
> index b4a6ff9dd6d5..b739d3b873dc 100644
> --- a/drivers/crypto/marvell/cesa/cipher.c
> +++ b/drivers/crypto/marvell/cesa/cipher.c
> @@ -89,22 +89,29 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
> CESA_SA_SRAM_PAYLOAD_SIZE);
>
> mv_cesa_adjust_op(engine, &sreq->op);
> - memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
> + if (engine->pool)
> + memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
> + else
> + memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
>
> - len = sg_pcopy_to_buffer(req->src, creq->src_nents,
> - engine->sram + CESA_SA_DATA_SRAM_OFFSET,
> - len, sreq->offset);
> + len = mv_cesa_sg_copy_to_sram(engine, req->src, creq->src_nents,
> + CESA_SA_DATA_SRAM_OFFSET, len,
> + sreq->offset);
>
> sreq->size = len;
> mv_cesa_set_crypt_op_len(&sreq->op, len);
>
> /* FIXME: only update enc_len field */
> if (!sreq->skip_ctx) {
> - memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
> + if (engine->pool)
> + memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op));
> + else
> + memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
> sreq->skip_ctx = true;
> - } else {
> + } else if (engine->pool)
> + memcpy(engine->sram_pool, &sreq->op, sizeof(sreq->op.desc));
> + else
> memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
> - }
>
> mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
> writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
> @@ -121,9 +128,9 @@ static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
> struct mv_cesa_engine *engine = creq->base.engine;
> size_t len;
>
> - len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
> - engine->sram + CESA_SA_DATA_SRAM_OFFSET,
> - sreq->size, sreq->offset);
> + len = mv_cesa_sg_copy_from_sram(engine, req->dst, creq->dst_nents,
> + CESA_SA_DATA_SRAM_OFFSET, sreq->size,
> + sreq->offset);
>
> sreq->offset += len;
> if (sreq->offset < req->cryptlen)
> @@ -214,11 +221,14 @@ mv_cesa_skcipher_complete(struct crypto_async_request *req)
> basereq = &creq->base;
> memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
> ivsize);
> - } else {
> + } else if (engine->pool)
> + memcpy(skreq->iv,
> + engine->sram_pool + CESA_SA_CRYPT_IV_SRAM_OFFSET,
> + ivsize);
> + else
> memcpy_fromio(skreq->iv,
> engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
> ivsize);
> - }
> }
>
> static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
> diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
> index 8cf9fd518d86..c72b0672fc71 100644
> --- a/drivers/crypto/marvell/cesa/hash.c
> +++ b/drivers/crypto/marvell/cesa/hash.c
> @@ -168,7 +168,12 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
> int i;
>
> mv_cesa_adjust_op(engine, &creq->op_tmpl);
> - memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
> + if (engine->pool)
> + memcpy(engine->sram_pool, &creq->op_tmpl,
> + sizeof(creq->op_tmpl));
> + else
> + memcpy_toio(engine->sram, &creq->op_tmpl,
> + sizeof(creq->op_tmpl));
>
> if (!sreq->offset) {
> digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
> @@ -177,9 +182,14 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
> engine->regs + CESA_IVDIG(i));
> }
>
> - if (creq->cache_ptr)
> - memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
> - creq->cache, creq->cache_ptr);
> + if (creq->cache_ptr) {
> + if (engine->pool)
> + memcpy(engine->sram_pool + CESA_SA_DATA_SRAM_OFFSET,
> + creq->cache, creq->cache_ptr);
> + else
> + memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
> + creq->cache, creq->cache_ptr);
> + }
>
> len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
> CESA_SA_SRAM_PAYLOAD_SIZE);
> @@ -190,12 +200,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
> }
>
> if (len - creq->cache_ptr)
> - sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
> - engine->sram +
> - CESA_SA_DATA_SRAM_OFFSET +
> - creq->cache_ptr,
> - len - creq->cache_ptr,
> - sreq->offset);
> + sreq->offset += mv_cesa_sg_copy_to_sram(
> + engine, req->src, creq->src_nents,
> + CESA_SA_DATA_SRAM_OFFSET + creq->cache_ptr,
> + len - creq->cache_ptr, sreq->offset);
>
> op = &creq->op_tmpl;
>
> @@ -220,16 +228,28 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
> if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
> len &= CESA_HASH_BLOCK_SIZE_MSK;
> new_cache_ptr = 64 - trailerlen;
> - memcpy_fromio(creq->cache,
> - engine->sram +
> - CESA_SA_DATA_SRAM_OFFSET + len,
> - new_cache_ptr);
> + if (engine->pool)
> + memcpy(creq->cache,
> + engine->sram_pool +
> + CESA_SA_DATA_SRAM_OFFSET + len,
> + new_cache_ptr);
> + else
> + memcpy_fromio(creq->cache,
> + engine->sram +
> + CESA_SA_DATA_SRAM_OFFSET +
> + len,
> + new_cache_ptr);
> } else {
> i = mv_cesa_ahash_pad_req(creq, creq->cache);
> len += i;
> - memcpy_toio(engine->sram + len +
> - CESA_SA_DATA_SRAM_OFFSET,
> - creq->cache, i);
> + if (engine->pool)
> + memcpy(engine->sram_pool + len +
> + CESA_SA_DATA_SRAM_OFFSET,
> + creq->cache, i);
> + else
> + memcpy_toio(engine->sram + len +
> + CESA_SA_DATA_SRAM_OFFSET,
> + creq->cache, i);
> }
>
> if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
> @@ -243,7 +263,10 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
> mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);
>
> /* FIXME: only update enc_len field */
> - memcpy_toio(engine->sram, op, sizeof(*op));
> + if (engine->pool)
> + memcpy(engine->sram_pool, op, sizeof(*op));
> + else
> + memcpy_toio(engine->sram, op, sizeof(*op));
>
> if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
> mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
> diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
> index 0e0d63359798..f0b5537038c2 100644
> --- a/drivers/crypto/marvell/cesa/tdma.c
> +++ b/drivers/crypto/marvell/cesa/tdma.c
> @@ -350,3 +350,53 @@ int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
>
> return 0;
> }
> +
> +size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
> + struct scatterlist *sgl, unsigned int nents,
> + unsigned int sram_off, size_t buflen, off_t skip,
> + bool to_sram)
> +{
> + unsigned int sg_flags = SG_MITER_ATOMIC;
> + struct sg_mapping_iter miter;
> + unsigned int offset = 0;
> +
> + if (to_sram)
> + sg_flags |= SG_MITER_FROM_SG;
> + else
> + sg_flags |= SG_MITER_TO_SG;
> +
> + sg_miter_start(&miter, sgl, nents, sg_flags);
> +
> + if (!sg_miter_skip(&miter, skip))
> + return 0;
> +
> + while ((offset < buflen) && sg_miter_next(&miter)) {
> + unsigned int len;
> +
> + len = min(miter.length, buflen - offset);
> +
> + if (to_sram) {
> + if (engine->pool)
> + memcpy(engine->sram_pool + sram_off + offset,
> + miter.addr, len);
> + else
> + memcpy_toio(engine->sram + sram_off + offset,
> + miter.addr, len);
> + } else {
> + if (engine->pool)
> + memcpy(miter.addr,
> + engine->sram_pool + sram_off + offset,
> + len);
> + else
> + memcpy_fromio(miter.addr,
> + engine->sram + sram_off + offset,
> + len);
> + }
> +
> + offset += len;
> + }
> +
> + sg_miter_stop(&miter);
> +
> + return offset;
> +}
> --
> Email: Herbert Xu <herbert@gondor.apana.org.au>
> Home Page: http://gondor.apana.org.au/~herbert/
> PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt
next prev parent reply other threads:[~2021-01-22 10:05 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-01-21 5:16 [PATCH] crypto: marvell/cesa - Fix use of sg_pcopy on iomem pointer Herbert Xu
2021-01-22 10:02 ` Sven Auhagen [this message]
2021-01-22 10:40 ` Herbert Xu
2021-01-22 10:59 ` Sven Auhagen
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210122100203.uyurjtxjiier6257@SvensMacbookPro.hq.voleatech.com \
--to=sven.auhagen@voleatech.de \
--cc=arno@natisbad.org \
--cc=bbrezillon@kernel.org \
--cc=herbert@gondor.apana.org.au \
--cc=linux-crypto@vger.kernel.org \
--cc=schalla@marvell.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).