From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
To: linux-crypto@vger.kernel.org
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>,
        Herbert Xu <herbert@gondor.apana.org.au>,
        Eric Biggers <ebiggers@google.com>,
        dm-devel@redhat.com,
        linux-fscrypt@vger.kernel.org,
        Gilad Ben-Yossef <gilad@benyossef.com>,
        Milan Broz <gmazyland@gmail.com>
Subject: [PATCH v11 3/4] crypto: arm64/aes-cts-cbc - factor out CBC en/decryption of a walk
Date: Wed, 14 Aug 2019 19:37:45 +0300
Message-ID: <20190814163746.3525-4-ard.biesheuvel@linaro.org> (raw)
In-Reply-To: <20190814163746.3525-1-ard.biesheuvel@linaro.org>

The plain CBC driver and the CTS one share some code that iterates over
a scatterwalk and invokes the CBC asm code to do the processing. The
upcoming ESSIV/CBC mode will clone that pattern for the third time, so
let's factor it out first.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/crypto/aes-glue.c | 82 ++++++++++----------
 1 file changed, 40 insertions(+), 42 deletions(-)

diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 55d6d4838708..23abf335f1ee 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -186,46 +186,64 @@ static int ecb_decrypt(struct skcipher_request *req)
 	return err;
 }
 
-static int cbc_encrypt(struct skcipher_request *req)
+static int cbc_encrypt_walk(struct skcipher_request *req,
+			    struct skcipher_walk *walk)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-	int err, rounds = 6 + ctx->key_length / 4;
-	struct skcipher_walk walk;
+	int err = 0, rounds = 6 + ctx->key_length / 4;
 	unsigned int blocks;
 
-	err = skcipher_walk_virt(&walk, req, false);
-
-	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
 		kernel_neon_begin();
-		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, rounds, blocks, walk.iv);
+		aes_cbc_encrypt(walk->dst.virt.addr, walk->src.virt.addr,
+				ctx->key_enc, rounds, blocks, walk->iv);
 		kernel_neon_end();
-		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
 
-static int cbc_decrypt(struct skcipher_request *req)
+static int cbc_encrypt(struct skcipher_request *req)
 {
-	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-	int err, rounds = 6 + ctx->key_length / 4;
 	struct skcipher_walk walk;
-	unsigned int blocks;
+	int err;
 
 	err = skcipher_walk_virt(&walk, req, false);
+	if (err)
+		return err;
+	return cbc_encrypt_walk(req, &walk);
+}
 
-	while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+static int cbc_decrypt_walk(struct skcipher_request *req,
+			    struct skcipher_walk *walk)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int err = 0, rounds = 6 + ctx->key_length / 4;
+	unsigned int blocks;
+
+	while ((blocks = (walk->nbytes / AES_BLOCK_SIZE))) {
 		kernel_neon_begin();
-		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, rounds, blocks, walk.iv);
+		aes_cbc_decrypt(walk->dst.virt.addr, walk->src.virt.addr,
+				ctx->key_dec, rounds, blocks, walk->iv);
 		kernel_neon_end();
-		err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+		err = skcipher_walk_done(walk, walk->nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
 
+static int cbc_decrypt(struct skcipher_request *req)
+{
+	struct skcipher_walk walk;
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+	if (err)
+		return err;
+	return cbc_decrypt_walk(req, &walk);
+}
+
 static int cts_cbc_init_tfm(struct crypto_skcipher *tfm)
 {
 	crypto_skcipher_set_reqsize(tfm, sizeof(struct cts_cbc_req_ctx));
@@ -251,22 +269,12 @@ static int cts_cbc_encrypt(struct skcipher_request *req)
 	}
 
 	if (cbc_blocks > 0) {
-		unsigned int blocks;
-
 		skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
 					   cbc_blocks * AES_BLOCK_SIZE,
 					   req->iv);
 
-		err = skcipher_walk_virt(&walk, &rctx->subreq, false);
-
-		while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
-			kernel_neon_begin();
-			aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-					ctx->key_enc, rounds, blocks, walk.iv);
-			kernel_neon_end();
-			err = skcipher_walk_done(&walk,
-						 walk.nbytes % AES_BLOCK_SIZE);
-		}
+		err = skcipher_walk_virt(&walk, &rctx->subreq, false) ?:
+		      cbc_encrypt_walk(&rctx->subreq, &walk);
 		if (err)
 			return err;
 
@@ -316,22 +324,12 @@ static int cts_cbc_decrypt(struct skcipher_request *req)
 	}
 
 	if (cbc_blocks > 0) {
-		unsigned int blocks;
-
 		skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
 					   cbc_blocks * AES_BLOCK_SIZE,
 					   req->iv);
 
-		err = skcipher_walk_virt(&walk, &rctx->subreq, false);
-
-		while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
-			kernel_neon_begin();
-			aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-					ctx->key_dec, rounds, blocks, walk.iv);
-			kernel_neon_end();
-			err = skcipher_walk_done(&walk,
-						 walk.nbytes % AES_BLOCK_SIZE);
-		}
+		err = skcipher_walk_virt(&walk, &rctx->subreq, false) ?:
+		      cbc_decrypt_walk(&rctx->subreq, &walk);
 		if (err)
 			return err;
-- 
2.17.1
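For readers less familiar with the skcipher walk pattern this patch consolidates,
the following is a minimal standalone C sketch of the same refactoring: the
block-by-block processing loop lives in a single *_walk() helper, and each
entry point only initializes the walk and delegates. Every name below (struct
walk, walk_init, walk_done, cbc_crypt_walk, cbc_crypt) is an illustrative
stand-in rather than the kernel's skcipher API, and the "processing" is a
printf placeholder instead of real AES.

/*
 * Standalone sketch (not kernel code) of the refactoring done in this
 * patch: the per-chunk loop is factored into a *_walk() helper and the
 * entry point only sets up the walk and delegates.  All identifiers are
 * illustrative stand-ins for the kernel's skcipher_walk machinery.
 */
#include <stdio.h>

struct walk {
        unsigned int nbytes;            /* bytes available in current chunk */
};

/* stand-in for skcipher_walk_virt(): set up the walk over the request */
static int walk_init(struct walk *walk, unsigned int total)
{
        walk->nbytes = total;
        return 0;
}

/* stand-in for skcipher_walk_done(): hand back the unprocessed tail */
static int walk_done(struct walk *walk, unsigned int leftover)
{
        walk->nbytes = leftover;        /* toy model: tail becomes next chunk */
        return 0;
}

/* factored-out helper, analogous to cbc_encrypt_walk()/cbc_decrypt_walk() */
static int cbc_crypt_walk(struct walk *walk)
{
        int err = 0;
        unsigned int blocks;

        while ((blocks = walk->nbytes / 16)) {
                printf("process %u full 16-byte blocks\n", blocks);
                err = walk_done(walk, walk->nbytes % 16);
        }
        return err;
}

/* thin wrapper, analogous to cbc_encrypt()/cbc_decrypt() after this patch */
static int cbc_crypt(unsigned int total)
{
        struct walk walk;
        int err;

        err = walk_init(&walk, total);
        if (err)
                return err;
        return cbc_crypt_walk(&walk);
}

int main(void)
{
        return cbc_crypt(100);          /* 6 full blocks + 4 trailing bytes */
}

The kernel code in the diff above follows the same shape, with
skcipher_walk_virt()/skcipher_walk_done() in place of the stand-ins and the
NEON CBC routines doing the real per-block work.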
Thread overview: 12+ messages

2019-08-14 16:37 [PATCH v11 0/4] crypto: switch to crypto API for ESSIV generation Ard Biesheuvel
2019-08-14 16:37 ` [PATCH v11 1/4] crypto: essiv - create wrapper template " Ard Biesheuvel
2019-08-15  2:37 ` Herbert Xu
2019-08-15  4:59 ` Ard Biesheuvel
2019-08-15  5:13 ` Herbert Xu
2019-08-15  5:15 ` Ard Biesheuvel
2019-08-15 11:35 ` Herbert Xu
2019-08-15 17:46 ` Ard Biesheuvel
2019-08-15 17:59 ` Ard Biesheuvel
2019-08-14 16:37 ` [PATCH v11 2/4] crypto: essiv - add tests for essiv in cbc(aes)+sha256 mode Ard Biesheuvel
2019-08-14 16:37 ` Ard Biesheuvel [this message]
2019-08-14 16:37 ` [PATCH v11 4/4] crypto: arm64/aes - implement accelerated ESSIV/CBC mode Ard Biesheuvel