* [bug report] crypto: aesni - xts_crypt() return if walk.nbytes is 0
@ 2021-09-08 12:54 Dan Carpenter
  2021-09-10 15:54 ` Shreyansh Chouhan
  0 siblings, 1 reply; 7+ messages in thread
From: Dan Carpenter @ 2021-09-08 12:54 UTC (permalink / raw)
  To: chouhan.shreyansh630; +Cc: linux-crypto

Hello Shreyansh Chouhan,

The patch 72ff2bf04db2: "crypto: aesni - xts_crypt() return if
walk.nbytes is 0" from Aug 22, 2021, leads to the following
Smatch static checker warning:

	arch/x86/crypto/aesni-intel_glue.c:915 xts_crypt()
	warn: possible missing kernel_fpu_end()

arch/x86/crypto/aesni-intel_glue.c
    839 static int xts_crypt(struct skcipher_request *req, bool encrypt)
    840 {
    841         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
    842         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
    843         int tail = req->cryptlen % AES_BLOCK_SIZE;
    844         struct skcipher_request subreq;
    845         struct skcipher_walk walk;
    846         int err;
    847 
    848         if (req->cryptlen < AES_BLOCK_SIZE)
    849                 return -EINVAL;
    850 
    851         err = skcipher_walk_virt(&walk, req, false);
    852         if (!walk.nbytes)
    853                 return err;

The patch adds this check for "walk.nbytes == 0".

    854 
    855         if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
                                         ^^^^^^^^^^^^^^^^^^^^^^^^
But Smatch says that "walk.nbytes" can be set to zero inside this
if statement, presumably by the second skcipher_walk_virt() call at
line 868, where only the return value is checked.

    856                 int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
    857 
    858                 skcipher_walk_abort(&walk);
    859 
    860                 skcipher_request_set_tfm(&subreq, tfm);
    861                 skcipher_request_set_callback(&subreq,
    862                                               skcipher_request_flags(req),
    863                                               NULL, NULL);
    864                 skcipher_request_set_crypt(&subreq, req->src, req->dst,
    865                                            blocks * AES_BLOCK_SIZE, req->iv);
    866                 req = &subreq;
    867 
    868                 err = skcipher_walk_virt(&walk, req, false);
    869                 if (err)
    870                         return err;
    871         } else {
    872                 tail = 0;
    873         }
    874 
    875         kernel_fpu_begin();
    876 
    877         /* calculate first value of T */
    878         aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
    879 

If walk.nbytes is zero at this point we never enter the loop below, so
kernel_fpu_end() is never called to pair with the kernel_fpu_begin() above.

So maybe the "if (walk.nbytes == 0)" check should be moved to just
before the call to kernel_fpu_begin()?  (A rough sketch of that idea
follows the quoted function below.)

    880         while (walk.nbytes > 0) {
    881                 int nbytes = walk.nbytes;
    882 
    883                 if (nbytes < walk.total)
    884                         nbytes &= ~(AES_BLOCK_SIZE - 1);
    885 
    886                 if (encrypt)
    887                         aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
    888                                           walk.dst.virt.addr, walk.src.virt.addr,
    889                                           nbytes, walk.iv);
    890                 else
    891                         aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
    892                                           walk.dst.virt.addr, walk.src.virt.addr,
    893                                           nbytes, walk.iv);
    894                 kernel_fpu_end();
    895 
    896                 err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
    897 
    898                 if (walk.nbytes > 0)
    899                         kernel_fpu_begin();
    900         }
    901 
    902         if (unlikely(tail > 0 && !err)) {
    903                 struct scatterlist sg_src[2], sg_dst[2];
    904                 struct scatterlist *src, *dst;
    905 
    906                 dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
    907                 if (req->dst != req->src)
    908                         dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
    909 
    910                 skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
    911                                            req->iv);
    912 
    913                 err = skcipher_walk_virt(&walk, &subreq, false);
    914                 if (err)
--> 915                         return err;
    916 
    917                 kernel_fpu_begin();
    918                 if (encrypt)
    919                         aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
    920                                           walk.dst.virt.addr, walk.src.virt.addr,
    921                                           walk.nbytes, walk.iv);
    922                 else
    923                         aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
    924                                           walk.dst.virt.addr, walk.src.virt.addr,
    925                                           walk.nbytes, walk.iv);
    926                 kernel_fpu_end();
    927 
    928                 err = skcipher_walk_done(&walk, 0);
    929         }
    930         return err;
    931 }
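
One way that could look (untested, just to illustrate the idea; all of
the names and context come from the quoted code above) is to check the
return value after each skcipher_walk_virt() call and test walk.nbytes
only once, immediately before taking the FPU:

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;

	if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
		/* ... set up subreq and restart the walk as before ... */

		err = skcipher_walk_virt(&walk, req, false);
		if (err)
			return err;
	} else {
		tail = 0;
	}

	/* Nothing to process, so don't take the FPU at all. */
	if (!walk.nbytes)
		return err;

	kernel_fpu_begin();

That way both skcipher_walk_virt() calls are covered by the same zero
length check, and kernel_fpu_begin() is never called when there is
nothing to process.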

regards,
dan carpenter
