From: David Howells <dhowells@redhat.com>
To: davem@davemloft.net, netdev@vger.kernel.org, herbert.xu@redhat.com
Cc: hch@infradead.org, arjan@infradead.org, dhowells@redhat.com
Subject: [PATCH 3/5] Crypto: Add blkcipher accessors for using kernel data directly
Date: Thu, 08 Feb 2007 16:32:29 +0000
Message-ID: <20070208163229.23973.71555.stgit@warthog.cambridge.redhat.com>
In-Reply-To: <20070208163211.23973.5877.stgit@warthog.cambridge.redhat.com>

Add blkcipher accessors that operate on kernel data directly, without the use
of scatter lists.

Also add a CRYPTO_ALG_DMA algorithm capability flag to permit or deny the use
of DMA and hardware accelerators.  A hardware accelerator must not be used on
an arbitrary piece of kernel memory, since that memory may not lie in a
DMA-able region; only software algorithms may access kernel memory directly.

If kernel data is going to be accessed directly, then CRYPTO_ALG_DMA must be
passed in the mask argument of, for instance, crypto_alloc_blkcipher(), but
not in the type argument, so that implementations that may use DMA are
excluded.
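
For illustration, a minimal sketch of the intended calling sequence follows
(the "pcbc(fcrypt)" transform name comes from earlier patches in this series;
the key, IV and example_encrypt() wrapper are purely hypothetical):

  static int example_encrypt(u8 *buf, unsigned int len)
  {
          struct crypto_blkcipher *tfm;
          struct blkcipher_desc desc;
          u8 key[8] = { 0 };      /* illustrative key and IV only */
          u8 iv[8] = { 0 };

          /* CRYPTO_ALG_DMA goes in the mask but not the type, so any
           * implementation that may use DMA is excluded. */
          tfm = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_DMA);
          if (IS_ERR(tfm))
                  return PTR_ERR(tfm);

          crypto_blkcipher_setkey(tfm, key, sizeof(key));
          crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

          desc.tfm = tfm;
          desc.flags = 0;

          /* Encrypt ordinary kernel memory in place using the tfm's own IV;
           * len must be a multiple of the cipher block size (8 here). */
          crypto_blkcipher_encrypt_kernel(&desc, buf, buf, len);

          crypto_free_blkcipher(tfm);
          return 0;
  }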

Signed-off-by: David Howells <dhowells@redhat.com>
---

 crypto/blkcipher.c     |    2 ++
 crypto/pcbc.c          |   60 ++++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/crypto.h |   52 +++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 113 insertions(+), 1 deletions(-)

diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 6e93004..d43c22e 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -371,6 +371,8 @@ static int crypto_init_blkcipher_ops(str
 	crt->setkey = setkey;
 	crt->encrypt = alg->encrypt;
 	crt->decrypt = alg->decrypt;
+	crt->encrypt_kernel = alg->encrypt_kernel;
+	crt->decrypt_kernel = alg->decrypt_kernel;
 
 	addr = (unsigned long)crypto_tfm_ctx(tfm);
 	addr = ALIGN(addr, align);
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index 0ffb46e..5ac751e 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -126,6 +126,35 @@ static int crypto_pcbc_encrypt(struct bl
 	return err;
 }
 
+static int crypto_pcbc_encrypt_kernel(struct blkcipher_desc *desc,
+				      u8 *dst, const u8 *src,
+				      unsigned int nbytes)
+{
+	struct blkcipher_walk walk;
+	struct crypto_blkcipher *tfm = desc->tfm;
+	struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+	struct crypto_cipher *child = ctx->child;
+	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+	BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) & CRYPTO_ALG_DMA);
+
+	if (nbytes == 0)
+		return 0;
+
+	memset(&walk, 0, sizeof(walk));
+	walk.src.virt.addr = (u8 *) src;
+	walk.dst.virt.addr = (u8 *) dst;
+	walk.nbytes = nbytes;
+	walk.total = nbytes;
+	walk.iv = desc->info;
+
+	if (walk.src.virt.addr == walk.dst.virt.addr)
+		nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child, xor);
+	else
+		nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child, xor);
+	return 0;
+}
+
 static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
 				       struct blkcipher_walk *walk,
 				       struct crypto_cipher *tfm,
@@ -211,6 +240,35 @@ static int crypto_pcbc_decrypt(struct bl
 	return err;
 }
 
+static int crypto_pcbc_decrypt_kernel(struct blkcipher_desc *desc,
+				      u8 *dst, const u8 *src,
+				      unsigned int nbytes)
+{
+	struct blkcipher_walk walk;
+	struct crypto_blkcipher *tfm = desc->tfm;
+	struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+	struct crypto_cipher *child = ctx->child;
+	void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+
+	BUG_ON(crypto_tfm_alg_capabilities(crypto_cipher_tfm(child)) & CRYPTO_ALG_DMA);
+
+	if (nbytes == 0)
+		return 0;
+
+	memset(&walk, 0, sizeof(walk));
+	walk.src.virt.addr = (u8 *) src;
+	walk.dst.virt.addr = (u8 *) dst;
+	walk.nbytes = nbytes;
+	walk.total = nbytes;
+	walk.iv = desc->info;
+
+	if (walk.src.virt.addr == walk.dst.virt.addr)
+		nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child, xor);
+	else
+		nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child, xor);
+	return 0;
+}
+
 static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
 {
 	do {
@@ -312,6 +370,8 @@ static struct crypto_instance *crypto_pc
 	inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey;
 	inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt;
 	inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt;
+	inst->alg.cra_blkcipher.encrypt_kernel = crypto_pcbc_encrypt_kernel;
+	inst->alg.cra_blkcipher.decrypt_kernel = crypto_pcbc_decrypt_kernel;
 
 out_put_alg:
 	crypto_mod_put(alg);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 4aa9046..00ec299 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -40,7 +40,10 @@ #define CRYPTO_ALG_TYPE_HASH_MASK	0x0000
 #define CRYPTO_ALG_LARVAL		0x00000010
 #define CRYPTO_ALG_DEAD			0x00000020
 #define CRYPTO_ALG_DYING		0x00000040
-#define CRYPTO_ALG_ASYNC		0x00000080
+
+#define CRYPTO_ALG_CAP_MASK		0x00000180	/* capabilities mask */
+#define CRYPTO_ALG_ASYNC		0x00000080	/* capable of async operation */
+#define CRYPTO_ALG_DMA			0x00000100	/* capable of using DMA */
 
 /*
  * Set this bit if and only if the algorithm requires another algorithm of
@@ -135,6 +138,10 @@ struct blkcipher_alg {
 	int (*decrypt)(struct blkcipher_desc *desc,
 		       struct scatterlist *dst, struct scatterlist *src,
 		       unsigned int nbytes);
+	int (*encrypt_kernel)(struct blkcipher_desc *desc, u8 *dst,
+			      const u8 *src, unsigned int nbytes);
+	int (*decrypt_kernel)(struct blkcipher_desc *desc, u8 *dst,
+			      const u8 *src, unsigned int nbytes);
 
 	unsigned int min_keysize;
 	unsigned int max_keysize;
@@ -268,6 +275,10 @@ struct blkcipher_tfm {
 		       struct scatterlist *src, unsigned int nbytes);
 	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes);
+	int (*encrypt_kernel)(struct blkcipher_desc *desc, u8 *dst,
+			      const u8 *src, unsigned int nbytes);
+	int (*decrypt_kernel)(struct blkcipher_desc *desc, u8 *dst,
+			      const u8 *src, unsigned int nbytes);
 };
 
 struct cipher_tfm {
@@ -395,6 +406,11 @@ static inline u32 crypto_tfm_alg_type(st
 	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
 }
 
+static inline u32 crypto_tfm_alg_capabilities(struct crypto_tfm *tfm)
+{
+	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_CAP_MASK;
+}
+
 static unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
 	__deprecated;
 static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
@@ -581,6 +597,23 @@ static inline int crypto_blkcipher_encry
 	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
 }
 
+static inline void crypto_blkcipher_encrypt_kernel(struct blkcipher_desc *desc,
+						   u8 *dst, const u8 *src,
+						   unsigned int nbytes)
+{
+	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
+	crypto_blkcipher_crt(desc->tfm)->encrypt_kernel(desc, dst, src,
+							nbytes);
+}
+
+static inline void crypto_blkcipher_encrypt_kernel_iv(struct blkcipher_desc *desc,
+						      u8 *dst, const u8 *src,
+						      unsigned int nbytes)
+{
+	crypto_blkcipher_crt(desc->tfm)->encrypt_kernel(desc, dst, src,
+							nbytes);
+}
+
 static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
 					   struct scatterlist *dst,
 					   struct scatterlist *src,
@@ -598,6 +631,23 @@ static inline int crypto_blkcipher_decry
 	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
 }
 
+static inline void crypto_cipher_decrypt_kernel(struct blkcipher_desc *desc,
+						u8 *dst, const u8 *src,
+						unsigned int nbytes)
+{
+	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
+	crypto_blkcipher_crt(desc->tfm)->decrypt_kernel(desc, dst, src,
+							nbytes);
+}
+
+static inline void crypto_cipher_decrypt_kernel_iv(struct blkcipher_desc *desc,
+						   u8 *dst, const u8 *src,
+						   unsigned int nbytes)
+{
+	crypto_blkcipher_crt(desc->tfm)->decrypt_kernel(desc, dst, src,
+							nbytes);
+}
+
 static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
 					   const u8 *src, unsigned int len)
 {
