* [PATCH 0/3] crypto: algif - change algif_skcipher to be asynchronous
@ 2015-01-29 23:13 Tadeusz Struk
  2015-01-29 23:13 ` [PATCH 1/3] net: socket: enable async read and write Tadeusz Struk
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Tadeusz Struk @ 2015-01-29 23:13 UTC
  To: herbert; +Cc: linux-crypto, netdev, davem, qat-linux, linux-kernel

The way algif_skcipher currently works is that on sendmsg/sendpage it
builds an sgl for the input data, and then on read/recvmsg it submits the
job for encryption, putting the user to sleep until the data is processed.
This way it can only handle one job at a time.
To fully utilize the potential of existing crypto hardware
accelerators, it is necessary to submit multiple jobs asynchronously.
The first patch enables asynchronous read and write on sockets.
The second patch enables af_alg sgls to be linked.
The third patch implements async read for skcipher.
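
For illustration, the submission pattern this enables from user space
looks roughly like the sketch below (a hypothetical minimal example on
the raw AIO syscalls; AF_ALG socket setup, error handling and the
io_getevents() completion loop are omitted):

/* Sketch: queue the cipher operation as an async read on an accepted
 * AF_ALG op fd; the caller is not put to sleep. */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int queue_cipher_read(aio_context_t ctx, int opfd,
			     void *out, size_t len)
{
	struct iocb cb, *cbp = &cb;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = opfd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;	/* async recvmsg/read */
	cb.aio_buf = (unsigned long)out;
	cb.aio_nbytes = len;
	/* Returns once queued; the result arrives via io_getevents(). */
	return syscall(__NR_io_submit, ctx, 1, &cbp);
}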

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
---
Tadeusz Struk (3):
      net: socket: enable async read and write
      crypto: af_alg - Allow to link sgl
      crypto: algif - change algif_skcipher to be asynchronous


 crypto/af_alg.c         |   16 ++
 crypto/algif_skcipher.c |  315 ++++++++++++++++++++++++++++++++++++++++++++++-
 include/crypto/if_alg.h |    4 -
 include/net/sock.h      |    2 
 net/socket.c            |   48 ++++++-
 5 files changed, 364 insertions(+), 21 deletions(-)


* [PATCH 1/3] net: socket: enable async read and write
  2015-01-29 23:13 [PATCH 0/3] crypto: algif - change algif_skcipher to be asynchronous Tadeusz Struk
@ 2015-01-29 23:13 ` Tadeusz Struk
  2015-01-30 18:30   ` Tadeusz Struk
  2015-01-29 23:13 ` [PATCH 2/3] crypto: af_alg - Allow to link sgl Tadeusz Struk
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Tadeusz Struk @ 2015-01-29 23:13 UTC
  To: herbert; +Cc: linux-crypto, netdev, davem, qat-linux, linux-kernel

AIO reads and writes are not currently supported on sockets.
This patch enables real asynchronous socket read/write.

Please note: this patch is generated against the cryptodev tree.
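
In sketch form, the contract this creates for a protocol's recvmsg is:
detect an async kiocb, queue the work, return -EIOCBQUEUED, and complete
later via the new helper. A hypothetical protocol-side illustration
(do_sync_recv() and queue_async_job() are placeholders, not real
functions):

static int proto_recvmsg(struct kiocb *iocb, struct socket *sock,
			 struct msghdr *msg, size_t len, int flags)
{
	if (is_sync_kiocb(iocb))
		return do_sync_recv(sock, msg, flags);

	/* Queue the work and return immediately; the completion path
	 * calls sock_aio_complete(iocb, res, res2) when done. */
	queue_async_job(sock, msg, iocb);
	return -EIOCBQUEUED;
}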

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
---
 include/net/sock.h |    2 ++
 net/socket.c       |   48 ++++++++++++++++++++++++++++++++++++++----------
 2 files changed, 40 insertions(+), 10 deletions(-)

diff --git a/include/net/sock.h b/include/net/sock.h
index 2210fec..2c7d160 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1397,6 +1397,8 @@ static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
 	return si->kiocb;
 }
 
+void sock_aio_complete(struct kiocb *iocb, long res, long res2);
+
 struct socket_alloc {
 	struct socket socket;
 	struct inode vfs_inode;
diff --git a/net/socket.c b/net/socket.c
index a2c33a4..368fa9f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -866,14 +866,25 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
 }
 
+void sock_aio_complete(struct kiocb *iocb, long res, long res2)
+{
+	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
+
+	kfree(siocb);
+	aio_complete(iocb, res, res2);
+}
+EXPORT_SYMBOL(sock_aio_complete);
+
 static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
 					 struct sock_iocb *siocb)
 {
-	if (!is_sync_kiocb(iocb))
-		BUG();
+	if (!siocb)
+		siocb = kmalloc(sizeof(*siocb), GFP_KERNEL);
 
-	siocb->kiocb = iocb;
-	iocb->private = siocb;
+	if (siocb) {
+		siocb->kiocb = iocb;
+		iocb->private = siocb;
+	}
 	return siocb;
 }
 
@@ -901,7 +912,8 @@ static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
 				unsigned long nr_segs, loff_t pos)
 {
-	struct sock_iocb siocb, *x;
+	struct sock_iocb siocb, *x = NULL;
+	int ret;
 
 	if (pos != 0)
 		return -ESPIPE;
@@ -909,11 +921,18 @@ static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
 	if (iocb->ki_nbytes == 0)	/* Match SYS5 behaviour */
 		return 0;
 
+	if (is_sync_kiocb(iocb))
+		x = &siocb;
 
-	x = alloc_sock_iocb(iocb, &siocb);
+	x = alloc_sock_iocb(iocb, x);
 	if (!x)
 		return -ENOMEM;
-	return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+	ret = do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+
+	if (!is_sync_kiocb(iocb) && ret != -EIOCBQUEUED)
+		kfree(x);
+
+	return ret;
 }
 
 static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
@@ -942,16 +961,25 @@ static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
 static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
 			  unsigned long nr_segs, loff_t pos)
 {
-	struct sock_iocb siocb, *x;
+	struct sock_iocb siocb, *x = NULL;
+	int ret;
 
 	if (pos != 0)
 		return -ESPIPE;
 
-	x = alloc_sock_iocb(iocb, &siocb);
+	if (is_sync_kiocb(iocb))
+		x = &siocb;
+
+	x = alloc_sock_iocb(iocb, x);
 	if (!x)
 		return -ENOMEM;
 
-	return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+	ret = do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+
+	if (!is_sync_kiocb(iocb) && ret != -EIOCBQUEUED)
+		kfree(x);
+
+	return ret;
 }
 
 /*



* [PATCH 2/3] crypto: af_alg - Allow to link sgl
  2015-01-29 23:13 [PATCH 0/3] crypto: algif - change algif_skcipher to be asynchronous Tadeusz Struk
  2015-01-29 23:13 ` [PATCH 1/3] net: socket: enable async read and write Tadeusz Struk
@ 2015-01-29 23:13 ` Tadeusz Struk
  2015-01-29 23:13 ` [PATCH 3/3] crypto: algif - change algif_skcipher to be asynchronous Tadeusz Struk
  2015-02-01 18:31 ` [PATCH 0/3] " Stephan Mueller
  3 siblings, 0 replies; 8+ messages in thread
From: Tadeusz Struk @ 2015-01-29 23:13 UTC
  To: herbert; +Cc: linux-crypto, netdev, davem, qat-linux, linux-kernel

Allow af_alg sgls to be linked (chained) together.
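
For context, this is roughly how the helper is consumed by the skcipher
patch later in this series: each per-iovec rx list is built with
af_alg_make_sg() and then chained onto the previous one, so the whole
receive side can be handed to the cipher as one scatterlist:

	used = af_alg_make_sg(&rsgl->sgl, from, used, 1);
	err = used;
	if (used < 0)
		goto free;
	if (last_rsgl)
		af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
	last_rsgl = rsgl;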

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
---
 crypto/af_alg.c         |   16 ++++++++++++----
 include/crypto/if_alg.h |    4 +++-
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 76d739d..99608f2 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -374,7 +374,8 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
 
 	err = 0;
 
-	sg_init_table(sgl->sg, npages);
+	/* Add one extra for linking */
+	sg_init_table(sgl->sg, npages + 1);
 
 	for (i = 0; i < npages; i++) {
 		int plen = min_t(int, len, PAGE_SIZE - off);
@@ -385,20 +386,27 @@ int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
 		len -= plen;
 		err += plen;
 	}
+	sg_mark_end(sgl->sg + npages - 1);
+	sgl->npages = npages;
 
 out:
 	return err;
 }
 EXPORT_SYMBOL_GPL(af_alg_make_sg);
 
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new)
+{
+	sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
+	sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+}
+EXPORT_SYMBOL(af_alg_link_sg);
+
 void af_alg_free_sg(struct af_alg_sgl *sgl)
 {
 	int i;
 
-	i = 0;
-	do {
+	for (i = 0; i < sgl->npages; i++)
 		put_page(sgl->pages[i]);
-	} while (!sg_is_last(sgl->sg + (i++)));
 }
 EXPORT_SYMBOL_GPL(af_alg_free_sg);
 
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 5c7b6c5..0908050 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -58,8 +58,9 @@ struct af_alg_type {
 };
 
 struct af_alg_sgl {
-	struct scatterlist sg[ALG_MAX_PAGES];
+	struct scatterlist sg[ALG_MAX_PAGES + 1];
 	struct page *pages[ALG_MAX_PAGES];
+	unsigned int npages;
 };
 
 int af_alg_register_type(const struct af_alg_type *type);
@@ -71,6 +72,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock);
 int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
 		   int write);
 void af_alg_free_sg(struct af_alg_sgl *sgl);
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
 
 int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
 



* [PATCH 3/3] crypto: algif - change algif_skcipher to be asynchronous
  2015-01-29 23:13 [PATCH 0/3] crypto: algif - change algif_skcipher to be asynchronous Tadeusz Struk
  2015-01-29 23:13 ` [PATCH 1/3] net: socket: enable async read and write Tadeusz Struk
  2015-01-29 23:13 ` [PATCH 2/3] crypto: af_alg - Allow to link sgl Tadeusz Struk
@ 2015-01-29 23:13 ` Tadeusz Struk
  2015-02-01 18:31 ` [PATCH 0/3] " Stephan Mueller
  3 siblings, 0 replies; 8+ messages in thread
From: Tadeusz Struk @ 2015-01-29 23:13 UTC
  To: herbert; +Cc: linux-crypto, netdev, davem, qat-linux, linux-kernel

The way algif_skcipher currently works is that on sendmsg/sendpage it
builds an sgl for the input data, and then on read/recvmsg it submits the
job for encryption, putting the user to sleep until the data is processed.
This way it can only handle one job at a time.
This patch changes it to be asynchronous by adding AIO support.

Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
---
 crypto/algif_skcipher.c |  315 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 309 insertions(+), 6 deletions(-)

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 38a6757..c953200 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -19,9 +19,11 @@
 #include <linux/list.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/net.h>
 #include <net/sock.h>
+#include <linux/aio.h>
 
 struct skcipher_sg_list {
 	struct list_head list;
@@ -39,6 +41,9 @@ struct skcipher_ctx {
 
 	struct af_alg_completion completion;
 
+	struct kmem_cache *cache;
+	mempool_t *pool;
+	atomic_t inflight;
 	unsigned used;
 
 	unsigned int len;
@@ -49,9 +54,135 @@ struct skcipher_ctx {
 	struct ablkcipher_request req;
 };
 
+struct skcipher_async_rsgl {
+	struct af_alg_sgl sgl;
+	struct list_head list;
+};
+
+struct skcipher_async_req {
+	struct kiocb *iocb;
+	struct skcipher_async_rsgl first_sgl;
+	struct list_head list;
+	struct scatterlist *tsg;
+	char iv[];
+};
+
+#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
+	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
+
+#define GET_REQ_SIZE(ctx) \
+	crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
+#define GET_IV_SIZE(ctx) \
+	crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
+
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
 		      sizeof(struct scatterlist) - 1)
 
+static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
+{
+	struct skcipher_async_rsgl *rsgl;
+	struct scatterlist *sgl;
+	struct scatterlist *sg;
+	int i, n;
+
+	list_for_each_entry(rsgl, &sreq->list, list) {
+		af_alg_free_sg(&rsgl->sgl);
+		if (rsgl != &sreq->first_sgl)
+			kfree(rsgl);
+	}
+	sgl = sreq->tsg;
+	n = sg_nents(sgl);
+	for_each_sg(sgl, sg, n, i)
+		put_page(sg_page(sg));
+
+	kfree(sreq->tsg);
+}
+
+static void skcipher_async_cb(struct crypto_async_request *req, int err)
+{
+	struct sock *sk = req->data;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+	struct kiocb *iocb = sreq->iocb;
+
+	atomic_dec(&ctx->inflight);
+	skcipher_free_async_sgls(sreq);
+	mempool_free(req, ctx->pool);
+	sock_aio_complete(iocb, err, err);
+}
+
+static void skcipher_mempool_free(void *_req, void *_sk)
+{
+	struct sock *sk = _sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct kmem_cache *cache = ctx->cache;
+
+	kmem_cache_free(cache, _req);
+}
+
+static void *skcipher_mempool_alloc(gfp_t gfp_mask, void *_sk)
+{
+	struct sock *sk = _sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct kmem_cache *cache = ctx->cache;
+	struct ablkcipher_request *req;
+
+	req = kmem_cache_alloc(cache, gfp_mask);
+	if (req) {
+		ablkcipher_request_set_tfm(req,
+					   crypto_ablkcipher_reqtfm(&ctx->req));
+		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+						skcipher_async_cb, sk);
+	}
+	return req;
+}
+
+static void skcipher_cache_constructor(void *v)
+{
+	memset(v, 0, sizeof(struct skcipher_async_req));
+}
+
+static int skcipher_mempool_create(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	unsigned int len = sizeof(struct skcipher_async_req) +
+		GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+	char buf[32];
+
+	snprintf(buf, sizeof(buf), "skcipher_%p", ctx);
+	ctx->cache = kmem_cache_create(buf, len, 0, SLAB_HWCACHE_ALIGN |
+				       SLAB_TEMPORARY,
+				       skcipher_cache_constructor);
+	if (unlikely(!ctx->cache))
+		return -ENOMEM;
+
+	ctx->pool = mempool_create(128, skcipher_mempool_alloc,
+				   skcipher_mempool_free, sk);
+
+	if (unlikely(!ctx->pool)) {
+		kmem_cache_destroy(ctx->cache);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void skcipher_mempool_destroy(struct skcipher_ctx *ctx)
+{
+	if (ctx->pool)
+		mempool_destroy(ctx->pool);
+
+	if (ctx->cache)
+		kmem_cache_destroy(ctx->cache);
+
+	ctx->cache = NULL;
+	ctx->pool = NULL;
+}
+
 static inline int skcipher_sndbuf(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
@@ -96,7 +227,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
 	return 0;
 }
 
-static void skcipher_pull_sgl(struct sock *sk, int used)
+static void skcipher_pull_sgl(struct sock *sk, int used, int put)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
@@ -124,7 +255,8 @@ static void skcipher_pull_sgl(struct sock *sk, int used)
 			if (sg[i].length)
 				return;
 
-			put_page(sg_page(sg + i));
+			if (put)
+				put_page(sg_page(sg + i));
 			sg_assign_page(sg + i, NULL);
 		}
 
@@ -143,7 +275,7 @@ static void skcipher_free_sgl(struct sock *sk)
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
 
-	skcipher_pull_sgl(sk, ctx->used);
+	skcipher_pull_sgl(sk, ctx->used, 1);
 }
 
 static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
@@ -424,8 +556,152 @@ unlock:
 	return err ?: size;
 }
 
-static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
-			    struct msghdr *msg, size_t ignored, int flags)
+static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
+{
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	int nents = 0;
+
+	list_for_each_entry(sgl, &ctx->tsgl, list) {
+		sg = sgl->sg;
+
+		while (!sg->length)
+			sg++;
+
+		nents += sg_nents(sg);
+	}
+	return nents;
+}
+
+static int skcipher_recvmsg_async(struct kiocb *iocb, struct socket *sock,
+				  struct msghdr *msg, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	const struct iovec *iov;
+	unsigned long iovlen;
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	struct skcipher_async_req *sreq;
+	struct ablkcipher_request *req;
+	struct skcipher_async_rsgl *last_rsgl = NULL;
+	unsigned int len = 0, tx_nents = skcipher_all_sg_nents(ctx);
+	int i = 0;
+	int err = -ENOMEM;
+
+	lock_sock(sk);
+	req = mempool_alloc(ctx->pool, GFP_KERNEL);
+	if (unlikely(!req))
+		goto unlock;
+
+	sreq = GET_SREQ(req, ctx);
+	sreq->iocb = iocb;
+	INIT_LIST_HEAD(&sreq->list);
+	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
+	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
+	if (!sreq->tsg) {
+		mempool_free(req, ctx->pool);
+		goto unlock;
+	}
+	sg_init_table(sreq->tsg, tx_nents);
+	for (iov = msg->msg_iter.iov, iovlen = msg->msg_iter.nr_segs;
+	     iovlen > 0; iovlen--, iov++) {
+		unsigned long seglen = iov->iov_len;
+		char __user *from = iov->iov_base;
+		struct skcipher_async_rsgl *rsgl;
+
+		while (seglen) {
+			unsigned long used;
+
+			if (!ctx->used) {
+				err = skcipher_wait_for_data(sk, flags);
+				if (err)
+					goto free;
+			}
+			sgl = list_first_entry(&ctx->tsgl,
+					       struct skcipher_sg_list, list);
+			sg = sgl->sg;
+
+			while (!sg->length)
+				sg++;
+
+			used = min_t(unsigned long, ctx->used, seglen);
+			used = min_t(unsigned long, used, sg->length);
+
+			if (i == tx_nents) {
+				struct scatterlist *tmp;
+				int x;
+
+				/* Ran out of tx slots in async request
+				 * need to expand */
+				tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
+					      GFP_KERNEL);
+				if (!tmp)
+					goto free;
+
+				sg_init_table(tmp, tx_nents * 2);
+				for (x = 0; x < tx_nents; x++)
+					sg_set_page(&tmp[x],
+						    sg_page(&sreq->tsg[x]),
+						    sreq->tsg[x].length,
+						    sreq->tsg[x].offset);
+				kfree(sreq->tsg);
+				sreq->tsg = tmp;
+				tx_nents *= 2;
+			}
+			/* Need to take over the tx sgl from ctx
+			 * to the asynch req - these sgls will be freed later */
+			sg_set_page(sreq->tsg + i++, sg_page(sg), sg->length,
+				    sg->offset);
+
+			if (list_empty(&sreq->list)) {
+				rsgl = &sreq->first_sgl;
+				list_add(&rsgl->list, &sreq->list);
+			} else {
+				rsgl = kzalloc(sizeof(*rsgl), GFP_KERNEL);
+				if (!rsgl) {
+					err = -ENOMEM;
+					goto free;
+				}
+				list_add(&rsgl->list, &sreq->list);
+			}
+
+			used = af_alg_make_sg(&rsgl->sgl, from, used, 1);
+			err = used;
+			if (used < 0)
+				goto free;
+			if (last_rsgl)
+				af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
+
+			last_rsgl = rsgl;
+			len += used;
+			from += used;
+			seglen -= used;
+			skcipher_pull_sgl(sk, used, 0);
+		}
+	}
+
+	ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
+				     len, sreq->iv);
+	err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
+			 crypto_ablkcipher_decrypt(req);
+	if (err == -EINPROGRESS) {
+		atomic_inc(&ctx->inflight);
+		err = -EIOCBQUEUED;
+		goto unlock;
+	}
+free:
+	skcipher_free_async_sgls(sreq);
+	mempool_free(req, ctx->pool);
+unlock:
+	skcipher_wmem_wakeup(sk);
+	release_sock(sk);
+	return err;
+}
+
+static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
+				 int flags)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
@@ -493,7 +769,7 @@ free:
 			copied += used;
 			from += used;
 			seglen -= used;
-			skcipher_pull_sgl(sk, used);
+			skcipher_pull_sgl(sk, used, 1);
 		}
 	}
 
@@ -506,6 +782,13 @@ unlock:
 	return copied ?: err;
 }
 
+static int skcipher_recvmsg(struct kiocb *iocb, struct socket *sock,
+			    struct msghdr *msg, size_t ignored, int flags)
+{
+	return is_sync_kiocb(iocb) ?
+		skcipher_recvmsg_sync(sock, msg, flags) :
+		skcipher_recvmsg_async(iocb, sock, msg, flags);
+}
 
 static unsigned int skcipher_poll(struct file *file, struct socket *sock,
 				  poll_table *wait)
@@ -564,12 +847,25 @@ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
 	return crypto_ablkcipher_setkey(private, key, keylen);
 }
 
+static void skcipher_wait(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	int ctr = 0;
+
+	while (atomic_read(&ctx->inflight) && ctr++ < 100)
+		msleep(100);
+}
+
 static void skcipher_sock_destruct(struct sock *sk)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	struct skcipher_ctx *ctx = ask->private;
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
 
+	if (atomic_read(&ctx->inflight))
+		skcipher_wait(sk);
+	skcipher_mempool_destroy(ctx);
 	skcipher_free_sgl(sk);
 	sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
 	sock_kfree_s(sk, ctx, ctx->len);
@@ -601,6 +897,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
 	ctx->more = 0;
 	ctx->merge = 0;
 	ctx->enc = 0;
+	atomic_set(&ctx->inflight, 0);
 	af_alg_init_completion(&ctx->completion);
 
 	ask->private = ctx;
@@ -609,6 +906,12 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
 	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 					af_alg_complete, &ctx->completion);
 
+	if (skcipher_mempool_create(sk)) {
+		sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(private));
+		sock_kfree_s(sk, ctx, ctx->len);
+		return -ENOMEM;
+	}
+
 	sk->sk_destruct = skcipher_sock_destruct;
 
 	return 0;



* Re: [PATCH 1/3] net: socket: enable async read and write
  2015-01-29 23:13 ` [PATCH 1/3] net: socket: enable async read and write Tadeusz Struk
@ 2015-01-30 18:30   ` Tadeusz Struk
  0 siblings, 0 replies; 8+ messages in thread
From: Tadeusz Struk @ 2015-01-30 18:30 UTC
  To: herbert; +Cc: linux-crypto, netdev, davem, qat-linux, linux-kernel

On 01/29/2015 03:13 PM, Tadeusz Struk wrote:
> AIO reads and writes are not currently supported on sockets.
> This patch enables real asynchronous socket read/write.
>
> Please note: this patch is generated against the cryptodev tree.
> 
> Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
> ---
>  include/net/sock.h |    2 ++
>  net/socket.c       |   48 ++++++++++++++++++++++++++++++++++++++----------
>  2 files changed, 40 insertions(+), 10 deletions(-)
> 
> diff --git a/include/net/sock.h b/include/net/sock.h
> index 2210fec..2c7d160 100644
> --- a/include/net/sock.h
> +++ b/include/net/sock.h
> @@ -1397,6 +1397,8 @@ static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
>  	return si->kiocb;
>  }
>  
> +void sock_aio_complete(struct kiocb *iocb, long res, long res2);
> +
>  struct socket_alloc {
>  	struct socket socket;
>  	struct inode vfs_inode;
> diff --git a/net/socket.c b/net/socket.c
> index a2c33a4..368fa9f 100644
> --- a/net/socket.c
> +++ b/net/socket.c
> @@ -866,14 +866,25 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
>  	return sock->ops->splice_read(sock, ppos, pipe, len, flags);
>  }
>  
> +void sock_aio_complete(struct kiocb *iocb, long res, long res2)
> +{
> +	struct sock_iocb *siocb = kiocb_to_siocb(iocb);
> +
> +	kfree(siocb);
> +	aio_complete(iocb, res, res2);
> +}
> +EXPORT_SYMBOL(sock_aio_complete);
> +
>  static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
>  					 struct sock_iocb *siocb)
>  {
> -	if (!is_sync_kiocb(iocb))
> -		BUG();
> +	if (!siocb)
> +		siocb = kmalloc(sizeof(*siocb), GFP_KERNEL);
>  
> -	siocb->kiocb = iocb;
> -	iocb->private = siocb;
> +	if (siocb) {
> +		siocb->kiocb = iocb;
> +		iocb->private = siocb;
> +	}
>  	return siocb;
>  }
>  
> @@ -901,7 +912,8 @@ static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
>  static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
>  				unsigned long nr_segs, loff_t pos)
>  {
> -	struct sock_iocb siocb, *x;
> +	struct sock_iocb siocb, *x = NULL;
> +	int ret;
>  
>  	if (pos != 0)
>  		return -ESPIPE;
> @@ -909,11 +921,18 @@ static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
>  	if (iocb->ki_nbytes == 0)	/* Match SYS5 behaviour */
>  		return 0;
>  
> +	if (is_sync_kiocb(iocb))
> +		x = &siocb;
>  
> -	x = alloc_sock_iocb(iocb, &siocb);
> +	x = alloc_sock_iocb(iocb, x);
>  	if (!x)
>  		return -ENOMEM;
> -	return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
> +	ret = do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
> +
> +	if (!is_sync_kiocb(iocb) && ret != -EIOCBQUEUED)
> +		kfree(x);
> +
> +	return ret;
>  }
>  
>  static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
> @@ -942,16 +961,25 @@ static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
>  static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
>  			  unsigned long nr_segs, loff_t pos)
>  {
> -	struct sock_iocb siocb, *x;
> +	struct sock_iocb siocb, *x = NULL;
> +	int ret;
>  
>  	if (pos != 0)
>  		return -ESPIPE;
>  
> -	x = alloc_sock_iocb(iocb, &siocb);
> +	if (is_sync_kiocb(iocb))
> +		x = &siocb;
> +
> +	x = alloc_sock_iocb(iocb, x);
>  	if (!x)
>  		return -ENOMEM;
>  
> -	return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
> +	ret = do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
> +
> +	if (!is_sync_kiocb(iocb) && ret != -EIOCBQUEUED)
> +		kfree(x);
> +
> +	return ret;
>  }
>  
>  /*

Hi Herbert,
Just noticed that struct sock_iocb has just been removed in net-next
(see [1]). What we can do is call aio_complete() directly from
algif_skcipher, assuming it is OK to invoke an asynchronous read or
write with the struct msghdr allocated on the stack.
Please let me know what you think.
Thanks,
Tadeusz

[1]
https://git.kernel.org/cgit/linux/kernel/git/davem/net-next.git/commit/?id=7cc05662682da4b0e0a4fdf3c3f190577803ae81
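
A minimal sketch of that alternative, reusing the callback from patch
3/3 but completing the iocb directly (assumes net-next, where struct
sock_iocb and hence sock_aio_complete() are gone):

static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct sock *sk = req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(&ctx->inflight);
	skcipher_free_async_sgls(sreq);
	mempool_free(req, ctx->pool);
	aio_complete(iocb, err, err);	/* no sock_iocb to free */
}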





* Re: [PATCH 0/3] crypto: algif - change algif_skcipher to be asynchronous
  2015-01-29 23:13 [PATCH 0/3] crypto: algif - change algif_skcipher to be asynchronous Tadeusz Struk
                   ` (2 preceding siblings ...)
  2015-01-29 23:13 ` [PATCH 3/3] crypto: algif - change algif_skcipher to be asynchronous Tadeusz Struk
@ 2015-02-01 18:31 ` Stephan Mueller
  2015-02-02 15:03   ` Tadeusz Struk
  3 siblings, 1 reply; 8+ messages in thread
From: Stephan Mueller @ 2015-02-01 18:31 UTC
  To: Tadeusz Struk
  Cc: herbert, linux-crypto, netdev, davem, qat-linux, linux-kernel

On Thursday, 29 January 2015, 15:13:39, Tadeusz Struk wrote:

Hi Tadeusz,

> The way algif_skcipher currently works is that on sendmsg/sendpage it
> builds an sgl for the input data, and then on read/recvmsg it submits the
> job for encryption, putting the user to sleep until the data is processed.
> This way it can only handle one job at a time.
> To fully utilize the potential of existing crypto hardware
> accelerators, it is necessary to submit multiple jobs asynchronously.
> The first patch enables asynchronous read and write on sockets.
> The second patch enables af_alg sgls to be linked.
> The third patch implements async read for skcipher.

Do you have a code fragment showing how to test this patch? I would like
to see whether I can test it with my libkcapi.
> 
> Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
> ---
> Tadeusz Struk (3):
>       net: socket: enable async read and write
>       crypto: af_alg - Allow to link sgl
>       crypto: algif - change algif_skcipher to be asynchronous
> 
> 
>  crypto/af_alg.c         |   16 ++
>  crypto/algif_skcipher.c |  315 ++++++++++++++++++++++++++++++++++++++++++++++-
>  include/crypto/if_alg.h |    4 -
>  include/net/sock.h      |    2
>  net/socket.c            |   48 ++++++-
>  5 files changed, 364 insertions(+), 21 deletions(-)


-- 
Ciao
Stephan


* Re: [PATCH 0/3] crypto: algif - change algif_skcipher to be asynchronous
  2015-02-01 18:31 ` [PATCH 0/3] " Stephan Mueller
@ 2015-02-02 15:03   ` Tadeusz Struk
  2015-02-02 16:40     ` Stephan Mueller
  0 siblings, 1 reply; 8+ messages in thread
From: Tadeusz Struk @ 2015-02-02 15:03 UTC
  To: Stephan Mueller
  Cc: herbert, linux-crypto, netdev, davem, qat-linux, linux-kernel

On 02/01/2015 10:31 AM, Stephan Mueller wrote:
> Hi Tadeusz,
> 
>> > The way algif_skcipher currently works is that on sendmsg/sendpage it
>> > builds an sgl for the input data, and then on read/recvmsg it submits the
>> > job for encryption, putting the user to sleep until the data is processed.
>> > This way it can only handle one job at a time.
>> > To fully utilize the potential of existing crypto hardware
>> > accelerators, it is necessary to submit multiple jobs asynchronously.
>> > The first patch enables asynchronous read and write on sockets.
>> > The second patch enables af_alg sgls to be linked.
>> > The third patch implements async read for skcipher.
> Do you have a code fragment showing how to test this patch? I would like
> to see whether I can test it with my libkcapi.

Hi Stephan,
This is what I'm using.

#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <linux/types.h>
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <sys/select.h>

#define SOL_ALG 279

#define SPLICE_F_GIFT	(0x08)	/* pages passed in are a gift */
struct sockaddr_alg {
	__u16	salg_family;
	__u8	salg_type[14];
	__u32	salg_feat;
	__u32	salg_mask;
	__u8	salg_name[64];
};
struct af_alg_iv {
	__u32	ivlen;
	__u8	iv[0];
};
/* Socket options */
#define ALG_SET_KEY			1
#define ALG_SET_IV			2
#define ALG_SET_OP			3
#define ALG_SET_AEAD_ASSOCLEN		4
#define ALG_SET_AEAD_AUTHSIZE		5

/* Operations */
#define ALG_OP_DECRYPT			0
#define ALG_OP_ENCRYPT			1

#define BUFFSIZE (4096)
#define PKGSIZE (4096)

#define INFLIGTHS 256
#define TO_SEND (1024 * 1024)
//#define OUT_OFFSET 2048;
//#define IN_OFFSET 4064;
#define OUT_OFFSET 0;
#define IN_OFFSET 0;

static char buf[BUFFSIZE] __attribute__((__aligned__(BUFFSIZE)));
static char *buf_out = buf;

static inline int io_setup(unsigned n, aio_context_t *ctx)
{
	return syscall(__NR_io_setup, n, ctx);
}

static inline int io_destroy(aio_context_t ctx)
{
	return syscall(__NR_io_destroy, ctx);
}

/* Wrapper around io_submit(2), used here to queue AIO reads. */
static inline int io_read(aio_context_t ctx, long n, struct iocb **iocb)
{
	return syscall(__NR_io_submit, ctx, n, iocb);
}

static inline int io_getevents(aio_context_t ctx, long min, long max,
			struct io_event *events, struct timespec *timeout)
{
	return syscall(__NR_io_getevents, ctx, min, max, events, timeout);
}

static inline int eventfd(int n)
{
	return syscall(__NR_eventfd, n);
}

static int crypt_kernel(const char *key, char *oiv, int zcp)
{
	int opfd;
	int tfmfd;
	int efd;
	struct timespec timeout;
	fd_set rfds;
	struct timeval tv;
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "skcipher",
		.salg_name = "cbc(aes)"
	};
	struct msghdr msg = {};
	struct cmsghdr *cmsg;
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(20)] = {};
	struct aes_iv {
		__u32 len;
		__u8 iv[16];
	} *iv;
	struct iovec iov;
	int pipes[2];
	aio_context_t aio_ctx;
	struct iocb *cb;
	struct iocb cbt[INFLIGTHS];
	struct io_event events[INFLIGTHS];
	unsigned int received = 0;
	int i, r;

	timeout.tv_sec = 0;
	timeout.tv_nsec = 0;
	pipe(pipes);
	memset(cbt, 0, sizeof(cbt));
	efd = eventfd(0);
	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(20);
	iv = (void *)CMSG_DATA(cmsg);
	iv->len = 16;
	memcpy(iv->iv, oiv, 16);

	iov.iov_base = buf + IN_OFFSET;
	iov.iov_len = PKGSIZE;
	msg.msg_flags = MSG_MORE;

	aio_ctx = 0;
	r = io_setup(INFLIGTHS, &aio_ctx);
	if (r < 0) {
		perror("io_setup error");
		return -1;
	}

	for (i = 0; i < TO_SEND; i++) {
		if (zcp) {
			msg.msg_iovlen = 0;
			msg.msg_iov = NULL;

			r = sendmsg(opfd, &msg, 0);
			if (r < 0)
				printf("sendmsg returned Error: %d\n", errno);

			r = vmsplice(pipes[1], &iov, 1, SPLICE_F_GIFT);
			if (r < 0)
				printf("vmsplice returned Error: %d\n", errno);

			r = splice(pipes[0], NULL, opfd, NULL, PKGSIZE, 0);
			if (r < 0)
				printf("splice returned Error: %d\n", errno);
		} else {
			msg.msg_iovlen = 1;
			msg.msg_iov = &iov;
			r = sendmsg(opfd, &msg, PKGSIZE);
			if (r < 0)
				printf("zero cp sendmsg returned Error: %d\n", errno);
		}

		cb = &cbt[i % INFLIGTHS];
		if (cb->aio_fildes) {
			printf("req %d not processed yet???\n", i - INFLIGTHS);
			return -1;
		}
		memset(cb, '\0', sizeof(*cb));
		cb->aio_fildes = opfd;
		cb->aio_lio_opcode = IOCB_CMD_PREAD;
		cb->aio_buf = (unsigned long)buf_out + OUT_OFFSET;
		cb->aio_offset = 0;
		cb->aio_nbytes = PKGSIZE;
		cb->aio_flags = IOCB_FLAG_RESFD;
		cb->aio_resfd = efd;
		r = io_read(aio_ctx, 1, &cb);
		if (r != 1) {
			if (r < 0) {
				printf("io_read Error: %d\n", errno);
				return -1;
			} else {
				printf("Could not sumbit AIO read\n");
				return -1;
			}
		}

		FD_ZERO(&rfds);
		FD_SET(efd, &rfds);
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		r = select(efd + 1, &rfds, NULL, NULL, &tv);

		if (r == -1) {
			printf("Select Error: %d\n", errno);
			return -1;
		} else if (FD_ISSET(efd, &rfds)) {
			r = io_getevents(aio_ctx, 1, INFLIGTHS, events + (received % INFLIGTHS),
					 &timeout);
			if (r > 0) {
				int y;

				for (y = 0; y < r; y++) {
					cb = (void*) events[(received + y) % INFLIGTHS].obj;
					cb->aio_fildes = 0;
					if (events[(received + y) % INFLIGTHS].res)
						printf("req %d failed\n", received + y);
				}
				received += r;
			} else if (r < 0) {
				printf("io_getevents Error: %d\n", errno);
				return -1;
			}
		}

		if (i == 0) {
			msg.msg_control = NULL;
			msg.msg_controllen = 0;
		}
	}

	while (received != TO_SEND) {
		r = io_getevents(aio_ctx, 1, TO_SEND - received,
				events + (received % INFLIGTHS), NULL);
		if (r > 0)
			received += r;
	}
	printf("All done!\n");
	close(efd);
	close(opfd);
	close(tfmfd);
	close(pipes[0]);
	close(pipes[1]);
	io_destroy(aio_ctx);
	return 0;
}

int main(int argc, char **argv)
{
	const char key[16] =
		"\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
		"\x51\x2e\x03\xd5\x34\x12\x00\x06";
	char iv[16] =
		"\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
		"\xb4\x22\xda\x80\x2c\x9f\xac\x41";

	memcpy(buf, "Single block msg", 16);

	if (argc == 2) {
		printf("zero copy\n");
		crypt_kernel(key, iv, 1);
	}
	else {
		printf("copy\n");
		crypt_kernel(key, iv, 0);
	}
	return 0;
}


* Re: [PATCH 0/3] crypto: algif - change algif_skcipher to be asynchronous
  2015-02-02 15:03   ` Tadeusz Struk
@ 2015-02-02 16:40     ` Stephan Mueller
  0 siblings, 0 replies; 8+ messages in thread
From: Stephan Mueller @ 2015-02-02 16:40 UTC
  To: Tadeusz Struk
  Cc: herbert, linux-crypto, netdev, davem, qat-linux, linux-kernel

On Monday, 2 February 2015, 07:03:02, Tadeusz Struk wrote:

Hi Tadeusz,

>On 02/01/2015 10:31 AM, Stephan Mueller wrote:
>> Hi Tadeusz,
>> 
>>> > The way algif_skcipher currently works is that on
>>> > sendmsg/sendpage it builds an sgl for the input data, and then on
>>> > read/recvmsg it submits the job for encryption, putting the user
>>> > to sleep until the data is processed. This way it can only handle
>>> > one job at a time.
>>> > To fully utilize the potential of existing crypto hardware
>>> > accelerators, it is necessary to submit multiple jobs
>>> > asynchronously.
>>> > The first patch enables asynchronous read and write on sockets.
>>> > The second patch enables af_alg sgls to be linked.
>>> > The third patch implements async read for skcipher.
>> 
>> Do you have a code fragment showing how to test this patch? I would
>> like to see whether I can test it with my libkcapi.
>
>Hi Stephan,
>This is what I'm using.

Thanks for the listing.

Are you aware of the speed tester that I added to libkcapi? See [1],
subdirectory speed-test/.

If you want to play with it, all you need to do is add your
init/encryption/fini code to cryptoperf-skcipher.c.

The key point is that cp_ablkcipher_enc_test/cp_ablkcipher_dec_test
perform only the encryption/decryption operation (setkey etc. is done
in the init call); the speed measurement covers only that function.

[1] http://www.chronox.de/libkcapi.html

Ciao
Stephan

