* [rfc 4.16-rt patch] crypto: cryptd - serialize RT request enqueue/dequeue with a local lock
From: Mike Galbraith @ 2018-07-11 15:19 UTC
  To: linux-rt-users; +Cc: Sebastian Andrzej Siewior, Thomas Gleixner, Steven Rostedt

Note: patch is the result of code inspection...

cryptd disables preemption to provide request enqueue/dequeue exclusion.
For RT, use a LOCAL_IRQ_LOCK to provide the same exclusion while keeping
preemption enabled.

Signed-off-by: Mike Galbraith <efault@gmx.de>
---
 crypto/cryptd.c |   10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -31,10 +31,12 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/locallock.h>
 
 static unsigned int cryptd_max_cpu_qlen = 1000;
 module_param(cryptd_max_cpu_qlen, uint, 0);
 MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
+static DEFINE_LOCAL_IRQ_LOCK(cryptod_request_lock);
 
 struct cryptd_cpu_queue {
 	struct crypto_queue queue;
@@ -141,7 +143,7 @@ static int cryptd_enqueue_request(struct
 	struct cryptd_cpu_queue *cpu_queue;
 	atomic_t *refcnt;
 
-	cpu = get_cpu();
+	cpu = local_lock_cpu(cryptod_request_lock);
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
@@ -158,7 +160,7 @@ static int cryptd_enqueue_request(struct
 	atomic_inc(refcnt);
 
 out_put_cpu:
-	put_cpu();
+	local_unlock_cpu(cryptod_request_lock);
 
 	return err;
 }
@@ -179,10 +181,10 @@ static void cryptd_queue_worker(struct w
 	 * cryptd_enqueue_request() being accessed from software interrupts.
 	 */
 	local_bh_disable();
-	preempt_disable();
+	local_lock(cryptod_request_lock);
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
-	preempt_enable();
+	local_unlock(cryptod_request_lock);
 	local_bh_enable();
 
 	if (!req)

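For reference, <linux/locallock.h> exists only in the -rt patch set, not
in mainline. A rough sketch of the semantics the patch relies on follows;
this is an approximation from memory, not the actual header:

/* RT kernel: DEFINE_LOCAL_IRQ_LOCK(lvar) creates a per-CPU lock
 * structure, and local_lock(lvar) / local_lock_cpu(lvar) take the
 * current CPU's instance of it.  RT spinlocks are sleeping locks, so
 * the critical section is serialized per CPU yet stays preemptible.
 *
 * non-RT kernel: the primitives collapse to the old scheme:
 */
#define local_lock(lvar)	preempt_disable()
#define local_unlock(lvar)	preempt_enable()
#define local_lock_cpu(lvar)	get_cpu()	/* returns current CPU id */
#define local_unlock_cpu(lvar)	put_cpu()

On a non-RT kernel the patch is thus behaviourally a no-op; only RT gains
a real, but per-CPU and hence effectively uncontended, lock.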

* Re: [rfc 4.16-rt patch] crypto: cryptd - serialize RT request enqueue/dequeue with a local lock
From: Sebastian Andrzej Siewior @ 2018-07-26 16:58 UTC
  To: Mike Galbraith; +Cc: linux-rt-users, Thomas Gleixner, Steven Rostedt

On 2018-07-11 17:19:59 [+0200], Mike Galbraith wrote:
> Note: patch is the result of code inspection...
> 
> cryptd disables preemption to provide request enqueue/dequeue exclusion.
> For RT, use a LOCAL_IRQ_LOCK to provide the same exclusion while keeping
> preemption enabled.

What about this:

-------- >8

From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 26 Jul 2018 18:52:00 +0200
Subject: [PATCH] crypto: cryptd - add a lock instead of
 preempt_disable/local_bh_disable

cryptd has a per-CPU queue which is protected with local_bh_disable() and
preempt_disable().
Add an explicit spinlock to make the locking context more obvious and
visible to lockdep. Since it is a per-CPU lock, there should be no
contention on the actual spinlock.
There is a small race window where we could be migrated to another CPU
after the cpu_queue has been obtained. This is not a problem because the
actual resource is protected by the spinlock.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 crypto/cryptd.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index addca7bae33f..8ad657cddc0a 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
 struct cryptd_cpu_queue {
 	struct crypto_queue queue;
 	struct work_struct work;
+	spinlock_t qlock;
 };
 
 struct cryptd_queue {
@@ -117,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+		spin_lock_init(&cpu_queue->qlock);
 	}
 	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
 	return 0;
@@ -141,8 +143,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	struct cryptd_cpu_queue *cpu_queue;
 	atomic_t *refcnt;
 
-	cpu = get_cpu();
-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
+	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
+	spin_lock_bh(&cpu_queue->qlock);
+	cpu = smp_processor_id();
+
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
 	refcnt = crypto_tfm_ctx(request->tfm);
@@ -158,7 +162,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	atomic_inc(refcnt);
 
 out_put_cpu:
-	put_cpu();
+	spin_unlock_bh(&cpu_queue->qlock);
 
 	return err;
 }
@@ -174,16 +178,11 @@ static void cryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 	/*
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
-	 * preempt_disable/enable is used to prevent being preempted by
-	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
-	 * cryptd_enqueue_request() being accessed from software interrupts.
 	 */
-	local_bh_disable();
-	preempt_disable();
+	spin_lock_bh(&cpu_queue->qlock);
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
-	preempt_enable();
-	local_bh_enable();
+	spin_unlock_bh(&cpu_queue->qlock);
 
 	if (!req)
 		return;
-- 
2.18.0

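A condensed sketch of why the migration window mentioned in the changelog
is harmless; this is illustrative only, with the refcount and backlog
handling of the real cryptd_enqueue_request() elided:

static int enqueue_sketch(struct cryptd_queue *queue,
			  struct crypto_async_request *request)
{
	struct cryptd_cpu_queue *cpu_queue;
	int err;

	/* Snapshot the queue of whatever CPU we run on right now;
	 * preemption is still enabled, so this is only a hint. */
	cpu_queue = raw_cpu_ptr(queue->cpu_queue);

	/* A migration at this point moves the task, not the snapshot... */
	spin_lock_bh(&cpu_queue->qlock);

	/* ...so the lock held always matches the queue being modified,
	 * possibly that of a remote CPU. */
	err = crypto_enqueue_request(&cpu_queue->queue, request);

	spin_unlock_bh(&cpu_queue->qlock);
	return err;
}

The _bh variant matters: requests can be enqueued from softirq context, so
the worker must keep BHs disabled while holding qlock to avoid deadlocking
against a softirq on the same CPU.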

* Re: [rfc 4.16-rt patch] crypto: cryptd - serialize RT request enqueue/dequeue with a local lock
From: Mike Galbraith @ 2018-07-26 18:03 UTC
  To: Sebastian Andrzej Siewior; +Cc: linux-rt-users, Thomas Gleixner, Steven Rostedt

On Thu, 2018-07-26 at 18:58 +0200, Sebastian Andrzej Siewior wrote:
> On 2018-07-11 17:19:59 [+0200], Mike Galbraith wrote:
> > Note: patch is the result of code inspection...
> > 
> > cryptd disables preemption to provide request enqueue/dequeue exclusion.
> > For RT, use a LOCAL_IRQ_LOCK to provide the same exclusion while keeping
> > preemption enabled.
> 
> What about this:

Looks fine to me; I'll ask my arm-thing box for its opinion.

> -------- >8
> 
> From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Date: Thu, 26 Jul 2018 18:52:00 +0200
> Subject: [PATCH] crypto: cryptd - add a lock instead of
>  preempt_disable/local_bh_disable
> 
> cryptd has a per-CPU queue which is protected with local_bh_disable() and
> preempt_disable().
> Add an explicit spinlock to make the locking context more obvious and
> visible to lockdep. Since it is a per-CPU lock, there should be no
> contention on the actual spinlock.
> There is a small race window where we could be migrated to another CPU
> after the cpu_queue has been obtained. This is not a problem because the
> actual resource is protected by the spinlock.
> 
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> ---
>  crypto/cryptd.c | 19 +++++++++----------
>  1 file changed, 9 insertions(+), 10 deletions(-)
> 
> diff --git a/crypto/cryptd.c b/crypto/cryptd.c
> index addca7bae33f..8ad657cddc0a 100644
> --- a/crypto/cryptd.c
> +++ b/crypto/cryptd.c
> @@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
>  struct cryptd_cpu_queue {
>  	struct crypto_queue queue;
>  	struct work_struct work;
> +	spinlock_t qlock;
>  };
>  
>  struct cryptd_queue {
> @@ -117,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
>  		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
>  		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
>  		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
> +		spin_lock_init(&cpu_queue->qlock);
>  	}
>  	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
>  	return 0;
> @@ -141,8 +143,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
>  	struct cryptd_cpu_queue *cpu_queue;
>  	atomic_t *refcnt;
>  
> -	cpu = get_cpu();
> -	cpu_queue = this_cpu_ptr(queue->cpu_queue);
> +	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
> +	spin_lock_bh(&cpu_queue->qlock);
> +	cpu = smp_processor_id();
> +
>  	err = crypto_enqueue_request(&cpu_queue->queue, request);
>  
>  	refcnt = crypto_tfm_ctx(request->tfm);
> @@ -158,7 +162,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
>  	atomic_inc(refcnt);
>  
>  out_put_cpu:
> -	put_cpu();
> +	spin_unlock_bh(&cpu_queue->qlock);
>  
>  	return err;
>  }
> @@ -174,16 +178,11 @@ static void cryptd_queue_worker(struct work_struct *work)
>  	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
>  	/*
>  	 * Only handle one request at a time to avoid hogging crypto workqueue.
> -	 * preempt_disable/enable is used to prevent being preempted by
> -	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
> -	 * cryptd_enqueue_request() being accessed from software interrupts.
>  	 */
> -	local_bh_disable();
> -	preempt_disable();
> +	spin_lock_bh(&cpu_queue->qlock);
>  	backlog = crypto_get_backlog(&cpu_queue->queue);
>  	req = crypto_dequeue_request(&cpu_queue->queue);
> -	preempt_enable();
> -	local_bh_enable();
> +	spin_unlock_bh(&cpu_queue->qlock);
>  
>  	if (!req)
>  		return;
