* [PATCH v3] blktrace: switch trace spinlock to a raw spinlock
From: Wander Lairson Costa @ 2021-12-17 14:16 UTC
  To: linux-kernel, Jens Axboe, Steven Rostedt, Ingo Molnar,
	open list:BLOCK LAYER
  Cc: Thomas Gleixner, Sebastian Andrzej Siewior, Wander Lairson Costa

TRACE_EVENT disables preemption before calling the callback. Because of
that, blktrace triggers the following bug under PREEMPT_RT:

 BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:35
 in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 119, name: kworker/u2:2
 5 locks held by kworker/u2:2/119:
  #0: ffff8c2e4a88f538 ((wq_completion)xfs-cil/dm-0){+.+.}-{0:0}, at: process_one_work+0x200/0x450
  #1: ffffab3840ac7e68 ((work_completion)(&cil->xc_push_work)){+.+.}-{0:0}, at: process_one_work+0x200/0x450
  #2: ffff8c2e4a887128 (&cil->xc_ctx_lock){++++}-{3:3}, at: xlog_cil_push_work+0xb7/0x670 [xfs]
  #3: ffffffffa6a63780 (rcu_read_lock){....}-{1:2}, at: blk_add_trace_bio+0x0/0x1f0
  #4: ffffffffa6610620 (running_trace_lock){+.+.}-{2:2}, at: __blk_add_trace+0x3ef/0x480
 Preemption disabled at:
 [<ffffffffa4d35c05>] migrate_enable+0x45/0x140
 CPU: 0 PID: 119 Comm: kworker/u2:2 Kdump: loaded Not tainted 5.14.0-25.rt21.25.light.el9.x86_64+debug #1
 Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
 Workqueue: xfs-cil/dm-0 xlog_cil_push_work [xfs]
 Call Trace:
  ? migrate_enable+0x45/0x140
  dump_stack_lvl+0x57/0x7d
  ___might_sleep.cold+0xe3/0xf7
  rt_spin_lock+0x3a/0xd0
  ? __blk_add_trace+0x3ef/0x480
  __blk_add_trace+0x3ef/0x480
  blk_add_trace_bio+0x18d/0x1f0
  trace_block_bio_queue+0xb5/0x150
  submit_bio_checks+0x1f0/0x520
  ? sched_clock_cpu+0xb/0x100
  submit_bio_noacct+0x30/0x1d0
  ? bio_associate_blkg+0x66/0x190
  xlog_cil_push_work+0x1b6/0x670 [xfs]
  ? register_lock_class+0x43/0x4f0
  ? xfs_swap_extents+0x5f0/0x5f0 [xfs]
  process_one_work+0x275/0x450
  ? process_one_work+0x200/0x450
  worker_thread+0x55/0x3c0
  ? process_one_work+0x450/0x450
  kthread+0x188/0x1a0
  ? set_kthread_struct+0x40/0x40
  ret_from_fork+0x22/0x30

To avoid this bug, we switch the trace lock to a raw spinlock.
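
For illustration only (this sketch is not part of the patch and the
names are made up), the safe pattern on PREEMPT_RT looks like this:

	/* Illustrative sketch: on PREEMPT_RT a spinlock_t is backed by a
	 * sleeping rt_mutex, so taking it from a preempt-disabled
	 * tracepoint callback triggers the splat above. A raw_spinlock_t
	 * always spins and stays valid in that context. */
	static DEFINE_RAW_SPINLOCK(example_lock);

	static void example_tracepoint_callback(void)
	{
		unsigned long flags;

		/* tracepoint callbacks run with preemption disabled */
		raw_spin_lock_irqsave(&example_lock, flags);
		/* keep the critical section short and bounded */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}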

Signed-off-by: Wander Lairson Costa <wander@redhat.com>
---
 kernel/trace/blktrace.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 1183c88634aa..a86e022f7155 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -34,7 +34,7 @@ static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;
 
 static LIST_HEAD(running_trace_list);
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(running_trace_lock);
 
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC	0x1
@@ -121,12 +121,12 @@ static void trace_note_tsk(struct task_struct *tsk)
 	struct blk_trace *bt;
 
 	tsk->btrace_seq = blktrace_seq;
-	spin_lock_irqsave(&running_trace_lock, flags);
+	raw_spin_lock_irqsave(&running_trace_lock, flags);
 	list_for_each_entry(bt, &running_trace_list, running_list) {
 		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
 			   sizeof(tsk->comm), 0);
 	}
-	spin_unlock_irqrestore(&running_trace_lock, flags);
+	raw_spin_unlock_irqrestore(&running_trace_lock, flags);
 }
 
 static void trace_note_time(struct blk_trace *bt)
@@ -666,9 +666,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
 			blktrace_seq++;
 			smp_mb();
 			bt->trace_state = Blktrace_running;
-			spin_lock_irq(&running_trace_lock);
+			raw_spin_lock_irq(&running_trace_lock);
 			list_add(&bt->running_list, &running_trace_list);
-			spin_unlock_irq(&running_trace_lock);
+			raw_spin_unlock_irq(&running_trace_lock);
 
 			trace_note_time(bt);
 			ret = 0;
@@ -676,9 +676,9 @@ static int __blk_trace_startstop(struct request_queue *q, int start)
 	} else {
 		if (bt->trace_state == Blktrace_running) {
 			bt->trace_state = Blktrace_stopped;
-			spin_lock_irq(&running_trace_lock);
+			raw_spin_lock_irq(&running_trace_lock);
 			list_del_init(&bt->running_list);
-			spin_unlock_irq(&running_trace_lock);
+			raw_spin_unlock_irq(&running_trace_lock);
 			relay_flush(bt->rchan);
 			ret = 0;
 		}
@@ -1608,9 +1608,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
 
 	if (bt->trace_state == Blktrace_running) {
 		bt->trace_state = Blktrace_stopped;
-		spin_lock_irq(&running_trace_lock);
+		raw_spin_lock_irq(&running_trace_lock);
 		list_del_init(&bt->running_list);
-		spin_unlock_irq(&running_trace_lock);
+		raw_spin_unlock_irq(&running_trace_lock);
 		relay_flush(bt->rchan);
 	}
 
-- 
2.31.1



* Re: [PATCH v3] blktrace: switch trace spinlock to a raw spinlock
From: Sebastian Andrzej Siewior @ 2021-12-20 10:05 UTC
  To: Wander Lairson Costa
  Cc: linux-kernel, Jens Axboe, Steven Rostedt, Ingo Molnar,
	open list:BLOCK LAYER, Thomas Gleixner

On 2021-12-17 11:16:56 [-0300], Wander Lairson Costa wrote:

Assuming neither Steven nor Jens object,

> TRACE_EVENT disables preemption before calling the callback. Because of
> that, blktrace triggers the following bug under PREEMPT_RT:

The tracepoint is invoked with disabled preemption.
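
For reference, the tracepoint glue brackets every registered callback
roughly like this (paraphrased from __DO_TRACE() in
include/linux/tracepoint.h; the exact shape varies across kernel
versions):

	preempt_disable_notrace();
	/* each registered probe is called here, e.g. blk_add_trace_bio() */
	preempt_enable_notrace();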

>  BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:35
>  in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 119, name: kworker/u2:2
>  5 locks held by kworker/u2:2/119:
>   #0: ffff8c2e4a88f538 ((wq_completion)xfs-cil/dm-0){+.+.}-{0:0}, at: process_one_work+0x200/0x450
>   #1: ffffab3840ac7e68 ((work_completion)(&cil->xc_push_work)){+.+.}-{0:0}, at: process_one_work+0x200/0x450
>   #2: ffff8c2e4a887128 (&cil->xc_ctx_lock){++++}-{3:3}, at: xlog_cil_push_work+0xb7/0x670 [xfs]
>   #3: ffffffffa6a63780 (rcu_read_lock){....}-{1:2}, at: blk_add_trace_bio+0x0/0x1f0
>   #4: ffffffffa6610620 (running_trace_lock){+.+.}-{2:2}, at: __blk_add_trace+0x3ef/0x480
>  Preemption disabled at:
>  [<ffffffffa4d35c05>] migrate_enable+0x45/0x140
>  CPU: 0 PID: 119 Comm: kworker/u2:2 Kdump: loaded Not tainted 5.14.0-25.rt21.25.light.el9.x86_64+debug #1
>  Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
>  Workqueue: xfs-cil/dm-0 xlog_cil_push_work [xfs]
>  Call Trace:
>   ? migrate_enable+0x45/0x140
>   dump_stack_lvl+0x57/0x7d
>   ___might_sleep.cold+0xe3/0xf7
>   rt_spin_lock+0x3a/0xd0
>   ? __blk_add_trace+0x3ef/0x480
>   __blk_add_trace+0x3ef/0x480
>   blk_add_trace_bio+0x18d/0x1f0
>   trace_block_bio_queue+0xb5/0x150
>   submit_bio_checks+0x1f0/0x520
>   ? sched_clock_cpu+0xb/0x100
>   submit_bio_noacct+0x30/0x1d0
>   ? bio_associate_blkg+0x66/0x190
>   xlog_cil_push_work+0x1b6/0x670 [xfs]
>   ? register_lock_class+0x43/0x4f0
>   ? xfs_swap_extents+0x5f0/0x5f0 [xfs]
>   process_one_work+0x275/0x450
>   ? process_one_work+0x200/0x450
>   worker_thread+0x55/0x3c0
>   ? process_one_work+0x450/0x450
>   kthread+0x188/0x1a0
>   ? set_kthread_struct+0x40/0x40
>   ret_from_fork+0x22/0x30

The above fills 90% of my screen with _no_ additional information. What
about:

   The running_trace_lock protects running_trace_list and is acquired
   within the tracepoint which implies disabled preemption. The spinlock_t
   typed lock cannot be acquired with disabled preemption on PREEMPT_RT
   because it becomes a sleeping lock.
   The runtime of the tracepoint depends on the number of entries in
   running_trace_list and has no limit. The blk-tracer is considered debug
   code and higher latencies here are okay.

   Make running_trace_lock a raw_spinlock_t.
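
The critical section whose runtime is unbounded is the list walk in
trace_note_tsk(), as in the diff above:

	raw_spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), 0);
	}
	raw_spin_unlock_irqrestore(&running_trace_lock, flags);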

> To avoid this bug, we switch the trace lock to a raw spinlock.

Basically I want to give rationale _why_ changing a lock to
raw_spinlock_t _here_ is okay. I want to avoid that people slap a
s/spinlock_t/raw_spinlock_t/ each time they see warning of this kind.

> Signed-off-by: Wander Lairson Costa <wander@redhat.com>

Sebastian


* Re: [PATCH v3] blktrace: switch trace spinlock to a raw spinlock
From: Wander Costa @ 2021-12-20 13:03 UTC
  To: Sebastian Andrzej Siewior
  Cc: Wander Lairson Costa, open list, Jens Axboe, Steven Rostedt,
	Ingo Molnar, open list:BLOCK LAYER, Thomas Gleixner

On Mon, Dec 20, 2021 at 7:05 AM Sebastian Andrzej Siewior
<bigeasy@linutronix.de> wrote:
>
> On 2021-12-17 11:16:56 [-0300], Wander Lairson Costa wrote:
>
> Assuming neither Steven nor Jens object,
>
> > TRACE_EVENT disables preemption before calling the callback. Because of
> > that, blktrace triggers the following bug under PREEMPT_RT:
>
> The tracepoint is invoked with disabled preemption.
>
> > [ backtrace snipped; it is identical to the one in the patch above ]
>
> The above fills 90% of my screen with _no_ additional information. What
> about:
>
>    The running_trace_lock protects running_trace_list and is acquired
>    within the tracepoint which implies disabled preemption. The spinlock_t
>    typed lock cannot be acquired with disabled preemption on PREEMPT_RT
>    because it becomes a sleeping lock.
>    The runtime of the tracepoint depends on the number of entries in
>    running_trace_list and has no limit. The blk-tracer is considered debug
>    code and higher latencies here are okay.
>
>    Make running_trace_lock a raw_spinlock_t.
>
> > To avoid this bug, we switch the trace lock to a raw spinlock.
>
> Basically I want to give rationale _why_ changing a lock to
> raw_spinlock_t _here_ is okay. I want to avoid that people slap a
> s/spinlock_t/raw_spinlock_t/ each time they see warning of this kind.
>

Thanks, this sounds great. I am going to send v4 with the modified
commit message right now.

> > Signed-off-by: Wander Lairson Costa <wander@redhat.com>
>
> Sebastian
>


