linux-kernel.vger.kernel.org archive mirror
* [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug
@ 2019-06-10  5:52 Yuyang Du
  2019-06-10 15:35 ` Qian Cai
  2019-07-08 17:32 ` Qian Cai
  0 siblings, 2 replies; 6+ messages in thread
From: Yuyang Du @ 2019-06-10  5:52 UTC (permalink / raw)
  To: peterz, will.deacon, mingo; +Cc: cai, linux-kernel, Yuyang Du

The commit:

  091806515124b20 ("locking/lockdep: Consolidate lock usage bit initialization")

misses marking the LOCK_USED flag during IRQ usage initialization when
CONFIG_TRACE_IRQFLAGS or CONFIG_PROVE_LOCKING is not defined. Fix it.
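
For illustration, the !CONFIG stub removed below returned success without ever
reaching mark_lock(), so a lock class acquired in such a configuration was
never accounted as used:

  static inline int
  mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
  {
  	return 1;	/* LOCK_USED is never marked */
  }

After this patch there is a single mark_usage(); only the IRQ-state checks are
conditional, and mark_lock(curr, hlock, LOCK_USED) runs in every configuration.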

Reported-by: Qian Cai <cai@lca.pw>
Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
 kernel/locking/lockdep.c | 110 +++++++++++++++++++++++------------------------
 1 file changed, 53 insertions(+), 57 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 48a840a..c3db987 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3460,9 +3460,61 @@ void trace_softirqs_off(unsigned long ip)
 		debug_atomic_inc(redundant_softirqs_off);
 }
 
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+	return 2 * !!task->hardirq_context + !!task->softirq_context;
+}
+
+static int separate_irq_context(struct task_struct *curr,
+		struct held_lock *hlock)
+{
+	unsigned int depth = curr->lockdep_depth;
+
+	/*
+	 * Keep track of points where we cross into an interrupt context:
+	 */
+	if (depth) {
+		struct held_lock *prev_hlock;
+
+		prev_hlock = curr->held_locks + depth-1;
+		/*
+		 * If we cross into another context, reset the
+		 * hash key (this also prevents the checking and the
+		 * adding of the dependency to 'prev'):
+		 */
+		if (prev_hlock->irq_context != hlock->irq_context)
+			return 1;
+	}
+	return 0;
+}
+
+#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+
+static inline
+int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+		enum lock_usage_bit new_bit)
+{
+	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
+	return 1;
+}
+
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+	return 0;
+}
+
+static inline int separate_irq_context(struct task_struct *curr,
+		struct held_lock *hlock)
+{
+	return 0;
+}
+
+#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+
 static int
 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
 {
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 	if (!check)
 		goto lock_used;
 
@@ -3510,6 +3562,7 @@ void trace_softirqs_off(unsigned long ip)
 	}
 
 lock_used:
+#endif
 	/* mark it as used: */
 	if (!mark_lock(curr, hlock, LOCK_USED))
 		return 0;
@@ -3517,63 +3570,6 @@ void trace_softirqs_off(unsigned long ip)
 	return 1;
 }
 
-static inline unsigned int task_irq_context(struct task_struct *task)
-{
-	return 2 * !!task->hardirq_context + !!task->softirq_context;
-}
-
-static int separate_irq_context(struct task_struct *curr,
-		struct held_lock *hlock)
-{
-	unsigned int depth = curr->lockdep_depth;
-
-	/*
-	 * Keep track of points where we cross into an interrupt context:
-	 */
-	if (depth) {
-		struct held_lock *prev_hlock;
-
-		prev_hlock = curr->held_locks + depth-1;
-		/*
-		 * If we cross into another context, reset the
-		 * hash key (this also prevents the checking and the
-		 * adding of the dependency to 'prev'):
-		 */
-		if (prev_hlock->irq_context != hlock->irq_context)
-			return 1;
-	}
-	return 0;
-}
-
-#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
-
-static inline
-int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
-		enum lock_usage_bit new_bit)
-{
-	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
-	return 1;
-}
-
-static inline int
-mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
-{
-	return 1;
-}
-
-static inline unsigned int task_irq_context(struct task_struct *task)
-{
-	return 0;
-}
-
-static inline int separate_irq_context(struct task_struct *curr,
-		struct held_lock *hlock)
-{
-	return 0;
-}
-
-#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
-
 /*
  * Mark a lock with a usage bit, and validate the state transition:
  */
-- 
1.8.3.1



* Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug
  2019-06-10  5:52 [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug Yuyang Du
@ 2019-06-10 15:35 ` Qian Cai
  2019-06-11 10:09   ` Yuyang Du
  2019-07-08 17:32 ` Qian Cai
  1 sibling, 1 reply; 6+ messages in thread
From: Qian Cai @ 2019-06-10 15:35 UTC (permalink / raw)
  To: Yuyang Du, peterz, will.deacon, mingo; +Cc: linux-kernel

On Mon, 2019-06-10 at 13:52 +0800, Yuyang Du wrote:
> The commit:
> 
>   091806515124b20 ("locking/lockdep: Consolidate lock usage bit
> initialization")
> 
> misses marking LOCK_USED flag at IRQ usage initialization when
> CONFIG_TRACE_IRQFLAGS
> or CONFIG_PROVE_LOCKING is not defined. Fix it.
> 
> Reported-by: Qian Cai <cai@lca.pw>
> Signed-off-by: Yuyang Du <duyuyang@gmail.com>

It works fine.


* Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug
  2019-06-10 15:35 ` Qian Cai
@ 2019-06-11 10:09   ` Yuyang Du
  0 siblings, 0 replies; 6+ messages in thread
From: Yuyang Du @ 2019-06-11 10:09 UTC (permalink / raw)
  To: Qian Cai; +Cc: Peter Zijlstra, Will Deacon, Ingo Molnar, LKML

Great, thanks.

On Mon, 10 Jun 2019 at 23:35, Qian Cai <cai@lca.pw> wrote:
>
> On Mon, 2019-06-10 at 13:52 +0800, Yuyang Du wrote:
> > The commit:
> >
> >   091806515124b20 ("locking/lockdep: Consolidate lock usage bit
> > initialization")
> >
> > misses marking LOCK_USED flag at IRQ usage initialization when
> > CONFIG_TRACE_IRQFLAGS
> > or CONFIG_PROVE_LOCKING is not defined. Fix it.
> >
> > Reported-by: Qian Cai <cai@lca.pw>
> > Signed-off-by: Yuyang Du <duyuyang@gmail.com>
>
> It works fine.


* Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug
  2019-06-10  5:52 [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug Yuyang Du
  2019-06-10 15:35 ` Qian Cai
@ 2019-07-08 17:32 ` Qian Cai
  2019-07-09  1:21   ` Yuyang Du
  1 sibling, 1 reply; 6+ messages in thread
From: Qian Cai @ 2019-07-08 17:32 UTC (permalink / raw)
  To: Yuyang Du, peterz, will.deacon, mingo; +Cc: linux-kernel

I saw Ingo send a pull request to Linus for 5.3 [1] that includes the offending
commit "locking/lockdep: Consolidate lock usage bit initialization" but does not
include this patch.

[1] https://lore.kernel.org/lkml/20190708093516.GA57558@gmail.com/

On Mon, 2019-06-10 at 13:52 +0800, Yuyang Du wrote:
> The commit:
> 
>   091806515124b20 ("locking/lockdep: Consolidate lock usage bit
> initialization")
> 
> misses marking LOCK_USED flag at IRQ usage initialization when
> CONFIG_TRACE_IRQFLAGS
> or CONFIG_PROVE_LOCKING is not defined. Fix it.
> 
> Reported-by: Qian Cai <cai@lca.pw>
> Signed-off-by: Yuyang Du <duyuyang@gmail.com>
> [ ... diff snipped; unchanged from the original patch above ... ]


* Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug
  2019-07-08 17:32 ` Qian Cai
@ 2019-07-09  1:21   ` Yuyang Du
  2019-07-09  1:50     ` Qian Cai
  0 siblings, 1 reply; 6+ messages in thread
From: Yuyang Du @ 2019-07-09  1:21 UTC (permalink / raw)
  To: Qian Cai; +Cc: Peter Zijlstra, Will Deacon, Ingo Molnar, LKML

The problem should have been fixed with this in that pull:

locking/lockdep: Move mark_lock() inside CONFIG_TRACE_IRQFLAGS &&
CONFIG_PROVE_LOCKING

and this is a better fix than mine.

Thanks,
Yuyang

On Tue, 9 Jul 2019 at 01:32, Qian Cai <cai@lca.pw> wrote:
>
> I saw Ingo send a pull request to Linus for 5.3 [1] includes the offensive
> commit "locking/lockdep: Consolidate lock usage bit initialization" but did not
> include this patch.
>
> [1] https://lore.kernel.org/lkml/20190708093516.GA57558@gmail.com/
>
> On Mon, 2019-06-10 at 13:52 +0800, Yuyang Du wrote:
> > The commit:
> >
> >   091806515124b20 ("locking/lockdep: Consolidate lock usage bit
> > initialization")
> >
> > misses marking LOCK_USED flag at IRQ usage initialization when
> > CONFIG_TRACE_IRQFLAGS
> > or CONFIG_PROVE_LOCKING is not defined. Fix it.
> >
> > Reported-by: Qian Cai <cai@lca.pw>
> > Signed-off-by: Yuyang Du <duyuyang@gmail.com>
> > [ ... quoted patch snipped ... ]


* Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug
  2019-07-09  1:21   ` Yuyang Du
@ 2019-07-09  1:50     ` Qian Cai
  0 siblings, 0 replies; 6+ messages in thread
From: Qian Cai @ 2019-07-09  1:50 UTC (permalink / raw)
  To: Yuyang Du; +Cc: Peter Zijlstra, Will Deacon, Ingo Molnar, LKML



> On Jul 8, 2019, at 9:21 PM, Yuyang Du <duyuyang@gmail.com> wrote:
> 
> The problem should have been fixed with this in that pull:
> 
> locking/lockdep: Move mark_lock() inside CONFIG_TRACE_IRQFLAGS &&
> CONFIG_PROVE_LOCKING
> 
> and this is a better fix than mine.

I don’t think so. That commit was included in today’s linux-next, but I can still reproduce the issue there.

[ 8872.727085] DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused)
[ 8872.727113] WARNING: CPU: 60 PID: 124801 at kernel/locking/lockdep_proc.c:249 lockdep_stats_show+0xe44/0x11e0
[ 8872.727157] Modules linked in: brd ext4 crc16 mbcache jbd2 loop i2c_opal i2c_core ip_tables x_tables xfs sd_mod ahci libahci tg3 firmware_class libata libphy dm_mirror dm_region_hash dm_log dm_mod [last unloaded: dummy_del_mod]
[ 8872.727213] CPU: 60 PID: 124801 Comm: proc01 Tainted: G        W  O      5.2.0-next-20190708+ #1
[ 8872.727253] NIP:  c0000000001a97b4 LR: c0000000001a97b0 CTR: c0000000008c4660
[ 8872.727293] REGS: c000001c2e84f8e0 TRAP: 0700   Tainted: G        W  O       (5.2.0-next-20190708+)
[ 8872.727326] MSR:  9000000000029033 <SF,HV,EE,ME,IR,DR,RI,LE>  CR: 28888402  XER: 20040000
[ 8872.727371] CFAR: c00000000011a0d4 IRQMASK: 0 
               GPR00: c0000000001a97b0 c000001c2e84fb70 c0000000016f8b00 0000000000000044 
               GPR04: c00000000172c258 c0000000001b9288 4e5241575f534b43 75626564284e4f5f 
               GPR08: 0000001ffdd10000 0000000000000000 0000000000000000 212029736b636f6c 
               GPR12: 756e755f726e203d c000001ffffce400 0000000000000000 c0000000015bd610 
               GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000dfd 
               GPR20: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 
               GPR24: 0000000000000000 0000000000000000 0000000000000000 c000001fb7d0f0a8 
               GPR28: 0000000000000000 c00000000172c278 c00000000172c4c4 c00000000172ba18 
[ 8872.727698] NIP [c0000000001a97b4] lockdep_stats_show+0xe44/0x11e0
[ 8872.727742] LR [c0000000001a97b0] lockdep_stats_show+0xe40/0x11e0
[ 8872.727775] Call Trace:
[ 8872.727800] [c000001c2e84fb70] [c0000000001a97b0] lockdep_stats_show+0xe40/0x11e0 (unreliable)
[ 8872.727831] [c000001c2e84fca0] [c000000000492434] seq_read+0x1d4/0x620
[ 8872.727886] [c000001c2e84fd30] [c000000000515520] proc_reg_read+0x90/0x130
[ 8872.727938] [c000001c2e84fd60] [c000000000452d6c] __vfs_read+0x3c/0x70
[ 8872.727989] [c000001c2e84fd80] [c000000000452e5c] vfs_read+0xbc/0x1a0
[ 8872.728033] [c000001c2e84fdd0] [c0000000004532ec] ksys_read+0x7c/0x140
[ 8872.728073] [c000001c2e84fe20] [c00000000000ae08] system_call+0x5c/0x70
[ 8872.728126] Instruction dump:
[ 8872.728161] 419ef3b0 3d220003 392974fc 81290000 2f890000 409ef39c 3c82ff33 3c62ff33 
[ 8872.728194] 38841af8 386308e8 4bf708c1 60000000 <0fe00000> 4bfff37c 60000000 39200000 
[ 8872.728251] ---[ end trace 42d16c13415f9f32 ]---
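
(For context, this is lockdep's bookkeeping self-check: mark_lock() decrements
the nr_unused_locks counter the first time it marks a class LOCK_USED, and
lockdep_stats_show() recounts the unused classes and compares the two. A rough
sketch of the two sides (not verbatim kernel code; the recount condition is an
assumption for illustration):

  /* in mark_lock(), when a class first gets the LOCK_USED bit: */
  if (new_bit == LOCK_USED)
  	debug_atomic_dec(nr_unused_locks);

  /* in lockdep_stats_show(), assumed recount over the registered classes: */
  list_for_each_entry(class, &all_lock_classes, lock_entry)
  	if (class->usage_mask == 0)
  		nr_unused++;

  DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);

If the LOCK_USED accounting gets out of sync with the per-class usage bits,
for example when usage bits are set without the class ever being marked
LOCK_USED, the two counts diverge and this warning fires.)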

> [ ... remainder of quoted message snipped ... ]



