* [PATCH 0/8] locking/lockdep: Add comments and make some code adjustments
@ 2019-02-26 10:03 Yuyang Du
  2019-02-26 10:03 ` [PATCH 1/8] locking/lockdep: Change all print_*() return type to void Yuyang Du
                   ` (7 more replies)
  0 siblings, 8 replies; 9+ messages in thread
From: Yuyang Du @ 2019-02-26 10:03 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, Yuyang Du

Hi Peter,

I recently looked into some system hang issues, and while at it I tried to
use and understand lockdep. These patches are the result. I believe they
would have helped me, so hopefully they help others as well.

Thanks,
Yuyang

--

Yuyang Du (8):
  locking/lockdep: Change all print_*() return type to void
  locking/lockdep: Add description and explanation in lockdep design doc
  locking/lockdep: Adjust lock usage bit character checks
  locking/lockdep: Remove useless conditional macro
  locking/lockdep: Adjust indents for some lines
  locking/lockdep: Print right depth for chain key collision
  locking/lockdep: Update obsolete struct field description
  locking/lockdep: Consider zapped locks when printing lock chains in
    /proc

 Documentation/locking/lockdep-design.txt |  89 ++++++---
 include/linux/lockdep.h                  |   6 +-
 kernel/locking/lockdep.c                 | 315 ++++++++++++++++---------------
 kernel/locking/lockdep_internals.h       |   1 +
 kernel/locking/lockdep_proc.c            |  18 +-
 5 files changed, 247 insertions(+), 182 deletions(-)

-- 
1.8.3.1



* [PATCH 1/8] locking/lockdep: Change all print_*() return type to void
  2019-02-26 10:03 [PATCH 0/8] locking/lockdep: Add comments and make some code adjustments Yuyang Du
@ 2019-02-26 10:03 ` Yuyang Du
  2019-02-26 10:03 ` [PATCH 2/8] locking/lockdep: Add description and explanation in lockdep design doc Yuyang Du
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Yuyang Du @ 2019-02-26 10:03 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, Yuyang Du

None of the print_*() functions' return values is actually used, so change
their return type to void. No functional change.

In cases where a constant return value was only propagated by the caller,
this change improves readability, e.g.:

	print_x();
	return 0;

is definitely better than:

	return print_x(); /* where print_x() only returns 0 */
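
For illustration only (not part of the patch), a minimal userspace sketch of
the resulting pattern; the names silent, print_report() and check_something()
are made up:

	#include <stdio.h>

	static int silent;

	/* After the conversion: the report helper returns void ... */
	static void print_report(int err)
	{
		if (silent)
			return;
		printf("error: %d\n", err);
	}

	/* ... and its caller returns 0 explicitly instead of propagating it. */
	static int check_something(int err)
	{
		if (err) {
			print_report(err);	/* was: return print_report(err); */
			return 0;
		}
		return 1;
	}

	int main(void)
	{
		check_something(42);
		return 0;
	}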

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
 kernel/locking/lockdep.c | 222 ++++++++++++++++++++++++-----------------------
 1 file changed, 112 insertions(+), 110 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c1653a1..41eab3d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1084,23 +1084,20 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
  */
-static noinline int
+static noinline void
 print_circular_bug_entry(struct lock_list *target, int depth)
 {
 	if (debug_locks_silent)
-		return 0;
+		return;
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
 	printk(KERN_CONT ":\n");
 	print_stack_trace(&target->trace, 6);
-
-	return 0;
 }
 
-static void
-print_circular_lock_scenario(struct held_lock *src,
-			     struct held_lock *tgt,
-			     struct lock_list *prt)
+static void print_circular_lock_scenario(struct held_lock *src,
+					 struct held_lock *tgt,
+					 struct lock_list *prt)
 {
 	struct lock_class *source = hlock_class(src);
 	struct lock_class *target = hlock_class(tgt);
@@ -1151,7 +1148,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * When a circular dependency is detected, print the
  * header first:
  */
-static noinline int
+static noinline void
 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
 			struct held_lock *check_src,
 			struct held_lock *check_tgt)
@@ -1159,7 +1156,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
 	struct task_struct *curr = current;
 
 	if (debug_locks_silent)
-		return 0;
+		return;
 
 	pr_warn("\n");
 	pr_warn("======================================================\n");
@@ -1177,8 +1174,6 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
 	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
 
 	print_circular_bug_entry(entry, depth);
-
-	return 0;
 }
 
 static inline int class_equal(struct lock_list *entry, void *data)
@@ -1186,11 +1181,11 @@ static inline int class_equal(struct lock_list *entry, void *data)
 	return entry->class == data;
 }
 
-static noinline int print_circular_bug(struct lock_list *this,
-				struct lock_list *target,
-				struct held_lock *check_src,
-				struct held_lock *check_tgt,
-				struct stack_trace *trace)
+static noinline void print_circular_bug(struct lock_list *this,
+					struct lock_list *target,
+					struct held_lock *check_src,
+					struct held_lock *check_tgt,
+					struct stack_trace *trace)
 {
 	struct task_struct *curr = current;
 	struct lock_list *parent;
@@ -1198,10 +1193,10 @@ static noinline int print_circular_bug(struct lock_list *this,
 	int depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
-		return 0;
+		return;
 
 	if (!save_trace(&this->trace))
-		return 0;
+		return;
 
 	depth = get_lock_depth(target);
 
@@ -1223,21 +1218,17 @@ static noinline int print_circular_bug(struct lock_list *this,
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
-
-	return 0;
 }
 
-static noinline int print_bfs_bug(int ret)
+static noinline void print_bfs_bug(int ret)
 {
 	if (!debug_locks_off_graph_unlock())
-		return 0;
+		return;
 
 	/*
 	 * Breadth-first-search failed, graph got corrupted?
 	 */
 	WARN(1, "lockdep bfs error:%d\n", ret);
-
-	return 0;
 }
 
 static int noop_count(struct lock_list *entry, void *data)
@@ -1420,7 +1411,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
  */
 static void __used
 print_shortest_lock_dependencies(struct lock_list *leaf,
-				struct lock_list *root)
+				 struct lock_list *root)
 {
 	struct lock_list *entry = leaf;
 	int depth;
@@ -1442,8 +1433,6 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 		entry = get_lock_parent(entry);
 		depth--;
 	} while (entry && (depth >= 0));
-
-	return;
 }
 
 static void
@@ -1502,7 +1491,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 	printk("\n *** DEADLOCK ***\n\n");
 }
 
-static int
+static void
 print_bad_irq_dependency(struct task_struct *curr,
 			 struct lock_list *prev_root,
 			 struct lock_list *next_root,
@@ -1515,7 +1504,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 			 const char *irqclass)
 {
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
-		return 0;
+		return;
 
 	pr_warn("\n");
 	pr_warn("=====================================================\n");
@@ -1561,19 +1550,17 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
 	if (!save_trace(&prev_root->trace))
-		return 0;
+		return;
 	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
 	pr_warn("\nthe dependencies between the lock to be acquired");
 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
 	if (!save_trace(&next_root->trace))
-		return 0;
+		return;
 	print_shortest_lock_dependencies(forwards_entry, next_root);
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
-
-	return 0;
 }
 
 static int
@@ -1590,23 +1577,28 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 	this.class = hlock_class(prev);
 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-	if (ret < 0)
-		return print_bfs_bug(ret);
+	if (ret < 0) {
+		print_bfs_bug(ret);
+		return 0;
+	}
 	if (ret == 1)
 		return ret;
 
 	that.parent = NULL;
 	that.class = hlock_class(next);
 	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-	if (ret < 0)
-		return print_bfs_bug(ret);
+	if (ret < 0) {
+		print_bfs_bug(ret);
+		return 0;
+	}
 	if (ret == 1)
 		return ret;
 
-	return print_bad_irq_dependency(curr, &this, &that,
-			target_entry, target_entry1,
-			prev, next,
-			bit_backwards, bit_forwards, irqclass);
+	print_bad_irq_dependency(curr, &this, &that,
+				 target_entry, target_entry1,
+				 prev, next,
+				 bit_backwards, bit_forwards, irqclass);
+	return 0;
 }
 
 static const char *state_names[] = {
@@ -1696,7 +1688,7 @@ static void inc_chains(void)
 
 static inline int
 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-		struct held_lock *next)
+		   struct held_lock *next)
 {
 	return 1;
 }
@@ -1708,9 +1700,8 @@ static inline void inc_chains(void)
 
 #endif
 
-static void
-print_deadlock_scenario(struct held_lock *nxt,
-			     struct held_lock *prv)
+static void print_deadlock_scenario(struct held_lock *nxt,
+				    struct held_lock *prv)
 {
 	struct lock_class *next = hlock_class(nxt);
 	struct lock_class *prev = hlock_class(prv);
@@ -1728,12 +1719,12 @@ static inline void inc_chains(void)
 	printk(" May be due to missing lock nesting notation\n\n");
 }
 
-static int
+static void
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
 {
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
-		return 0;
+		return;
 
 	pr_warn("\n");
 	pr_warn("============================================\n");
@@ -1752,8 +1743,6 @@ static inline void inc_chains(void)
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
-
-	return 0;
 }
 
 /*
@@ -1795,7 +1784,8 @@ static inline void inc_chains(void)
 		if (nest)
 			return 2;
 
-		return print_deadlock_bug(curr, prev, next);
+		print_deadlock_bug(curr, prev, next);
+		return 0;
 	}
 	return 1;
 }
@@ -1853,10 +1843,13 @@ static inline void inc_chains(void)
 			 */
 			save(trace);
 		}
-		return print_circular_bug(&this, target_entry, next, prev, trace);
+		print_circular_bug(&this, target_entry, next, prev, trace);
+		return 0;
+	}
+	else if (unlikely(ret < 0)) {
+		print_bfs_bug(ret);
+		return 0;
 	}
-	else if (unlikely(ret < 0))
-		return print_bfs_bug(ret);
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
@@ -1897,8 +1890,10 @@ static inline void inc_chains(void)
 		debug_atomic_inc(nr_redundant);
 		return 2;
 	}
-	if (ret < 0)
-		return print_bfs_bug(ret);
+	if (ret < 0) {
+		print_bfs_bug(ret);
+		return 0;
+	}
 
 
 	if (!trace->entries && !save(trace))
@@ -2088,8 +2083,8 @@ static void print_chain_keys_chain(struct lock_chain *chain)
 }
 
 static void print_collision(struct task_struct *curr,
-			struct held_lock *hlock_next,
-			struct lock_chain *chain)
+			    struct held_lock *hlock_next,
+			    struct lock_chain *chain)
 {
 	pr_warn("\n");
 	pr_warn("============================\n");
@@ -2352,8 +2347,8 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 }
 #else
 static inline int validate_chain(struct task_struct *curr,
-	       	struct lockdep_map *lock, struct held_lock *hlock,
-		int chain_head, u64 chain_key)
+				 struct lockdep_map *lock, struct held_lock *hlock,
+				 int chain_head, u64 chain_key)
 {
 	return 1;
 }
@@ -2410,8 +2405,7 @@ static void check_chain_key(struct task_struct *curr)
 #endif
 }
 
-static void
-print_usage_bug_scenario(struct held_lock *lock)
+static void print_usage_bug_scenario(struct held_lock *lock)
 {
 	struct lock_class *class = hlock_class(lock);
 
@@ -2428,12 +2422,12 @@ static void check_chain_key(struct task_struct *curr)
 	printk("\n *** DEADLOCK ***\n\n");
 }
 
-static int
+static void
 print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
 {
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
-		return 0;
+		return;
 
 	pr_warn("\n");
 	pr_warn("================================\n");
@@ -2463,8 +2457,6 @@ static void check_chain_key(struct task_struct *curr)
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
-
-	return 0;
 }
 
 /*
@@ -2474,8 +2466,10 @@ static void check_chain_key(struct task_struct *curr)
 valid_state(struct task_struct *curr, struct held_lock *this,
 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
 {
-	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
-		return print_usage_bug(curr, this, bad_bit, new_bit);
+	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit))) {
+		print_usage_bug(curr, this, bad_bit, new_bit);
+		return 0;
+	}
 	return 1;
 }
 
@@ -2487,7 +2481,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 /*
  * print irq inversion bug:
  */
-static int
+static void
 print_irq_inversion_bug(struct task_struct *curr,
 			struct lock_list *root, struct lock_list *other,
 			struct held_lock *this, int forwards,
@@ -2498,7 +2492,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	int depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
-		return 0;
+		return;
 
 	pr_warn("\n");
 	pr_warn("========================================================\n");
@@ -2539,13 +2533,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 
 	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
 	if (!save_trace(&root->trace))
-		return 0;
+		return;
 	print_shortest_lock_dependencies(other, root);
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
-
-	return 0;
 }
 
 /*
@@ -2563,13 +2555,16 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_forwards(&root, bit, &target_entry);
-	if (ret < 0)
-		return print_bfs_bug(ret);
+	if (ret < 0) {
+		print_bfs_bug(ret);
+		return 0;
+	}
 	if (ret == 1)
 		return ret;
 
-	return print_irq_inversion_bug(curr, &root, target_entry,
-					this, 1, irqclass);
+	print_irq_inversion_bug(curr, &root, target_entry,
+				this, 1, irqclass);
+	return 0;
 }
 
 /*
@@ -2587,13 +2582,16 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_backwards(&root, bit, &target_entry);
-	if (ret < 0)
-		return print_bfs_bug(ret);
+	if (ret < 0) {
+		print_bfs_bug(ret);
+		return 0;
+	}
 	if (ret == 1)
 		return ret;
 
-	return print_irq_inversion_bug(curr, &root, target_entry,
-					this, 0, irqclass);
+	print_irq_inversion_bug(curr, &root, target_entry,
+				this, 0, irqclass);
+	return 0;
 }
 
 void print_irqtrace_events(struct task_struct *curr)
@@ -3137,15 +3135,15 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
 
-static int
+static void
 print_lock_nested_lock_not_held(struct task_struct *curr,
 				struct held_lock *hlock,
 				unsigned long ip)
 {
 	if (!debug_locks_off())
-		return 0;
+		return;
 	if (debug_locks_silent)
-		return 0;
+		return;
 
 	pr_warn("\n");
 	pr_warn("==================================\n");
@@ -3167,8 +3165,6 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
-
-	return 0;
 }
 
 static int __lock_is_held(const struct lockdep_map *lock, int read);
@@ -3317,8 +3313,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	}
 	chain_key = iterate_chain_key(chain_key, class_idx);
 
-	if (nest_lock && !__lock_is_held(nest_lock, -1))
-		return print_lock_nested_lock_not_held(curr, hlock, ip);
+	if (nest_lock && !__lock_is_held(nest_lock, -1)) {
+		print_lock_nested_lock_not_held(curr, hlock, ip);
+		return 0;
+	}
 
 	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
 		return 0;
@@ -3349,14 +3347,14 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	return 1;
 }
 
-static int
-print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
-			   unsigned long ip)
+static void print_unlock_imbalance_bug(struct task_struct *curr,
+				       struct lockdep_map *lock,
+				       unsigned long ip)
 {
 	if (!debug_locks_off())
-		return 0;
+		return;
 	if (debug_locks_silent)
-		return 0;
+		return;
 
 	pr_warn("\n");
 	pr_warn("=====================================\n");
@@ -3374,8 +3372,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
-
-	return 0;
 }
 
 static int match_held_lock(const struct held_lock *hlock,
@@ -3494,8 +3490,10 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
 		return 0;
 
 	hlock = find_held_lock(curr, lock, depth, &i);
-	if (!hlock)
-		return print_unlock_imbalance_bug(curr, lock, ip);
+	if (!hlock) {
+		print_unlock_imbalance_bug(curr, lock, ip);
+		return 0;
+	}
 
 	lockdep_init_map(lock, name, key, 0);
 	class = register_lock_class(lock, subclass, 0);
@@ -3535,8 +3533,10 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 		return 0;
 
 	hlock = find_held_lock(curr, lock, depth, &i);
-	if (!hlock)
-		return print_unlock_imbalance_bug(curr, lock, ip);
+	if (!hlock) {
+		print_unlock_imbalance_bug(curr, lock, ip);
+		return 0;
+	}
 
 	curr->lockdep_depth = i;
 	curr->curr_chain_key = hlock->prev_chain_key;
@@ -3580,16 +3580,20 @@ static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 	 * So we're all set to release this lock.. wait what lock? We don't
 	 * own any locks, you've been drinking again?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
-		 return print_unlock_imbalance_bug(curr, lock, ip);
+	if (DEBUG_LOCKS_WARN_ON(depth <= 0)) {
+		print_unlock_imbalance_bug(curr, lock, ip);
+		return 0;
+	}
 
 	/*
 	 * Check whether the lock exists in the current stack
 	 * of held locks:
 	 */
 	hlock = find_held_lock(curr, lock, depth, &i);
-	if (!hlock)
-		return print_unlock_imbalance_bug(curr, lock, ip);
+	if (!hlock) {
+		print_unlock_imbalance_bug(curr, lock, ip);
+		return 0;
+	}
 
 	if (hlock->instance == lock)
 		lock_release_holdtime(hlock);
@@ -3932,14 +3936,14 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 EXPORT_SYMBOL_GPL(lock_unpin_lock);
 
 #ifdef CONFIG_LOCK_STAT
-static int
-print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
-			   unsigned long ip)
+static void print_lock_contention_bug(struct task_struct *curr,
+				      struct lockdep_map *lock,
+				      unsigned long ip)
 {
 	if (!debug_locks_off())
-		return 0;
+		return;
 	if (debug_locks_silent)
-		return 0;
+		return;
 
 	pr_warn("\n");
 	pr_warn("=================================\n");
@@ -3957,8 +3961,6 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 
 	pr_warn("\nstack backtrace:\n");
 	dump_stack();
-
-	return 0;
 }
 
 static void
-- 
1.8.3.1



* [PATCH 2/8] locking/lockdep: Add description and explanation in lockdep design doc
  2019-02-26 10:03 [PATCH 0/8] locking/lockdep: Add comments and make some code adjustments Yuyang Du
  2019-02-26 10:03 ` [PATCH 1/8] locking/lockdep: Change all print_*() return type to void Yuyang Du
@ 2019-02-26 10:03 ` Yuyang Du
  2019-02-26 10:03 ` [PATCH 3/8] locking/lockdep: Adjust lock usage bit character checks Yuyang Du
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Yuyang Du @ 2019-02-26 10:03 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, Yuyang Du

Expand the lockdep design document with descriptions and explanations of the
key concepts, which should help with understanding the design as well as
with reading the reports.

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
 Documentation/locking/lockdep-design.txt | 89 +++++++++++++++++++++++---------
 1 file changed, 64 insertions(+), 25 deletions(-)

diff --git a/Documentation/locking/lockdep-design.txt b/Documentation/locking/lockdep-design.txt
index 49f58a0..621d8f4 100644
--- a/Documentation/locking/lockdep-design.txt
+++ b/Documentation/locking/lockdep-design.txt
@@ -10,56 +10,95 @@ Lock-class
 The basic object the validator operates upon is a 'class' of locks.
 
 A class of locks is a group of locks that are logically the same with
-respect to locking rules, even if the locks may have multiple (possibly
-tens of thousands of) instantiations. For example a lock in the inode
-struct is one class, while each inode has its own instantiation of that
-lock class.
-
-The validator tracks the 'state' of lock-classes, and it tracks
-dependencies between different lock-classes. The validator maintains a
-rolling proof that the state and the dependencies are correct.
-
-Unlike an lock instantiation, the lock-class itself never goes away: when
-a lock-class is used for the first time after bootup it gets registered,
-and all subsequent uses of that lock-class will be attached to this
-lock-class.
+respect to locking rules, even if the locks may have multiple (possibly tens
+of thousands of) instantiations. For example a lock in the inode struct is
+one class, while each inode has its own instantiation of that lock class.
+
+The validator tracks the 'usage state' of lock-classes, and it tracks the
+dependencies between different lock-classes. A dependency can be understood
+as lock order, where L1 -> L2 suggests that L1 depends on L2; this can also
+be expressed as a forward dependency (L1 -> L2) or a backward dependency
+(L2 <- L1). From lockdep's perspective, the two locks (L1 and L2) are not
+necessarily related in the sense that some code requires one to be taken
+before the other; the dependency merely records that this order has happened
+at some point. The validator maintains a rolling proof that the lock usages
+and their dependencies are correct, and it emits a splat when they are
+potentially incorrect.
+
+Unlike a lock instance, a lock-class itself never goes away: when an
+instance of a lock-class is used for the first time after bootup, the class
+gets registered, and all (subsequent) instances of that lock-class will be
+mapped to it.
 
 State
 -----
 
-The validator tracks lock-class usage history into 4 * nSTATEs + 1 separate
-state bits:
+The validator tracks lock-class usage history and divides the usage into
+(4 usages * n STATEs + 1) categories:
 
+Where the 4 usages can be:
 - 'ever held in STATE context'
 - 'ever held as readlock in STATE context'
 - 'ever held with STATE enabled'
 - 'ever held as readlock with STATE enabled'
 
-Where STATE can be either one of (kernel/locking/lockdep_states.h)
- - hardirq
- - softirq
+Where the n STATEs are coded in kernel/locking/lockdep_states.h and as of
+now they include:
+- hardirq
+- softirq
 
+Where the last 1 category is:
 - 'ever used'                                       [ == !unused        ]
 
-When locking rules are violated, these state bits are presented in the
-locking error messages, inside curlies. A contrived example:
+When locking rules are violated, these usage bits are presented in the
+locking error messages, inside curlies, with a total of 2 * n STATEs bits.
+See a contrived example:
 
    modprobe/2287 is trying to acquire lock:
-    (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
+    (&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
 
    but task is already holding lock:
-    (&sio_locks[i].lock){-.-...}, at: [<c02867fd>] mutex_lock+0x21/0x24
+    (&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
 
 
-The bit position indicates STATE, STATE-read, for each of the states listed
-above, and the character displayed in each indicates:
+For a given lock, the bit positions from left to right indicate the usage
+of the lock and of its readlock (if any), for each of the n STATEs listed
+above respectively, and the character displayed at each bit position
+indicates:
 
    '.'  acquired while irqs disabled and not in irq context
    '-'  acquired in irq context
    '+'  acquired with irqs enabled
    '?'  acquired in irq context with irqs enabled.
 
-Unused mutexes cannot be part of the cause of an error.
+The bits are illustrated with an example:
+
+    (&sio_locks[i].lock){-.-.}, at: [<c02867fd>] mutex_lock+0x21/0x24
+                         ||||
+                         ||| \-> softirq disabled and not in softirq context
+                         || \--> acquired in softirq context
+                         | \---> hardirq disabled and not in hardirq context
+                          \----> acquired in hardirq context
+
+
+For a given STATE, whether the lock was ever acquired in that STATE context
+and whether that STATE is enabled yields four possible cases, as shown in
+the table below. The bit character indicates exactly which of these cases
+applies to the lock as of the reporting time.
+
+   -------------------------------------------------
+  |              | enabled in irq | disabled in irq |
+   -------------------------------------------------
+  | ever in irq  |        ?       |        -        |
+   -------------------------------------------------
+  | never in irq |        +       |        .        |
+   -------------------------------------------------
+
+The character '-' implies that irqs are disabled, because otherwise the
+character '?' would have been shown instead. A similar deduction applies
+to '+' as well.
+
+Unused locks (e.g., mutexes) cannot be part of the cause of an error.
 
 
 Single-lock state rules:
-- 
1.8.3.1
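
To make the table above easier to apply when reading a report, here is a
small userspace sketch, not part of the patch and with a made-up helper name,
that decodes a usage string such as the {-.-.} in the example:

	#include <stdio.h>

	/*
	 * Per the table: '?' ever in irq and irqs enabled, '-' ever in irq
	 * and irqs disabled, '+' never in irq and irqs enabled, '.' never in
	 * irq and irqs disabled.
	 */
	static void decode_usage_char(char c)
	{
		int ever_in_irq = (c == '?' || c == '-');
		int irqs_enabled = (c == '?' || c == '+');

		printf("'%c': %s in irq context, irqs %s\n", c,
		       ever_in_irq ? "ever" : "never",
		       irqs_enabled ? "enabled" : "disabled");
	}

	int main(void)
	{
		/* positions: hardirq, hardirq-read, softirq, softirq-read */
		const char usage[] = "-.-.";
		int i;

		for (i = 0; usage[i]; i++)
			decode_usage_char(usage[i]);
		return 0;
	}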



* [PATCH 3/8] locking/lockdep: Adjust lock usage bit character checks
  2019-02-26 10:03 [PATCH 0/8] locking/lockdep: Add comments and make some code adjustments Yuyang Du
  2019-02-26 10:03 ` [PATCH 1/8] locking/lockdep: Change all print_*() return type to void Yuyang Du
  2019-02-26 10:03 ` [PATCH 2/8] locking/lockdep: Add description and explanation in lockdep design doc Yuyang Du
@ 2019-02-26 10:03 ` Yuyang Du
  2019-02-26 10:03 ` [PATCH 4/8] locking/lockdep: Remove useless conditional macro Yuyang Du
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Yuyang Du @ 2019-02-26 10:03 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, Yuyang Du

The lock usage bit characters are defined and determined in a somewhat
tricky way. Use a macro and add some explanation to make the logic clearer,
then adjust the usage checks, which optimizes the code a bit.

No functional change.
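
For reference, a standalone userspace sketch mirroring the reworked logic in
the hunk below, with class->usage_mask and lock_flag() reduced to a plain
integer mask; the bit numbers follow the per-STATE layout generated in
kernel/locking/lockdep_internals.h:

	#include <stdio.h>

	/*
	 * Per-STATE usage bits, 4 per STATE:
	 *   bit + 0: LOCK_USED_IN_<STATE>        ever held in STATE context
	 *   bit + 1: LOCK_USED_IN_<STATE>_READ
	 *   bit + 2: LOCK_ENABLED_<STATE>        ever held with STATE enabled
	 *   bit + 3: LOCK_ENABLED_<STATE>_READ
	 * so adding 2 steps from a USED_IN bit to its matching ENABLED bit,
	 * which is what LOCK_USAGE_TO_ENABLED_STEP names.
	 */
	#define LOCK_USAGE_TO_ENABLED_STEP 2

	static char get_usage_char(unsigned long usage_mask, int bit)
	{
		char c = '.';

		if (usage_mask & (1UL << (bit + LOCK_USAGE_TO_ENABLED_STEP))) {
			c = '+';
			if (usage_mask & (1UL << bit))
				c = '?';
		} else if (usage_mask & (1UL << bit)) {
			c = '-';
		}
		return c;
	}

	int main(void)
	{
		/* USED_IN_HARDIRQ (bit 0) and ENABLED_HARDIRQ (bit 2) set */
		printf("%c\n", get_usage_char((1UL << 0) | (1UL << 2), 0)); /* '?' */
		return 0;
	}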

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
 kernel/locking/lockdep.c           | 21 ++++++++++++++++-----
 kernel/locking/lockdep_internals.h |  1 +
 2 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 41eab3d..8b8495e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -471,15 +471,26 @@ static inline unsigned long lock_flag(enum lock_usage_bit bit)
 
 static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
 {
+	/*
+	 * The usage character defaults to '.' (i.e., irqs disabled and not in
+	 * irq context), which is the safest usage category.
+	 */
 	char c = '.';
 
-	if (class->usage_mask & lock_flag(bit + 2))
+	/*
+	 * The order of the following usage checks matters, which will
+	 * result in the outcome character as follows:
+	 *
+	 * - '+': irq is enabled and not in irq context
+	 * - '-': in irq context and irq is disabled
+	 * - '?': in irq context and irq is enabled
+	 */
+	if (class->usage_mask & lock_flag(bit + LOCK_USAGE_TO_ENABLED_STEP)) {
 		c = '+';
-	if (class->usage_mask & lock_flag(bit)) {
-		c = '-';
-		if (class->usage_mask & lock_flag(bit + 2))
+		if (class->usage_mask & lock_flag(bit))
 			c = '?';
-	}
+	} else if (class->usage_mask & lock_flag(bit))
+		c = '-';
 
 	return c;
 }
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 2ebb9d0..2815e85 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -25,6 +25,7 @@ enum lock_usage_bit {
 #define LOCK_USAGE_READ_MASK 1
 #define LOCK_USAGE_DIR_MASK  2
 #define LOCK_USAGE_STATE_MASK (~(LOCK_USAGE_READ_MASK | LOCK_USAGE_DIR_MASK))
+#define LOCK_USAGE_TO_ENABLED_STEP 2
 
 /*
  * Usage-state bitmasks:
-- 
1.8.3.1



* [PATCH 4/8] locking/lockdep: Remove useless conditional macro
  2019-02-26 10:03 [PATCH 0/8] locking/lockdep: Add comments and make some code adjustments Yuyang Du
                   ` (2 preceding siblings ...)
  2019-02-26 10:03 ` [PATCH 3/8] locking/lockdep: Adjust lock usage bit character checks Yuyang Du
@ 2019-02-26 10:03 ` Yuyang Du
  2019-02-26 10:03 ` [PATCH 5/8] locking/lockdep: Adjust indents for some lines Yuyang Du
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Yuyang Du @ 2019-02-26 10:03 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, Yuyang Du

Since defined(CONFIG_PROVE_LOCKING) is only tested inside the scope of
#ifdef CONFIG_PROVE_LOCKING, the check is always true and can be removed.
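
A minimal illustration of the redundancy (just the nesting pattern, not the
actual lockdep.c context):

	#ifdef CONFIG_PROVE_LOCKING

	/* Here CONFIG_PROVE_LOCKING is known to be defined, so ... */
	#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
	/* ... */
	#endif

	/* ... is equivalent to the simpler ... */
	#if defined(CONFIG_TRACE_IRQFLAGS)
	/* ... */
	#endif

	#endif /* CONFIG_PROVE_LOCKING */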

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
 kernel/locking/lockdep.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8b8495e..0610c0b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1331,7 +1331,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	return result;
 }
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#if defined(CONFIG_TRACE_IRQFLAGS)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
@@ -1709,7 +1709,7 @@ static inline void inc_chains(void)
 	nr_process_chains++;
 }
 
-#endif
+#endif /* CONFIG_TRACE_IRQFLAGS */
 
 static void print_deadlock_scenario(struct held_lock *nxt,
 				    struct held_lock *prv)
@@ -2363,7 +2363,7 @@ static inline int validate_chain(struct task_struct *curr,
 {
 	return 1;
 }
-#endif
+#endif /* CONFIG_PROVE_LOCKING */
 
 /*
  * We are building curr_chain_key incrementally, so double-check
-- 
1.8.3.1



* [PATCH 5/8] locking/lockdep: Adjust indents for some lines
  2019-02-26 10:03 [PATCH 0/8] locking/lockdep: Add comments and make some code adjustments Yuyang Du
                   ` (3 preceding siblings ...)
  2019-02-26 10:03 ` [PATCH 4/8] locking/lockdep: Remove useless conditional macro Yuyang Du
@ 2019-02-26 10:03 ` Yuyang Du
  2019-02-26 10:03 ` [PATCH 6/8] locking/lockdep: Print right depth for chain key collision Yuyang Du
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Yuyang Du @ 2019-02-26 10:03 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, Yuyang Du

Being somewhat paranoid about style, adjust the indentation so that
continued function-argument lines are aligned.

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
 kernel/locking/lockdep.c | 59 +++++++++++++++++++++++-------------------------
 1 file changed, 28 insertions(+), 31 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0610c0b..55270938 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1067,19 +1067,17 @@ static int __bfs(struct lock_list *source_entry,
 	return ret;
 }
 
-static inline int __bfs_forwards(struct lock_list *src_entry,
-			void *data,
-			int (*match)(struct lock_list *entry, void *data),
-			struct lock_list **target_entry)
+static inline int __bfs_forwards(struct lock_list *src_entry, void *data,
+				 int (*match)(struct lock_list *entry, void *data),
+				 struct lock_list **target_entry)
 {
 	return __bfs(src_entry, data, match, target_entry, 1);
 
 }
 
-static inline int __bfs_backwards(struct lock_list *src_entry,
-			void *data,
-			int (*match)(struct lock_list *entry, void *data),
-			struct lock_list **target_entry)
+static inline int __bfs_backwards(struct lock_list *src_entry, void *data,
+				  int (*match)(struct lock_list *entry, void *data),
+				  struct lock_list **target_entry)
 {
 	return __bfs(src_entry, data, match, target_entry, 0);
 
@@ -1161,8 +1159,8 @@ static void print_circular_lock_scenario(struct held_lock *src,
  */
 static noinline void
 print_circular_bug_header(struct lock_list *entry, unsigned int depth,
-			struct held_lock *check_src,
-			struct held_lock *check_tgt)
+			  struct held_lock *check_src,
+			  struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 
@@ -1307,7 +1305,7 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
  */
 static noinline int
 check_noncircular(struct lock_list *root, struct lock_class *target,
-		struct lock_list **target_entry)
+		  struct lock_list **target_entry)
 {
 	int result;
 
@@ -1357,7 +1355,7 @@ static inline int usage_match(struct lock_list *entry, void *bit)
  */
 static int
 find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
-			struct lock_list **target_entry)
+		    struct lock_list **target_entry)
 {
 	int result;
 
@@ -1380,7 +1378,7 @@ static inline int usage_match(struct lock_list *entry, void *bit)
  */
 static int
 find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
-			struct lock_list **target_entry)
+		     struct lock_list **target_entry)
 {
 	int result;
 
@@ -1672,7 +1670,7 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
 
 static int
 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-		struct held_lock *next)
+		   struct held_lock *next)
 {
 #define LOCKDEP_STATE(__STATE)						\
 	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
@@ -2028,7 +2026,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
  * Returns the index of the first held_lock of the current chain
  */
 static inline int get_first_held_lock(struct task_struct *curr,
-					struct held_lock *hlock)
+				      struct held_lock *hlock)
 {
 	int i;
 	struct held_lock *hlock_curr;
@@ -2123,8 +2121,8 @@ static void print_collision(struct task_struct *curr,
  * Returns: 0 not passed, 1 passed
  */
 static int check_no_collision(struct task_struct *curr,
-			struct held_lock *hlock,
-			struct lock_chain *chain)
+			      struct held_lock *hlock,
+			      struct lock_chain *chain)
 {
 #ifdef CONFIG_DEBUG_LOCKDEP
 	int i, j, id;
@@ -2301,7 +2299,7 @@ static inline int lookup_chain_cache_add(struct task_struct *curr,
 }
 
 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-		struct held_lock *hlock, int chain_head, u64 chain_key)
+			  struct held_lock *hlock, int chain_head, u64 chain_key)
 {
 	/*
 	 * Trylock needs to maintain the stack of held locks, but it
@@ -2686,7 +2684,7 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
 	 * states.
 	 */
 	if ((!read || !dir || STRICT_READ_CHECKS) &&
-			!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
+	    !usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
 		return 0;
 
 	/*
@@ -2697,8 +2695,8 @@ typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
 			return 0;
 
 		if (STRICT_READ_CHECKS &&
-			!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
-				state_name(new_bit + LOCK_USAGE_READ_MASK)))
+		    !usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
+			   state_name(new_bit + LOCK_USAGE_READ_MASK)))
 			return 0;
 	}
 
@@ -2964,7 +2962,7 @@ static inline unsigned int task_irq_context(struct task_struct *task)
 }
 
 static int separate_irq_context(struct task_struct *curr,
-		struct held_lock *hlock)
+				struct held_lock *hlock)
 {
 	unsigned int depth = curr->lockdep_depth;
 
@@ -2990,14 +2988,14 @@ static int separate_irq_context(struct task_struct *curr,
 
 static inline
 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
-		enum lock_usage_bit new_bit)
+		  enum lock_usage_bit new_bit)
 {
 	WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
 	return 1;
 }
 
 static inline int mark_irqflags(struct task_struct *curr,
-		struct held_lock *hlock)
+				struct held_lock *hlock)
 {
 	return 1;
 }
@@ -3008,7 +3006,7 @@ static inline unsigned int task_irq_context(struct task_struct *task)
 }
 
 static inline int separate_irq_context(struct task_struct *curr,
-		struct held_lock *hlock)
+				       struct held_lock *hlock)
 {
 	return 0;
 }
@@ -3019,7 +3017,7 @@ static inline int separate_irq_context(struct task_struct *curr,
  * Mark a lock with a usage bit, and validate the state transition:
  */
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
-			     enum lock_usage_bit new_bit)
+		     enum lock_usage_bit new_bit)
 {
 	unsigned int new_mask = 1 << new_bit, ret = 1;
 
@@ -3458,7 +3456,7 @@ static struct held_lock *find_held_lock(struct task_struct *curr,
 }
 
 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
-			      int idx)
+				int idx)
 {
 	struct held_lock *hlock;
 
@@ -3832,8 +3830,8 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
  * and also avoid lockdep recursion:
  */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-			  int trylock, int read, int check,
-			  struct lockdep_map *nest_lock, unsigned long ip)
+		  int trylock, int read, int check,
+		  struct lockdep_map *nest_lock, unsigned long ip)
 {
 	unsigned long flags;
 
@@ -3852,8 +3850,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 }
 EXPORT_SYMBOL_GPL(lock_acquire);
 
-void lock_release(struct lockdep_map *lock, int nested,
-			  unsigned long ip)
+void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
 	unsigned long flags;
 
-- 
1.8.3.1



* [PATCH 6/8] locking/lockdep: Print right depth for chain key collision
  2019-02-26 10:03 [PATCH 0/8] locking/lockdep: Add comments and make some code adjustments Yuyang Du
                   ` (4 preceding siblings ...)
  2019-02-26 10:03 ` [PATCH 5/8] locking/lockdep: Adjust indents for some lines Yuyang Du
@ 2019-02-26 10:03 ` Yuyang Du
  2019-02-26 10:03 ` [PATCH 7/8] locking/lockdep: Update obsolete struct field description Yuyang Du
  2019-02-26 10:03 ` [PATCH 8/8] locking/lockdep: Consider zapped locks when printing lock chains in /proc Yuyang Du
  7 siblings, 0 replies; 9+ messages in thread
From: Yuyang Du @ 2019-02-26 10:03 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, Yuyang Du

Chains are separated by irq context, so when printing a chain the reported
depth should be consistent with that context.
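
As a worked example with hypothetical numbers (not from a real report):

	#include <stdio.h>

	int main(void)
	{
		int lockdep_depth = 5;	/* held_locks[0..4] are currently held */
		int first = 3;		/* get_first_held_lock(): the current irq
					 * context starts at held_locks[3] */

		/*
		 * The chain covers held_locks[3], held_locks[4] and hlock_next
		 * itself, so print depth - first + 1 instead of depth + 1.
		 */
		printf("depth: %d\n", lockdep_depth - first + 1);	/* 3 */
		return 0;
	}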

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
 kernel/locking/lockdep.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 55270938..ec4f0c6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2061,10 +2061,11 @@ static u64 print_chain_key_iteration(int class_idx, u64 chain_key)
 	struct held_lock *hlock;
 	u64 chain_key = 0;
 	int depth = curr->lockdep_depth;
-	int i;
+	int i = get_first_held_lock(curr, hlock_next);
 
-	printk("depth: %u\n", depth + 1);
-	for (i = get_first_held_lock(curr, hlock_next); i < depth; i++) {
+	printk("depth: %u (irq_context %u)\n", depth - i + 1,
+		hlock_next->irq_context);
+	for (; i < depth; i++) {
 		hlock = curr->held_locks + i;
 		chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
 
-- 
1.8.3.1



* [PATCH 7/8] locking/lockdep: Update obsolete struct field description
  2019-02-26 10:03 [PATCH 0/8] locking/lockdep: Add comments and make some code adjustments Yuyang Du
                   ` (5 preceding siblings ...)
  2019-02-26 10:03 ` [PATCH 6/8] locking/lockdep: Print right depth for chain key collision Yuyang Du
@ 2019-02-26 10:03 ` Yuyang Du
  2019-02-26 10:03 ` [PATCH 8/8] locking/lockdep: Consider zapped locks when printing lock chains in /proc Yuyang Du
  7 siblings, 0 replies; 9+ messages in thread
From: Yuyang Du @ 2019-02-26 10:03 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, Yuyang Du

The lock_chain struct definition has an outdated comment, so update it.
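
For illustration, a standalone replica of the bitfield layout being
documented; the struct name is made up, and the exact size is
implementation-defined, but the three fields are sized to pack into one
32-bit word:

	#include <stdio.h>

	struct chain_layout {
		unsigned int irq_context :  2;	/* same meaning as held_lock's irq_context */
		unsigned int depth       :  6;	/* number of held locks in the chain */
		unsigned int base        : 24;	/* first index of this chain in chain_hlocks[] */
	};

	int main(void)
	{
		/* 2 + 6 + 24 = 32 bits */
		printf("%zu\n", sizeof(struct chain_layout));	/* typically 4 */
		return 0;
	}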

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
 include/linux/lockdep.h | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c5335df..128f276 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -192,7 +192,11 @@ struct lock_list {
  * We record lock dependency chains, so that we can cache them:
  */
 struct lock_chain {
-	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
+	/*
+	 * irq_context: the same as irq_context in held_lock below
+	 * depth:       the number of held locks in this chain
+	 * base:        the index in chain_hlocks for this chain
+	 */
 	unsigned int			irq_context :  2,
 					depth       :  6,
 					base	    : 24;
-- 
1.8.3.1



* [PATCH 8/8] locking/lockdep: Consider zapped locks when printing lock chains in /proc
  2019-02-26 10:03 [PATCH 0/8] locking/lockdep: Add comments and make some code adjustments Yuyang Du
                   ` (6 preceding siblings ...)
  2019-02-26 10:03 ` [PATCH 7/8] locking/lockdep: Update obsolete struct field description Yuyang Du
@ 2019-02-26 10:03 ` Yuyang Du
  7 siblings, 0 replies; 9+ messages in thread
From: Yuyang Du @ 2019-02-26 10:03 UTC (permalink / raw)
  To: peterz, mingo; +Cc: linux-kernel, Yuyang Du

It is not rare to spot empty lock chains in /proc/lockdep_chains, which show
only the irq_context but none of the locks; this is useless and confusing.

This happens because the chained lock classes have been freed (zapped), so
take that into account when printing the lock chains.
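
For illustration, a hypothetical /proc/lockdep_chains entry with this patch
applied, for a chain of depth 3 in which two classes have been zapped (the
class name and key are made up):

	irq_context: 0
	[000000001a2b3c4d] &type->s_umount_key
	(chain has 2 zapped classes)

A chain whose classes are all zapped no longer produces a lone irq_context
line.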

Signed-off-by: Yuyang Du <duyuyang@gmail.com>
---
 kernel/locking/lockdep_proc.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 3d31f9b..47a6f4f 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -127,7 +127,7 @@ static int lc_show(struct seq_file *m, void *v)
 {
 	struct lock_chain *chain = v;
 	struct lock_class *class;
-	int i;
+	int i, nr_locks = 0;
 
 	if (v == SEQ_START_TOKEN) {
 		if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
@@ -136,18 +136,28 @@ static int lc_show(struct seq_file *m, void *v)
 		return 0;
 	}
 
-	seq_printf(m, "irq_context: %d\n", chain->irq_context);
-
 	for (i = 0; i < chain->depth; i++) {
 		class = lock_chain_get_class(chain, i);
+		/*
+		 * Is this lock class zapped?
+		 */
 		if (!class->key)
 			continue;
 
+		if (!nr_locks++)
+			seq_printf(m, "irq_context: %d\n", chain->irq_context);
+
 		seq_printf(m, "[%p] ", class->key);
 		print_name(m, class);
 		seq_puts(m, "\n");
 	}
-	seq_puts(m, "\n");
+
+	if (nr_locks) {
+		if (nr_locks != chain->depth)
+			seq_printf(m, "(chain has %d zapped classes)\n",
+				   chain->depth - nr_locks);
+		seq_puts(m, "\n");
+	}
 
 	return 0;
 }
-- 
1.8.3.1


