* [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo

Hi, Peter

Currently lockdep uses a recursive DFS (depth-first search) algorithm
to search for a target when checking for lock circles
(check_noncircular()), irq-safe -> irq-unsafe dependencies
(check_irq_usage()) and irq inversion while adding a new lock
dependency. This patch series replaces the current DFS with BFS
(breadth-first search), based on the following considerations:

    1) No loss of efficiency: for both DFS and BFS the running time
    is O(V+E) (where V is the vertex count and E is the edge count
    of the graph);

    2) BFS can easily be implemented with a circular queue, and it
    consumes much less kernel stack space than DFS, because DFS is
    implemented by recursion;

    3) The shortest path can be obtained by BFS when the target is
    found, but not by DFS. With the shortest path, we can shorten
    the reported lock dependency chain and make lock problems easier
    to troubleshoot than before (see the sketch below).
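
As an illustration of points 1 and 2, here is a minimal userspace
sketch, not the kernel code: the graph, node count and queue size
below are invented for the demo. It runs BFS over an adjacency list
with a fixed-size circular queue, so the only per-search storage is
the queue and a visited array, with no recursion:

#include <stdio.h>

#define NR_NODES	8
#define QUEUE_SIZE	16	/* must be able to hold NR_NODES entries */

/* made-up demo graph: 0->1, 0->2, 1->3, 2->3, 3->4 */
static const int adj[NR_NODES][NR_NODES] = {
	[0] = { [1] = 1, [2] = 1 },
	[1] = { [3] = 1 },
	[2] = { [3] = 1 },
	[3] = { [4] = 1 },
};

struct circular_queue {
	int element[QUEUE_SIZE];
	unsigned int front, rear;
};

static void cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
}

static int cq_enqueue(struct circular_queue *cq, int elem)
{
	unsigned int next = (cq->rear + 1) % QUEUE_SIZE;

	if (next == cq->front)
		return -1;			/* queue full */
	cq->element[cq->rear] = elem;
	cq->rear = next;
	return 0;
}

static int cq_dequeue(struct circular_queue *cq, int *elem)
{
	if (cq->front == cq->rear)
		return -1;			/* queue empty */
	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) % QUEUE_SIZE;
	return 0;
}

/*
 * BFS from @src to @dst, O(V+E), no recursion; parent[] records the
 * BFS tree so the shortest path can be read back afterwards.
 */
static int bfs(int src, int dst, int parent[NR_NODES])
{
	struct circular_queue cq;
	int seen[NR_NODES] = { 0 };
	int node, next;

	cq_init(&cq);
	seen[src] = 1;
	parent[src] = -1;
	cq_enqueue(&cq, src);

	while (!cq_dequeue(&cq, &node)) {
		if (node == dst)
			return 0;
		for (next = 0; next < NR_NODES; next++) {
			if (adj[node][next] && !seen[next]) {
				seen[next] = 1;
				parent[next] = node;
				cq_enqueue(&cq, next);
			}
		}
	}
	return -1;
}

int main(void)
{
	int parent[NR_NODES], n;

	if (bfs(0, 4, parent) == 0)
		for (n = 4; n != -1; n = parent[n])
			printf("%d ", n);	/* prints: 4 3 1 0 */
	printf("\n");
	return 0;
}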





* [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

Currently lockdep prints the first circle detected, if one exists, when
acquiring a new (next) lock.

This patch prints the shortest path from the next lock to be acquired
to the previously held lock if a circle is found.

The patch still uses the current method to check for a circle, and once
a circle is found, a breadth-first search algorithm is used to compute
the shortest path from the next lock to the previous lock in the
forward lock dependency graph.

Printing the shortest path shortens the dependency chain and makes
troubleshooting possible circular locking problems easier.
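
The lock_list hunk below reuses bit 0 of the new parent pointer as the
"accessed in BFS" flag. Here is a minimal userspace sketch of that
pointer-tagging trick; the helper names mirror the patch, main() is
invented for the demo, and OR-ing the flag in is equivalent (for
word-aligned structures) to the void-pointer addition the patch uses:

#include <stdio.h>

#define LOCK_ACCESSED		1UL
#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)

struct lock_list {
	struct lock_list *parent;
};

/* record the BFS parent and mark the node visited in one store */
static void mark_lock_accessed(struct lock_list *lock,
				struct lock_list *parent)
{
	lock->parent = (struct lock_list *)
		((unsigned long)parent | LOCK_ACCESSED);
}

static unsigned long lock_accessed(struct lock_list *lock)
{
	return (unsigned long)lock->parent & LOCK_ACCESSED;
}

static struct lock_list *get_lock_parent(struct lock_list *child)
{
	return (struct lock_list *)
		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
}

int main(void)
{
	struct lock_list a = { .parent = NULL };
	struct lock_list b = { .parent = NULL };

	printf("before: accessed=%lu\n", lock_accessed(&b));
	mark_lock_accessed(&b, &a);
	printf("after:  accessed=%lu parent ok=%d\n",
	       lock_accessed(&b), get_lock_parent(&b) == &a);
	return 0;
}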

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 include/linux/lockdep.h    |    6 ++
 kernel/lockdep.c           |  115 ++++++++++++++++++++++++++++++++++++++++----
 kernel/lockdep_internals.h |   83 +++++++++++++++++++++++++++++++
 3 files changed, 195 insertions(+), 9 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b5..9ec026f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
 	struct lock_class		*class;
 	struct stack_trace		trace;
 	int				distance;
+
+	/*The parent field is used to implement breadth-first search,and
+	 *the bit 0 is reused to indicate if the lock has been accessed
+	 *in BFS.
+	 */
+	struct lock_list		*parent;
 };
 
 /*
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef9..93dc70d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -897,6 +897,79 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
+static struct circular_queue  lock_cq;
+static int __search_shortest_path(struct lock_list *source_entry,
+				struct lock_class *target,
+				struct lock_list **target_entry,
+				int forward)
+{
+	struct lock_list *entry;
+	struct circular_queue *cq = &lock_cq;
+	int ret = 1;
+
+	__cq_init(cq);
+
+	mark_lock_accessed(source_entry, NULL);
+	if (source_entry->class == target) {
+		*target_entry = source_entry;
+		ret = 0;
+		goto exit;
+	}
+
+	__cq_enqueue(cq, (unsigned long)source_entry);
+
+	while (!__cq_empty(cq)) {
+		struct lock_list *lock;
+		struct list_head *head;
+
+		__cq_dequeue(cq, (unsigned long *)&lock);
+
+		if (!lock->class) {
+			ret = -2;
+			goto exit;
+		}
+
+		if (forward)
+			head = &lock->class->locks_after;
+		else
+			head = &lock->class->locks_before;
+
+		list_for_each_entry(entry, head, entry) {
+			if (!lock_accessed(entry)) {
+				mark_lock_accessed(entry, lock);
+				if (entry->class == target) {
+					*target_entry = entry;
+					ret = 0;
+					goto exit;
+				}
+
+				if (__cq_enqueue(cq, (unsigned long)entry)) {
+					ret = -1;
+					goto exit;
+				}
+			}
+		}
+	}
+exit:
+	return ret;
+}
+
+static inline int __search_forward_shortest_path(struct lock_list *src_entry,
+				struct lock_class *target,
+				struct lock_list **target_entry)
+{
+	return __search_shortest_path(src_entry, target, target_entry, 1);
+
+}
+
+static inline int __search_backward_shortest_path(struct lock_list *src_entry,
+				struct lock_class *target,
+				struct lock_list **target_entry)
+{
+	return __search_shortest_path(src_entry, target, target_entry, 0);
+
+}
+
 /*
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
@@ -934,7 +1007,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 {
 	struct task_struct *curr = current;
 
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (debug_locks_silent)
 		return 0;
 
 	printk("\n=======================================================\n");
@@ -954,19 +1027,41 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	return 0;
 }
 
-static noinline int print_circular_bug_tail(void)
+static noinline int print_circular_bug(void)
 {
 	struct task_struct *curr = current;
 	struct lock_list this;
+	struct lock_list *target;
+	struct lock_list *parent;
+	int result;
+	unsigned long depth;
 
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	this.class = hlock_class(check_source);
 	if (!save_trace(&this.trace))
 		return 0;
 
-	print_circular_bug_entry(&this, 0);
+	result = __search_forward_shortest_path(&this,
+						hlock_class(check_target),
+						&target);
+	if (result) {
+		printk("\n%s:search shortest path failed:%d\n", __func__,
+			result);
+		return 0;
+	}
+
+	depth = get_lock_depth(target);
+
+	print_circular_bug_header(target, depth);
+
+	parent = get_lock_parent(target);
+
+	while (parent) {
+		print_circular_bug_entry(parent, --depth);
+		parent = get_lock_parent(parent);
+	}
 
 	printk("\nother info that might help us debug this:\n\n");
 	lockdep_print_held_locks(curr);
@@ -1072,14 +1167,15 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	 */
 	list_for_each_entry(entry, &source->locks_after, entry) {
 		if (entry->class == hlock_class(check_target))
-			return print_circular_bug_header(entry, depth+1);
+			return 2;
 		debug_atomic_inc(&nr_cyclic_checks);
-		if (!check_noncircular(entry->class, depth+1))
-			return print_circular_bug_entry(entry, depth+1);
+		if (check_noncircular(entry->class, depth+1) == 2)
+			return 2;
 	}
 	return 1;
 }
 
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
@@ -1484,8 +1580,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
-	if (!(check_noncircular(hlock_class(next), 0)))
-		return print_circular_bug_tail();
+	if (check_noncircular(hlock_class(next), 0) == 2)
+		return print_circular_bug();
+
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 699a2ac..6f48d37 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -136,3 +136,86 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)		0
 #endif
+
+/* The circular_queue and helpers is used to implement the
+ * breadth-first search(BFS)algorithem, by which we can build
+ * the shortest path from the next lock to be acquired to the
+ * previous held lock if there is a circular between them.
+ * */
+#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
+struct circular_queue{
+	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
+	unsigned int  front, rear;
+};
+
+#define LOCK_ACCESSED 		1UL
+#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE)  == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1)%MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front)%MAX_CIRCULAR_QUE_SIZE;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+					struct lock_list *parent)
+{
+	lock->parent = (void *) parent + LOCK_ACCESSED;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	return (unsigned long)lock->parent & LOCK_ACCESSED;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return (struct lock_list *)
+		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
+}
+
+static inline unsigned long get_lock_depth(struct lock_list *child)
+{
+	unsigned long depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}
-- 
1.6.0.GIT



* [RESEND PATCH 02/11] kernel:lockdep:improve implementation of BFS
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

1) Replace the modulo operation (% MAX_CIRCULAR_QUE_SIZE) with a
bitwise AND (& (MAX_CIRCULAR_QUE_SIZE-1)), since MAX_CIRCULAR_QUE_SIZE
is defined as a power of 2;

2) Use a bitmap to mark whether a lock has been accessed in BFS, so
that the marks can be cleared quickly, because we may search the graph
many times.
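
Both points can be checked standalone. The sketch below is not kernel
code; BITS_TO_LONGS and the bit helpers are reimplemented for the
demo, and NR_ENTRIES is an arbitrary stand-in for MAX_LOCKDEP_ENTRIES:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define QUEUE_SIZE	4096UL			/* power of 2, as in the patch */
#define NR_ENTRIES	16384
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long accessed[BITS_TO_LONGS(NR_ENTRIES)];

static void demo_set_bit(unsigned long nr)
{
	accessed[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static int demo_test_bit(unsigned long nr)
{
	return !!(accessed[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG)));
}

int main(void)
{
	unsigned long x;

	/* 1) for power-of-2 sizes, masking gives the same result as modulo */
	for (x = 0; x < 3 * QUEUE_SIZE; x++)
		assert((x % QUEUE_SIZE) == (x & (QUEUE_SIZE - 1)));

	/* 2) mark entries during a search, then clear them all in one go */
	demo_set_bit(42);
	demo_set_bit(4097);
	assert(demo_test_bit(42) && demo_test_bit(4097));
	memset(accessed, 0, sizeof(accessed));	/* what bitmap_zero() does */
	assert(!demo_test_bit(42) && !demo_test_bit(4097));

	printf("ok\n");
	return 0;
}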

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 kernel/lockdep.c           |   23 ++++++++++++++++-------
 kernel/lockdep_internals.h |   35 +++++++++++++++++++++++------------
 2 files changed, 39 insertions(+), 19 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 93dc70d..5dcca26 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,7 +42,7 @@
 #include <linux/hash.h>
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
-
+#include <linux/bitops.h>
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
@@ -118,7 +118,7 @@ static inline int debug_locks_off_graph_unlock(void)
 static int lockdep_initialized;
 
 unsigned long nr_list_entries;
-static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -897,30 +897,38 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
+unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 static struct circular_queue  lock_cq;
+
 static int __search_shortest_path(struct lock_list *source_entry,
 				struct lock_class *target,
 				struct lock_list **target_entry,
 				int forward)
 {
 	struct lock_list *entry;
+	struct list_head *head;
 	struct circular_queue *cq = &lock_cq;
 	int ret = 1;
 
-	__cq_init(cq);
-
-	mark_lock_accessed(source_entry, NULL);
 	if (source_entry->class == target) {
 		*target_entry = source_entry;
 		ret = 0;
 		goto exit;
 	}
 
+	if (forward)
+		head = &source_entry->class->locks_after;
+	else
+		head = &source_entry->class->locks_before;
+
+	if (list_empty(head))
+		goto exit;
+
+	__cq_init(cq);
 	__cq_enqueue(cq, (unsigned long)source_entry);
 
 	while (!__cq_empty(cq)) {
 		struct lock_list *lock;
-		struct list_head *head;
 
 		__cq_dequeue(cq, (unsigned long *)&lock);
 
@@ -1040,6 +1048,7 @@ static noinline int print_circular_bug(void)
 		return 0;
 
 	this.class = hlock_class(check_source);
+	this.parent = NULL;
 	if (!save_trace(&this.trace))
 		return 0;
 
@@ -1580,10 +1589,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
+
 	if (check_noncircular(hlock_class(next), 0) == 2)
 		return print_circular_bug();
 
-
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
 
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 6f48d37..c2f6594 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -137,23 +137,28 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_read(ptr)		0
 #endif
 
+
+extern unsigned long nr_list_entries;
+extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+extern unsigned long bfs_accessed[];
+
+/*For good efficiency of modular, we use power of 2*/
+#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
+
 /* The circular_queue and helpers is used to implement the
  * breadth-first search(BFS)algorithem, by which we can build
  * the shortest path from the next lock to be acquired to the
  * previous held lock if there is a circular between them.
  * */
-#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
 struct circular_queue{
 	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
 	unsigned int  front, rear;
 };
 
-#define LOCK_ACCESSED 		1UL
-#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
-
 static inline void __cq_init(struct circular_queue *cq)
 {
 	cq->front = cq->rear = 0;
+	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
 }
 
 static inline int __cq_empty(struct circular_queue *cq)
@@ -163,7 +168,7 @@ static inline int __cq_empty(struct circular_queue *cq)
 
 static inline int __cq_full(struct circular_queue *cq)
 {
-	return ((cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE)  == cq->front;
+	return ((cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1))  == cq->front;
 }
 
 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
@@ -172,7 +177,7 @@ static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
 		return -1;
 
 	cq->element[cq->rear] = elem;
-	cq->rear = (cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE;
+	cq->rear = (cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
 	return 0;
 }
 
@@ -182,30 +187,36 @@ static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
 		return -1;
 
 	*elem = cq->element[cq->front];
-	cq->front = (cq->front + 1)%MAX_CIRCULAR_QUE_SIZE;
+	cq->front = (cq->front + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
 	return 0;
 }
 
 static inline int __cq_get_elem_count(struct circular_queue *cq)
 {
-	return (cq->rear - cq->front)%MAX_CIRCULAR_QUE_SIZE;
+	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
 }
 
 static inline void mark_lock_accessed(struct lock_list *lock,
 					struct lock_list *parent)
 {
-	lock->parent = (void *) parent + LOCK_ACCESSED;
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	lock->parent = parent;
+	set_bit(nr, bfs_accessed);
 }
 
 static inline unsigned long lock_accessed(struct lock_list *lock)
 {
-	return (unsigned long)lock->parent & LOCK_ACCESSED;
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	return test_bit(nr, bfs_accessed);
 }
 
 static inline struct lock_list *get_lock_parent(struct lock_list *child)
 {
-	return (struct lock_list *)
-		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
+	return child->parent;
 }
 
 static inline unsigned long get_lock_depth(struct lock_list *child)
-- 
1.6.0.GIT



* [RESEND PATCH 03/11] kernel:lockdep: introduce match function to BFS
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

1) Introduce a match() callback to BFS so that it can be used to
match different patterns;

2) Rename some functions to make their names more suitable.
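
The match() parameter turns __bfs() into a generic walker driven by a
predicate. A minimal userspace sketch of the pattern, with a linear
scan standing in for the real BFS and an invented entry layout:

#include <stdio.h>

/* made-up stand-in for struct lock_list for this demo */
struct entry {
	int class_id;
	unsigned int usage_mask;
};

/* one generic walker, parameterized by a predicate, as __bfs() is */
static struct entry *search(struct entry *entries, int n,
			int (*match)(struct entry *entry, void *data),
			void *data)
{
	int i;

	for (i = 0; i < n; i++)
		if (match(&entries[i], data))
			return &entries[i];
	return NULL;
}

/* predicate 1: exact class match, like class_equal() in this patch */
static int class_equal(struct entry *entry, void *data)
{
	return entry->class_id == *(int *)data;
}

/* predicate 2: usage bit set, like usage_match() in a later patch */
static int usage_match(struct entry *entry, void *data)
{
	return entry->usage_mask & (1U << (unsigned long)data);
}

int main(void)
{
	struct entry graph[] = {
		{ .class_id = 1, .usage_mask = 0x0 },
		{ .class_id = 2, .usage_mask = 0x4 },
	};
	int target = 2;

	printf("by class: id=%d\n",
	       search(graph, 2, class_equal, &target)->class_id);
	printf("by usage: id=%d\n",
	       search(graph, 2, usage_match, (void *)2UL)->class_id);
	return 0;
}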

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 kernel/lockdep.c |   43 ++++++++++++++++++++++++++-----------------
 1 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5dcca26..ce6d09e 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -900,17 +900,18 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 static struct circular_queue  lock_cq;
 
-static int __search_shortest_path(struct lock_list *source_entry,
-				struct lock_class *target,
-				struct lock_list **target_entry,
-				int forward)
+static int __bfs(struct lock_list *source_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry,
+			int forward)
 {
 	struct lock_list *entry;
 	struct list_head *head;
 	struct circular_queue *cq = &lock_cq;
 	int ret = 1;
 
-	if (source_entry->class == target) {
+	if (match(source_entry, data)) {
 		*target_entry = source_entry;
 		ret = 0;
 		goto exit;
@@ -945,7 +946,7 @@ static int __search_shortest_path(struct lock_list *source_entry,
 		list_for_each_entry(entry, head, entry) {
 			if (!lock_accessed(entry)) {
 				mark_lock_accessed(entry, lock);
-				if (entry->class == target) {
+				if (match(entry, data)) {
 					*target_entry = entry;
 					ret = 0;
 					goto exit;
@@ -962,19 +963,21 @@ exit:
 	return ret;
 }
 
-static inline int __search_forward_shortest_path(struct lock_list *src_entry,
-				struct lock_class *target,
-				struct lock_list **target_entry)
+static inline int __bfs_forward(struct lock_list *src_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry)
 {
-	return __search_shortest_path(src_entry, target, target_entry, 1);
+	return __bfs(src_entry, data, match, target_entry, 1);
 
 }
 
-static inline int __search_backward_shortest_path(struct lock_list *src_entry,
-				struct lock_class *target,
-				struct lock_list **target_entry)
+static inline int __bfs_backward(struct lock_list *src_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry)
 {
-	return __search_shortest_path(src_entry, target, target_entry, 0);
+	return __bfs(src_entry, data, match, target_entry, 0);
 
 }
 
@@ -1035,6 +1038,11 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	return 0;
 }
 
+static inline int class_equal(struct lock_list *entry, void *data)
+{
+	return entry->class == data;
+}
+
 static noinline int print_circular_bug(void)
 {
 	struct task_struct *curr = current;
@@ -1052,9 +1060,10 @@ static noinline int print_circular_bug(void)
 	if (!save_trace(&this.trace))
 		return 0;
 
-	result = __search_forward_shortest_path(&this,
-						hlock_class(check_target),
-						&target);
+	result = __bfs_forward(&this,
+			hlock_class(check_target),
+			class_equal,
+			&target);
 	if (result) {
 		printk("\n%s:search shortest path failed:%d\n", __func__,
 			result);
-- 
1.6.0.GIT



* [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() by BFS
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

This patch uses BFS to implement check_noncircular() and
prints the shortest circle generated, if one exists.
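
The conversion also settles the return convention used from here on:
0 means the target was found (a circle), 1 means the search finished
without a match, and negative values signal internal BFS errors. A
small sketch of how the caller dispatches on that convention, with
check_noncircular() stubbed out and the lock arguments reduced to
integers for the demo:

#include <stdio.h>

/*
 * Stub standing in for the real BFS-based check: return 0 if @prev is
 * reachable from @next in the forward graph (adding prev -> next would
 * then close a circle), 1 if it is not, <0 on internal error.
 */
static int check_noncircular(int next, int prev)
{
	return (next == 10 && prev == 20) ? 0 : 1;
}

static int check_prev_add(int prev, int next)
{
	int ret = check_noncircular(next, prev);

	if (!ret) {
		printf("circle found: print the shortest path\n");
		return 0;
	}
	if (ret < 0) {
		printf("lockdep bfs error:%d\n", ret);
		return 0;
	}
	return 1;		/* no circle: dependency may be added */
}

int main(void)
{
	printf("-> %d\n", check_prev_add(20, 10));	/* circle case */
	printf("-> %d\n", check_prev_add(1, 2));	/* safe case */
	return 0;
}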

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 kernel/lockdep.c |   89 ++++++++++++++++++++++-------------------------------
 1 files changed, 37 insertions(+), 52 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ce6d09e..f740088 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -985,12 +985,7 @@ static inline int __bfs_backward(struct lock_list *src_entry,
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
  * checking.
- *
- * (to keep the stackframe of the recursive functions small we
- *  use these global variables, and we also mark various helper
- *  functions as noinline.)
  */
-static struct held_lock *check_source, *check_target;
 
 /*
  * Print a dependency chain entry (this is only done when a deadlock
@@ -1014,7 +1009,9 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
  * header first:
  */
 static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth)
+print_circular_bug_header(struct lock_list *entry, unsigned int depth,
+			struct held_lock *check_src,
+			struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 
@@ -1027,9 +1024,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	printk(  "-------------------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
-	print_lock(check_source);
+	print_lock(check_src);
 	printk("\nbut task is already holding lock:\n");
-	print_lock(check_target);
+	print_lock(check_tgt);
 	printk("\nwhich lock already depends on the new lock.\n\n");
 	printk("\nthe existing dependency chain (in reverse order) is:\n");
 
@@ -1043,36 +1040,24 @@ static inline int class_equal(struct lock_list *entry, void *data)
 	return entry->class == data;
 }
 
-static noinline int print_circular_bug(void)
+static noinline int print_circular_bug(struct lock_list *this,
+				struct lock_list *target,
+				struct held_lock *check_src,
+				struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
-	struct lock_list this;
-	struct lock_list *target;
 	struct lock_list *parent;
-	int result;
 	unsigned long depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	this.class = hlock_class(check_source);
-	this.parent = NULL;
-	if (!save_trace(&this.trace))
+	if (!save_trace(&this->trace))
 		return 0;
 
-	result = __bfs_forward(&this,
-			hlock_class(check_target),
-			class_equal,
-			&target);
-	if (result) {
-		printk("\n%s:search shortest path failed:%d\n", __func__,
-			result);
-		return 0;
-	}
-
 	depth = get_lock_depth(target);
 
-	print_circular_bug_header(target, depth);
+	print_circular_bug_header(target, depth, check_src, check_tgt);
 
 	parent = get_lock_parent(target);
 
@@ -1090,6 +1075,16 @@ static noinline int print_circular_bug(void)
 	return 0;
 }
 
+static int noinline print_bfs_bug(int ret)
+{
+	if (!debug_locks_off_graph_unlock())
+		return 0;
+
+	WARN(1, "lockdep bfs error:%d\n", ret);
+
+	return 0;
+}
+
 #define RECURSION_LIMIT 40
 
 static int noinline print_infinite_recursion_bug(void)
@@ -1168,31 +1163,17 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
  * lead to <target>. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_class *source, unsigned int depth)
+check_noncircular(struct lock_list *root, struct lock_class *target,
+		struct lock_list **target_entry)
 {
-	struct lock_list *entry;
+	int result;
 
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
+	debug_atomic_inc(&nr_cyclic_checks);
 
-	debug_atomic_inc(&nr_cyclic_check_recursions);
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_after, entry) {
-		if (entry->class == hlock_class(check_target))
-			return 2;
-		debug_atomic_inc(&nr_cyclic_checks);
-		if (check_noncircular(entry->class, depth+1) == 2)
-			return 2;
-	}
-	return 1;
-}
+	result = __bfs_forward(root, target, class_equal, target_entry);
 
+	return result;
+}
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
@@ -1586,6 +1567,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 {
 	struct lock_list *entry;
 	int ret;
+	struct lock_list this;
+	struct lock_list *uninitialized_var(target_entry);
 
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
@@ -1596,11 +1579,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * We are using global variables to control the recursion, to
 	 * keep the stackframe size of the recursive functions low:
 	 */
-	check_source = next;
-	check_target = prev;
-
-	if (check_noncircular(hlock_class(next), 0) == 2)
-		return print_circular_bug();
+	this.class = hlock_class(next);
+	this.parent = NULL;
+	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+	if (unlikely(!ret))
+		return print_circular_bug(&this, target_entry, next, prev);
+	else if (unlikely(ret < 0))
+		return print_bfs_bug(ret);
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
-- 
1.6.0.GIT



* [RESEND PATCH 05/11] kernel:lockdep:implement find_usage_*wards by BFS
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

This patch uses BFS to implement find_usage_*wards(), which
were originally written using DFS.
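
The patch also factors the recurring "negative is a BFS bug, 1 is no
match" handling into the BFS_PROCESS_RET() macro added below. A
minimal standalone sketch of that macro; check_usage() here is an
invented caller that feeds it canned return values:

#include <stdio.h>

static int print_bfs_bug(int ret)
{
	printf("lockdep bfs error:%d\n", ret);
	return 0;
}

/*
 * do { ... } while (0) makes the multi-statement macro act as one
 * statement, so it expands safely even under an unbraced if/else.
 */
#define BFS_PROCESS_RET(ret)	do { \
					if ((ret) < 0) \
						return print_bfs_bug(ret); \
					if ((ret) == 1) \
						return 1; \
				} while (0)

/* invented caller: pretend @ret came back from __bfs_forwards() */
static int check_usage(int ret)
{
	BFS_PROCESS_RET(ret);	/* early return for error / no match */

	return 2;		/* ret == 0: a match was found, report it */
}

int main(void)
{
	printf("error case    -> %d\n", check_usage(-1));
	printf("no match case -> %d\n", check_usage(1));
	printf("match case    -> %d\n", check_usage(0));
	return 0;
}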

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 kernel/lockdep.c |  180 ++++++++++++++++++++++--------------------------------
 1 files changed, 72 insertions(+), 108 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f740088..94b2f1f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -963,7 +963,7 @@ exit:
 	return ret;
 }
 
-static inline int __bfs_forward(struct lock_list *src_entry,
+static inline int __bfs_forwards(struct lock_list *src_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
 			struct lock_list **target_entry)
@@ -972,7 +972,7 @@ static inline int __bfs_forward(struct lock_list *src_entry,
 
 }
 
-static inline int __bfs_backward(struct lock_list *src_entry,
+static inline int __bfs_backwards(struct lock_list *src_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
 			struct lock_list **target_entry)
@@ -1085,18 +1085,6 @@ static int noinline print_bfs_bug(int ret)
 	return 0;
 }
 
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
-{
-	if (!debug_locks_off_graph_unlock())
-		return 0;
-
-	WARN_ON(1);
-
-	return 0;
-}
-
 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
 					   unsigned int depth)
 {
@@ -1170,7 +1158,7 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
 
 	debug_atomic_inc(&nr_cyclic_checks);
 
-	result = __bfs_forward(root, target, class_equal, target_entry);
+	result = __bfs_forwards(root, target, class_equal, target_entry);
 
 	return result;
 }
@@ -1181,101 +1169,70 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static enum lock_usage_bit find_usage_bit;
 static struct lock_class *forwards_match, *backwards_match;
 
+
+#define   BFS_PROCESS_RET(ret)	do { \
+					if (ret < 0) \
+						return print_bfs_bug(ret); \
+					if (ret == 1) \
+						return 1; \
+				} while (0)
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
+
+
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+			struct lock_list **target_entry)
 {
-	struct lock_list *entry;
-	int ret;
-
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
-
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
+	int result;
 
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
-	if (source->usage_mask & (1 << find_usage_bit)) {
-		forwards_match = source;
-		return 2;
-	}
 
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_after, entry) {
-		debug_atomic_inc(&nr_find_usage_forwards_recursions);
-		ret = find_usage_forwards(entry->class, depth+1);
-		if (ret == 2 || ret == 0)
-			return ret;
-	}
-	return 1;
+	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+	return result;
 }
 
 /*
  * Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+			struct lock_list **target_entry)
 {
-	struct lock_list *entry;
-	int ret;
-
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
-
-	if (!__raw_spin_is_locked(&lockdep_lock))
-		return DEBUG_LOCKS_WARN_ON(1);
-
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
+	int result;
 
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
-	if (source->usage_mask & (1 << find_usage_bit)) {
-		backwards_match = source;
-		return 2;
-	}
 
-	if (!source && debug_locks_off_graph_unlock()) {
-		WARN_ON(1);
-		return 0;
-	}
+	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
 
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_before, entry) {
-		debug_atomic_inc(&nr_find_usage_backwards_recursions);
-		ret = find_usage_backwards(entry->class, depth+1);
-		if (ret == 2 || ret == 0)
-			return ret;
-	}
-	return 1;
+	return result;
 }
 
+
 static int
 print_bad_irq_dependency(struct task_struct *curr,
 			 struct held_lock *prev,
@@ -1343,18 +1300,21 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 	    enum lock_usage_bit bit_forwards, const char *irqclass)
 {
 	int ret;
+	struct lock_list this;
+	struct lock_list *uninitialized_var(target_entry);
+
+	this.parent = NULL;
+
+	this.class = hlock_class(prev);
+	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+	BFS_PROCESS_RET(ret);
+	backwards_match = target_entry->class;
+
+	this.class = hlock_class(next);
+	ret = find_usage_forwards(&this, bit_forwards, &target_entry);
+	BFS_PROCESS_RET(ret);
+	forwards_match = target_entry->class;
 
-	find_usage_bit = bit_backwards;
-	/* fills in <backwards_match> */
-	ret = find_usage_backwards(hlock_class(prev), 0);
-	if (!ret || ret == 1)
-		return ret;
-
-	find_usage_bit = bit_forwards;
-	ret = find_usage_forwards(hlock_class(next), 0);
-	if (!ret || ret == 1)
-		return ret;
-	/* ret == 2 */
 	return print_bad_irq_dependency(curr, prev, next,
 			bit_backwards, bit_forwards, irqclass);
 }
@@ -2029,14 +1989,16 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 		     enum lock_usage_bit bit, const char *irqclass)
 {
 	int ret;
+	struct lock_list root;
+	struct lock_list *uninitialized_var(target_entry);
 
-	find_usage_bit = bit;
-	/* fills in <forwards_match> */
-	ret = find_usage_forwards(hlock_class(this), 0);
-	if (!ret || ret == 1)
-		return ret;
+	root.parent = NULL;
+	root.class = hlock_class(this);
+	ret = find_usage_forwards(&root, bit, &target_entry);
+	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+	return print_irq_inversion_bug(curr, target_entry->class,
+					this, 1, irqclass);
 }
 
 /*
@@ -2048,14 +2010,16 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 		      enum lock_usage_bit bit, const char *irqclass)
 {
 	int ret;
+	struct lock_list root;
+	struct lock_list *uninitialized_var(target_entry);
 
-	find_usage_bit = bit;
-	/* fills in <backwards_match> */
-	ret = find_usage_backwards(hlock_class(this), 0);
-	if (!ret || ret == 1)
-		return ret;
+	root.parent = NULL;
+	root.class = hlock_class(this);
+	ret = find_usage_backwards(&root, bit, &target_entry);
+	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+	return print_irq_inversion_bug(curr, target_entry->class,
+					this, 1, irqclass);
 }
 
 void print_irqtrace_events(struct task_struct *curr)
-- 
1.6.0.GIT



* [RESEND PATCH 06/11] kernel:lockdep:introduce print_shortest_lock_dependencies
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

Since the shortest lock dependency path can be obtained by BFS,
we print it via print_shortest_lock_dependencies().
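
Since BFS leaves a parent pointer in every visited node, the path is
recovered by walking from the leaf back to the root while counting
down the depth. A minimal userspace sketch of that walk, with an
invented node type standing in for struct lock_list:

#include <stdio.h>

/* made-up node type; in the patch this is struct lock_list */
struct node {
	const char *name;
	struct node *parent;	/* filled in by the BFS */
};

static int get_depth(struct node *child)
{
	int depth = 0;

	while (child->parent) {
		child = child->parent;
		depth++;
	}
	return depth;
}

/* walk leaf -> root, counting the precomputed depth down to zero */
static void print_shortest_path(struct node *leaf, struct node *root)
{
	struct node *entry = leaf;
	int depth = get_depth(leaf);

	do {
		printf("%*s%s (depth %d)\n", depth, "", entry->name, depth);
		if (depth == 0 && entry != root) {
			printf("bad parent chain\n"); /* sanity check */
			break;
		}
		entry = entry->parent;
		depth--;
	} while (entry && depth >= 0);
}

int main(void)
{
	struct node a = { "lock_A", NULL };
	struct node b = { "lock_B", &a };
	struct node c = { "lock_C", &b };

	print_shortest_path(&c, &a);	/* C, then B, then A */
	return 0;
}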

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 kernel/lockdep.c           |   93 +++++++++++++++++++++++++++++++------------
 kernel/lockdep_internals.h |    4 +-
 2 files changed, 69 insertions(+), 28 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 94b2f1f..d839994 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -576,6 +576,36 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 }
 
 /*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+				struct lock_list *root)
+{
+	struct lock_list *entry = leaf;
+	int depth;
+
+	/*compute depth from generated tree by BFS*/
+	depth = get_lock_depth(leaf);
+
+	do {
+		print_lock_class_header(entry->class, depth);
+		printk("%*s ... acquired at:\n", depth, "");
+		print_stack_trace(&entry->trace, 2);
+		printk("\n");
+
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad BFS generated tree\n", __func__);
+			break;
+		}
+
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && (depth >= 0));
+
+	return;
+}
+/*
  * printk all lock dependencies starting at <entry>:
  */
 static void __used
@@ -992,7 +1022,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * has been detected):
  */
 static noinline int
-print_circular_bug_entry(struct lock_list *target, unsigned int depth)
+print_circular_bug_entry(struct lock_list *target, int depth)
 {
 	if (debug_locks_silent)
 		return 0;
@@ -1047,7 +1077,7 @@ static noinline int print_circular_bug(struct lock_list *this,
 {
 	struct task_struct *curr = current;
 	struct lock_list *parent;
-	unsigned long depth;
+	int depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
@@ -1169,7 +1199,6 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static struct lock_class *forwards_match, *backwards_match;
 
 
 #define   BFS_PROCESS_RET(ret)	do { \
@@ -1235,6 +1264,10 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
+			 struct lock_list *prev_root,
+			 struct lock_list *next_root,
+			 struct lock_list *backwards_entry,
+			 struct lock_list *forwards_entry,
 			 struct held_lock *prev,
 			 struct held_lock *next,
 			 enum lock_usage_bit bit1,
@@ -1267,26 +1300,32 @@ print_bad_irq_dependency(struct task_struct *curr,
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
-	print_lock_name(backwards_match);
+	print_lock_name(backwards_entry->class);
 	printk("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_match->usage_traces + bit1, 1);
+	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
-	print_lock_name(forwards_match);
+	print_lock_name(forwards_entry->class);
 	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
 	printk("...");
 
-	print_stack_trace(forwards_match->usage_traces + bit2, 1);
+	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	printk("\nother info that might help us debug this:\n\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
-	print_lock_dependencies(backwards_match, 0);
+	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
+	printk(" and the holding lock:\n");
+	if (!save_trace(&prev_root->trace))
+		return 0;
+	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
-	printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
-	print_lock_dependencies(forwards_match, 0);
+	printk("\nthe dependencies between the lock to be acquired");
+	printk(" and %s-irq-unsafe lock:\n", irqclass);
+	if (!save_trace(&next_root->trace))
+		return 0;
+	print_shortest_lock_dependencies(forwards_entry, next_root);
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
@@ -1300,22 +1339,24 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 	    enum lock_usage_bit bit_forwards, const char *irqclass)
 {
 	int ret;
-	struct lock_list this;
+	struct lock_list this, that;
 	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list *uninitialized_var(target_entry1);
 
 	this.parent = NULL;
 
 	this.class = hlock_class(prev);
 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
 	BFS_PROCESS_RET(ret);
-	backwards_match = target_entry->class;
 
-	this.class = hlock_class(next);
-	ret = find_usage_forwards(&this, bit_forwards, &target_entry);
+	that.parent = NULL;
+	that.class = hlock_class(next);
+	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
 	BFS_PROCESS_RET(ret);
-	forwards_match = target_entry->class;
 
-	return print_bad_irq_dependency(curr, prev, next,
+	return print_bad_irq_dependency(curr, &this, &that,
+			target_entry, target_entry1,
+			prev, next,
 			bit_backwards, bit_forwards, irqclass);
 }
 
@@ -1944,7 +1985,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
  * print irq inversion bug:
  */
 static int
-print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
+print_irq_inversion_bug(struct task_struct *curr,
+			struct lock_list *root, struct lock_list *other,
 			struct held_lock *this, int forwards,
 			const char *irqclass)
 {
@@ -1962,17 +2004,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
 		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
-	print_lock_name(other);
+	print_lock_name(other->class);
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
 	printk("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe first lock's dependencies:\n");
-	print_lock_dependencies(hlock_class(this), 0);
-
-	printk("\nthe second lock's dependencies:\n");
-	print_lock_dependencies(other, 0);
+	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+	if (!save_trace(&root->trace))
+		return 0;
+	print_shortest_lock_dependencies(other, root);
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
@@ -1997,7 +2038,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 	ret = find_usage_forwards(&root, bit, &target_entry);
 	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, target_entry->class,
+	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
 }
 
@@ -2018,7 +2059,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	ret = find_usage_backwards(&root, bit, &target_entry);
 	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, target_entry->class,
+	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
 }
 
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c2f6594..b115aaa 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -219,9 +219,9 @@ static inline struct lock_list *get_lock_parent(struct lock_list *child)
 	return child->parent;
 }
 
-static inline unsigned long get_lock_depth(struct lock_list *child)
+static inline int get_lock_depth(struct lock_list *child)
 {
-	unsigned long depth = 0;
+	int depth = 0;
 	struct lock_list *parent;
 
 	while ((parent = get_lock_parent(child))) {
-- 
1.6.0.GIT



* [RESEND PATCH 07/11] kernel:lockdep: implement lockdep_count_*ward_deps by BFS
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 kernel/lockdep.c |   52 +++++++++++++++++++++++++---------------------------
 1 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d839994..6e31e4b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1115,61 +1115,59 @@ static int noinline print_bfs_bug(int ret)
 	return 0;
 }
 
-unsigned long __lockdep_count_forward_deps(struct lock_class *class,
-					   unsigned int depth)
+static int noop_count(struct lock_list *entry, void *data)
 {
-	struct lock_list *entry;
-	unsigned long ret = 1;
+	(*(unsigned long *)data)++;
+	return 0;
+}
 
-	if (lockdep_dependency_visit(class, depth))
-		return 0;
+unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+{
+	unsigned long  count = 0;
+	struct lock_list *uninitialized_var(target_entry);
 
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_after, entry)
-		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
 
-	return ret;
+	return count;
 }
-
 unsigned long lockdep_count_forward_deps(struct lock_class *class)
 {
 	unsigned long ret, flags;
+	struct lock_list this;
+
+	this.parent = NULL;
+	this.class = class;
 
 	local_irq_save(flags);
 	__raw_spin_lock(&lockdep_lock);
-	ret = __lockdep_count_forward_deps(class, 0);
+	ret = __lockdep_count_forward_deps(&this);
 	__raw_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
 }
 
-unsigned long __lockdep_count_backward_deps(struct lock_class *class,
-					    unsigned int depth)
+unsigned long __lockdep_count_backward_deps(struct lock_list *this)
 {
-	struct lock_list *entry;
-	unsigned long ret = 1;
+	unsigned long  count = 0;
+	struct lock_list *uninitialized_var(target_entry);
 
-	if (lockdep_dependency_visit(class, depth))
-		return 0;
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_before, entry)
-		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
 
-	return ret;
+	return count;
 }
 
 unsigned long lockdep_count_backward_deps(struct lock_class *class)
 {
 	unsigned long ret, flags;
+	struct lock_list this;
+
+	this.parent = NULL;
+	this.class = class;
 
 	local_irq_save(flags);
 	__raw_spin_lock(&lockdep_lock);
-	ret = __lockdep_count_backward_deps(class, 0);
+	ret = __lockdep_count_backward_deps(&this);
 	__raw_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
-- 
1.6.0.GIT



* [RESEND PATCH 08/11] kernel:lockdep: update memory usage introduced by BFS
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 kernel/lockdep.c |    3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 6e31e4b..faa39cb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3429,7 +3429,8 @@ void __init lockdep_info(void)
 		sizeof(struct list_head) * CLASSHASH_SIZE +
 		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
 		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
-		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
+		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024 +
+		sizeof(struct circular_queue) + sizeof(bfs_accessed));
 
 	printk(" per task-struct memory footprint: %lu bytes\n",
 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
-- 
1.6.0.GIT



* [RESEND PATCH 09/11] kernel:lockdep:add statistics info for max bfs queue depth
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 kernel/lockdep.c           |    6 +++++-
 kernel/lockdep_internals.h |    3 ++-
 kernel/lockdep_proc.c      |    2 ++
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index faa39cb..b20f08b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -929,7 +929,7 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 
 unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 static struct circular_queue  lock_cq;
-
+unsigned int max_bfs_queue_depth;
 static int __bfs(struct lock_list *source_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
@@ -975,6 +975,7 @@ static int __bfs(struct lock_list *source_entry,
 
 		list_for_each_entry(entry, head, entry) {
 			if (!lock_accessed(entry)) {
+				unsigned int cq_depth;
 				mark_lock_accessed(entry, lock);
 				if (match(entry, data)) {
 					*target_entry = entry;
@@ -986,6 +987,9 @@ static int __bfs(struct lock_list *source_entry,
 					ret = -1;
 					goto exit;
 				}
+				cq_depth = __cq_get_elem_count(cq);
+				if (max_bfs_queue_depth < cq_depth)
+					max_bfs_queue_depth = cq_depth;
 			}
 		}
 	}
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index b115aaa..6baa880 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -138,6 +138,7 @@ extern atomic_t nr_find_usage_backwards_recursions;
 #endif
 
 
+extern unsigned int max_bfs_queue_depth;
 extern unsigned long nr_list_entries;
 extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 extern unsigned long bfs_accessed[];
@@ -191,7 +192,7 @@ static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
 	return 0;
 }
 
-static inline int __cq_get_elem_count(struct circular_queue *cq)
+static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
 {
 	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
 }
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d7135aa..9a1bf34 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -411,6 +411,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 			max_lockdep_depth);
 	seq_printf(m, " max recursion depth:           %11u\n",
 			max_recursion_depth);
+	seq_printf(m, " max bfs queue depth:           %11u\n",
+			max_bfs_queue_depth);
 	lockdep_stats_debug_show(m);
 	seq_printf(m, " debug_locks:                   %11u\n",
 			debug_locks);
-- 
1.6.0.GIT



* [RESEND PATCH 10/11] BFS cleanup
From: tom.leiming @ 2009-06-28 15:04 UTC
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 include/linux/lockdep.h    |    7 +-
 kernel/lockdep.c           |  232 +++++++++++++++++++++++++++-----------------
 kernel/lockdep_internals.h |   97 +------------------
 3 files changed, 147 insertions(+), 189 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9ec026f..12aabfc 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -58,7 +58,6 @@ struct lock_class {
 
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
-	unsigned int			dep_gen_id;
 
 	/*
 	 * IRQ/softirq usage tracking bits:
@@ -150,9 +149,9 @@ struct lock_list {
 	struct stack_trace		trace;
 	int				distance;
 
-	/*The parent field is used to implement breadth-first search,and
-	 *the bit 0 is reused to indicate if the lock has been accessed
-	 *in BFS.
+	/*
+	 * The parent field is used to implement breadth-first search, and the
+	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
 	 */
 	struct lock_list		*parent;
 };
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b20f08b..09a141f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,6 +43,7 @@
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
+
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
@@ -118,7 +119,7 @@ static inline int debug_locks_off_graph_unlock(void)
 static int lockdep_initialized;
 
 unsigned long nr_list_entries;
-struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -390,19 +391,6 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
-static unsigned int lockdep_dependency_gen_id;
-
-static bool lockdep_dependency_visit(struct lock_class *source,
-				     unsigned int depth)
-{
-	if (!depth)
-		lockdep_dependency_gen_id++;
-	if (source->dep_gen_id == lockdep_dependency_gen_id)
-		return true;
-	source->dep_gen_id = lockdep_dependency_gen_id;
-	return false;
-}
-
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -575,64 +563,6 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 	print_ip_sym((unsigned long)class->key);
 }
 
-/*
- * printk the shortest lock dependencies from @start to @end in reverse order:
- */
-static void __used
-print_shortest_lock_dependencies(struct lock_list *leaf,
-				struct lock_list *root)
-{
-	struct lock_list *entry = leaf;
-	int depth;
-
-	/*compute depth from generated tree by BFS*/
-	depth = get_lock_depth(leaf);
-
-	do {
-		print_lock_class_header(entry->class, depth);
-		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
-		printk("\n");
-
-		if (depth == 0 && (entry != root)) {
-			printk("lockdep:%s bad BFS generated tree\n", __func__);
-			break;
-		}
-
-		entry = get_lock_parent(entry);
-		depth--;
-	} while (entry && (depth >= 0));
-
-	return;
-}
-/*
- * printk all lock dependencies starting at <entry>:
- */
-static void __used
-print_lock_dependencies(struct lock_class *class, int depth)
-{
-	struct lock_list *entry;
-
-	if (lockdep_dependency_visit(class, depth))
-		return;
-
-	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
-		return;
-
-	print_lock_class_header(class, depth);
-
-	list_for_each_entry(entry, &class->locks_after, entry) {
-		if (DEBUG_LOCKS_WARN_ON(!entry->class))
-			return;
-
-		print_lock_dependencies(entry->class, depth + 1);
-
-		printk("%*s ... acquired at:\n",depth,"");
-		print_stack_trace(&entry->trace, 2);
-		printk("\n");
-	}
-}
-
 static void print_kernel_version(void)
 {
 	printk("%s %.*s\n", init_utsname()->release,
@@ -927,14 +857,106 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
-unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
-static struct circular_queue  lock_cq;
+/* For good efficiency of the modulo operation, we use a power of 2 */
+#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
+#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
+
+/* The circular_queue and helpers are used to implement the
+ * breadth-first search (BFS) algorithm, by which we can build
+ * the shortest path from the next lock to be acquired to the
+ * previously held lock if there is a circle between them.
+ */
+struct circular_queue {
+	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+	unsigned int  front, rear;
+};
+
+static struct circular_queue lock_cq;
+static unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
+
 unsigned int max_bfs_queue_depth;
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1) & CQ_MASK) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front) & CQ_MASK;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+					struct lock_list *parent)
+{
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	lock->parent = parent;
+	set_bit(nr, bfs_accessed);
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	return test_bit(nr, bfs_accessed);
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return child->parent;
+}
+
+static inline int get_lock_depth(struct lock_list *child)
+{
+	int depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}
+
 static int __bfs(struct lock_list *source_entry,
-			void *data,
-			int (*match)(struct lock_list *entry, void *data),
-			struct lock_list **target_entry,
-			int forward)
+		 void *data,
+		 int (*match)(struct lock_list *entry, void *data),
+		 struct lock_list **target_entry,
+		 int forward)
 {
 	struct lock_list *entry;
 	struct list_head *head;
@@ -1202,14 +1224,6 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
 
-
-#define   BFS_PROCESS_RET(ret)	do { \
-					if (ret < 0) \
-						return print_bfs_bug(ret); \
-					if (ret == 1) \
-						return 1; \
-				} while (0)
-
 static inline int usage_match(struct lock_list *entry, void *bit)
 {
 	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
@@ -1236,6 +1250,8 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
 
 	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+	if (result < 0)
+		return print_bfs_bug(result);
 
 	return result;
 }
@@ -1259,10 +1275,42 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
 
 	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+	if (result < 0)
+		return print_bfs_bug(result);
 
 	return result;
 }
 
+/*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+				struct lock_list *root)
+{
+	struct lock_list *entry = leaf;
+	int depth;
+
+	/* compute the depth from the tree generated by BFS */
+	depth = get_lock_depth(leaf);
+
+	do {
+		print_lock_class_header(entry->class, depth);
+		printk("%*s ... acquired at:\n", depth, "");
+		print_stack_trace(&entry->trace, 2);
+		printk("\n");
+
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad BFS generated tree\n", __func__);
+			break;
+		}
+
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && (depth >= 0));
+
+	return;
+}
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
@@ -1349,12 +1397,14 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 	this.class = hlock_class(prev);
 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (!ret || ret == 1)
+		return ret;
 
 	that.parent = NULL;
 	that.class = hlock_class(next);
 	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-	BFS_PROCESS_RET(ret);
+	if (!ret || ret == 1)
+		return ret;
 
 	return print_bad_irq_dependency(curr, &this, &that,
 			target_entry, target_entry1,
@@ -2038,7 +2088,8 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_forwards(&root, bit, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (!ret || ret == 1)
+		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
@@ -2059,7 +2110,8 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_backwards(&root, bit, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (!ret || ret == 1)
+		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 6baa880..a2ee95a 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned int max_bfs_queue_depth;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
@@ -136,98 +138,3 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)		0
 #endif
-
-
-extern unsigned int max_bfs_queue_depth;
-extern unsigned long nr_list_entries;
-extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
-extern unsigned long bfs_accessed[];
-
-/*For good efficiency of modular, we use power of 2*/
-#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
-
-/* The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
- * */
-struct circular_queue{
-	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
-	unsigned int  front, rear;
-};
-
-static inline void __cq_init(struct circular_queue *cq)
-{
-	cq->front = cq->rear = 0;
-	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
-}
-
-static inline int __cq_empty(struct circular_queue *cq)
-{
-	return (cq->front == cq->rear);
-}
-
-static inline int __cq_full(struct circular_queue *cq)
-{
-	return ((cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1))  == cq->front;
-}
-
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
-{
-	if (__cq_full(cq))
-		return -1;
-
-	cq->element[cq->rear] = elem;
-	cq->rear = (cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
-{
-	if (__cq_empty(cq))
-		return -1;
-
-	*elem = cq->element[cq->front];
-	cq->front = (cq->front + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
-{
-	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
-}
-
-static inline void mark_lock_accessed(struct lock_list *lock,
-					struct lock_list *parent)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	lock->parent = parent;
-	set_bit(nr, bfs_accessed);
-}
-
-static inline unsigned long lock_accessed(struct lock_list *lock)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	return test_bit(nr, bfs_accessed);
-}
-
-static inline struct lock_list *get_lock_parent(struct lock_list *child)
-{
-	return child->parent;
-}
-
-static inline int get_lock_depth(struct lock_list *child)
-{
-	int depth = 0;
-	struct lock_list *parent;
-
-	while ((parent = get_lock_parent(child))) {
-		child = parent;
-		depth++;
-	}
-	return depth;
-}
-- 
1.6.0.GIT


^ permalink raw reply related	[flat|nested] 54+ messages in thread
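
The power-of-two queue above deserves a standalone illustration: because
MAX_CIRCULAR_QUEUE_SIZE is a power of 2, the wrap-around is a cheap mask
instead of a division, and one slot is sacrificed so that the full and
empty states remain distinguishable. The following is a minimal userspace
sketch of the same scheme, not the kernel code itself; the 8-slot size
and all names here are illustrative only:

#include <stdio.h>

/* Size must be a power of 2 so that "& MASK" can replace "%". */
#define CQ_SIZE		8UL
#define CQ_MASK		(CQ_SIZE - 1)

struct cq {
	unsigned long element[CQ_SIZE];
	unsigned int front, rear;
};

static int cq_empty(struct cq *q)
{
	return q->front == q->rear;
}

/* One slot stays unused: "full" means rear is one step behind front. */
static int cq_full(struct cq *q)
{
	return ((q->rear + 1) & CQ_MASK) == q->front;
}

static int cq_enqueue(struct cq *q, unsigned long e)
{
	if (cq_full(q))
		return -1;
	q->element[q->rear] = e;
	q->rear = (q->rear + 1) & CQ_MASK;
	return 0;
}

static int cq_dequeue(struct cq *q, unsigned long *e)
{
	if (cq_empty(q))
		return -1;
	*e = q->element[q->front];
	q->front = (q->front + 1) & CQ_MASK;
	return 0;
}

int main(void)
{
	struct cq q = { .front = 0, .rear = 0 };
	unsigned long v;

	/* Only CQ_SIZE - 1 == 7 elements fit before the queue reports full. */
	for (v = 0; cq_enqueue(&q, v) == 0; v++)
		;
	printf("enqueued %lu elements\n", v);

	while (cq_dequeue(&q, &v) == 0)
		printf("dequeued %lu\n", v);
	return 0;
}

This mirrors why the 4096-entry queue in the patch can hold at most 4095
pending lock_list entries at any one time.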

* [RESEND PATCH 11/11] kernel:lockdep:fix return value of check_usage*()
  2009-06-28 15:04                   ` [RESEND PATCH 10/11] BFS cleanup tom.leiming
@ 2009-06-28 15:04                     ` tom.leiming
  2009-07-18 14:24                     ` [tip:core/locking] lockdep: BFS cleanup tip-bot for Peter Zijlstra
  2009-08-02 13:03                     ` tip-bot for Peter Zijlstra
  2 siblings, 0 replies; 54+ messages in thread
From: tom.leiming @ 2009-06-28 15:04 UTC (permalink / raw)
  To: a.p.zijlstra; +Cc: linux-kernel, akpm, mingo, Ming Lei

From: Ming Lei <tom.leiming@gmail.com>

BFS returns zero if it finds the target, so fix the handling of the
return value in the callers of BFS.

This fixes the boot failure introduced by the patch:
	lockdep: clean up after BFS patches

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
---
 kernel/lockdep.c |   20 ++++++++++++--------
 1 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 09a141f..48c3364 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1250,8 +1250,6 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
 
 	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
-	if (result < 0)
-		return print_bfs_bug(result);
 
 	return result;
 }
@@ -1275,8 +1273,6 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
 
 	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
-	if (result < 0)
-		return print_bfs_bug(result);
 
 	return result;
 }
@@ -1397,13 +1393,17 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 	this.class = hlock_class(prev);
 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-	if (!ret || ret == 1)
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
 		return ret;
 
 	that.parent = NULL;
 	that.class = hlock_class(next);
 	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-	if (!ret || ret == 1)
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
 		return ret;
 
 	return print_bad_irq_dependency(curr, &this, &that,
@@ -2088,7 +2088,9 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_forwards(&root, bit, &target_entry);
-	if (!ret || ret == 1)
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
 		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
@@ -2110,7 +2112,9 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_backwards(&root, bit, &target_entry);
-	if (!ret || ret == 1)
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
 		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
-- 
1.6.0.GIT


^ permalink raw reply related	[flat|nested] 54+ messages in thread
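
The convention this fix restores is worth spelling out: the BFS core
returns a tristate, and the breakage came from collapsing two of its
cases. Below is a small self-contained sketch of the caller pattern;
the names are stand-ins, not the lockdep functions:

#include <stdio.h>

/* Tristate returned by the BFS core:                        */
/*   < 0: internal error (e.g. the queue overflowed)         */
/*     0: target found, the caller must report the bad chain */
/*     1: target not found, the new dependency is fine       */
static int find_usage_stub(int simulate_match)
{
	return simulate_match ? 0 : 1;
}

static int check_usage_sketch(int simulate_match)
{
	int ret = find_usage_stub(simulate_match);

	if (ret < 0) {
		printf("BFS internal error: %d\n", ret);	/* print_bfs_bug() */
		return 0;
	}
	if (ret == 1)
		return 1;	/* no match found: dependency is valid */

	/* ret == 0: a real caller prints the offending chain here */
	printf("bad dependency found\n");
	return 0;
}

int main(void)
{
	printf("clean case -> %d\n", check_usage_sketch(0));
	printf("bad case   -> %d\n", check_usage_sketch(1));
	return 0;
}

The cleanup's collapsed test "(!ret || ret == 1)" returned early on a
successful match as well, so the report for a found target was never
produced; separating the three cases again restores the intended
behaviour.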

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-06-28 15:04 [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS tom.leiming
  2009-06-28 15:04 ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle tom.leiming
@ 2009-07-11  0:43 ` Frederic Weisbecker
  2009-07-11  3:25   ` Ming Lei
  2009-07-13  9:16 ` Peter Zijlstra
  2 siblings, 1 reply; 54+ messages in thread
From: Frederic Weisbecker @ 2009-07-11  0:43 UTC (permalink / raw)
  To: tom.leiming; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

Hi,

On Sun, Jun 28, 2009 at 11:04:35PM +0800, tom.leiming@gmail.com wrote:
> Hi,Peter
> 
> Currently lockdep uses recursion DFS(depth-first search) algorithm to
> search target in checking lock circle(check_noncircular()),irq-safe
> -> irq-unsafe(check_irq_usage()) and irq inversion when adding a new
> lock dependency. This patches replace the current DFS with BFS, based on
> the following consideration:
> 
>     1,no loss of efficiency, no matter DFS or BFS, the running time
>     are O(V+E) (V is vertex count, and E is edge count of one
>     graph);
> 
>     2,BFS may be easily implemented by circular queue and consumes
>     much less kernel stack space than DFS for DFS is implemented by
>     recursion.



Looks like a valuable argument. check_noncircular() can be called
in very random places in the kernel where the stack may be
already deep, and this recursive DFS doesn't help there.



>     3,The shortest path can be obtained by BFS if the target is
>     found, but can't be got by DFS. By the shortest path, we can
>     shorten the lock dependency chain and help to troubleshoot lock
>     problem easier than before.


But there I don't understand your argument.
The shortest path finding doesn't seem to me to be a need.
Example:

Task 1 acquires: A B C
And Later:
Task 2 acquires: C B A

DFS will probably report a circular lock dependency
with A and C.
BFS will probably report a circular lock dependency
with B and C.

Which one is the most important? Both dependencies must be fixed
anyway. Once the developer fixes one of those, the remaining one
will be reported and so on...

Or am I missing something else?


^ permalink raw reply	[flat|nested] 54+ messages in thread
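
Frederic's question above is easy to reproduce outside the kernel. The
sketch below is purely illustrative (a made-up 5-node graph, nothing
lockdep-specific): a recursive DFS reports whichever path it stumbles
into first, while a BFS that records parent pointers, the same trick the
series uses via lock_list->parent, always recovers a shortest path:

#include <stdio.h>

#define N 5

/* adj[u][v] != 0 means an edge u -> v. Two routes from 0 to 4:  */
/* the long one 0->1->2->3->4 and the direct one 0->4.           */
static const int adj[N][N] = {
	{0, 1, 0, 0, 1},	/* 0 -> 1, 0 -> 4 */
	{0, 0, 1, 0, 0},	/* 1 -> 2 */
	{0, 0, 0, 1, 0},	/* 2 -> 3 */
	{0, 0, 0, 0, 1},	/* 3 -> 4 */
	{0, 0, 0, 0, 0},
};

/* Naive recursive DFS: returns the first path found, not the shortest. */
static int dfs(int u, int target, int *seen, int *path, int depth)
{
	path[depth] = u;
	if (u == target)
		return depth + 1;
	seen[u] = 1;
	for (int v = 0; v < N; v++)
		if (adj[u][v] && !seen[v]) {
			int len = dfs(v, target, seen, path, depth + 1);
			if (len)
				return len;
		}
	return 0;
}

/* BFS with parent pointers: when the target is dequeued, the parent */
/* chain is a shortest path (same idea as lock_list->parent).        */
static int bfs(int src, int target, int *parent)
{
	int queue[N], front = 0, rear = 0, seen[N] = {0};

	queue[rear++] = src;
	seen[src] = 1;
	parent[src] = -1;
	while (front < rear) {
		int u = queue[front++];
		if (u == target)
			return 1;
		for (int v = 0; v < N; v++)
			if (adj[u][v] && !seen[v]) {
				seen[v] = 1;
				parent[v] = u;
				queue[rear++] = v;
			}
	}
	return 0;
}

int main(void)
{
	int seen[N] = {0}, path[N], parent[N];
	int len = dfs(0, 4, seen, path, 0);

	printf("DFS path (%d nodes):", len);
	for (int i = 0; i < len; i++)
		printf(" %d", path[i]);
	printf("\n");

	if (bfs(0, 4, parent)) {
		printf("BFS shortest path (reversed): ");
		for (int v = 4; v != -1; v = parent[v])
			printf("%d ", v);
		printf("\n");
	}
	return 0;
}

Run as-is, the DFS prints the five-node detour 0 1 2 3 4, while the BFS
walk finds the direct edge 0 -> 4.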

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-07-11  0:43 ` [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS Frederic Weisbecker
@ 2009-07-11  3:25   ` Ming Lei
  2009-07-11 21:09     ` Frederic Weisbecker
  0 siblings, 1 reply; 54+ messages in thread
From: Ming Lei @ 2009-07-11  3:25 UTC (permalink / raw)
  To: Frederic Weisbecker; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

2009/7/11 Frederic Weisbecker <fweisbec@gmail.com>:
> Hi,
>
> On Sun, Jun 28, 2009 at 11:04:35PM +0800, tom.leiming@gmail.com wrote:
>> Hi,Peter
>>
>> Currently lockdep uses recursion DFS(depth-first search) algorithm to
>> search target in checking lock circle(check_noncircular()),irq-safe
>> -> irq-unsafe(check_irq_usage()) and irq inversion when adding a new
>> lock dependency. This patches replace the current DFS with BFS, based on
>> the following consideration:
>>
>>     1,no loss of efficiency, no matter DFS or BFS, the running time
>>     are O(V+E) (V is vertex count, and E is edge count of one
>>     graph);
>>
>>     2,BFS may be easily implemented by circular queue and consumes
>>     much less kernel stack space than DFS for DFS is implemented by
>>     recursion.
>
>
>
> Looks like a valuable argument. check_noncircular() can be called
> in very random places in the kernel where the stack may be
> already deep, and this recursive DFS doesn't help there.

Yes, BFS uses the preallocated queue buffer as its "stack" and removes
the recursive implementation of DFS, so it greatly decreases kernel
stack consumption.

From this point of view, the BFS patch is valuable.

>
>
>
>>     3,The shortest path can be obtained by BFS if the target is
>>     found, but can't be got by DFS. By the shortest path, we can
>>     shorten the lock dependency chain and help to troubleshoot lock
>>     problem easier than before.
>
>
> But there I don't understand your argument.
> The shortest path finding doesn't seem to me to be a need.
> Example:
>
> Task 1 acquires: A B C
> And Later:
> Task 2 acquires: C B A
>
> DFS will probably report a circular lock dependency
> with A and C.
> BFS will probably report a circular lock dependency
> with B and C.
>
> Which one is the most important? Both dependencies must be fixed
> anyway. Once the developer fixes one of those, the remaining one
> will be reported and so on...
>
> Or am I missing something else?

Yes, you are right.  With BFS we always find the shortest circle, whereas
DFS finds a random one.  No one can say which circle is the most
important from the point of view of deadlock.

But it is easier to start troubleshooting from the shortest circle
than from a random one, and then move on to the next shortest circle
if any others still exist.

Right?

-- 
Lei Ming

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-07-11  3:25   ` Ming Lei
@ 2009-07-11 21:09     ` Frederic Weisbecker
  2009-07-12  2:29       ` Ming Lei
  0 siblings, 1 reply; 54+ messages in thread
From: Frederic Weisbecker @ 2009-07-11 21:09 UTC (permalink / raw)
  To: Ming Lei; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

On Sat, Jul 11, 2009 at 11:25:29AM +0800, Ming Lei wrote:
> 2009/7/11 Frederic Weisbecker <fweisbec@gmail.com>:
> > Hi,
> >
> > On Sun, Jun 28, 2009 at 11:04:35PM +0800, tom.leiming@gmail.com wrote:
> >> Hi,Peter
> >>
> >> Currently lockdep uses recursion DFS(depth-first search) algorithm to
> >> search target in checking lock circle(check_noncircular()),irq-safe
> >> -> irq-unsafe(check_irq_usage()) and irq inversion when adding a new
> >> lock dependency. This patches replace the current DFS with BFS, based on
> >> the following consideration:
> >>
> >>     1,no loss of efficiency, no matter DFS or BFS, the running time
> >>     are O(V+E) (V is vertex count, and E is edge count of one
> >>     graph);
> >>
> >>     2,BFS may be easily implemented by circular queue and consumes
> >>     much less kernel stack space than DFS for DFS is implemented by
> >>     recursion.
> >
> >
> >
> > Looks like a valuable argument. check_noncircular() can be called
> > in very random places in the kernel where the stack may be
> > already deep, and this recursive DFS doesn't help there.
> 
> Yes, BFS uses the preallocated queue buffer as its "stack" and removes
> the recursive implementation of DFS, so it greatly decreases kernel
> stack consumption.
> 
> From this point of view, the BFS patch is valuable.


Right!

 
> >
> >
> >
> >>     3,The shortest path can be obtained by BFS if the target is
> >>     found, but can't be got by DFS. By the shortest path, we can
> >>     shorten the lock dependency chain and help to troubleshoot lock
> >>     problem easier than before.
> >
> >
> > But there I don't understand your argument.
> > The shortest path finding doesn't seem to me to be a need.
> > Example:
> >
> > Task 1 acquires: A B C
> > And Later:
> > Task 2 acquires: C B A
> >
> > DFS will probably report a circular lock dependency
> > with A and C.
> > BFS will probably report a circular lock dependency
> > with B and C.
> >
> > Which one is the most important? Both dependencies must be fixed
> > anyway. Once the developer fixes one of those, the remaining one
> > will be reported and so on...
> >
> > Or am I missing something else?
> 
> Yes, you are right.  With BFS we always find the shortest circle, whereas
> DFS finds a random one.  No one can say which circle is the most
> important from the point of view of deadlock.
> 
> But it is easier to start troubleshooting from the shortest circle
> than from a random one, and then move on to the next shortest circle
> if any others still exist.
> 
> Right?


I don't have a strong opinion on this. I just don't think the shortest path
is the most important one if there are many, many paths.
Whatever AB-BA circles are encountered, all of them must be fixed.
What might give a degree of importance to such a bad circle is the window
in which it triggers.


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle
  2009-06-28 15:04 ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle tom.leiming
  2009-06-28 15:04   ` [RESEND PATCH 02/11] kernel:lockdep:improve implementation of BFS tom.leiming
@ 2009-07-11 21:30   ` Frederic Weisbecker
  2009-07-12  2:42     ` Ming Lei
  2009-07-13  9:01   ` Dave Young
                     ` (2 subsequent siblings)
  4 siblings, 1 reply; 54+ messages in thread
From: Frederic Weisbecker @ 2009-07-11 21:30 UTC (permalink / raw)
  To: tom.leiming; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

On Sun, Jun 28, 2009 at 11:04:36PM +0800, tom.leiming@gmail.com wrote:
> From: Ming Lei <tom.leiming@gmail.com>
> 
> Currently lockdep will print the 1st circle detected if it exists when
> acquiring a new (next) lock.
> 
> This patch prints the shortest path from the next lock to be acquired to
> the previous held lock if a circle is found.
> 
> The patch still uses the current method to check circle, and once the
> circle is found, breadth-first search algorithem is used to compute the
> shortest path from the next lock to the previous lock in the forward
> lock dependency graph.
> 
> Printing the shortest path will shorten the dependency chain, and make
> troubleshooting for possible circular locking easier.



Oh! That looks fairly different from what I was thinking about...

If I understand well, let's imagine the following:

Task 1 acquires: A B F
Task 2 acquires: A B C D E F
Task 3 acquires: F B

Before your patch, DFS would report the BF - FB wicked dependency
by reporting the Task 2 dependency snapshot, which is cluttered by
the other locks C, D and E (that would be reported in the dependency chain
if I'm not wrong) whereas BFS would be smarter by finding the shortest
snapshot found in Task 3: just F B.

Correct me if I misunderstand this patch.

I suggest you provide an example along with this patch; that would make
it easier to demonstrate its importance.

Thanks,
Frederic.


 
> Signed-off-by: Ming Lei <tom.leiming@gmail.com>
> ---
>  include/linux/lockdep.h    |    6 ++
>  kernel/lockdep.c           |  115 ++++++++++++++++++++++++++++++++++++++++----
>  kernel/lockdep_internals.h |   83 +++++++++++++++++++++++++++++++
>  3 files changed, 195 insertions(+), 9 deletions(-)
> 
> diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
> index b25d1b5..9ec026f 100644
> --- a/include/linux/lockdep.h
> +++ b/include/linux/lockdep.h
> @@ -149,6 +149,12 @@ struct lock_list {
>  	struct lock_class		*class;
>  	struct stack_trace		trace;
>  	int				distance;
> +
> +	/*The parent field is used to implement breadth-first search,and
> +	 *the bit 0 is reused to indicate if the lock has been accessed
> +	 *in BFS.
> +	 */
> +	struct lock_list		*parent;
>  };
>  
>  /*
> diff --git a/kernel/lockdep.c b/kernel/lockdep.c
> index 8bbeef9..93dc70d 100644
> --- a/kernel/lockdep.c
> +++ b/kernel/lockdep.c
> @@ -897,6 +897,79 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
>  	return 1;
>  }
>  
> +static struct circular_queue  lock_cq;
> +static int __search_shortest_path(struct lock_list *source_entry,
> +				struct lock_class *target,
> +				struct lock_list **target_entry,
> +				int forward)
> +{
> +	struct lock_list *entry;
> +	struct circular_queue *cq = &lock_cq;
> +	int ret = 1;
> +
> +	__cq_init(cq);
> +
> +	mark_lock_accessed(source_entry, NULL);
> +	if (source_entry->class == target) {
> +		*target_entry = source_entry;
> +		ret = 0;
> +		goto exit;
> +	}
> +
> +	__cq_enqueue(cq, (unsigned long)source_entry);
> +
> +	while (!__cq_empty(cq)) {
> +		struct lock_list *lock;
> +		struct list_head *head;
> +
> +		__cq_dequeue(cq, (unsigned long *)&lock);
> +
> +		if (!lock->class) {
> +			ret = -2;
> +			goto exit;
> +		}
> +
> +		if (forward)
> +			head = &lock->class->locks_after;
> +		else
> +			head = &lock->class->locks_before;
> +
> +		list_for_each_entry(entry, head, entry) {
> +			if (!lock_accessed(entry)) {
> +				mark_lock_accessed(entry, lock);
> +				if (entry->class == target) {
> +					*target_entry = entry;
> +					ret = 0;
> +					goto exit;
> +				}
> +
> +				if (__cq_enqueue(cq, (unsigned long)entry)) {
> +					ret = -1;
> +					goto exit;
> +				}
> +			}
> +		}
> +	}
> +exit:
> +	return ret;
> +}
> +
> +static inline int __search_forward_shortest_path(struct lock_list *src_entry,
> +				struct lock_class *target,
> +				struct lock_list **target_entry)
> +{
> +	return __search_shortest_path(src_entry, target, target_entry, 1);
> +
> +}
> +
> +static inline int __search_backward_shortest_path(struct lock_list *src_entry,
> +				struct lock_class *target,
> +				struct lock_list **target_entry)
> +{
> +	return __search_shortest_path(src_entry, target, target_entry, 0);
> +
> +}
> +
>  /*
>   * Recursive, forwards-direction lock-dependency checking, used for
>   * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
> @@ -934,7 +1007,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
>  {
>  	struct task_struct *curr = current;
>  
> -	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
> +	if (debug_locks_silent)
>  		return 0;
>  
>  	printk("\n=======================================================\n");
> @@ -954,19 +1027,41 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
>  	return 0;
>  }
>  
> -static noinline int print_circular_bug_tail(void)
> +static noinline int print_circular_bug(void)
>  {
>  	struct task_struct *curr = current;
>  	struct lock_list this;
> +	struct lock_list *target;
> +	struct lock_list *parent;
> +	int result;
> +	unsigned long depth;
>  
> -	if (debug_locks_silent)
> +	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
>  		return 0;
>  
>  	this.class = hlock_class(check_source);
>  	if (!save_trace(&this.trace))
>  		return 0;
>  
> -	print_circular_bug_entry(&this, 0);
> +	result = __search_forward_shortest_path(&this,
> +						hlock_class(check_target),
> +						&target);
> +	if (result) {
> +		printk("\n%s:search shortest path failed:%d\n", __func__,
> +			result);
> +		return 0;
> +	}
> +
> +	depth = get_lock_depth(target);
> +
> +	print_circular_bug_header(target, depth);
> +
> +	parent = get_lock_parent(target);
> +
> +	while (parent) {
> +		print_circular_bug_entry(parent, --depth);
> +		parent = get_lock_parent(parent);
> +	}
>  
>  	printk("\nother info that might help us debug this:\n\n");
>  	lockdep_print_held_locks(curr);
> @@ -1072,14 +1167,15 @@ check_noncircular(struct lock_class *source, unsigned int depth)
>  	 */
>  	list_for_each_entry(entry, &source->locks_after, entry) {
>  		if (entry->class == hlock_class(check_target))
> -			return print_circular_bug_header(entry, depth+1);
> +			return 2;
>  		debug_atomic_inc(&nr_cyclic_checks);
> -		if (!check_noncircular(entry->class, depth+1))
> -			return print_circular_bug_entry(entry, depth+1);
> +		if (check_noncircular(entry->class, depth+1) == 2)
> +			return 2;
>  	}
>  	return 1;
>  }
>  
> +
>  #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
>  /*
>   * Forwards and backwards subgraph searching, for the purposes of
> @@ -1484,8 +1580,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
>  	 */
>  	check_source = next;
>  	check_target = prev;
> -	if (!(check_noncircular(hlock_class(next), 0)))
> -		return print_circular_bug_tail();
> +	if (check_noncircular(hlock_class(next), 0) == 2)
> +		return print_circular_bug();
> +
>  
>  	if (!check_prev_add_irq(curr, prev, next))
>  		return 0;
> diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
> index 699a2ac..6f48d37 100644
> --- a/kernel/lockdep_internals.h
> +++ b/kernel/lockdep_internals.h
> @@ -136,3 +136,86 @@ extern atomic_t nr_find_usage_backwards_recursions;
>  # define debug_atomic_dec(ptr)		do { } while (0)
>  # define debug_atomic_read(ptr)		0
>  #endif
> +
> +/* The circular_queue and helpers is used to implement the
> + * breadth-first search(BFS)algorithem, by which we can build
> + * the shortest path from the next lock to be acquired to the
> + * previous held lock if there is a circular between them.
> + * */
> +#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
> +struct circular_queue{
> +	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
> +	unsigned int  front, rear;
> +};
> +
> +#define LOCK_ACCESSED 		1UL
> +#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
> +
> +static inline void __cq_init(struct circular_queue *cq)
> +{
> +	cq->front = cq->rear = 0;
> +}
> +
> +static inline int __cq_empty(struct circular_queue *cq)
> +{
> +	return (cq->front == cq->rear);
> +}
> +
> +static inline int __cq_full(struct circular_queue *cq)
> +{
> +	return ((cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE)  == cq->front;
> +}
> +
> +static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
> +{
> +	if (__cq_full(cq))
> +		return -1;
> +
> +	cq->element[cq->rear] = elem;
> +	cq->rear = (cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE;
> +	return 0;
> +}
> +
> +static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
> +{
> +	if (__cq_empty(cq))
> +		return -1;
> +
> +	*elem = cq->element[cq->front];
> +	cq->front = (cq->front + 1)%MAX_CIRCULAR_QUE_SIZE;
> +	return 0;
> +}
> +
> +static inline int __cq_get_elem_count(struct circular_queue *cq)
> +{
> +	return (cq->rear - cq->front)%MAX_CIRCULAR_QUE_SIZE;
> +}
> +
> +static inline void mark_lock_accessed(struct lock_list *lock,
> +					struct lock_list *parent)
> +{
> +	lock->parent = (void *) parent + LOCK_ACCESSED;
> +}
> +
> +static inline unsigned long lock_accessed(struct lock_list *lock)
> +{
> +	return (unsigned long)lock->parent & LOCK_ACCESSED;
> +}
> +
> +static inline struct lock_list *get_lock_parent(struct lock_list *child)
> +{
> +	return (struct lock_list *)
> +		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
> +}
> +
> +static inline unsigned long get_lock_depth(struct lock_list *child)
> +{
> +	unsigned long depth = 0;
> +	struct lock_list *parent;
> +
> +	while ((parent = get_lock_parent(child))) {
> +		child = parent;
> +		depth++;
> +	}
> +	return depth;
> +}
> -- 
> 1.6.0.GIT
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/


^ permalink raw reply	[flat|nested] 54+ messages in thread
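
A note on the LOCK_ACCESSED trick quoted above: it relies on struct
lock_list pointers being at least 2-byte aligned, so bit 0 of the parent
pointer is always free to carry the "accessed" flag (the patch adds the
flag, which for an aligned pointer is equivalent to OR-ing it in). A
minimal userspace sketch of the idea, with stand-in names:

#include <assert.h>
#include <stdio.h>

struct node {
	struct node *parent;	/* bit 0 doubles as the "accessed" flag */
	int val;
};

#define ACCESSED	1UL
#define ACCESSED_MASK	(~ACCESSED)

static void mark_accessed(struct node *n, struct node *parent)
{
	/* Aligned pointers always have bit 0 clear, so it is free. */
	assert(((unsigned long)parent & ACCESSED) == 0);
	n->parent = (struct node *)((unsigned long)parent | ACCESSED);
}

static int is_accessed(const struct node *n)
{
	return (unsigned long)n->parent & ACCESSED;
}

static struct node *get_parent(const struct node *n)
{
	return (struct node *)((unsigned long)n->parent & ACCESSED_MASK);
}

int main(void)
{
	struct node root  = { .parent = NULL, .val = 1 };
	struct node child = { .parent = NULL, .val = 2 };

	mark_accessed(&child, &root);
	printf("accessed=%d parent->val=%d\n",
	       is_accessed(&child), get_parent(&child)->val);
	return 0;
}

Later patches in the series drop this encoding in favour of the
bfs_accessed bitmap indexed by the entry's position in list_entries[],
which keeps the parent pointer clean.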

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-07-11 21:09     ` Frederic Weisbecker
@ 2009-07-12  2:29       ` Ming Lei
  2009-07-13  7:02         ` Ingo Molnar
  0 siblings, 1 reply; 54+ messages in thread
From: Ming Lei @ 2009-07-12  2:29 UTC (permalink / raw)
  To: Frederic Weisbecker; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

2009/7/12 Frederic Weisbecker <fweisbec@gmail.com>:
> On Sat, Jul 11, 2009 at 11:25:29AM +0800, Ming Lei wrote:
>> 2009/7/11 Frederic Weisbecker <fweisbec@gmail.com>:

>> >>     3,The shortest path can be obtained by BFS if the target is
>> >>     found, but can't be got by DFS. By the shortest path, we can
>> >>     shorten the lock dependency chain and help to troubleshoot lock
>> >>     problem easier than before.
>> >
>> >
>> > But there I don't understand your argument.
>> > The shortest path finding doesn't seem to me to be a need.
>> > Example:
>> >
>> > Task 1 acquires: A B C
>> > And Later:
>> > Task 2 acquires: C B A
>> >
>> > DFS will probably report a circular lock dependency
>> > with A and C.
>> > BFS will probably report a circular lock dependency
>> > with B and C.
>> >
>> > Which one is the most important? Both dependencies must be fixed
>> > anyway. Once the developer fixes one of those, the remaining one
>> > will be reported and so on...
>> >
>> > Or am I missing something else?
>>
>> Yes, you are right.  With BFS we always find the shortest circle, whereas
>> DFS finds a random one.  No one can say which circle is the most
>> important from the point of view of deadlock.
>>
>> But it is easier to start troubleshooting from the shortest circle
>> than from a random one, and then move on to the next shortest circle
>> if any others still exist.
>>
>> Right?
>
>
> I don't have a strong opinion on this. I just don't think the shortest path
> is the most important one if there are many, many paths.
> Whatever AB-BA circles are encountered, all of them must be fixed.
> What might give a degree of importance to such a bad circle is the window
> in which it triggers.

The shortest path is just an inherent property of BFS, and we do not need
to pay any extra work for it.  So finding the shortest circle has no bad
effect, at least.

IMHO, troubleshooting from the shortest circle is easier than from any
other circle.

-- 
Lei Ming

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency  chain if finding a circle
  2009-07-11 21:30   ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle Frederic Weisbecker
@ 2009-07-12  2:42     ` Ming Lei
  2009-07-13  7:01       ` Ingo Molnar
  0 siblings, 1 reply; 54+ messages in thread
From: Ming Lei @ 2009-07-12  2:42 UTC (permalink / raw)
  To: Frederic Weisbecker; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

2009/7/12 Frederic Weisbecker <fweisbec@gmail.com>:
> On Sun, Jun 28, 2009 at 11:04:36PM +0800, tom.leiming@gmail.com wrote:
>> From: Ming Lei <tom.leiming@gmail.com>
>>
>> Currently lockdep will print the 1st circle detected if it exists when
>> acquiring a new (next) lock.
>>
>> This patch prints the shortest path from the next lock to be acquired to
>> the previous held lock if a circle is found.
>>
>> The patch still uses the current method to check circle, and once the
>> circle is found, breadth-first search algorithem is used to compute the
>> shortest path from the next lock to the previous lock in the forward
>> lock dependency graph.
>>
>> Printing the shortest path will shorten the dependency chain, and make
>> troubleshooting for possible circular locking easier.
>
>
>
> Oh! That looks fairly different from what I was thinking about...

If you continue to review the following patches, you will find it is the
same as what you are thinking about.

This patch is an intermediate one and just a starting point. It was with
this patch that checking circles by BFS came to my mind.

Maybe I should rearrange the patch set; I really want to get more
suggestions and comments before doing it.

Thanks for your review.

>
> If I understand well, let's imagine the following:
>
> Task 1 acquires: A B F
> Task 2 acquires: A B C D E F
> Task 3 acquires: F B
>
> Before your patch, DFS would report the BF - FB wicked dependency
> by reporting the Task 2 dependency snapshot, which is cluttered by
> the other locks C, D and E (that would be reported in the dependency chain
> if I'm not wrong) whereas BFS would be smarter by finding the shortest
> snapshot found in Task 3: just F B.
>
> Correct me if I misunderstand this patch.

You are correct: BFS will always find the shortest circle if there is one.

>
> I suggest you provide an example along with this patch; that would make
> it easier to demonstrate its importance.

No, as you said, the shortest circle is not very important; it is just a
byproduct, and we have no reason to reject it.

-- 
Lei Ming

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle
  2009-07-12  2:42     ` Ming Lei
@ 2009-07-13  7:01       ` Ingo Molnar
  2009-07-13  9:14         ` Peter Zijlstra
  2009-07-13 13:51         ` Ming Lei
  0 siblings, 2 replies; 54+ messages in thread
From: Ingo Molnar @ 2009-07-13  7:01 UTC (permalink / raw)
  To: Ming Lei; +Cc: Frederic Weisbecker, a.p.zijlstra, linux-kernel, akpm


* Ming Lei <tom.leiming@gmail.com> wrote:

> > If I understand well, let's imagine the following:
> >
> > Task 1 acquires: A B F
> > Task 2 acquires: A B C D E F
> > Task 3 acquires: F B
> >
> > Before your patch, DFS would report the BF - FB wicked 
> > dependency by reporting the Task 2 dependency snapshot, which is 
> > cluttered by the other locks C, D and E (that would be reported 
> > in the dependency chain if I'm not wrong) whereas BFS would be 
> > smarter by finding the shortest snapshot found in Task 3: just F 
> > B.
> >
> > Correct me if I misunderstand this patch.
> 
> You are correct: BFS will always find the shortest circle if there
> is one.
> 
> > I suggest you provide an example along with this patch; that would
> > make it easier to demonstrate its importance.
> 
> No, as you said, the shortest circle is not very important; it is
> just a byproduct, and we have no reason to reject it.

It's a nice byproduct, beyond the primary advantage of not being a
stack-based recursion check.

I think this patch-set is great, and there's just one more step
needed to round it out: it would be nice to remove the limit on the
maximum number of locks held per task (MAX_LOCK_DEPTH).

The way we could do it is to split out this bit of struct task:

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
        u64 curr_chain_key;
        int lockdep_depth;
        unsigned int lockdep_recursion;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        gfp_t lockdep_reclaim_gfp;
#endif

into a separate 'struct lockdep_state' structure, and allocate it 
dynamically during fork with an initial pre-set size of, say, 64 locks 
depth. If we hit that limit, we'd double the allocation threshold, 
which would cause a larger structure to be allocated for all newly 
allocated tasks.

( This means that the task that hits this threshold needs to have
  lockdep disabled until it exits - but that's OK. )

	Ingo

^ permalink raw reply	[flat|nested] 54+ messages in thread
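
A rough sketch of what Ingo describes, in userspace C with entirely
hypothetical names (this is one possible reading of the proposal, not an
implementation that ever existed): the fixed array moves behind a
pointer, fork-time allocation uses a global threshold, and a task that
hits its limit bumps the threshold for future tasks and turns lockdep
off for itself.

#include <stdlib.h>
#include <string.h>

struct held_lock { unsigned long key; };	/* stand-in */

struct lockdep_state {
	unsigned long	curr_chain_key;
	int		depth;
	unsigned int	max_depth;		/* capacity of held[] */
	struct held_lock held[];		/* flexible array member */
};

/* Grows whenever any task hits its limit; newly forked tasks benefit. */
static unsigned int lockdep_depth_threshold = 64;

static struct lockdep_state *lockdep_state_alloc(void)
{
	unsigned int n = lockdep_depth_threshold;
	struct lockdep_state *s;

	s = malloc(sizeof(*s) + n * sizeof(struct held_lock));
	if (!s)
		return NULL;
	memset(s, 0, sizeof(*s) + n * sizeof(struct held_lock));
	s->max_depth = n;
	return s;
}

/* Returns 0 on success; -1 means "disable lockdep for this task". */
static int lockdep_push_lock(struct lockdep_state *s, unsigned long key)
{
	if (s->depth >= (int)s->max_depth) {
		lockdep_depth_threshold *= 2;	/* help future tasks */
		return -1;
	}
	s->held[s->depth++].key = key;
	return 0;
}

int main(void)
{
	struct lockdep_state *s = lockdep_state_alloc();

	/* Fill until the limit trips; per the suggestion above, a real
	 * kernel would now keep this task lockdep-free until it exits. */
	while (s && lockdep_push_lock(s, 0xdeadUL) == 0)
		;
	free(s);
	return 0;
}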

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-07-12  2:29       ` Ming Lei
@ 2009-07-13  7:02         ` Ingo Molnar
  0 siblings, 0 replies; 54+ messages in thread
From: Ingo Molnar @ 2009-07-13  7:02 UTC (permalink / raw)
  To: Ming Lei; +Cc: Frederic Weisbecker, a.p.zijlstra, linux-kernel, akpm


* Ming Lei <tom.leiming@gmail.com> wrote:

> > I don't have a strong opinion on this. I just don't think the
> > shortest path is the most important one if there are many, many
> > paths. Whatever AB-BA circles are encountered, all of them must be
> > fixed. What might give a degree of importance to such a bad circle
> > is the window in which it triggers.
> 
> The shortest path is just an inherent property of BFS, and we do not
> need to pay any extra work for it.  So finding the shortest circle
> has no bad effect, at least.
> 
> IMHO, troubleshooting from the shortest circle is easier than from
> any other circle.

Agreed. Making lockdep reports more readable (i.e. smaller) is a 
very important goal.

Often the longer circles are just a side effect of the shorter
circles, and fixing the short ones will fix the larger ones too. So
printing the smallest is an advantage, IMO.

	Ingo

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() by BFS
  2009-06-28 15:04       ` [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() by BFS tom.leiming
  2009-06-28 15:04         ` [RESEND PATCH 05/11] kernel:lockdep:implement find_usage_*wards " tom.leiming
@ 2009-07-13  8:02         ` Dave Young
  2009-07-13  8:08           ` Dave Young
  2009-07-18 14:23         ` [tip:core/locking] lockdep: Implement " tip-bot for Ming Lei
  2009-08-02 13:02         ` tip-bot for Ming Lei
  3 siblings, 1 reply; 54+ messages in thread
From: Dave Young @ 2009-07-13  8:02 UTC (permalink / raw)
  To: tom.leiming; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

On Sun, Jun 28, 2009 at 11:04:39PM +0800, tom.leiming@gmail.com wrote:
> From: Ming Lei <tom.leiming@gmail.com>
> 
> This patch uses BFS to implement check_noncircular() and
> prints the generated shortest circle if exists.
> 
> Signed-off-by: Ming Lei <tom.leiming@gmail.com>
> ---
>  kernel/lockdep.c |   89 ++++++++++++++++++++++-------------------------------
>  1 files changed, 37 insertions(+), 52 deletions(-)
> 
> diff --git a/kernel/lockdep.c b/kernel/lockdep.c
> index ce6d09e..f740088 100644
> --- a/kernel/lockdep.c
> +++ b/kernel/lockdep.c
> @@ -985,12 +985,7 @@ static inline int __bfs_backward(struct lock_list *src_entry,
>   * Recursive, forwards-direction lock-dependency checking, used for
>   * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
>   * checking.
> - *
> - * (to keep the stackframe of the recursive functions small we
> - *  use these global variables, and we also mark various helper
> - *  functions as noinline.)
>   */
> -static struct held_lock *check_source, *check_target;
>  
>  /*
>   * Print a dependency chain entry (this is only done when a deadlock
> @@ -1014,7 +1009,9 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
>   * header first:
>   */
>  static noinline int
> -print_circular_bug_header(struct lock_list *entry, unsigned int depth)
> +print_circular_bug_header(struct lock_list *entry, unsigned int depth,
> +			struct held_lock *check_src,
> +			struct held_lock *check_tgt)
>  {
>  	struct task_struct *curr = current;
>  
> @@ -1027,9 +1024,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
>  	printk(  "-------------------------------------------------------\n");
>  	printk("%s/%d is trying to acquire lock:\n",
>  		curr->comm, task_pid_nr(curr));
> -	print_lock(check_source);
> +	print_lock(check_src);
>  	printk("\nbut task is already holding lock:\n");
> -	print_lock(check_target);
> +	print_lock(check_tgt);
>  	printk("\nwhich lock already depends on the new lock.\n\n");
>  	printk("\nthe existing dependency chain (in reverse order) is:\n");
>  
> @@ -1043,36 +1040,24 @@ static inline int class_equal(struct lock_list *entry, void *data)
>  	return entry->class == data;
>  }
>  
> -static noinline int print_circular_bug(void)
> +static noinline int print_circular_bug(struct lock_list *this,

Shouldn't this be 'static noinline int' here? Same question for the following ones.

> +				struct lock_list *target,
> +				struct held_lock *check_src,
> +				struct held_lock *check_tgt)
>  {
>  	struct task_struct *curr = current;
> -	struct lock_list this;
> -	struct lock_list *target;
>  	struct lock_list *parent;
> -	int result;
>  	unsigned long depth;
>  
>  	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
>  		return 0;
>  
> -	this.class = hlock_class(check_source);
> -	this.parent = NULL;
> -	if (!save_trace(&this.trace))
> +	if (!save_trace(&this->trace))
>  		return 0;
>  
> -	result = __bfs_forward(&this,
> -			hlock_class(check_target),
> -			class_equal,
> -			&target);
> -	if (result) {
> -		printk("\n%s:search shortest path failed:%d\n", __func__,
> -			result);
> -		return 0;
> -	}
> -
>  	depth = get_lock_depth(target);
>  
> -	print_circular_bug_header(target, depth);
> +	print_circular_bug_header(target, depth, check_src, check_tgt);
>  
>  	parent = get_lock_parent(target);
>  
> @@ -1090,6 +1075,16 @@ static noinline int print_circular_bug(void)
>  	return 0;
>  }
>  
> +static int noinline print_bfs_bug(int ret)
> +{
> +	if (!debug_locks_off_graph_unlock())
> +		return 0;
> +
> +	WARN(1, "lockdep bfs error:%d\n", ret);
> +
> +	return 0;
> +}
> +
>  #define RECURSION_LIMIT 40
>  
>  static int noinline print_infinite_recursion_bug(void)
> @@ -1168,31 +1163,17 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
>   * lead to <target>. Print an error and return 0 if it does.
>   */
>  static noinline int
> -check_noncircular(struct lock_class *source, unsigned int depth)
> +check_noncircular(struct lock_list *root, struct lock_class *target,
> +		struct lock_list **target_entry)
>  {
> -	struct lock_list *entry;
> +	int result;
>  
> -	if (lockdep_dependency_visit(source, depth))
> -		return 1;
> +	debug_atomic_inc(&nr_cyclic_checks);
>  
> -	debug_atomic_inc(&nr_cyclic_check_recursions);
> -	if (depth > max_recursion_depth)
> -		max_recursion_depth = depth;
> -	if (depth >= RECURSION_LIMIT)
> -		return print_infinite_recursion_bug();
> -	/*
> -	 * Check this lock's dependency list:
> -	 */
> -	list_for_each_entry(entry, &source->locks_after, entry) {
> -		if (entry->class == hlock_class(check_target))
> -			return 2;
> -		debug_atomic_inc(&nr_cyclic_checks);
> -		if (check_noncircular(entry->class, depth+1) == 2)
> -			return 2;
> -	}
> -	return 1;
> -}
> +	result = __bfs_forward(root, target, class_equal, target_entry);
>  
> +	return result;
> +}
>  
>  #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
>  /*
> @@ -1586,6 +1567,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
>  {
>  	struct lock_list *entry;
>  	int ret;
> +	struct lock_list this;
> +	struct lock_list *uninitialized_var(target_entry);
>  
>  	/*
>  	 * Prove that the new <prev> -> <next> dependency would not
> @@ -1596,11 +1579,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
>  	 * We are using global variables to control the recursion, to
>  	 * keep the stackframe size of the recursive functions low:
>  	 */
> -	check_source = next;
> -	check_target = prev;
> -
> -	if (check_noncircular(hlock_class(next), 0) == 2)
> -		return print_circular_bug();
> +	this.class = hlock_class(next);
> +	this.parent = NULL;
> +	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
> +	if (unlikely(!ret))
> +		return print_circular_bug(&this, target_entry, next, prev);
> +	else if (unlikely(ret < 0))
> +		return print_bfs_bug(ret);
>  
>  	if (!check_prev_add_irq(curr, prev, next))
>  		return 0;
> -- 
> 1.6.0.GIT
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular()  by BFS
  2009-07-13  8:02         ` [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() " Dave Young
@ 2009-07-13  8:08           ` Dave Young
  2009-07-21  3:33             ` Ming Lei
  0 siblings, 1 reply; 54+ messages in thread
From: Dave Young @ 2009-07-13  8:08 UTC (permalink / raw)
  To: tom.leiming; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

On Mon, Jul 13, 2009 at 4:02 PM, Dave Young<hidave.darkstar@gmail.com> wrote:
> On Sun, Jun 28, 2009 at 11:04:39PM +0800, tom.leiming@gmail.com wrote:
>> From: Ming Lei <tom.leiming@gmail.com>
>>
>> This patch uses BFS to implement check_noncircular() and
>> prints the generated shortest circle if exists.
>>
>> Signed-off-by: Ming Lei <tom.leiming@gmail.com>
>> ---
>>  kernel/lockdep.c |   89 ++++++++++++++++++++++-------------------------------
>>  1 files changed, 37 insertions(+), 52 deletions(-)
>>
>> diff --git a/kernel/lockdep.c b/kernel/lockdep.c
>> index ce6d09e..f740088 100644
>> --- a/kernel/lockdep.c
>> +++ b/kernel/lockdep.c
>> @@ -985,12 +985,7 @@ static inline int __bfs_backward(struct lock_list *src_entry,
>>   * Recursive, forwards-direction lock-dependency checking, used for
>>   * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
>>   * checking.
>> - *
>> - * (to keep the stackframe of the recursive functions small we
>> - *  use these global variables, and we also mark various helper
>> - *  functions as noinline.)
>>   */
>> -static struct held_lock *check_source, *check_target;
>>
>>  /*
>>   * Print a dependency chain entry (this is only done when a deadlock
>> @@ -1014,7 +1009,9 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
>>   * header first:
>>   */
>>  static noinline int
>> -print_circular_bug_header(struct lock_list *entry, unsigned int depth)
>> +print_circular_bug_header(struct lock_list *entry, unsigned int depth,
>> +                     struct held_lock *check_src,
>> +                     struct held_lock *check_tgt)
>>  {
>>       struct task_struct *curr = current;
>>
>> @@ -1027,9 +1024,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
>>       printk(  "-------------------------------------------------------\n");
>>       printk("%s/%d is trying to acquire lock:\n",
>>               curr->comm, task_pid_nr(curr));
>> -     print_lock(check_source);
>> +     print_lock(check_src);
>>       printk("\nbut task is already holding lock:\n");
>> -     print_lock(check_target);
>> +     print_lock(check_tgt);
>>       printk("\nwhich lock already depends on the new lock.\n\n");
>>       printk("\nthe existing dependency chain (in reverse order) is:\n");
>>
>> @@ -1043,36 +1040,24 @@ static inline int class_equal(struct lock_list *entry, void *data)
>>       return entry->class == data;
>>  }
>>
>> -static noinline int print_circular_bug(void)
>> +static noinline int print_circular_bug(struct lock_list *this,
>
> Shouldn't this be 'static noinline int' here? Same question for the following ones.
>

Sorry for pointing to the wrong place. Please grep to find them:

161:+static int noinline print_bfs_bug(int ret)
173: static int noinline print_infinite_recursion_bug(void)

-- 
Regards
dave

^ permalink raw reply	[flat|nested] 54+ messages in thread
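
For completeness: both orderings that Dave grepped out do compile, since
noinline is just a function attribute, but kernel style puts the storage
class and the attribute before the return type. A tiny sketch; the macro
below is roughly what the kernel's compiler headers provide, check
<linux/compiler.h> for the real definition:

#define noinline __attribute__((noinline))

/* Preferred, matches the rest of lockdep.c: */
static noinline int preferred_form(void) { return 0; }

/* Accepted by gcc, but inconsistent with the surrounding code: */
static int noinline accepted_form(void)  { return 0; }

int main(void)
{
	return preferred_form() + accepted_form();
}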

* Re: [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle
  2009-06-28 15:04 ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle tom.leiming
  2009-06-28 15:04   ` [RESEND PATCH 02/11] kernel:lockdep:improve implementation of BFS tom.leiming
  2009-07-11 21:30   ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle Frederic Weisbecker
@ 2009-07-13  9:01   ` Dave Young
  2009-07-18 14:23   ` [tip:core/locking] lockdep: Print " tip-bot for Ming Lei
  2009-08-02 13:01   ` tip-bot for Ming Lei
  4 siblings, 0 replies; 54+ messages in thread
From: Dave Young @ 2009-07-13  9:01 UTC (permalink / raw)
  To: tom.leiming; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

On Sun, Jun 28, 2009 at 11:04:36PM +0800, tom.leiming@gmail.com wrote:
> From: Ming Lei <tom.leiming@gmail.com>
> 
> Currently lockdep will print the 1st circle detected if it exists when
> acquiring a new (next) lock.
> 
> This patch prints the shortest path from the next lock to be acquired to
> the previous held lock if a circle is found.
> 
> The patch still uses the current method to check circle, and once the
> circle is found, breadth-first search algorithem is used to compute the
> shortest path from the next lock to the previous lock in the forward
> lock dependency graph.
> 
> Printing the shortest path will shorten the dependency chain, and make
> troubleshooting for possible circular locking easier.
> 
> Signed-off-by: Ming Lei <tom.leiming@gmail.com>
> ---
>  include/linux/lockdep.h    |    6 ++
>  kernel/lockdep.c           |  115 ++++++++++++++++++++++++++++++++++++++++----
>  kernel/lockdep_internals.h |   83 +++++++++++++++++++++++++++++++
>  3 files changed, 195 insertions(+), 9 deletions(-)

Style issue: spaces are missing around operators in a few places. It seems checkpatch.pl did not warn about them; a checkpatch.pl bug?

> 
> diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
> index b25d1b5..9ec026f 100644
> --- a/include/linux/lockdep.h
> +++ b/include/linux/lockdep.h
> @@ -149,6 +149,12 @@ struct lock_list {
>  	struct lock_class		*class;
>  	struct stack_trace		trace;
>  	int				distance;
> +
> +	/*The parent field is used to implement breadth-first search,and
> +	 *the bit 0 is reused to indicate if the lock has been accessed
> +	 *in BFS.
> +	 */
> +	struct lock_list		*parent;
>  };
>  
>  /*
> diff --git a/kernel/lockdep.c b/kernel/lockdep.c
> index 8bbeef9..93dc70d 100644
> --- a/kernel/lockdep.c
> +++ b/kernel/lockdep.c
> @@ -897,6 +897,79 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
>  	return 1;
>  }
>  
> +static struct circular_queue  lock_cq;
> +static int __search_shortest_path(struct lock_list *source_entry,
> +				struct lock_class *target,
> +				struct lock_list **target_entry,
> +				int forward)
> +{
> +	struct lock_list *entry;
> +	struct circular_queue *cq = &lock_cq;
> +	int ret = 1;
> +
> +	__cq_init(cq);
> +
> +	mark_lock_accessed(source_entry, NULL);
> +	if (source_entry->class == target) {
> +		*target_entry = source_entry;
> +		ret = 0;
> +		goto exit;
> +	}
> +
> +	__cq_enqueue(cq, (unsigned long)source_entry);
> +
> +	while (!__cq_empty(cq)) {
> +		struct lock_list *lock;
> +		struct list_head *head;
> +
> +		__cq_dequeue(cq, (unsigned long *)&lock);
> +
> +		if (!lock->class) {
> +			ret = -2;
> +			goto exit;
> +		}
> +
> +		if (forward)
> +			head = &lock->class->locks_after;
> +		else
> +			head = &lock->class->locks_before;
> +
> +		list_for_each_entry(entry, head, entry) {
> +			if (!lock_accessed(entry)) {
> +				mark_lock_accessed(entry, lock);
> +				if (entry->class == target) {
> +					*target_entry = entry;
> +					ret = 0;
> +					goto exit;
> +				}
> +
> +				if (__cq_enqueue(cq, (unsigned long)entry)) {
> +					ret = -1;
> +					goto exit;
> +				}
> +			}
> +		}
> +	}
> +exit:
> +	return ret;
> +}
> +
> +static inline int __search_forward_shortest_path(struct lock_list *src_entry,
> +				struct lock_class *target,
> +				struct lock_list **target_entry)
> +{
> +	return __search_shortest_path(src_entry, target, target_entry, 1);
> +
> +}
> +
> +static inline int __search_backward_shortest_path(struct lock_list *src_entry,
> +				struct lock_class *target,
> +				struct lock_list **target_entry)
> +{
> +	return __search_shortest_path(src_entry, target, target_entry, 0);
> +
> +}
> +
>  /*
>   * Recursive, forwards-direction lock-dependency checking, used for
>   * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
> @@ -934,7 +1007,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
>  {
>  	struct task_struct *curr = current;
>  
> -	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
> +	if (debug_locks_silent)
>  		return 0;
>  
>  	printk("\n=======================================================\n");
> @@ -954,19 +1027,41 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
>  	return 0;
>  }
>  
> -static noinline int print_circular_bug_tail(void)
> +static noinline int print_circular_bug(void)
>  {
>  	struct task_struct *curr = current;
>  	struct lock_list this;
> +	struct lock_list *target;
> +	struct lock_list *parent;
> +	int result;
> +	unsigned long depth;
>  
> -	if (debug_locks_silent)
> +	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
>  		return 0;
>  
>  	this.class = hlock_class(check_source);
>  	if (!save_trace(&this.trace))
>  		return 0;
>  
> -	print_circular_bug_entry(&this, 0);
> +	result = __search_forward_shortest_path(&this,
> +						hlock_class(check_target),
> +						&target);
> +	if (result) {
> +		printk("\n%s:search shortest path failed:%d\n", __func__,
> +			result);
> +		return 0;
> +	}
> +
> +	depth = get_lock_depth(target);
> +
> +	print_circular_bug_header(target, depth);
> +
> +	parent = get_lock_parent(target);
> +
> +	while (parent) {
> +		print_circular_bug_entry(parent, --depth);
> +		parent = get_lock_parent(parent);
> +	}
>  
>  	printk("\nother info that might help us debug this:\n\n");
>  	lockdep_print_held_locks(curr);
> @@ -1072,14 +1167,15 @@ check_noncircular(struct lock_class *source, unsigned int depth)
>  	 */
>  	list_for_each_entry(entry, &source->locks_after, entry) {
>  		if (entry->class == hlock_class(check_target))
> -			return print_circular_bug_header(entry, depth+1);
> +			return 2;
>  		debug_atomic_inc(&nr_cyclic_checks);
> -		if (!check_noncircular(entry->class, depth+1))
> -			return print_circular_bug_entry(entry, depth+1);
> +		if (check_noncircular(entry->class, depth+1) == 2)

Here

> +			return 2;
>  	}
>  	return 1;
>  }
>  
> +
>  #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
>  /*
>   * Forwards and backwards subgraph searching, for the purposes of
> @@ -1484,8 +1580,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
>  	 */
>  	check_source = next;
>  	check_target = prev;
> -	if (!(check_noncircular(hlock_class(next), 0)))
> -		return print_circular_bug_tail();
> +	if (check_noncircular(hlock_class(next), 0) == 2)
> +		return print_circular_bug();
> +
>  
>  	if (!check_prev_add_irq(curr, prev, next))
>  		return 0;
> diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
> index 699a2ac..6f48d37 100644
> --- a/kernel/lockdep_internals.h
> +++ b/kernel/lockdep_internals.h
> @@ -136,3 +136,86 @@ extern atomic_t nr_find_usage_backwards_recursions;
>  # define debug_atomic_dec(ptr)		do { } while (0)
>  # define debug_atomic_read(ptr)		0
>  #endif
> +
> +/* The circular_queue and helpers are used to implement the
> + * breadth-first search (BFS) algorithm, by which we can build
> + * the shortest path from the next lock to be acquired to the
> + * previously held lock if there is a circle between them.
> + */
> +#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
> +struct circular_queue{
> +	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
> +	unsigned int  front, rear;
> +};
> +
> +#define LOCK_ACCESSED 		1UL
> +#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
> +
> +static inline void __cq_init(struct circular_queue *cq)
> +{
> +	cq->front = cq->rear = 0;
> +}
> +
> +static inline int __cq_empty(struct circular_queue *cq)
> +{
> +	return (cq->front == cq->rear);
> +}
> +
> +static inline int __cq_full(struct circular_queue *cq)
> +{
> +	return ((cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE)  == cq->front;

And here

> +}
> +
> +static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
> +{
> +	if (__cq_full(cq))
> +		return -1;
> +
> +	cq->element[cq->rear] = elem;
> +	cq->rear = (cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE;

And here

> +	return 0;
> +}
> +
> +static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
> +{
> +	if (__cq_empty(cq))
> +		return -1;
> +
> +	*elem = cq->element[cq->front];
> +	cq->front = (cq->front + 1)%MAX_CIRCULAR_QUE_SIZE;

And here

> +	return 0;
> +}
> +
> +static inline int __cq_get_elem_count(struct circular_queue *cq)
> +{
> +	return (cq->rear - cq->front)%MAX_CIRCULAR_QUE_SIZE;

And here

> +}
> +
> +static inline void mark_lock_accessed(struct lock_list *lock,
> +					struct lock_list *parent)
> +{
> +	lock->parent = (void *) parent + LOCK_ACCESSED;

(void *)parent

> +}
> +
> +static inline unsigned long lock_accessed(struct lock_list *lock)
> +{
> +	return (unsigned long)lock->parent & LOCK_ACCESSED;
> +}
> +
> +static inline struct lock_list *get_lock_parent(struct lock_list *child)
> +{
> +	return (struct lock_list *)
> +		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
> +}
> +
> +static inline unsigned long get_lock_depth(struct lock_list *child)
> +{
> +	unsigned long depth = 0;
> +	struct lock_list *parent;
> +
> +	while ((parent = get_lock_parent(child))) {
> +		child = parent;
> +		depth++;
> +	}
> +	return depth;
> +}
> -- 
> 1.6.0.GIT
> 
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/

^ permalink raw reply	[flat|nested] 54+ messages in thread
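
The queue under review above is a fixed-size ring buffer in which one slot is
deliberately kept empty so that the full and empty states stay distinguishable.
A minimal user-space exercise of the same logic, with the size shrunk so the
wrap-around is visible (the tiny size and main() are illustrative only):

    #include <stdio.h>

    #define QUE_SIZE 4UL	/* shrunk from 4096 to make wrap-around visible */

    struct circular_queue {
    	unsigned long element[QUE_SIZE];
    	unsigned int front, rear;
    };

    static void cq_init(struct circular_queue *cq)
    {
    	cq->front = cq->rear = 0;
    }

    static int cq_full(struct circular_queue *cq)
    {
    	return ((cq->rear + 1) % QUE_SIZE) == cq->front;
    }

    static int cq_enqueue(struct circular_queue *cq, unsigned long elem)
    {
    	if (cq_full(cq))
    		return -1;
    	cq->element[cq->rear] = elem;
    	cq->rear = (cq->rear + 1) % QUE_SIZE;
    	return 0;
    }

    static int cq_dequeue(struct circular_queue *cq, unsigned long *elem)
    {
    	if (cq->front == cq->rear)	/* empty */
    		return -1;
    	*elem = cq->element[cq->front];
    	cq->front = (cq->front + 1) % QUE_SIZE;
    	return 0;
    }

    int main(void)
    {
    	struct circular_queue cq;
    	unsigned long v;

    	cq_init(&cq);
    	for (v = 1; cq_enqueue(&cq, v) == 0; v++)
    		;
    	printf("enqueued %lu items\n", v - 1);	/* 3: one slot stays empty */
    	while (cq_dequeue(&cq, &v) == 0)
    		printf("dequeued %lu\n", v);
    	return 0;
    }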

* Re: [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle
  2009-07-13  7:01       ` Ingo Molnar
@ 2009-07-13  9:14         ` Peter Zijlstra
  2009-07-13 13:56           ` Ming Lei
  2009-07-13 13:51         ` Ming Lei
  1 sibling, 1 reply; 54+ messages in thread
From: Peter Zijlstra @ 2009-07-13  9:14 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: Ming Lei, Frederic Weisbecker, linux-kernel, akpm

On Mon, 2009-07-13 at 09:01 +0200, Ingo Molnar wrote:
> 
> It's a nice byproduct, beyond the primary advantage of not being a 
> stack based recursion check.
> 
> I think this patch-set is great, and there's just one more step 
> needed to make it round: it would be nice to remove the limitation 
> of maximum number of locks held per task. (MAX_LOCK_DEPTH)
> 
> The way we could do it is to split out this bit of struct task:
> 
> #ifdef CONFIG_LOCKDEP
> # define MAX_LOCK_DEPTH 48UL
>         u64 curr_chain_key;
>         int lockdep_depth;
>         unsigned int lockdep_recursion;
>         struct held_lock held_locks[MAX_LOCK_DEPTH];
>         gfp_t lockdep_reclaim_gfp;
> #endif
> 
> into a separate 'struct lockdep_state' structure, and allocate it 
> dynamically during fork with a initial pre-set size of say 64 locks 
> depth. If we hit that limit, we'd double the allocation threshold, 
> which would cause a larger structure to be allocated for all newly 
> allocated tasks.

Right, except allocating stuff while in the middle of lockdep is very
hard since it involves taking more locks :-)

I've tried it several times but never quite managed it in a way that I
felt comfortable with.

It would require having a reserve and serializing over that reserve.


^ permalink raw reply	[flat|nested] 54+ messages in thread
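
For concreteness, a rough sketch of the split being proposed; the field set
comes from the mail above, while the structure layout, the flexible array,
and the allocation helper are hypothetical and gloss over exactly the
in-lockdep allocation problem raised here:

    /* Hypothetical sketch only -- not actual kernel code. */
    struct lockdep_state {
    	u64			curr_chain_key;
    	int			lockdep_depth;
    	unsigned int		lockdep_recursion;
    	gfp_t			lockdep_reclaim_gfp;
    	unsigned int		max_depth;	/* capacity of held_locks[] */
    	struct held_lock	held_locks[];	/* max_depth entries */
    };

    static unsigned int lockdep_depth_threshold = 64;	/* doubled when hit */

    /* called at fork, where sleeping allocations are still allowed */
    static struct lockdep_state *alloc_lockdep_state(void)
    {
    	unsigned int depth = lockdep_depth_threshold;
    	struct lockdep_state *s;

    	s = kzalloc(sizeof(*s) + depth * sizeof(s->held_locks[0]),
    		    GFP_KERNEL);
    	if (s)
    		s->max_depth = depth;
    	return s;
    }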

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-06-28 15:04 [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS tom.leiming
  2009-06-28 15:04 ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle tom.leiming
  2009-07-11  0:43 ` [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS Frederic Weisbecker
@ 2009-07-13  9:16 ` Peter Zijlstra
  2009-07-16  4:39   ` Ming Lei
  2 siblings, 1 reply; 54+ messages in thread
From: Peter Zijlstra @ 2009-07-13  9:16 UTC (permalink / raw)
  To: tom.leiming; +Cc: linux-kernel, akpm, mingo

On Sun, 2009-06-28 at 23:04 +0800, tom.leiming@gmail.com wrote:
> Hi,Peter
> 
> Currently lockdep uses recursion DFS(depth-first search) algorithm to
> search target in checking lock circle(check_noncircular()),irq-safe
> -> irq-unsafe(check_irq_usage()) and irq inversion when adding a new
> lock dependency. This patches replace the current DFS with BFS, based on
> the following consideration:
> 
>     1,no loss of efficiency, no matter DFS or BFS, the running time
>     are O(V+E) (V is vertex count, and E is edge count of one
>     graph);

I'd still like to get some feedback on the loss of that generation count
optimization done by DaveM; I haven't had time to analyze the
ramifications of that.


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency  chain if finding a circle
  2009-07-13  7:01       ` Ingo Molnar
  2009-07-13  9:14         ` Peter Zijlstra
@ 2009-07-13 13:51         ` Ming Lei
  1 sibling, 0 replies; 54+ messages in thread
From: Ming Lei @ 2009-07-13 13:51 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: Frederic Weisbecker, a.p.zijlstra, linux-kernel, akpm

2009/7/13 Ingo Molnar <mingo@elte.hu>:
>
> * Ming Lei <tom.leiming@gmail.com> wrote:
>> No, as you said, the shortest circle is not very important, and it
>> is just a byproduct and we have no reason to reject it.
>
> It's a nice byproduct, beyond the primary advantage of not being a
> stack based recursion check.
>
> I think this patch-set is great, and there's just one more step
> needed to make it round: it would be nice to remove the limitation
> of maximum number of locks held per task. (MAX_LOCK_DEPTH)

IMHO, removing the MAX_LOCK_DEPTH limitation is another topic, and it
has nothing to do with this patch-set.  We can still make the patch-set
complete with the limitation in place, as before.

Can we plan to remove the limitation of MAX_LOCK_DEPTH after merging
the patch-set?

Thanks.

>
> The way we could do it is to split out this bit of struct task:
>
> #ifdef CONFIG_LOCKDEP
> # define MAX_LOCK_DEPTH 48UL
>        u64 curr_chain_key;
>        int lockdep_depth;
>        unsigned int lockdep_recursion;
>        struct held_lock held_locks[MAX_LOCK_DEPTH];
>        gfp_t lockdep_reclaim_gfp;
> #endif
>
> into a separate 'struct lockdep_state' structure, and allocate it
> dynamically during fork with a initial pre-set size of say 64 locks
> depth. If we hit that limit, we'd double the allocation threshold,
> which would cause a larger structure to be allocated for all newly
> allocated tasks.
>
> ( This means that the task that hits this threshold needs to have
>  lockdep disabled until it exits - but that's OK. )
>
>        Ingo
>

-- 
Lei Ming

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency  chain if finding a circle
  2009-07-13  9:14         ` Peter Zijlstra
@ 2009-07-13 13:56           ` Ming Lei
  0 siblings, 0 replies; 54+ messages in thread
From: Ming Lei @ 2009-07-13 13:56 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Ingo Molnar, Frederic Weisbecker, linux-kernel, akpm

2009/7/13 Peter Zijlstra <a.p.zijlstra@chello.nl>:
> On Mon, 2009-07-13 at 09:01 +0200, Ingo Molnar wrote:
>>
>> It's a nice byproduct, beyond the primary advantage of not being a
>> stack based recursion check.
>>
>> I think this patch-set is great, and there's just one more step
>> needed to make it round: it would be nice to remove the limitation
>> of maximum number of locks held per task. (MAX_LOCK_DEPTH)
>>
>> The way we could do it is to split out this bit of struct task:
>>
>> #ifdef CONFIG_LOCKDEP
>> # define MAX_LOCK_DEPTH 48UL
>>         u64 curr_chain_key;
>>         int lockdep_depth;
>>         unsigned int lockdep_recursion;
>>         struct held_lock held_locks[MAX_LOCK_DEPTH];
>>         gfp_t lockdep_reclaim_gfp;
>> #endif
>>
>> into a separate 'struct lockdep_state' structure, and allocate it
>> dynamically during fork with a initial pre-set size of say 64 locks
>> depth. If we hit that limit, we'd double the allocation threshold,
>> which would cause a larger structure to be allocated for all newly
>> allocated tasks.
>
> Right, except allocating stuff while in the middle of lockdep is very
> hard since it involves taking more locks :-)

Yes, it is a little dangerous to allocate memory dynamically inside lockdep.

>
> I've tried it several times but never quite managed it in a way that I
> felt comfortable with.
>
> It would require having a reserve and serializing over that reserve.

Yes, good idea.


-- 
Lei Ming

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-07-13  9:16 ` Peter Zijlstra
@ 2009-07-16  4:39   ` Ming Lei
  2009-07-16  5:22     ` Peter Zijlstra
  0 siblings, 1 reply; 54+ messages in thread
From: Ming Lei @ 2009-07-16  4:39 UTC (permalink / raw)
  To: Peter Zijlstra, mingo
  Cc: linux-kernel, akpm, Linus Torvalds, David S. Miller, Frederic Weisbecker

2009/7/13 Peter Zijlstra <a.p.zijlstra@chello.nl>:
> On Sun, 2009-06-28 at 23:04 +0800, tom.leiming@gmail.com wrote:
>> Hi,Peter
>>
>> Currently lockdep uses recursion DFS(depth-first search) algorithm to
>> search target in checking lock circle(check_noncircular()),irq-safe
>> -> irq-unsafe(check_irq_usage()) and irq inversion when adding a new
>> lock dependency. This patches replace the current DFS with BFS, based on
>> the following consideration:
>>
>>     1,no loss of efficiency, no matter DFS or BFS, the running time
>>     are O(V+E) (V is vertex count, and E is edge count of one
>>     graph);
>
> I'd still like to get some feedback on the loss of that generation count
> optimization done by DaveM; I haven't had time to analyze the
> ramifications of that.
>
>

Hi, Ingo and Peter

As you said, the BFS patch-set is valuable (it greatly decreases kernel
stack consumption and reports the shortest circle).

It has been pending on lkml silently for several months, and I hope it
can be merged into the tip tree or another tree. I don't know what we
should or can do to get it accepted into the tip tree. Anyway, I'd like
to do whatever fixes are needed for it to be merged.

Any suggestions, ideas or objections?

Thanks.

-- 
Lei Ming

^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-07-16  4:39   ` Ming Lei
@ 2009-07-16  5:22     ` Peter Zijlstra
  2009-07-16  7:12       ` Ming Lei
  0 siblings, 1 reply; 54+ messages in thread
From: Peter Zijlstra @ 2009-07-16  5:22 UTC (permalink / raw)
  To: Ming Lei
  Cc: mingo, linux-kernel, akpm, Linus Torvalds, David S. Miller,
	Frederic Weisbecker

On Thu, 2009-07-16 at 12:39 +0800, Ming Lei wrote:
> 2009/7/13 Peter Zijlstra <a.p.zijlstra@chello.nl>:
> > On Sun, 2009-06-28 at 23:04 +0800, tom.leiming@gmail.com wrote:
> >> Hi,Peter
> >>
> >> Currently lockdep uses recursion DFS(depth-first search) algorithm to
> >> search target in checking lock circle(check_noncircular()),irq-safe
> >> -> irq-unsafe(check_irq_usage()) and irq inversion when adding a new
> >> lock dependency. This patches replace the current DFS with BFS, based on
> >> the following consideration:
> >>
> >>     1,no loss of efficiency, no matter DFS or BFS, the running time
> >>     are O(V+E) (V is vertex count, and E is edge count of one
> >>     graph);
> >
> > I'd still like to get some feedback on the loss of that generation count
> > optimization done by DaveM; I haven't had time to analyze the
> > ramifications of that.
> >
> >
> 
> Hi, Ingo and Peter
> 
> As you said, BFS patch-set is valuable(decrease kernel stack consume
> largely, and report
> the shortest circle).
> 
> It has been pending on lkml silently for several monthes, and I hope
> it may be merged to
> tip-tree or other tree. I don't know what we should and can do to make
> it being accepted by tip-tree.
> Anyway, I'd like to do any fixes for it to be merged.
> 
> Any suggestions, ideas or objections?

I've asked several times for comment on the removal of that generation
count optimization DaveM did. Is that a normal part of DFS, or was it an
optimization on top, particular to this problem? Can something similar
be done for BFS, etc.?

Other than that, it does seem to hold up; I've got the patches running
on my laptop. Don't worry, they're not getting lost.


^ permalink raw reply	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-07-16  5:22     ` Peter Zijlstra
@ 2009-07-16  7:12       ` Ming Lei
  2009-07-16  9:54         ` Peter Zijlstra
  0 siblings, 1 reply; 54+ messages in thread
From: Ming Lei @ 2009-07-16  7:12 UTC (permalink / raw)
  To: Peter Zijlstra
  Cc: mingo, linux-kernel, akpm, Linus Torvalds, David S. Miller,
	Frederic Weisbecker

2009/7/16 Peter Zijlstra <a.p.zijlstra@chello.nl>:
>
> I've asked several times for comment on the removal of that generation
> count optimization DaveM did. Is that a normal part of DFS, or was it an
> optimization on top, particular to this problem? Can something similar
> be done for BFS, etc.?

DFS uses the generation count DaveM added to decide whether a class has
been visited; BFS uses a bitmap to mark whether a class has been visited,
and the extra efficiency cost is one

          bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);

before starting each BFS.

On most architectures, bitmap_zero can be optimized down to only a very
few instructions, can't it?

It seems a bitmap is easier to use in BFS than a generation count.

>
> Other than that, it does seem to hold up; I've got the patches running
> on my laptop. Don't worry, they're not getting lost.

Great.

-- 
Lei Ming

^ permalink raw reply	[flat|nested] 54+ messages in thread
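
For readers comparing the two visited-set schemes in this exchange, a toy
user-space model of both (the array size and helper names are invented):
the bitmap scheme pays one wholesale clear per search, while the generation
scheme skips the clear by bumping a counter and treating stale stamps as
"not visited".

    #include <stdio.h>
    #include <string.h>

    #define NODES 8

    /* bitmap scheme, as in the BFS patches: clear everything per search */
    static unsigned char visited[NODES];
    static void bitmap_init(void) { memset(visited, 0, sizeof(visited)); }
    static int  bitmap_seen(int n) { return visited[n]; }
    static void bitmap_mark(int n) { visited[n] = 1; }

    /* generation scheme, roughly what the DFS used: no per-search clear,
     * a node counts as visited iff its stamp matches the current search */
    static unsigned long stamp[NODES], generation;
    static void gen_init(void) { generation++; }
    static int  gen_seen(int n) { return stamp[n] == generation; }
    static void gen_mark(int n) { stamp[n] = generation; }

    int main(void)
    {
    	bitmap_init(); bitmap_mark(3);
    	gen_init();    gen_mark(3);
    	printf("bitmap: 3=%d 4=%d\n", bitmap_seen(3), bitmap_seen(4));
    	printf("gen:    3=%d 4=%d\n", gen_seen(3), gen_seen(4));
    	return 0;
    }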

* Re: [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS
  2009-07-16  7:12       ` Ming Lei
@ 2009-07-16  9:54         ` Peter Zijlstra
  0 siblings, 0 replies; 54+ messages in thread
From: Peter Zijlstra @ 2009-07-16  9:54 UTC (permalink / raw)
  To: Ming Lei
  Cc: mingo, linux-kernel, akpm, Linus Torvalds, David S. Miller,
	Frederic Weisbecker

On Thu, 2009-07-16 at 15:12 +0800, Ming Lei wrote:
> 2009/7/16 Peter Zijlstra <a.p.zijlstra@chello.nl>:
> >
> > I've asked several times for comment on the removal of that generation
> > count optimization DaveM did. Is that a normal part of DFS, or was it an
> > optimization on top, particular to this problem? Can something similar
> > be done for BFS, etc.?
> 
> DFS uses the generation count DaveM added to decide whether a class has
> been visited; BFS uses a bitmap to mark whether a class has been visited,
> and the extra efficiency cost is one
> 
>           bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
> 
> before starting each BFS.
> 
> On most architectures, bitmap_zero can be optimized down to only a very
> few instructions, can't it?
> 
> It seems a bitmap is easier to use in BFS than a generation count.

Ah, I so missed that, sorry about that.

I'll go over the patches one more time and push them out.

Thanks!


^ permalink raw reply	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Print the shortest dependency chain if finding a circle
  2009-06-28 15:04 ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle tom.leiming
                     ` (2 preceding siblings ...)
  2009-07-13  9:01   ` Dave Young
@ 2009-07-18 14:23   ` tip-bot for Ming Lei
  2009-08-02 13:01   ` tip-bot for Ming Lei
  4 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-07-18 14:23 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  404918949c3fb1b0138e4a281d4e7e72e5b348bb
Gitweb:     http://git.kernel.org/tip/404918949c3fb1b0138e4a281d4e7e72e5b348bb
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:51 +0200

lockdep: Print the shortest dependency chain if finding a circle

Currently lockdep will print the 1st circle detected if it
exists when acquiring a new (next) lock.

This patch prints the shortest path from the next lock to be
acquired to the previous held lock if a circle is found.

The patch still uses the current method to check for a circle, and
once a circle is found, a breadth-first search algorithm is
used to compute the shortest path from the next lock to the
previous lock in the forward lock-dependency graph.

Printing the shortest path will shorten the dependency chain,
and make troubleshooting for possible circular locking easier.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-2-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 include/linux/lockdep.h    |    6 ++
 kernel/lockdep.c           |  115 ++++++++++++++++++++++++++++++++++++++++----
 kernel/lockdep_internals.h |   83 +++++++++++++++++++++++++++++++
 3 files changed, 195 insertions(+), 9 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b5..9ec026f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
 	struct lock_class		*class;
 	struct stack_trace		trace;
 	int				distance;
+
+	/* The parent field is used to implement breadth-first search, and
+	 * bit 0 is reused to indicate whether the lock has been accessed
+	 * in BFS.
+	 */
+	struct lock_list		*parent;
 };
 
 /*
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef9..93dc70d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -897,6 +897,79 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
+static struct circular_queue  lock_cq;
+static int __search_shortest_path(struct lock_list *source_entry,
+				struct lock_class *target,
+				struct lock_list **target_entry,
+				int forward)
+{
+	struct lock_list *entry;
+	struct circular_queue *cq = &lock_cq;
+	int ret = 1;
+
+	__cq_init(cq);
+
+	mark_lock_accessed(source_entry, NULL);
+	if (source_entry->class == target) {
+		*target_entry = source_entry;
+		ret = 0;
+		goto exit;
+	}
+
+	__cq_enqueue(cq, (unsigned long)source_entry);
+
+	while (!__cq_empty(cq)) {
+		struct lock_list *lock;
+		struct list_head *head;
+
+		__cq_dequeue(cq, (unsigned long *)&lock);
+
+		if (!lock->class) {
+			ret = -2;
+			goto exit;
+		}
+
+		if (forward)
+			head = &lock->class->locks_after;
+		else
+			head = &lock->class->locks_before;
+
+		list_for_each_entry(entry, head, entry) {
+			if (!lock_accessed(entry)) {
+				mark_lock_accessed(entry, lock);
+				if (entry->class == target) {
+					*target_entry = entry;
+					ret = 0;
+					goto exit;
+				}
+
+				if (__cq_enqueue(cq, (unsigned long)entry)) {
+					ret = -1;
+					goto exit;
+				}
+			}
+		}
+	}
+exit:
+	return ret;
+}
+
+static inline int __search_forward_shortest_path(struct lock_list *src_entry,
+				struct lock_class *target,
+				struct lock_list **target_entry)
+{
+	return __search_shortest_path(src_entry, target, target_entry, 1);
+
+}
+
+static inline int __search_backward_shortest_path(struct lock_list *src_entry,
+				struct lock_class *target,
+				struct lock_list **target_entry)
+{
+	return __search_shortest_path(src_entry, target, target_entry, 0);
+
+}
+
 /*
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
@@ -934,7 +1007,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 {
 	struct task_struct *curr = current;
 
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (debug_locks_silent)
 		return 0;
 
 	printk("\n=======================================================\n");
@@ -954,19 +1027,41 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	return 0;
 }
 
-static noinline int print_circular_bug_tail(void)
+static noinline int print_circular_bug(void)
 {
 	struct task_struct *curr = current;
 	struct lock_list this;
+	struct lock_list *target;
+	struct lock_list *parent;
+	int result;
+	unsigned long depth;
 
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	this.class = hlock_class(check_source);
 	if (!save_trace(&this.trace))
 		return 0;
 
-	print_circular_bug_entry(&this, 0);
+	result = __search_forward_shortest_path(&this,
+						hlock_class(check_target),
+						&target);
+	if (result) {
+		printk("\n%s:search shortest path failed:%d\n", __func__,
+			result);
+		return 0;
+	}
+
+	depth = get_lock_depth(target);
+
+	print_circular_bug_header(target, depth);
+
+	parent = get_lock_parent(target);
+
+	while (parent) {
+		print_circular_bug_entry(parent, --depth);
+		parent = get_lock_parent(parent);
+	}
 
 	printk("\nother info that might help us debug this:\n\n");
 	lockdep_print_held_locks(curr);
@@ -1072,14 +1167,15 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	 */
 	list_for_each_entry(entry, &source->locks_after, entry) {
 		if (entry->class == hlock_class(check_target))
-			return print_circular_bug_header(entry, depth+1);
+			return 2;
 		debug_atomic_inc(&nr_cyclic_checks);
-		if (!check_noncircular(entry->class, depth+1))
-			return print_circular_bug_entry(entry, depth+1);
+		if (check_noncircular(entry->class, depth+1) == 2)
+			return 2;
 	}
 	return 1;
 }
 
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
@@ -1484,8 +1580,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
-	if (!(check_noncircular(hlock_class(next), 0)))
-		return print_circular_bug_tail();
+	if (check_noncircular(hlock_class(next), 0) == 2)
+		return print_circular_bug();
+
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 699a2ac..6f48d37 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -136,3 +136,86 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)		0
 #endif
+
+/* The circular_queue and helpers are used to implement the
+ * breadth-first search (BFS) algorithm, by which we can build
+ * the shortest path from the next lock to be acquired to the
+ * previously held lock if there is a circle between them.
+ */
+#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
+struct circular_queue{
+	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
+	unsigned int  front, rear;
+};
+
+#define LOCK_ACCESSED 		1UL
+#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE)  == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1)%MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front)%MAX_CIRCULAR_QUE_SIZE;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+					struct lock_list *parent)
+{
+	lock->parent = (void *) parent + LOCK_ACCESSED;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	return (unsigned long)lock->parent & LOCK_ACCESSED;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return (struct lock_list *)
+		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
+}
+
+static inline unsigned long get_lock_depth(struct lock_list *child)
+{
+	unsigned long depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}

^ permalink raw reply related	[flat|nested] 54+ messages in thread
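
Two details of the commit above are worth spelling out. The BFS keeps its
per-entry bookkeeping in a single word: since lock_list objects are at least
pointer-aligned, bit 0 of the parent pointer is free to serve as the
"visited" flag, and the shortest path is recovered afterwards by walking the
parent links back to the source. A standalone model of that tagging (the
node type and names are invented):

    #include <assert.h>
    #include <stdio.h>

    #define ACCESSED	1UL
    #define ACCESSED_MASK	(~ACCESSED)

    struct node {
    	struct node *parent;	/* bit 0 doubles as the visited flag */
    };

    static void mark_accessed(struct node *n, struct node *parent)
    {
    	n->parent = (struct node *)((unsigned long)parent | ACCESSED);
    }

    static int accessed(struct node *n)
    {
    	return (unsigned long)n->parent & ACCESSED;
    }

    static struct node *get_parent(struct node *n)
    {
    	return (struct node *)((unsigned long)n->parent & ACCESSED_MASK);
    }

    int main(void)
    {
    	struct node a = { 0 }, b = { 0 }, c = { 0 };
    	struct node *p;
    	int depth = 0;

    	/* BFS discovers a -> b -> c: parent and visited share one word */
    	mark_accessed(&a, NULL);	/* source: tagged, parent NULL */
    	mark_accessed(&b, &a);
    	mark_accessed(&c, &b);
    	assert(accessed(&c) && get_parent(&c) == &b);

    	/* depth = parent hops back to the source, as in get_lock_depth() */
    	for (p = &c; get_parent(p); p = get_parent(p))
    		depth++;
    	printf("depth of c = %d\n", depth);	/* prints 2 */
    	return 0;
    }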

* [tip:core/locking] lockdep: Improve implementation of BFS
  2009-06-28 15:04   ` [RESEND PATCH 02/11] kernel:lockdep:improve implementation of BFS tom.leiming
  2009-06-28 15:04     ` [RESEND PATCH 03/11] kernel:lockdep: introduce match function to BFS tom.leiming
@ 2009-07-18 14:23     ` tip-bot for Ming Lei
  2009-08-02 13:01     ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-07-18 14:23 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  4b397a0578ade0fda66e53578cabf1890fb9dee4
Gitweb:     http://git.kernel.org/tip/4b397a0578ade0fda66e53578cabf1890fb9dee4
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:52 +0200

lockdep: Improve implementation of BFS

1, replace %MAX_CIRCULAR_QUE_SIZE with &(MAX_CIRCULAR_QUE_SIZE-1),
since we define MAX_CIRCULAR_QUE_SIZE as a power of 2;

2, use a bitmap to mark whether a lock has been accessed in BFS, so
that it can be cleared quickly, because we may search a graph many
times.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-3-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c           |   23 ++++++++++++++++-------
 kernel/lockdep_internals.h |   35 +++++++++++++++++++++++------------
 2 files changed, 39 insertions(+), 19 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 93dc70d..5dcca26 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,7 +42,7 @@
 #include <linux/hash.h>
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
-
+#include <linux/bitops.h>
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
@@ -118,7 +118,7 @@ static inline int debug_locks_off_graph_unlock(void)
 static int lockdep_initialized;
 
 unsigned long nr_list_entries;
-static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -897,30 +897,38 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
+unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 static struct circular_queue  lock_cq;
+
 static int __search_shortest_path(struct lock_list *source_entry,
 				struct lock_class *target,
 				struct lock_list **target_entry,
 				int forward)
 {
 	struct lock_list *entry;
+	struct list_head *head;
 	struct circular_queue *cq = &lock_cq;
 	int ret = 1;
 
-	__cq_init(cq);
-
-	mark_lock_accessed(source_entry, NULL);
 	if (source_entry->class == target) {
 		*target_entry = source_entry;
 		ret = 0;
 		goto exit;
 	}
 
+	if (forward)
+		head = &source_entry->class->locks_after;
+	else
+		head = &source_entry->class->locks_before;
+
+	if (list_empty(head))
+		goto exit;
+
+	__cq_init(cq);
 	__cq_enqueue(cq, (unsigned long)source_entry);
 
 	while (!__cq_empty(cq)) {
 		struct lock_list *lock;
-		struct list_head *head;
 
 		__cq_dequeue(cq, (unsigned long *)&lock);
 
@@ -1040,6 +1048,7 @@ static noinline int print_circular_bug(void)
 		return 0;
 
 	this.class = hlock_class(check_source);
+	this.parent = NULL;
 	if (!save_trace(&this.trace))
 		return 0;
 
@@ -1580,10 +1589,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
+
 	if (check_noncircular(hlock_class(next), 0) == 2)
 		return print_circular_bug();
 
-
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
 
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 6f48d37..c2f6594 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -137,23 +137,28 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_read(ptr)		0
 #endif
 
+
+extern unsigned long nr_list_entries;
+extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+extern unsigned long bfs_accessed[];
+
+/* For an efficient modulo operation, we use a power of 2 */
+#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
+
 /* The circular_queue and helpers are used to implement the
  * breadth-first search (BFS) algorithm, by which we can build
  * the shortest path from the next lock to be acquired to the
  * previously held lock if there is a circle between them.
  */
-#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
 struct circular_queue{
 	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
 	unsigned int  front, rear;
 };
 
-#define LOCK_ACCESSED 		1UL
-#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
-
 static inline void __cq_init(struct circular_queue *cq)
 {
 	cq->front = cq->rear = 0;
+	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
 }
 
 static inline int __cq_empty(struct circular_queue *cq)
@@ -163,7 +168,7 @@ static inline int __cq_empty(struct circular_queue *cq)
 
 static inline int __cq_full(struct circular_queue *cq)
 {
-	return ((cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE)  == cq->front;
+	return ((cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1))  == cq->front;
 }
 
 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
@@ -172,7 +177,7 @@ static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
 		return -1;
 
 	cq->element[cq->rear] = elem;
-	cq->rear = (cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE;
+	cq->rear = (cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
 	return 0;
 }
 
@@ -182,30 +187,36 @@ static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
 		return -1;
 
 	*elem = cq->element[cq->front];
-	cq->front = (cq->front + 1)%MAX_CIRCULAR_QUE_SIZE;
+	cq->front = (cq->front + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
 	return 0;
 }
 
 static inline int __cq_get_elem_count(struct circular_queue *cq)
 {
-	return (cq->rear - cq->front)%MAX_CIRCULAR_QUE_SIZE;
+	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
 }
 
 static inline void mark_lock_accessed(struct lock_list *lock,
 					struct lock_list *parent)
 {
-	lock->parent = (void *) parent + LOCK_ACCESSED;
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	lock->parent = parent;
+	set_bit(nr, bfs_accessed);
 }
 
 static inline unsigned long lock_accessed(struct lock_list *lock)
 {
-	return (unsigned long)lock->parent & LOCK_ACCESSED;
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	return test_bit(nr, bfs_accessed);
 }
 
 static inline struct lock_list *get_lock_parent(struct lock_list *child)
 {
-	return (struct lock_list *)
-		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
+	return child->parent;
 }
 
 static inline unsigned long get_lock_depth(struct lock_list *child)

^ permalink raw reply related	[flat|nested] 54+ messages in thread
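
The % -> & rewrite in this commit depends on the queue size being a power of
two; under that assumption the two index computations agree, including the
(rear - front) element count under unsigned wrap-around. A quick standalone
check of the identity:

    #include <assert.h>

    int main(void)
    {
    	const unsigned long n = 4096UL;	/* a power of 2, as in the patch */
    	unsigned long i;

    	for (i = 0; i < 3 * n; i++)
    		assert(i % n == (i & (n - 1)));	/* power-of-2 n only */
    	return 0;
    }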

* [tip:core/locking] lockdep: Introduce match function to BFS
  2009-06-28 15:04     ` [RESEND PATCH 03/11] kernel:lockdep: introduce match function to BFS tom.leiming
  2009-06-28 15:04       ` [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() by BFS tom.leiming
@ 2009-07-18 14:23       ` tip-bot for Ming Lei
  2009-08-02 13:01       ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-07-18 14:23 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  451641e4e36a6a207fffbbe5628ddd1fda400387
Gitweb:     http://git.kernel.org/tip/451641e4e36a6a207fffbbe5628ddd1fda400387
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:53 +0200

lockdep: Introduce match function to BFS

1, introduce match() to BFS in order to make it able to
match different patterns;

2, also rename some functions to make their names more suitable.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-4-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |   43 ++++++++++++++++++++++++++-----------------
 1 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5dcca26..ce6d09e 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -900,17 +900,18 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 static struct circular_queue  lock_cq;
 
-static int __search_shortest_path(struct lock_list *source_entry,
-				struct lock_class *target,
-				struct lock_list **target_entry,
-				int forward)
+static int __bfs(struct lock_list *source_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry,
+			int forward)
 {
 	struct lock_list *entry;
 	struct list_head *head;
 	struct circular_queue *cq = &lock_cq;
 	int ret = 1;
 
-	if (source_entry->class == target) {
+	if (match(source_entry, data)) {
 		*target_entry = source_entry;
 		ret = 0;
 		goto exit;
@@ -945,7 +946,7 @@ static int __search_shortest_path(struct lock_list *source_entry,
 		list_for_each_entry(entry, head, entry) {
 			if (!lock_accessed(entry)) {
 				mark_lock_accessed(entry, lock);
-				if (entry->class == target) {
+				if (match(entry, data)) {
 					*target_entry = entry;
 					ret = 0;
 					goto exit;
@@ -962,19 +963,21 @@ exit:
 	return ret;
 }
 
-static inline int __search_forward_shortest_path(struct lock_list *src_entry,
-				struct lock_class *target,
-				struct lock_list **target_entry)
+static inline int __bfs_forward(struct lock_list *src_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry)
 {
-	return __search_shortest_path(src_entry, target, target_entry, 1);
+	return __bfs(src_entry, data, match, target_entry, 1);
 
 }
 
-static inline int __search_backward_shortest_path(struct lock_list *src_entry,
-				struct lock_class *target,
-				struct lock_list **target_entry)
+static inline int __bfs_backward(struct lock_list *src_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry)
 {
-	return __search_shortest_path(src_entry, target, target_entry, 0);
+	return __bfs(src_entry, data, match, target_entry, 0);
 
 }
 
@@ -1035,6 +1038,11 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	return 0;
 }
 
+static inline int class_equal(struct lock_list *entry, void *data)
+{
+	return entry->class == data;
+}
+
 static noinline int print_circular_bug(void)
 {
 	struct task_struct *curr = current;
@@ -1052,9 +1060,10 @@ static noinline int print_circular_bug(void)
 	if (!save_trace(&this.trace))
 		return 0;
 
-	result = __search_forward_shortest_path(&this,
-						hlock_class(check_target),
-						&target);
+	result = __bfs_forward(&this,
+			hlock_class(check_target),
+			class_equal,
+			&target);
 	if (result) {
 		printk("\n%s:search shortest path failed:%d\n", __func__,
 			result);

^ permalink raw reply related	[flat|nested] 54+ messages in thread
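
The value of the match() indirection introduced here is that a single search
routine can serve every query lockdep makes; only the predicate changes. A
compact standalone model, with the graph search reduced to an array scan
(the entry layout and names are invented):

    #include <stdio.h>

    struct entry {
    	int class_id;
    	unsigned long usage_mask;
    };

    /* one generic search parameterised by a predicate: the shape of __bfs() */
    static struct entry *search(struct entry *v, int n,
    			    int (*match)(struct entry *, void *), void *data)
    {
    	int i;

    	for (i = 0; i < n; i++)
    		if (match(&v[i], data))
    			return &v[i];
    	return NULL;
    }

    /* the two predicates the later patches plug in */
    static int class_equal(struct entry *e, void *data)
    {
    	return e->class_id == (int)(unsigned long)data;
    }

    static int usage_match(struct entry *e, void *bit)
    {
    	return !!(e->usage_mask & (1UL << (unsigned long)bit));
    }

    int main(void)
    {
    	struct entry v[] = { { 1, 0x1 }, { 2, 0x4 }, { 3, 0x2 } };

    	printf("class 2 at index %td\n",
    	       search(v, 3, class_equal, (void *)2UL) - v);	/* 1 */
    	printf("bit 1 set at index %td\n",
    	       search(v, 3, usage_match, (void *)1UL) - v);	/* 2 */
    	return 0;
    }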

* [tip:core/locking] lockdep: Implement check_noncircular() by BFS
  2009-06-28 15:04       ` [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() by BFS tom.leiming
  2009-06-28 15:04         ` [RESEND PATCH 05/11] kernel:lockdep:implement find_usage_*wards " tom.leiming
  2009-07-13  8:02         ` [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() " Dave Young
@ 2009-07-18 14:23         ` tip-bot for Ming Lei
  2009-08-02 13:02         ` tip-bot for Ming Lei
  3 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-07-18 14:23 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  290ef1220a4df1e398edb0ad7316e7e14564e078
Gitweb:     http://git.kernel.org/tip/290ef1220a4df1e398edb0ad7316e7e14564e078
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:53 +0200

lockdep: Implement check_noncircular() by BFS

This patch uses BFS to implement check_noncircular(), and
prints the shortest circle found, if one exists.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-5-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |   89 ++++++++++++++++++++++-------------------------------
 1 files changed, 37 insertions(+), 52 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ce6d09e..5609d30 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -985,12 +985,7 @@ static inline int __bfs_backward(struct lock_list *src_entry,
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
  * checking.
- *
- * (to keep the stackframe of the recursive functions small we
- *  use these global variables, and we also mark various helper
- *  functions as noinline.)
  */
-static struct held_lock *check_source, *check_target;
 
 /*
  * Print a dependency chain entry (this is only done when a deadlock
@@ -1014,7 +1009,9 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
  * header first:
  */
 static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth)
+print_circular_bug_header(struct lock_list *entry, unsigned int depth,
+			struct held_lock *check_src,
+			struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 
@@ -1027,9 +1024,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	printk(  "-------------------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
-	print_lock(check_source);
+	print_lock(check_src);
 	printk("\nbut task is already holding lock:\n");
-	print_lock(check_target);
+	print_lock(check_tgt);
 	printk("\nwhich lock already depends on the new lock.\n\n");
 	printk("\nthe existing dependency chain (in reverse order) is:\n");
 
@@ -1043,36 +1040,24 @@ static inline int class_equal(struct lock_list *entry, void *data)
 	return entry->class == data;
 }
 
-static noinline int print_circular_bug(void)
+static noinline int print_circular_bug(struct lock_list *this,
+				struct lock_list *target,
+				struct held_lock *check_src,
+				struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
-	struct lock_list this;
-	struct lock_list *target;
 	struct lock_list *parent;
-	int result;
 	unsigned long depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	this.class = hlock_class(check_source);
-	this.parent = NULL;
-	if (!save_trace(&this.trace))
+	if (!save_trace(&this->trace))
 		return 0;
 
-	result = __bfs_forward(&this,
-			hlock_class(check_target),
-			class_equal,
-			&target);
-	if (result) {
-		printk("\n%s:search shortest path failed:%d\n", __func__,
-			result);
-		return 0;
-	}
-
 	depth = get_lock_depth(target);
 
-	print_circular_bug_header(target, depth);
+	print_circular_bug_header(target, depth, check_src, check_tgt);
 
 	parent = get_lock_parent(target);
 
@@ -1090,6 +1075,16 @@ static noinline int print_circular_bug(void)
 	return 0;
 }
 
+static noinline int print_bfs_bug(int ret)
+{
+	if (!debug_locks_off_graph_unlock())
+		return 0;
+
+	WARN(1, "lockdep bfs error:%d\n", ret);
+
+	return 0;
+}
+
 #define RECURSION_LIMIT 40
 
 static int noinline print_infinite_recursion_bug(void)
@@ -1168,31 +1163,17 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
  * lead to <target>. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_class *source, unsigned int depth)
+check_noncircular(struct lock_list *root, struct lock_class *target,
+		struct lock_list **target_entry)
 {
-	struct lock_list *entry;
+	int result;
 
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
+	debug_atomic_inc(&nr_cyclic_checks);
 
-	debug_atomic_inc(&nr_cyclic_check_recursions);
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_after, entry) {
-		if (entry->class == hlock_class(check_target))
-			return 2;
-		debug_atomic_inc(&nr_cyclic_checks);
-		if (check_noncircular(entry->class, depth+1) == 2)
-			return 2;
-	}
-	return 1;
-}
+	result = __bfs_forward(root, target, class_equal, target_entry);
 
+	return result;
+}
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
@@ -1586,6 +1567,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 {
 	struct lock_list *entry;
 	int ret;
+	struct lock_list this;
+	struct lock_list *uninitialized_var(target_entry);
 
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
@@ -1596,11 +1579,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * We are using global variables to control the recursion, to
 	 * keep the stackframe size of the recursive functions low:
 	 */
-	check_source = next;
-	check_target = prev;
-
-	if (check_noncircular(hlock_class(next), 0) == 2)
-		return print_circular_bug();
+	this.class = hlock_class(next);
+	this.parent = NULL;
+	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+	if (unlikely(!ret))
+		return print_circular_bug(&this, target_entry, next, prev);
+	else if (unlikely(ret < 0))
+		return print_bfs_bug(ret);
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;

^ permalink raw reply related	[flat|nested] 54+ messages in thread
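
The test this commit rewrites can be stated compactly: adding a new
dependency prev -> next closes a circle exactly when prev is already
reachable from next in the forward graph, so one forward BFS from next
looking for prev is the whole check. A toy version over an adjacency
matrix, keeping the patch's return convention (0 = found, 1 = not found);
the graph and names are invented:

    #include <stdio.h>

    #define N 4

    static int adj[N][N];	/* adj[i][j] != 0: dependency i -> j */

    static int bfs_reach(int src, int target)
    {
    	int queue[N], head = 0, tail = 0, seen[N] = { 0 };
    	int u, v;

    	queue[tail++] = src;
    	seen[src] = 1;
    	while (head < tail) {
    		u = queue[head++];
    		if (u == target)
    			return 0;	/* found: a circle would result */
    		for (v = 0; v < N; v++)
    			if (adj[u][v] && !seen[v]) {
    				seen[v] = 1;
    				queue[tail++] = v;
    			}
    	}
    	return 1;	/* not found: the new dependency is safe */
    }

    int main(void)
    {
    	adj[0][1] = adj[1][2] = 1;	/* existing deps: 0 -> 1 -> 2 */

    	/* proposed prev=2 -> next=0: search forward from next for prev */
    	if (bfs_reach(0, 2) == 0)
    		printf("adding 2 -> 0 would create a circle\n");
    	if (bfs_reach(2, 3) == 1)
    		printf("adding 3 -> 2 is fine\n");
    	return 0;
    }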

* [tip:core/locking] lockdep: Implement find_usage_*wards by BFS
  2009-06-28 15:04         ` [RESEND PATCH 05/11] kernel:lockdep:implement find_usage_*wards " tom.leiming
  2009-06-28 15:04           ` [RESEND PATCH 06/11] kernel:lockdep:introduce print_shortest_lock_dependencies tom.leiming
@ 2009-07-18 14:23           ` tip-bot for Ming Lei
  2009-08-02 13:02           ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-07-18 14:23 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  013f1606270e97dc7952f19a7c0eaeebcbfcf048
Gitweb:     http://git.kernel.org/tip/013f1606270e97dc7952f19a7c0eaeebcbfcf048
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:54 +0200

lockdep: Implement find_usage_*wards by BFS

This patch uses BFS to implement find_usage_*wards(), which
were originally written using DFS.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-6-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |  180 ++++++++++++++++++++++--------------------------------
 1 files changed, 72 insertions(+), 108 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5609d30..b3ade50 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -963,7 +963,7 @@ exit:
 	return ret;
 }
 
-static inline int __bfs_forward(struct lock_list *src_entry,
+static inline int __bfs_forwards(struct lock_list *src_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
 			struct lock_list **target_entry)
@@ -972,7 +972,7 @@ static inline int __bfs_forward(struct lock_list *src_entry,
 
 }
 
-static inline int __bfs_backward(struct lock_list *src_entry,
+static inline int __bfs_backwards(struct lock_list *src_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
 			struct lock_list **target_entry)
@@ -1085,18 +1085,6 @@ static noinline int print_bfs_bug(int ret)
 	return 0;
 }
 
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
-{
-	if (!debug_locks_off_graph_unlock())
-		return 0;
-
-	WARN_ON(1);
-
-	return 0;
-}
-
 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
 					   unsigned int depth)
 {
@@ -1170,7 +1158,7 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
 
 	debug_atomic_inc(&nr_cyclic_checks);
 
-	result = __bfs_forward(root, target, class_equal, target_entry);
+	result = __bfs_forwards(root, target, class_equal, target_entry);
 
 	return result;
 }
@@ -1181,101 +1169,70 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static enum lock_usage_bit find_usage_bit;
 static struct lock_class *forwards_match, *backwards_match;
 
+
+#define   BFS_PROCESS_RET(ret)	do { \
+					if (ret < 0) \
+						return print_bfs_bug(ret); \
+					if (ret == 1) \
+						return 1; \
+				} while (0)
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
+
+
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+			struct lock_list **target_entry)
 {
-	struct lock_list *entry;
-	int ret;
-
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
-
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
+	int result;
 
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
-	if (source->usage_mask & (1 << find_usage_bit)) {
-		forwards_match = source;
-		return 2;
-	}
 
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_after, entry) {
-		debug_atomic_inc(&nr_find_usage_forwards_recursions);
-		ret = find_usage_forwards(entry->class, depth+1);
-		if (ret == 2 || ret == 0)
-			return ret;
-	}
-	return 1;
+	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+	return result;
 }
 
 /*
  * Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+			struct lock_list **target_entry)
 {
-	struct lock_list *entry;
-	int ret;
-
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
-
-	if (!__raw_spin_is_locked(&lockdep_lock))
-		return DEBUG_LOCKS_WARN_ON(1);
-
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
+	int result;
 
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
-	if (source->usage_mask & (1 << find_usage_bit)) {
-		backwards_match = source;
-		return 2;
-	}
 
-	if (!source && debug_locks_off_graph_unlock()) {
-		WARN_ON(1);
-		return 0;
-	}
+	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
 
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_before, entry) {
-		debug_atomic_inc(&nr_find_usage_backwards_recursions);
-		ret = find_usage_backwards(entry->class, depth+1);
-		if (ret == 2 || ret == 0)
-			return ret;
-	}
-	return 1;
+	return result;
 }
 
+
 static int
 print_bad_irq_dependency(struct task_struct *curr,
 			 struct held_lock *prev,
@@ -1343,18 +1300,21 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 	    enum lock_usage_bit bit_forwards, const char *irqclass)
 {
 	int ret;
+	struct lock_list this;
+	struct lock_list *uninitialized_var(target_entry);
+
+	this.parent = NULL;
+
+	this.class = hlock_class(prev);
+	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+	BFS_PROCESS_RET(ret);
+	backwards_match = target_entry->class;
+
+	this.class = hlock_class(next);
+	ret = find_usage_forwards(&this, bit_forwards, &target_entry);
+	BFS_PROCESS_RET(ret);
+	forwards_match = target_entry->class;
 
-	find_usage_bit = bit_backwards;
-	/* fills in <backwards_match> */
-	ret = find_usage_backwards(hlock_class(prev), 0);
-	if (!ret || ret == 1)
-		return ret;
-
-	find_usage_bit = bit_forwards;
-	ret = find_usage_forwards(hlock_class(next), 0);
-	if (!ret || ret == 1)
-		return ret;
-	/* ret == 2 */
 	return print_bad_irq_dependency(curr, prev, next,
 			bit_backwards, bit_forwards, irqclass);
 }
@@ -2029,14 +1989,16 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 		     enum lock_usage_bit bit, const char *irqclass)
 {
 	int ret;
+	struct lock_list root;
+	struct lock_list *uninitialized_var(target_entry);
 
-	find_usage_bit = bit;
-	/* fills in <forwards_match> */
-	ret = find_usage_forwards(hlock_class(this), 0);
-	if (!ret || ret == 1)
-		return ret;
+	root.parent = NULL;
+	root.class = hlock_class(this);
+	ret = find_usage_forwards(&root, bit, &target_entry);
+	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+	return print_irq_inversion_bug(curr, target_entry->class,
+					this, 1, irqclass);
 }
 
 /*
@@ -2048,14 +2010,16 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 		      enum lock_usage_bit bit, const char *irqclass)
 {
 	int ret;
+	struct lock_list root;
+	struct lock_list *uninitialized_var(target_entry);
 
-	find_usage_bit = bit;
-	/* fills in <backwards_match> */
-	ret = find_usage_backwards(hlock_class(this), 0);
-	if (!ret || ret == 1)
-		return ret;
+	root.parent = NULL;
+	root.class = hlock_class(this);
+	ret = find_usage_backwards(&root, bit, &target_entry);
+	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+	return print_irq_inversion_bug(curr, target_entry->class,
+					this, 0, irqclass);
 }
 
 void print_irqtrace_events(struct task_struct *curr)

^ permalink raw reply related	[flat|nested] 54+ messages in thread
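
A note on the helper introduced above: BFS_PROCESS_RET() encodes the
three-way result convention of __bfs(). Unrolled, a caller reads as the
following sketch (based on the hunks above; the comments are added here):

    ret = find_usage_forwards(&root, bit, &target_entry);
    if (ret < 0)
    	return print_bfs_bug(ret);	/* internal BFS failure, e.g. the
    					 * circular queue overflowed */
    if (ret == 1)
    	return 1;			/* search exhausted, no match */
    /* ret == 0: *target_entry points at the matching lock_list */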

* [tip:core/locking] lockdep: Introduce print_shortest_lock_dependencies
  2009-06-28 15:04           ` [RESEND PATCH 06/11] kernel:lockdep:introduce print_shortest_lock_dependencies tom.leiming
  2009-06-28 15:04             ` [RESEND PATCH 07/11] kernel:lockdep: implement lockdep_count_*ward_deps by BFS tom.leiming
@ 2009-07-18 14:24             ` tip-bot for Ming Lei
  2009-08-02 13:02             ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-07-18 14:24 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  7d336a4d2baec06e1b497a2cee42e15cdcb225eb
Gitweb:     http://git.kernel.org/tip/7d336a4d2baec06e1b497a2cee42e15cdcb225eb
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:54 +0200

lockdep: Introduce print_shortest_lock_dependencies

Since the shortest lock-dependency path can be obtained by BFS,
we print it via print_shortest_lock_dependencies().

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-7-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c           |   93 +++++++++++++++++++++++++++++++------------
 kernel/lockdep_internals.h |    4 +-
 2 files changed, 69 insertions(+), 28 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b3ade50..938dc50 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -576,6 +576,36 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 }
 
 /*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+				struct lock_list *root)
+{
+	struct lock_list *entry = leaf;
+	int depth;
+
+	/* compute the depth from the tree generated by BFS */
+	depth = get_lock_depth(leaf);
+
+	do {
+		print_lock_class_header(entry->class, depth);
+		printk("%*s ... acquired at:\n", depth, "");
+		print_stack_trace(&entry->trace, 2);
+		printk("\n");
+
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad BFS generated tree\n", __func__);
+			break;
+		}
+
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && (depth >= 0));
+
+	return;
+}
+/*
  * printk all lock dependencies starting at <entry>:
  */
 static void __used
@@ -992,7 +1022,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * has been detected):
  */
 static noinline int
-print_circular_bug_entry(struct lock_list *target, unsigned int depth)
+print_circular_bug_entry(struct lock_list *target, int depth)
 {
 	if (debug_locks_silent)
 		return 0;
@@ -1047,7 +1077,7 @@ static noinline int print_circular_bug(struct lock_list *this,
 {
 	struct task_struct *curr = current;
 	struct lock_list *parent;
-	unsigned long depth;
+	int depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
@@ -1169,7 +1199,6 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static struct lock_class *forwards_match, *backwards_match;
 
 
 #define   BFS_PROCESS_RET(ret)	do { \
@@ -1235,6 +1264,10 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
+			 struct lock_list *prev_root,
+			 struct lock_list *next_root,
+			 struct lock_list *backwards_entry,
+			 struct lock_list *forwards_entry,
 			 struct held_lock *prev,
 			 struct held_lock *next,
 			 enum lock_usage_bit bit1,
@@ -1267,26 +1300,32 @@ print_bad_irq_dependency(struct task_struct *curr,
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
-	print_lock_name(backwards_match);
+	print_lock_name(backwards_entry->class);
 	printk("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_match->usage_traces + bit1, 1);
+	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
-	print_lock_name(forwards_match);
+	print_lock_name(forwards_entry->class);
 	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
 	printk("...");
 
-	print_stack_trace(forwards_match->usage_traces + bit2, 1);
+	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	printk("\nother info that might help us debug this:\n\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
-	print_lock_dependencies(backwards_match, 0);
+	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
+	printk(" and the holding lock:\n");
+	if (!save_trace(&prev_root->trace))
+		return 0;
+	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
-	printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
-	print_lock_dependencies(forwards_match, 0);
+	printk("\nthe dependencies between the lock to be acquired");
+	printk(" and %s-irq-unsafe lock:\n", irqclass);
+	if (!save_trace(&next_root->trace))
+		return 0;
+	print_shortest_lock_dependencies(forwards_entry, next_root);
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
@@ -1300,22 +1339,24 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 	    enum lock_usage_bit bit_forwards, const char *irqclass)
 {
 	int ret;
-	struct lock_list this;
+	struct lock_list this, that;
 	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list *uninitialized_var(target_entry1);
 
 	this.parent = NULL;
 
 	this.class = hlock_class(prev);
 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
 	BFS_PROCESS_RET(ret);
-	backwards_match = target_entry->class;
 
-	this.class = hlock_class(next);
-	ret = find_usage_forwards(&this, bit_forwards, &target_entry);
+	that.parent = NULL;
+	that.class = hlock_class(next);
+	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
 	BFS_PROCESS_RET(ret);
-	forwards_match = target_entry->class;
 
-	return print_bad_irq_dependency(curr, prev, next,
+	return print_bad_irq_dependency(curr, &this, &that,
+			target_entry, target_entry1,
+			prev, next,
 			bit_backwards, bit_forwards, irqclass);
 }
 
@@ -1944,7 +1985,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
  * print irq inversion bug:
  */
 static int
-print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
+print_irq_inversion_bug(struct task_struct *curr,
+			struct lock_list *root, struct lock_list *other,
 			struct held_lock *this, int forwards,
 			const char *irqclass)
 {
@@ -1962,17 +2004,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
 		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
-	print_lock_name(other);
+	print_lock_name(other->class);
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
 	printk("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe first lock's dependencies:\n");
-	print_lock_dependencies(hlock_class(this), 0);
-
-	printk("\nthe second lock's dependencies:\n");
-	print_lock_dependencies(other, 0);
+	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+	if (!save_trace(&root->trace))
+		return 0;
+	print_shortest_lock_dependencies(other, root);
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
@@ -1997,7 +2038,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 	ret = find_usage_forwards(&root, bit, &target_entry);
 	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, target_entry->class,
+	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
 }
 
@@ -2018,7 +2059,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	ret = find_usage_backwards(&root, bit, &target_entry);
 	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, target_entry->class,
+	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
 }
 
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c2f6594..b115aaa 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -219,9 +219,9 @@ static inline struct lock_list *get_lock_parent(struct lock_list *child)
 	return child->parent;
 }
 
-static inline unsigned long get_lock_depth(struct lock_list *child)
+static inline int get_lock_depth(struct lock_list *child)
 {
-	unsigned long depth = 0;
+	int depth = 0;
 	struct lock_list *parent;
 
 	while ((parent = get_lock_parent(child))) {

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Implement lockdep_count_*ward_deps by BFS
  2009-06-28 15:04             ` [RESEND PATCH 07/11] kernel:lockdep: implement lockdep_count_*ward_deps by BFS tom.leiming
  2009-06-28 15:04               ` [RESEND PATCH 08/11] kernel:lockdep: update memory usage introduced " tom.leiming
@ 2009-07-18 14:24               ` tip-bot for Ming Lei
  2009-08-02 13:02               ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-07-18 14:24 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  43cbc4a9295ce0aa6d439b037a81f24addd9fcf6
Gitweb:     http://git.kernel.org/tip/43cbc4a9295ce0aa6d439b037a81f24addd9fcf6
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:55 +0200

lockdep: Implement lockdep_count_*ward_deps by BFS

Implement lockdep_count_{for,back}ward_deps() using BFS.
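
A match callback that never matches turns the generic BFS into a pure
counter: every reachable entry is visited exactly once, so the callback's
side effect ends up counting the dependencies. A minimal user-space
sketch of that shape (illustrative types; the kernel's noop_count()
operates on struct lock_list instead):

#include <stdio.h>

static int noop_count(int node, void *data)
{
	(void)node;			/* the entry itself is irrelevant */
	(*(unsigned long *)data)++;	/* one increment per visited node */
	return 0;			/* never "match": walk the whole graph */
}

int main(void)
{
	unsigned long count = 0;
	int n;

	for (n = 0; n < 5; n++)		/* stand-in for BFS visiting 5 entries */
		if (noop_count(n, &count))
			break;
	printf("reachable deps: %lu\n", count);	/* prints 5 */
	return 0;
}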

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-8-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |   52 +++++++++++++++++++++++++---------------------------
 1 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 938dc50..b896f23 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1115,61 +1115,59 @@ static noinline int print_bfs_bug(int ret)
 	return 0;
 }
 
-unsigned long __lockdep_count_forward_deps(struct lock_class *class,
-					   unsigned int depth)
+static int noop_count(struct lock_list *entry, void *data)
 {
-	struct lock_list *entry;
-	unsigned long ret = 1;
+	(*(unsigned long *)data)++;
+	return 0;
+}
 
-	if (lockdep_dependency_visit(class, depth))
-		return 0;
+unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+{
+	unsigned long  count = 0;
+	struct lock_list *uninitialized_var(target_entry);
 
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_after, entry)
-		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
 
-	return ret;
+	return count;
 }
-
 unsigned long lockdep_count_forward_deps(struct lock_class *class)
 {
 	unsigned long ret, flags;
+	struct lock_list this;
+
+	this.parent = NULL;
+	this.class = class;
 
 	local_irq_save(flags);
 	__raw_spin_lock(&lockdep_lock);
-	ret = __lockdep_count_forward_deps(class, 0);
+	ret = __lockdep_count_forward_deps(&this);
 	__raw_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
 }
 
-unsigned long __lockdep_count_backward_deps(struct lock_class *class,
-					    unsigned int depth)
+unsigned long __lockdep_count_backward_deps(struct lock_list *this)
 {
-	struct lock_list *entry;
-	unsigned long ret = 1;
+	unsigned long  count = 0;
+	struct lock_list *uninitialized_var(target_entry);
 
-	if (lockdep_dependency_visit(class, depth))
-		return 0;
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_before, entry)
-		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
 
-	return ret;
+	return count;
 }
 
 unsigned long lockdep_count_backward_deps(struct lock_class *class)
 {
 	unsigned long ret, flags;
+	struct lock_list this;
+
+	this.parent = NULL;
+	this.class = class;
 
 	local_irq_save(flags);
 	__raw_spin_lock(&lockdep_lock);
-	ret = __lockdep_count_backward_deps(class, 0);
+	ret = __lockdep_count_backward_deps(&this);
 	__raw_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Update memory usage introduced by BFS
  2009-06-28 15:04               ` [RESEND PATCH 08/11] kernel:lockdep: update memory usage introduced " tom.leiming
  2009-06-28 15:04                 ` [RESEND PATCH 09/11] kernel:lockdep:add statistics info for max bfs queue depth tom.leiming
@ 2009-07-18 14:24                 ` tip-bot for Ming Lei
  2009-07-18 17:23                   ` Peter Zijlstra
  2009-08-02 13:02                 ` tip-bot for Ming Lei
  2 siblings, 1 reply; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-07-18 14:24 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  292264ece1ffdc9dacc1af486e28ee91cc66fb2d
Gitweb:     http://git.kernel.org/tip/292264ece1ffdc9dacc1af486e28ee91cc66fb2d
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:56 +0200

lockdep: Update memory usage introduced by BFS

Also account for the BFS memory usage.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-9-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |    3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b896f23..930335d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3429,7 +3429,8 @@ void __init lockdep_info(void)
 		sizeof(struct list_head) * CLASSHASH_SIZE +
 		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
 		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
-		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
+		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024 +
+		sizeof(struct circular_queue) + sizeof(bfs_accessed));
 
 	printk(" per task-struct memory footprint: %lu bytes\n",
 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Add statistics info for max bfs queue depth
  2009-06-28 15:04                 ` [RESEND PATCH 09/11] kernel:lockdep:add statistics info for max bfs queue depth tom.leiming
  2009-06-28 15:04                   ` [RESEND PATCH 10/11] BFS cleanup tom.leiming
@ 2009-07-18 14:24                   ` tip-bot for Ming Lei
  2009-08-02 13:03                   ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-07-18 14:24 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  63627e378c2ede5086d0f8927571c857777845d7
Gitweb:     http://git.kernel.org/tip/63627e378c2ede5086d0f8927571c857777845d7
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:56 +0200

lockdep: Add statistics info for max bfs queue depth

Add BFS statistics to the existing lockdep stats.
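
The statistic is a plain high-water mark, updated after each enqueue; a
minimal sketch of the pattern (illustrative helper name):

static unsigned int max_bfs_queue_depth;

static void note_cq_depth(unsigned int cq_depth)
{
	if (max_bfs_queue_depth < cq_depth)
		max_bfs_queue_depth = cq_depth;	/* keep the deepest queue seen */
}

int main(void)
{
	unsigned int depths[] = { 3, 7, 5 };
	int i;

	for (i = 0; i < 3; i++)
		note_cq_depth(depths[i]);
	return max_bfs_queue_depth == 7 ? 0 : 1;
}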

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-10-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c           |    6 +++++-
 kernel/lockdep_internals.h |    3 ++-
 kernel/lockdep_proc.c      |    2 ++
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 930335d..41ceaa1 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -929,7 +929,7 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 
 unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 static struct circular_queue  lock_cq;
-
+unsigned int max_bfs_queue_depth;
 static int __bfs(struct lock_list *source_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
@@ -975,6 +975,7 @@ static int __bfs(struct lock_list *source_entry,
 
 		list_for_each_entry(entry, head, entry) {
 			if (!lock_accessed(entry)) {
+				unsigned int cq_depth;
 				mark_lock_accessed(entry, lock);
 				if (match(entry, data)) {
 					*target_entry = entry;
@@ -986,6 +987,9 @@ static int __bfs(struct lock_list *source_entry,
 					ret = -1;
 					goto exit;
 				}
+				cq_depth = __cq_get_elem_count(cq);
+				if (max_bfs_queue_depth < cq_depth)
+					max_bfs_queue_depth = cq_depth;
 			}
 		}
 	}
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index b115aaa..6baa880 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -138,6 +138,7 @@ extern atomic_t nr_find_usage_backwards_recursions;
 #endif
 
 
+extern unsigned int max_bfs_queue_depth;
 extern unsigned long nr_list_entries;
 extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 extern unsigned long bfs_accessed[];
@@ -191,7 +192,7 @@ static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
 	return 0;
 }
 
-static inline int __cq_get_elem_count(struct circular_queue *cq)
+static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
 {
 	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
 }
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d7135aa..9a1bf34 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -411,6 +411,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 			max_lockdep_depth);
 	seq_printf(m, " max recursion depth:           %11u\n",
 			max_recursion_depth);
+	seq_printf(m, " max bfs queue depth:           %11u\n",
+			max_bfs_queue_depth);
 	lockdep_stats_debug_show(m);
 	seq_printf(m, " debug_locks:                   %11u\n",
 			debug_locks);

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: BFS cleanup
  2009-06-28 15:04                   ` [RESEND PATCH 10/11] BFS cleanup tom.leiming
  2009-06-28 15:04                     ` [RESEND PATCH 11/11] kernel:lockdep:fix return value of check_usage*() tom.leiming
@ 2009-07-18 14:24                     ` tip-bot for Peter Zijlstra
  2009-07-18 17:23                       ` Peter Zijlstra
  2009-08-02 13:03                     ` tip-bot for Peter Zijlstra
  2 siblings, 1 reply; 54+ messages in thread
From: tip-bot for Peter Zijlstra @ 2009-07-18 14:24 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, mingo

Commit-ID:  ec223f9a486e12dbdb8e6996fcc283eee622695b
Gitweb:     http://git.kernel.org/tip/ec223f9a486e12dbdb8e6996fcc283eee622695b
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 18 Jul 2009 16:02:57 +0200

lockdep: BFS cleanup

Some cleanups of the lockdep code after the BFS series:

 - Remove the last traces of the generation id
 - Fixup comment style
 - Move the bfs routines into lockdep.c
 - Cleanup the bfs routines

[ tom.leiming@gmail.com: Fix crash ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-11-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 include/linux/lockdep.h    |    7 +-
 kernel/lockdep.c           |  236 +++++++++++++++++++++++++++-----------------
 kernel/lockdep_internals.h |   97 +------------------
 3 files changed, 151 insertions(+), 189 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9ec026f..12aabfc 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -58,7 +58,6 @@ struct lock_class {
 
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
-	unsigned int			dep_gen_id;
 
 	/*
 	 * IRQ/softirq usage tracking bits:
@@ -150,9 +149,9 @@ struct lock_list {
 	struct stack_trace		trace;
 	int				distance;
 
-	/*The parent field is used to implement breadth-first search,and
-	 *the bit 0 is reused to indicate if the lock has been accessed
-	 *in BFS.
+	/*
+	 * The parent field is used to implement breadth-first search, and the
+	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
 	 */
 	struct lock_list		*parent;
 };
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 41ceaa1..3718a98 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,6 +43,7 @@
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
+
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
@@ -118,7 +119,7 @@ static inline int debug_locks_off_graph_unlock(void)
 static int lockdep_initialized;
 
 unsigned long nr_list_entries;
-struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -390,19 +391,6 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
-static unsigned int lockdep_dependency_gen_id;
-
-static bool lockdep_dependency_visit(struct lock_class *source,
-				     unsigned int depth)
-{
-	if (!depth)
-		lockdep_dependency_gen_id++;
-	if (source->dep_gen_id == lockdep_dependency_gen_id)
-		return true;
-	source->dep_gen_id = lockdep_dependency_gen_id;
-	return false;
-}
-
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -575,64 +563,6 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 	print_ip_sym((unsigned long)class->key);
 }
 
-/*
- * printk the shortest lock dependencies from @start to @end in reverse order:
- */
-static void __used
-print_shortest_lock_dependencies(struct lock_list *leaf,
-				struct lock_list *root)
-{
-	struct lock_list *entry = leaf;
-	int depth;
-
-	/*compute depth from generated tree by BFS*/
-	depth = get_lock_depth(leaf);
-
-	do {
-		print_lock_class_header(entry->class, depth);
-		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
-		printk("\n");
-
-		if (depth == 0 && (entry != root)) {
-			printk("lockdep:%s bad BFS generated tree\n", __func__);
-			break;
-		}
-
-		entry = get_lock_parent(entry);
-		depth--;
-	} while (entry && (depth >= 0));
-
-	return;
-}
-/*
- * printk all lock dependencies starting at <entry>:
- */
-static void __used
-print_lock_dependencies(struct lock_class *class, int depth)
-{
-	struct lock_list *entry;
-
-	if (lockdep_dependency_visit(class, depth))
-		return;
-
-	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
-		return;
-
-	print_lock_class_header(class, depth);
-
-	list_for_each_entry(entry, &class->locks_after, entry) {
-		if (DEBUG_LOCKS_WARN_ON(!entry->class))
-			return;
-
-		print_lock_dependencies(entry->class, depth + 1);
-
-		printk("%*s ... acquired at:\n",depth,"");
-		print_stack_trace(&entry->trace, 2);
-		printk("\n");
-	}
-}
-
 static void print_kernel_version(void)
 {
 	printk("%s %.*s\n", init_utsname()->release,
@@ -927,14 +857,106 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
-unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
-static struct circular_queue  lock_cq;
+/*For good efficiency of modular, we use power of 2*/
+#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
+#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
+
+/* The circular_queue and helpers is used to implement the
+ * breadth-first search(BFS)algorithem, by which we can build
+ * the shortest path from the next lock to be acquired to the
+ * previous held lock if there is a circular between them.
+ * */
+struct circular_queue {
+	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+	unsigned int  front, rear;
+};
+
+static struct circular_queue lock_cq;
+static unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
+
 unsigned int max_bfs_queue_depth;
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1) & CQ_MASK) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front) & CQ_MASK;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+					struct lock_list *parent)
+{
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	lock->parent = parent;
+	set_bit(nr, bfs_accessed);
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	return test_bit(nr, bfs_accessed);
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return child->parent;
+}
+
+static inline int get_lock_depth(struct lock_list *child)
+{
+	int depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}
+
 static int __bfs(struct lock_list *source_entry,
-			void *data,
-			int (*match)(struct lock_list *entry, void *data),
-			struct lock_list **target_entry,
-			int forward)
+		 void *data,
+		 int (*match)(struct lock_list *entry, void *data),
+		 struct lock_list **target_entry,
+		 int forward)
 {
 	struct lock_list *entry;
 	struct list_head *head;
@@ -1202,14 +1224,6 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
 
-
-#define   BFS_PROCESS_RET(ret)	do { \
-					if (ret < 0) \
-						return print_bfs_bug(ret); \
-					if (ret == 1) \
-						return 1; \
-				} while (0)
-
 static inline int usage_match(struct lock_list *entry, void *bit)
 {
 	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
@@ -1263,6 +1277,36 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 	return result;
 }
 
+/*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+				struct lock_list *root)
+{
+	struct lock_list *entry = leaf;
+	int depth;
+
+	/*compute depth from generated tree by BFS*/
+	depth = get_lock_depth(leaf);
+
+	do {
+		print_lock_class_header(entry->class, depth);
+		printk("%*s ... acquired at:\n", depth, "");
+		print_stack_trace(&entry->trace, 2);
+		printk("\n");
+
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad BFS generated tree\n", __func__);
+			break;
+		}
+
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && (depth >= 0));
+
+	return;
+}
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
@@ -1349,12 +1393,18 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 	this.class = hlock_class(prev);
 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	that.parent = NULL;
 	that.class = hlock_class(next);
 	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	return print_bad_irq_dependency(curr, &this, &that,
 			target_entry, target_entry1,
@@ -2038,7 +2088,10 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_forwards(&root, bit, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
@@ -2059,7 +2112,10 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_backwards(&root, bit, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 6baa880..a2ee95a 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned int max_bfs_queue_depth;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
@@ -136,98 +138,3 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)		0
 #endif
-
-
-extern unsigned int max_bfs_queue_depth;
-extern unsigned long nr_list_entries;
-extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
-extern unsigned long bfs_accessed[];
-
-/*For good efficiency of modular, we use power of 2*/
-#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
-
-/* The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
- * */
-struct circular_queue{
-	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
-	unsigned int  front, rear;
-};
-
-static inline void __cq_init(struct circular_queue *cq)
-{
-	cq->front = cq->rear = 0;
-	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
-}
-
-static inline int __cq_empty(struct circular_queue *cq)
-{
-	return (cq->front == cq->rear);
-}
-
-static inline int __cq_full(struct circular_queue *cq)
-{
-	return ((cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1))  == cq->front;
-}
-
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
-{
-	if (__cq_full(cq))
-		return -1;
-
-	cq->element[cq->rear] = elem;
-	cq->rear = (cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
-{
-	if (__cq_empty(cq))
-		return -1;
-
-	*elem = cq->element[cq->front];
-	cq->front = (cq->front + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
-{
-	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
-}
-
-static inline void mark_lock_accessed(struct lock_list *lock,
-					struct lock_list *parent)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	lock->parent = parent;
-	set_bit(nr, bfs_accessed);
-}
-
-static inline unsigned long lock_accessed(struct lock_list *lock)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	return test_bit(nr, bfs_accessed);
-}
-
-static inline struct lock_list *get_lock_parent(struct lock_list *child)
-{
-	return child->parent;
-}
-
-static inline int get_lock_depth(struct lock_list *child)
-{
-	int depth = 0;
-	struct lock_list *parent;
-
-	while ((parent = get_lock_parent(child))) {
-		child = parent;
-		depth++;
-	}
-	return depth;
-}

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* Re: [tip:core/locking] lockdep: Update memory usage introduced by BFS
  2009-07-18 14:24                 ` [tip:core/locking] lockdep: Update memory usage introduced by BFS tip-bot for Ming Lei
@ 2009-07-18 17:23                   ` Peter Zijlstra
  0 siblings, 0 replies; 54+ messages in thread
From: Peter Zijlstra @ 2009-07-18 17:23 UTC (permalink / raw)
  To: mingo, hpa, linux-kernel, tglx, tom.leiming, mingo; +Cc: linux-tip-commits

On Sat, 2009-07-18 at 14:24 +0000, tip-bot for Ming Lei wrote:
> Commit-ID:  292264ece1ffdc9dacc1af486e28ee91cc66fb2d
> Gitweb:     http://git.kernel.org/tip/292264ece1ffdc9dacc1af486e28ee91cc66fb2d
> Author:     Ming Lei <tom.leiming@gmail.com>
> AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
> Committer:  Ingo Molnar <mingo@elte.hu>
> CommitDate: Sat, 18 Jul 2009 16:02:56 +0200
> 
> lockdep: Update memory usage introduced by BFS
> 
> Also account the BFS memory usage.
> 
> Signed-off-by: Ming Lei <tom.leiming@gmail.com>
> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
> LKML-Reference: <1246201486-7308-9-git-send-email-tom.leiming@gmail.com>
> Signed-off-by: Ingo Molnar <mingo@elte.hu>
> 
> 
> ---
>  kernel/lockdep.c |    3 ++-
>  1 files changed, 2 insertions(+), 1 deletions(-)
> 
> diff --git a/kernel/lockdep.c b/kernel/lockdep.c
> index b896f23..930335d 100644
> --- a/kernel/lockdep.c
> +++ b/kernel/lockdep.c
> @@ -3429,7 +3429,8 @@ void __init lockdep_info(void)
>  		sizeof(struct list_head) * CLASSHASH_SIZE +
>  		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
>  		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
> -		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
> +		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024 +
> +		sizeof(struct circular_queue) + sizeof(bfs_accessed));
>  
>  	printk(" per task-struct memory footprint: %lu bytes\n",
>  		sizeof(struct held_lock) * MAX_LOCK_DEPTH);

/usr/src/linux-2.6/kernel/lockdep.c:3493: error: invalid application of
'sizeof' to incomplete type 'struct circular_queue'
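
For reference, the failure mode is plain C, not lockdep-specific: sizeof
requires a complete type, and with CONFIG_PROVE_LOCKING disabled the BFS
code defining struct circular_queue is presumably compiled out, leaving
only an incomplete type at this point. A minimal illustration with
hypothetical types:

struct foo;				/* incomplete: no definition visible */
struct bar { int x; };			/* complete type */

unsigned long ok = sizeof(struct bar);	/* fine: the definition is in scope */
/*
 * unsigned long bad = sizeof(struct foo);
 *	error: invalid application of 'sizeof' to incomplete type 'struct foo'
 */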

This wants the following folded in.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 kernel/lockdep.c |    7 +++++--
 1 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3718a98..33fdda9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3489,8 +3489,11 @@ void __init lockdep_info(void)
 		sizeof(struct list_head) * CLASSHASH_SIZE +
 		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
 		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
-		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024 +
-		sizeof(struct circular_queue) + sizeof(bfs_accessed));
+		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024
+#ifdef CONFIG_PROVE_LOCKING
+		+ sizeof(struct circular_queue) + sizeof(bfs_accessed)
+#endif
+		);
 
 	printk(" per task-struct memory footprint: %lu bytes\n",
 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);



^ permalink raw reply related	[flat|nested] 54+ messages in thread

* Re: [tip:core/locking] lockdep: BFS cleanup
  2009-07-18 14:24                     ` [tip:core/locking] lockdep: BFS cleanup tip-bot for Peter Zijlstra
@ 2009-07-18 17:23                       ` Peter Zijlstra
  0 siblings, 0 replies; 54+ messages in thread
From: Peter Zijlstra @ 2009-07-18 17:23 UTC (permalink / raw)
  To: mingo, hpa, linux-kernel, tglx, mingo; +Cc: linux-tip-commits

On Sat, 2009-07-18 at 14:24 +0000, tip-bot for Peter Zijlstra wrote:
> Commit-ID:  ec223f9a486e12dbdb8e6996fcc283eee622695b
> Gitweb:     http://git.kernel.org/tip/ec223f9a486e12dbdb8e6996fcc283eee622695b
> Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
> AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
> Committer:  Ingo Molnar <mingo@elte.hu>
> CommitDate: Sat, 18 Jul 2009 16:02:57 +0200
> 
> lockdep: BFS cleanup
> 
> Some cleanups of the lockdep code after the BFS series:
> 
>  - Remove the last traces of the generation id
>  - Fixup comment style
>  - Move the bfs routines into lockdep.c
>  - Cleanup the bfs routines
> 
> [ tom.leiming@gmail.com: Fix crash ]
> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
> LKML-Reference: <1246201486-7308-11-git-send-email-tom.leiming@gmail.com>
> Signed-off-by: Ingo Molnar <mingo@elte.hu>

/usr/src/linux-2.6/kernel/lockdep.c:543: warning: 'print_lock_class_header' defined but not used

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 kernel/lockdep.c |   48 ++++++++++++++++++++++++------------------------
 1 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3718a98..75cf4ad 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -539,30 +539,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 	}
 }
 
-static void print_lock_class_header(struct lock_class *class, int depth)
-{
-	int bit;
-
-	printk("%*s->", depth, "");
-	print_lock_name(class);
-	printk(" ops: %lu", class->ops);
-	printk(" {\n");
-
-	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
-		if (class->usage_mask & (1 << bit)) {
-			int len = depth;
-
-			len += printk("%*s   %s", depth, "", usage_str[bit]);
-			len += printk(" at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
-		}
-	}
-	printk("%*s }\n", depth, "");
-
-	printk("%*s ... key      at: ",depth,"");
-	print_ip_sym((unsigned long)class->key);
-}
-
 static void print_kernel_version(void)
 {
 	printk("%s %.*s\n", init_utsname()->release,
@@ -1277,6 +1253,30 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 	return result;
 }
 
+static void print_lock_class_header(struct lock_class *class, int depth)
+{
+	int bit;
+
+	printk("%*s->", depth, "");
+	print_lock_name(class);
+	printk(" ops: %lu", class->ops);
+	printk(" {\n");
+
+	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+		if (class->usage_mask & (1 << bit)) {
+			int len = depth;
+
+			len += printk("%*s   %s", depth, "", usage_str[bit]);
+			len += printk(" at:\n");
+			print_stack_trace(class->usage_traces + bit, len);
+		}
+	}
+	printk("%*s }\n", depth, "");
+
+	printk("%*s ... key      at: ",depth,"");
+	print_ip_sym((unsigned long)class->key);
+}
+
 /*
  * printk the shortest lock dependencies from @start to @end in reverse order:
  */



^ permalink raw reply related	[flat|nested] 54+ messages in thread

* Re: [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular()  by BFS
  2009-07-13  8:08           ` Dave Young
@ 2009-07-21  3:33             ` Ming Lei
  0 siblings, 0 replies; 54+ messages in thread
From: Ming Lei @ 2009-07-21  3:33 UTC (permalink / raw)
  To: Dave Young; +Cc: a.p.zijlstra, linux-kernel, akpm, mingo

2009/7/13 Dave Young <hidave.darkstar@gmail.com>:
>>
>> Shouldn't be 'static noinline int' here? Same question to following ones.
>>
>
> Sorry for pointing to wrong place. please grep to find them:
>
> 161:+static int noinline print_bfs_bug(int ret)
> 173: static int noinline print_infinite_recursion_bug(void)

Since BFS removes the recursion, many of the noinline annotations may no longer be necessary.
Thanks.

-- 
Lei Ming

^ permalink raw reply	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Print the shortest dependency chain if finding a circle
  2009-06-28 15:04 ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle tom.leiming
                     ` (3 preceding siblings ...)
  2009-07-18 14:23   ` [tip:core/locking] lockdep: Print " tip-bot for Ming Lei
@ 2009-08-02 13:01   ` tip-bot for Ming Lei
  4 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-08-02 13:01 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  c94aa5ca3088018d2a7a9bd3258aefffe29df265
Gitweb:     http://git.kernel.org/tip/c94aa5ca3088018d2a7a9bd3258aefffe29df265
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:49:44 +0200

lockdep: Print the shortest dependency chain if finding a circle

Currently lockdep will print the first circle detected, if one
exists, when acquiring a new (next) lock.

This patch prints the shortest path from the next lock to be
acquired to the previously held lock, if a circle is found.

The patch still uses the current method to check for a circle;
once a circle is found, a breadth-first search algorithm is used
to compute the shortest path from the next lock to the previous
lock in the forward lock-dependency graph.

Printing the shortest path will shorten the dependency chain,
and make troubleshooting for possible circular locking easier.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-2-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 include/linux/lockdep.h    |    6 ++
 kernel/lockdep.c           |  115 ++++++++++++++++++++++++++++++++++++++++----
 kernel/lockdep_internals.h |   83 +++++++++++++++++++++++++++++++
 3 files changed, 195 insertions(+), 9 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b5..9ec026f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
 	struct lock_class		*class;
 	struct stack_trace		trace;
 	int				distance;
+
+	/*The parent field is used to implement breadth-first search,and
+	 *the bit 0 is reused to indicate if the lock has been accessed
+	 *in BFS.
+	 */
+	struct lock_list		*parent;
 };
 
 /*
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef9..93dc70d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -897,6 +897,79 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
+static struct circular_queue  lock_cq;
+static int __search_shortest_path(struct lock_list *source_entry,
+				struct lock_class *target,
+				struct lock_list **target_entry,
+				int forward)
+{
+	struct lock_list *entry;
+	struct circular_queue *cq = &lock_cq;
+	int ret = 1;
+
+	__cq_init(cq);
+
+	mark_lock_accessed(source_entry, NULL);
+	if (source_entry->class == target) {
+		*target_entry = source_entry;
+		ret = 0;
+		goto exit;
+	}
+
+	__cq_enqueue(cq, (unsigned long)source_entry);
+
+	while (!__cq_empty(cq)) {
+		struct lock_list *lock;
+		struct list_head *head;
+
+		__cq_dequeue(cq, (unsigned long *)&lock);
+
+		if (!lock->class) {
+			ret = -2;
+			goto exit;
+		}
+
+		if (forward)
+			head = &lock->class->locks_after;
+		else
+			head = &lock->class->locks_before;
+
+		list_for_each_entry(entry, head, entry) {
+			if (!lock_accessed(entry)) {
+				mark_lock_accessed(entry, lock);
+				if (entry->class == target) {
+					*target_entry = entry;
+					ret = 0;
+					goto exit;
+				}
+
+				if (__cq_enqueue(cq, (unsigned long)entry)) {
+					ret = -1;
+					goto exit;
+				}
+			}
+		}
+	}
+exit:
+	return ret;
+}
+
+static inline int __search_forward_shortest_path(struct lock_list *src_entry,
+				struct lock_class *target,
+				struct lock_list **target_entry)
+{
+	return __search_shortest_path(src_entry, target, target_entry, 1);
+
+}
+
+static inline int __search_backward_shortest_path(struct lock_list *src_entry,
+				struct lock_class *target,
+				struct lock_list **target_entry)
+{
+	return __search_shortest_path(src_entry, target, target_entry, 0);
+
+}
+
 /*
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
@@ -934,7 +1007,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 {
 	struct task_struct *curr = current;
 
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (debug_locks_silent)
 		return 0;
 
 	printk("\n=======================================================\n");
@@ -954,19 +1027,41 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	return 0;
 }
 
-static noinline int print_circular_bug_tail(void)
+static noinline int print_circular_bug(void)
 {
 	struct task_struct *curr = current;
 	struct lock_list this;
+	struct lock_list *target;
+	struct lock_list *parent;
+	int result;
+	unsigned long depth;
 
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	this.class = hlock_class(check_source);
 	if (!save_trace(&this.trace))
 		return 0;
 
-	print_circular_bug_entry(&this, 0);
+	result = __search_forward_shortest_path(&this,
+						hlock_class(check_target),
+						&target);
+	if (result) {
+		printk("\n%s:search shortest path failed:%d\n", __func__,
+			result);
+		return 0;
+	}
+
+	depth = get_lock_depth(target);
+
+	print_circular_bug_header(target, depth);
+
+	parent = get_lock_parent(target);
+
+	while (parent) {
+		print_circular_bug_entry(parent, --depth);
+		parent = get_lock_parent(parent);
+	}
 
 	printk("\nother info that might help us debug this:\n\n");
 	lockdep_print_held_locks(curr);
@@ -1072,14 +1167,15 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	 */
 	list_for_each_entry(entry, &source->locks_after, entry) {
 		if (entry->class == hlock_class(check_target))
-			return print_circular_bug_header(entry, depth+1);
+			return 2;
 		debug_atomic_inc(&nr_cyclic_checks);
-		if (!check_noncircular(entry->class, depth+1))
-			return print_circular_bug_entry(entry, depth+1);
+		if (check_noncircular(entry->class, depth+1) == 2)
+			return 2;
 	}
 	return 1;
 }
 
+
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
@@ -1484,8 +1580,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
-	if (!(check_noncircular(hlock_class(next), 0)))
-		return print_circular_bug_tail();
+	if (check_noncircular(hlock_class(next), 0) == 2)
+		return print_circular_bug();
+
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 699a2ac..6f48d37 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -136,3 +136,86 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)		0
 #endif
+
+/* The circular_queue and helpers is used to implement the
+ * breadth-first search(BFS)algorithem, by which we can build
+ * the shortest path from the next lock to be acquired to the
+ * previous held lock if there is a circular between them.
+ * */
+#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
+struct circular_queue{
+	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
+	unsigned int  front, rear;
+};
+
+#define LOCK_ACCESSED 		1UL
+#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE)  == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1)%MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front)%MAX_CIRCULAR_QUE_SIZE;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+					struct lock_list *parent)
+{
+	lock->parent = (void *) parent + LOCK_ACCESSED;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	return (unsigned long)lock->parent & LOCK_ACCESSED;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return (struct lock_list *)
+		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
+}
+
+static inline unsigned long get_lock_depth(struct lock_list *child)
+{
+	unsigned long depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Improve implementation of BFS
  2009-06-28 15:04   ` [RESEND PATCH 02/11] kernel:lockdep:improve implementation of BFS tom.leiming
  2009-06-28 15:04     ` [RESEND PATCH 03/11] kernel:lockdep: introduce match function to BFS tom.leiming
  2009-07-18 14:23     ` [tip:core/locking] lockdep: Improve implementation of BFS tip-bot for Ming Lei
@ 2009-08-02 13:01     ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-08-02 13:01 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  d588e46155e9c51217b9840db1e94a0f594c1af2
Gitweb:     http://git.kernel.org/tip/d588e46155e9c51217b9840db1e94a0f594c1af2
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:49:46 +0200

lockdep: Improve implementation of BFS

1) replace %MAX_CIRCULAR_QUE_SIZE with &(MAX_CIRCULAR_QUE_SIZE-1),
since MAX_CIRCULAR_QUE_SIZE is defined as a power of 2;

2) use a bitmap to mark whether a lock has been accessed during BFS,
so that it can be cleared quickly, because we may search the graph
many times.
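
The first point leans on a standard identity: when N is a power of two,
i % N == i & (N - 1) for unsigned i, and the AND avoids a division. A
quick self-contained check (assumes unsigned arithmetic):

#include <assert.h>

#define N 4096UL	/* must be a power of 2 for the identity to hold */

int main(void)
{
	unsigned long i;

	for (i = 0; i < 3 * N; i++)
		assert((i % N) == (i & (N - 1)));	/* mask == modulo */
	return 0;
}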

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-3-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c           |   23 ++++++++++++++++-------
 kernel/lockdep_internals.h |   35 +++++++++++++++++++++++------------
 2 files changed, 39 insertions(+), 19 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 93dc70d..5dcca26 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,7 +42,7 @@
 #include <linux/hash.h>
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
-
+#include <linux/bitops.h>
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
@@ -118,7 +118,7 @@ static inline int debug_locks_off_graph_unlock(void)
 static int lockdep_initialized;
 
 unsigned long nr_list_entries;
-static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -897,30 +897,38 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
+unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 static struct circular_queue  lock_cq;
+
 static int __search_shortest_path(struct lock_list *source_entry,
 				struct lock_class *target,
 				struct lock_list **target_entry,
 				int forward)
 {
 	struct lock_list *entry;
+	struct list_head *head;
 	struct circular_queue *cq = &lock_cq;
 	int ret = 1;
 
-	__cq_init(cq);
-
-	mark_lock_accessed(source_entry, NULL);
 	if (source_entry->class == target) {
 		*target_entry = source_entry;
 		ret = 0;
 		goto exit;
 	}
 
+	if (forward)
+		head = &source_entry->class->locks_after;
+	else
+		head = &source_entry->class->locks_before;
+
+	if (list_empty(head))
+		goto exit;
+
+	__cq_init(cq);
 	__cq_enqueue(cq, (unsigned long)source_entry);
 
 	while (!__cq_empty(cq)) {
 		struct lock_list *lock;
-		struct list_head *head;
 
 		__cq_dequeue(cq, (unsigned long *)&lock);
 
@@ -1040,6 +1048,7 @@ static noinline int print_circular_bug(void)
 		return 0;
 
 	this.class = hlock_class(check_source);
+	this.parent = NULL;
 	if (!save_trace(&this.trace))
 		return 0;
 
@@ -1580,10 +1589,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
+
 	if (check_noncircular(hlock_class(next), 0) == 2)
 		return print_circular_bug();
 
-
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
 
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 6f48d37..c2f6594 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -137,23 +137,28 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_read(ptr)		0
 #endif
 
+
+extern unsigned long nr_list_entries;
+extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+extern unsigned long bfs_accessed[];
+
+/*For good efficiency of modular, we use power of 2*/
+#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
+
 /* The circular_queue and helpers is used to implement the
  * breadth-first search(BFS)algorithem, by which we can build
  * the shortest path from the next lock to be acquired to the
  * previous held lock if there is a circular between them.
  * */
-#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
 struct circular_queue{
 	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
 	unsigned int  front, rear;
 };
 
-#define LOCK_ACCESSED 		1UL
-#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
-
 static inline void __cq_init(struct circular_queue *cq)
 {
 	cq->front = cq->rear = 0;
+	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
 }
 
 static inline int __cq_empty(struct circular_queue *cq)
@@ -163,7 +168,7 @@ static inline int __cq_empty(struct circular_queue *cq)
 
 static inline int __cq_full(struct circular_queue *cq)
 {
-	return ((cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE)  == cq->front;
+	return ((cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1))  == cq->front;
 }
 
 static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
@@ -172,7 +177,7 @@ static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
 		return -1;
 
 	cq->element[cq->rear] = elem;
-	cq->rear = (cq->rear + 1)%MAX_CIRCULAR_QUE_SIZE;
+	cq->rear = (cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
 	return 0;
 }
 
@@ -182,30 +187,36 @@ static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
 		return -1;
 
 	*elem = cq->element[cq->front];
-	cq->front = (cq->front + 1)%MAX_CIRCULAR_QUE_SIZE;
+	cq->front = (cq->front + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
 	return 0;
 }
 
 static inline int __cq_get_elem_count(struct circular_queue *cq)
 {
-	return (cq->rear - cq->front)%MAX_CIRCULAR_QUE_SIZE;
+	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
 }
 
 static inline void mark_lock_accessed(struct lock_list *lock,
 					struct lock_list *parent)
 {
-	lock->parent = (void *) parent + LOCK_ACCESSED;
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	lock->parent = parent;
+	set_bit(nr, bfs_accessed);
 }
 
 static inline unsigned long lock_accessed(struct lock_list *lock)
 {
-	return (unsigned long)lock->parent & LOCK_ACCESSED;
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	return test_bit(nr, bfs_accessed);
 }
 
 static inline struct lock_list *get_lock_parent(struct lock_list *child)
 {
-	return (struct lock_list *)
-		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
+	return child->parent;
 }
 
 static inline unsigned long get_lock_depth(struct lock_list *child)

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Introduce match function to BFS
  2009-06-28 15:04     ` [RESEND PATCH 03/11] kernel:lockdep: introduce match function to BFS tom.leiming
  2009-06-28 15:04       ` [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() by BFS tom.leiming
  2009-07-18 14:23       ` [tip:core/locking] lockdep: Introduce match function to BFS tip-bot for Ming Lei
@ 2009-08-02 13:01       ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-08-02 13:01 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  9e2d551ea0d767c0d624965f0c273e942f4be536
Gitweb:     http://git.kernel.org/tip/9e2d551ea0d767c0d624965f0c273e942f4be536
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:49:48 +0200

lockdep: Introduce match function to BFS

1) introduce match() to BFS in order to make it usable for
matching different patterns;

2) also rename some functions to give them more suitable names.
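
Passing the predicate as a callback lets one BFS core serve both cycle
checking (match a target class) and usage searching (match a usage bit).
A user-space sketch of the shape (illustrative int nodes and a trivial
linear "search" standing in for __bfs(); the kernel callbacks take a
struct lock_list instead):

#include <stdio.h>

typedef int (*match_fn)(int node, void *data);

static unsigned int usage_mask[4] = { 0x1, 0x2, 0x4, 0x2 };	/* made-up data */

static int class_equal(int node, void *data)
{
	return node == *(int *)data;		/* noncircular check: target hit? */
}

static int usage_match(int node, void *data)
{
	return usage_mask[node] & (1u << *(unsigned int *)data);
}

static int search(void *data, match_fn match)
{
	int n;

	for (n = 0; n < 4; n++)			/* degenerate stand-in for BFS */
		if (match(n, data))
			return n;
	return -1;
}

int main(void)
{
	int target = 2;
	unsigned int bit = 1;

	printf("class_equal hit at node %d\n", search(&target, class_equal));
	printf("usage_match hit at node %d\n", search(&bit, usage_match));
	return 0;
}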

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-4-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |   43 ++++++++++++++++++++++++++-----------------
 1 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5dcca26..ce6d09e 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -900,17 +900,18 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 static struct circular_queue  lock_cq;
 
-static int __search_shortest_path(struct lock_list *source_entry,
-				struct lock_class *target,
-				struct lock_list **target_entry,
-				int forward)
+static int __bfs(struct lock_list *source_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry,
+			int forward)
 {
 	struct lock_list *entry;
 	struct list_head *head;
 	struct circular_queue *cq = &lock_cq;
 	int ret = 1;
 
-	if (source_entry->class == target) {
+	if (match(source_entry, data)) {
 		*target_entry = source_entry;
 		ret = 0;
 		goto exit;
@@ -945,7 +946,7 @@ static int __search_shortest_path(struct lock_list *source_entry,
 		list_for_each_entry(entry, head, entry) {
 			if (!lock_accessed(entry)) {
 				mark_lock_accessed(entry, lock);
-				if (entry->class == target) {
+				if (match(entry, data)) {
 					*target_entry = entry;
 					ret = 0;
 					goto exit;
@@ -962,19 +963,21 @@ exit:
 	return ret;
 }
 
-static inline int __search_forward_shortest_path(struct lock_list *src_entry,
-				struct lock_class *target,
-				struct lock_list **target_entry)
+static inline int __bfs_forward(struct lock_list *src_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry)
 {
-	return __search_shortest_path(src_entry, target, target_entry, 1);
+	return __bfs(src_entry, data, match, target_entry, 1);
 
 }
 
-static inline int __search_backward_shortest_path(struct lock_list *src_entry,
-				struct lock_class *target,
-				struct lock_list **target_entry)
+static inline int __bfs_backward(struct lock_list *src_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry)
 {
-	return __search_shortest_path(src_entry, target, target_entry, 0);
+	return __bfs(src_entry, data, match, target_entry, 0);
 
 }
 
@@ -1035,6 +1038,11 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	return 0;
 }
 
+static inline int class_equal(struct lock_list *entry, void *data)
+{
+	return entry->class == data;
+}
+
 static noinline int print_circular_bug(void)
 {
 	struct task_struct *curr = current;
@@ -1052,9 +1060,10 @@ static noinline int print_circular_bug(void)
 	if (!save_trace(&this.trace))
 		return 0;
 
-	result = __search_forward_shortest_path(&this,
-						hlock_class(check_target),
-						&target);
+	result = __bfs_forward(&this,
+			hlock_class(check_target),
+			class_equal,
+			&target);
 	if (result) {
 		printk("\n%s:search shortest path failed:%d\n", __func__,
 			result);
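
For illustration, a minimal user-space sketch of the pattern this
patch introduces: one BFS core parameterised by a match() predicate
instead of a hard-coded target comparison. All names here are
hypothetical stand-ins, not kernel API; node ids are assumed to be
unique and smaller than MAX_NODES.

	#include <stdio.h>

	#define MAX_NODES 8

	struct node {
		int id;
		int nr_next;
		struct node *next[MAX_NODES];
	};

	/* Return 0 and set *target if match() fires, 1 otherwise. */
	static int bfs(struct node *src, void *data,
		       int (*match)(struct node *n, void *data),
		       struct node **target)
	{
		struct node *queue[MAX_NODES];
		int seen[MAX_NODES] = { 0 };
		unsigned int front = 0, rear = 0;

		queue[rear++] = src;
		seen[src->id] = 1;
		while (front != rear) {
			struct node *n = queue[front++];

			if (match(n, data)) {
				*target = n;
				return 0;
			}
			for (int i = 0; i < n->nr_next; i++) {
				struct node *m = n->next[i];

				if (!seen[m->id]) {
					seen[m->id] = 1;
					queue[rear++] = m;
				}
			}
		}
		return 1;
	}

	/* One predicate, mirroring class_equal() in the hunk above. */
	static int id_equal(struct node *n, void *data)
	{
		return n->id == (int)(long)data;
	}

	int main(void)
	{
		struct node a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };
		struct node *target = NULL;

		a.next[a.nr_next++] = &b;
		b.next[b.nr_next++] = &c;
		if (!bfs(&a, (void *)2L, id_equal, &target))
			printf("found node %d\n", target->id);
		return 0;
	}

Swapping in a different predicate changes what the same search core
looks for, which is exactly what the later patches in this series do
for the irq-usage checks.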

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Implement check_noncircular() by BFS
  2009-06-28 15:04       ` [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() by BFS tom.leiming
                           ` (2 preceding siblings ...)
  2009-07-18 14:23         ` [tip:core/locking] lockdep: Implement " tip-bot for Ming Lei
@ 2009-08-02 13:02         ` tip-bot for Ming Lei
  3 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-08-02 13:02 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  db0002a32f31060ca900b533d93a074ddf7d5b61
Gitweb:     http://git.kernel.org/tip/db0002a32f31060ca900b533d93a074ddf7d5b61
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:49:50 +0200

lockdep: Implement check_noncircular() by BFS

This patch implements check_noncircular() with BFS and prints
the shortest circle found, if one exists.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-5-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |   89 ++++++++++++++++++++++-------------------------------
 1 files changed, 37 insertions(+), 52 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ce6d09e..5609d30 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -985,12 +985,7 @@ static inline int __bfs_backward(struct lock_list *src_entry,
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
  * checking.
- *
- * (to keep the stackframe of the recursive functions small we
- *  use these global variables, and we also mark various helper
- *  functions as noinline.)
  */
-static struct held_lock *check_source, *check_target;
 
 /*
  * Print a dependency chain entry (this is only done when a deadlock
@@ -1014,7 +1009,9 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
  * header first:
  */
 static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth)
+print_circular_bug_header(struct lock_list *entry, unsigned int depth,
+			struct held_lock *check_src,
+			struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 
@@ -1027,9 +1024,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	printk(  "-------------------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
-	print_lock(check_source);
+	print_lock(check_src);
 	printk("\nbut task is already holding lock:\n");
-	print_lock(check_target);
+	print_lock(check_tgt);
 	printk("\nwhich lock already depends on the new lock.\n\n");
 	printk("\nthe existing dependency chain (in reverse order) is:\n");
 
@@ -1043,36 +1040,24 @@ static inline int class_equal(struct lock_list *entry, void *data)
 	return entry->class == data;
 }
 
-static noinline int print_circular_bug(void)
+static noinline int print_circular_bug(struct lock_list *this,
+				struct lock_list *target,
+				struct held_lock *check_src,
+				struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
-	struct lock_list this;
-	struct lock_list *target;
 	struct lock_list *parent;
-	int result;
 	unsigned long depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	this.class = hlock_class(check_source);
-	this.parent = NULL;
-	if (!save_trace(&this.trace))
+	if (!save_trace(&this->trace))
 		return 0;
 
-	result = __bfs_forward(&this,
-			hlock_class(check_target),
-			class_equal,
-			&target);
-	if (result) {
-		printk("\n%s:search shortest path failed:%d\n", __func__,
-			result);
-		return 0;
-	}
-
 	depth = get_lock_depth(target);
 
-	print_circular_bug_header(target, depth);
+	print_circular_bug_header(target, depth, check_src, check_tgt);
 
 	parent = get_lock_parent(target);
 
@@ -1090,6 +1075,16 @@ static noinline int print_circular_bug(void)
 	return 0;
 }
 
+static noinline int print_bfs_bug(int ret)
+{
+	if (!debug_locks_off_graph_unlock())
+		return 0;
+
+	WARN(1, "lockdep bfs error:%d\n", ret);
+
+	return 0;
+}
+
 #define RECURSION_LIMIT 40
 
 static int noinline print_infinite_recursion_bug(void)
@@ -1168,31 +1163,17 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
  * lead to <target>. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_class *source, unsigned int depth)
+check_noncircular(struct lock_list *root, struct lock_class *target,
+		struct lock_list **target_entry)
 {
-	struct lock_list *entry;
+	int result;
 
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
+	debug_atomic_inc(&nr_cyclic_checks);
 
-	debug_atomic_inc(&nr_cyclic_check_recursions);
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_after, entry) {
-		if (entry->class == hlock_class(check_target))
-			return 2;
-		debug_atomic_inc(&nr_cyclic_checks);
-		if (check_noncircular(entry->class, depth+1) == 2)
-			return 2;
-	}
-	return 1;
-}
+	result = __bfs_forward(root, target, class_equal, target_entry);
 
+	return result;
+}
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
@@ -1586,6 +1567,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 {
 	struct lock_list *entry;
 	int ret;
+	struct lock_list this;
+	struct lock_list *uninitialized_var(target_entry);
 
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
@@ -1596,11 +1579,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * We are using global variables to control the recursion, to
 	 * keep the stackframe size of the recursive functions low:
 	 */
-	check_source = next;
-	check_target = prev;
-
-	if (check_noncircular(hlock_class(next), 0) == 2)
-		return print_circular_bug();
+	this.class = hlock_class(next);
+	this.parent = NULL;
+	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+	if (unlikely(!ret))
+		return print_circular_bug(&this, target_entry, next, prev);
+	else if (unlikely(ret < 0))
+		return print_bfs_bug(ret);
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
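
The BFS conversion also settles on one return convention, visible in
the check_prev_add() hunk above: 0 means the target was reached (a
circle exists), a positive value means no match, and a negative value
signals an internal BFS failure such as a full queue. A hypothetical
stand-alone sketch of a caller dispatching on it:

	#include <stdio.h>

	/* Stand-in for a BFS-based check; returns -1, 0 or 1. */
	static int fake_check(int outcome)
	{
		return outcome;
	}

	static const char *dispatch(int ret)
	{
		if (ret < 0)
			return "print_bfs_bug";		/* internal error */
		if (ret == 0)
			return "print_circular_bug";	/* circle found */
		return "no circle, add dependency";
	}

	int main(void)
	{
		for (int r = -1; r <= 1; r++)
			printf("%2d: %s\n", r, dispatch(fake_check(r)));
		return 0;
	}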

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Implement find_usage_*wards by BFS
  2009-06-28 15:04         ` [RESEND PATCH 05/11] kernel:lockdep:implement find_usage_*wards " tom.leiming
  2009-06-28 15:04           ` [RESEND PATCH 06/11] kernel:lockdep:introduce print_shortest_lock_dependencies tom.leiming
  2009-07-18 14:23           ` [tip:core/locking] lockdep: Implement find_usage_*wards by BFS tip-bot for Ming Lei
@ 2009-08-02 13:02           ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-08-02 13:02 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  d7aaba140a09b7a2295aec061629c880f088de3d
Gitweb:     http://git.kernel.org/tip/d7aaba140a09b7a2295aec061629c880f088de3d
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:49:52 +0200

lockdep: Implement find_usage_*wards by BFS

This patch implements find_usage_*wards() with BFS; these
functions were originally written using DFS.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-6-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |  180 ++++++++++++++++++++++--------------------------------
 1 files changed, 72 insertions(+), 108 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5609d30..b3ade50 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -963,7 +963,7 @@ exit:
 	return ret;
 }
 
-static inline int __bfs_forward(struct lock_list *src_entry,
+static inline int __bfs_forwards(struct lock_list *src_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
 			struct lock_list **target_entry)
@@ -972,7 +972,7 @@ static inline int __bfs_forward(struct lock_list *src_entry,
 
 }
 
-static inline int __bfs_backward(struct lock_list *src_entry,
+static inline int __bfs_backwards(struct lock_list *src_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
 			struct lock_list **target_entry)
@@ -1085,18 +1085,6 @@ static noinline int print_bfs_bug(int ret)
 	return 0;
 }
 
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
-{
-	if (!debug_locks_off_graph_unlock())
-		return 0;
-
-	WARN_ON(1);
-
-	return 0;
-}
-
 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
 					   unsigned int depth)
 {
@@ -1170,7 +1158,7 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
 
 	debug_atomic_inc(&nr_cyclic_checks);
 
-	result = __bfs_forward(root, target, class_equal, target_entry);
+	result = __bfs_forwards(root, target, class_equal, target_entry);
 
 	return result;
 }
@@ -1181,101 +1169,70 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static enum lock_usage_bit find_usage_bit;
 static struct lock_class *forwards_match, *backwards_match;
 
+
+#define   BFS_PROCESS_RET(ret)	do { \
+					if (ret < 0) \
+						return print_bfs_bug(ret); \
+					if (ret == 1) \
+						return 1; \
+				} while (0)
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
+
+
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+			struct lock_list **target_entry)
 {
-	struct lock_list *entry;
-	int ret;
-
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
-
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
+	int result;
 
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
-	if (source->usage_mask & (1 << find_usage_bit)) {
-		forwards_match = source;
-		return 2;
-	}
 
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_after, entry) {
-		debug_atomic_inc(&nr_find_usage_forwards_recursions);
-		ret = find_usage_forwards(entry->class, depth+1);
-		if (ret == 2 || ret == 0)
-			return ret;
-	}
-	return 1;
+	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+	return result;
 }
 
 /*
  * Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+			struct lock_list **target_entry)
 {
-	struct lock_list *entry;
-	int ret;
-
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
-
-	if (!__raw_spin_is_locked(&lockdep_lock))
-		return DEBUG_LOCKS_WARN_ON(1);
-
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
+	int result;
 
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
-	if (source->usage_mask & (1 << find_usage_bit)) {
-		backwards_match = source;
-		return 2;
-	}
 
-	if (!source && debug_locks_off_graph_unlock()) {
-		WARN_ON(1);
-		return 0;
-	}
+	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
 
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_before, entry) {
-		debug_atomic_inc(&nr_find_usage_backwards_recursions);
-		ret = find_usage_backwards(entry->class, depth+1);
-		if (ret == 2 || ret == 0)
-			return ret;
-	}
-	return 1;
+	return result;
 }
 
+
 static int
 print_bad_irq_dependency(struct task_struct *curr,
 			 struct held_lock *prev,
@@ -1343,18 +1300,21 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 	    enum lock_usage_bit bit_forwards, const char *irqclass)
 {
 	int ret;
+	struct lock_list this;
+	struct lock_list *uninitialized_var(target_entry);
+
+	this.parent = NULL;
+
+	this.class = hlock_class(prev);
+	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+	BFS_PROCESS_RET(ret);
+	backwards_match = target_entry->class;
+
+	this.class = hlock_class(next);
+	ret = find_usage_forwards(&this, bit_forwards, &target_entry);
+	BFS_PROCESS_RET(ret);
+	forwards_match = target_entry->class;
 
-	find_usage_bit = bit_backwards;
-	/* fills in <backwards_match> */
-	ret = find_usage_backwards(hlock_class(prev), 0);
-	if (!ret || ret == 1)
-		return ret;
-
-	find_usage_bit = bit_forwards;
-	ret = find_usage_forwards(hlock_class(next), 0);
-	if (!ret || ret == 1)
-		return ret;
-	/* ret == 2 */
 	return print_bad_irq_dependency(curr, prev, next,
 			bit_backwards, bit_forwards, irqclass);
 }
@@ -2029,14 +1989,16 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 		     enum lock_usage_bit bit, const char *irqclass)
 {
 	int ret;
+	struct lock_list root;
+	struct lock_list *uninitialized_var(target_entry);
 
-	find_usage_bit = bit;
-	/* fills in <forwards_match> */
-	ret = find_usage_forwards(hlock_class(this), 0);
-	if (!ret || ret == 1)
-		return ret;
+	root.parent = NULL;
+	root.class = hlock_class(this);
+	ret = find_usage_forwards(&root, bit, &target_entry);
+	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+	return print_irq_inversion_bug(curr, target_entry->class,
+					this, 1, irqclass);
 }
 
 /*
@@ -2048,14 +2010,16 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 		      enum lock_usage_bit bit, const char *irqclass)
 {
 	int ret;
+	struct lock_list root;
+	struct lock_list *uninitialized_var(target_entry);
 
-	find_usage_bit = bit;
-	/* fills in <backwards_match> */
-	ret = find_usage_backwards(hlock_class(this), 0);
-	if (!ret || ret == 1)
-		return ret;
+	root.parent = NULL;
+	root.class = hlock_class(this);
+	ret = find_usage_backwards(&root, bit, &target_entry);
+	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+	return print_irq_inversion_bug(curr, target_entry->class,
+					this, 1, irqclass);
 }
 
 void print_irqtrace_events(struct task_struct *curr)
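
The usage_match() callback above also shows how the old
find_usage_bit global goes away: the usage bit now travels through
the generic void *data argument of __bfs() as a casted integer. A
self-contained user-space sketch of that trick (hypothetical names):

	#include <stdio.h>

	enum usage_bit { BIT_USED_IN_HARDIRQ, BIT_USED_IN_SOFTIRQ };

	struct cls {
		unsigned long usage_mask;
	};

	/* The bit arrives as a pointer-sized integer, not a pointer. */
	static int usage_match(struct cls *c, void *bit)
	{
		return !!(c->usage_mask & (1UL << (long)bit));
	}

	int main(void)
	{
		struct cls c = { .usage_mask = 1UL << BIT_USED_IN_SOFTIRQ };

		printf("softirq: %d\n",
		       usage_match(&c, (void *)(long)BIT_USED_IN_SOFTIRQ));
		printf("hardirq: %d\n",
		       usage_match(&c, (void *)(long)BIT_USED_IN_HARDIRQ));
		return 0;
	}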

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Introduce print_shortest_lock_dependencies
  2009-06-28 15:04           ` [RESEND PATCH 06/11] kernel:lockdep:introduce print_shortest_lock_dependencies tom.leiming
  2009-06-28 15:04             ` [RESEND PATCH 07/11] kernel:lockdep: implement lockdep_count_*ward_deps by BFS tom.leiming
  2009-07-18 14:24             ` [tip:core/locking] lockdep: Introduce print_shortest_lock_dependencies tip-bot for Ming Lei
@ 2009-08-02 13:02             ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-08-02 13:02 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  24208ca76707581a097c01a73fd63781e73d3404
Gitweb:     http://git.kernel.org/tip/24208ca76707581a097c01a73fd63781e73d3404
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:49:54 +0200

lockdep: Introduce print_shortest_lock_dependencies

Since BFS can produce the shortest lock dependency path, print
it via the new print_shortest_lock_dependencies() helper.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-7-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c           |   93 +++++++++++++++++++++++++++++++------------
 kernel/lockdep_internals.h |    4 +-
 2 files changed, 69 insertions(+), 28 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b3ade50..938dc50 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -576,6 +576,36 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 }
 
 /*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+				struct lock_list *root)
+{
+	struct lock_list *entry = leaf;
+	int depth;
+
+	/*compute depth from generated tree by BFS*/
+	depth = get_lock_depth(leaf);
+
+	do {
+		print_lock_class_header(entry->class, depth);
+		printk("%*s ... acquired at:\n", depth, "");
+		print_stack_trace(&entry->trace, 2);
+		printk("\n");
+
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad BFS generated tree\n", __func__);
+			break;
+		}
+
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && (depth >= 0));
+
+	return;
+}
+/*
  * printk all lock dependencies starting at <entry>:
  */
 static void __used
@@ -992,7 +1022,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
  * has been detected):
  */
 static noinline int
-print_circular_bug_entry(struct lock_list *target, unsigned int depth)
+print_circular_bug_entry(struct lock_list *target, int depth)
 {
 	if (debug_locks_silent)
 		return 0;
@@ -1047,7 +1077,7 @@ static noinline int print_circular_bug(struct lock_list *this,
 {
 	struct task_struct *curr = current;
 	struct lock_list *parent;
-	unsigned long depth;
+	int depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
@@ -1169,7 +1199,6 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static struct lock_class *forwards_match, *backwards_match;
 
 
 #define   BFS_PROCESS_RET(ret)	do { \
@@ -1235,6 +1264,10 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
+			 struct lock_list *prev_root,
+			 struct lock_list *next_root,
+			 struct lock_list *backwards_entry,
+			 struct lock_list *forwards_entry,
 			 struct held_lock *prev,
 			 struct held_lock *next,
 			 enum lock_usage_bit bit1,
@@ -1267,26 +1300,32 @@ print_bad_irq_dependency(struct task_struct *curr,
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
-	print_lock_name(backwards_match);
+	print_lock_name(backwards_entry->class);
 	printk("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_match->usage_traces + bit1, 1);
+	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
-	print_lock_name(forwards_match);
+	print_lock_name(forwards_entry->class);
 	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
 	printk("...");
 
-	print_stack_trace(forwards_match->usage_traces + bit2, 1);
+	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	printk("\nother info that might help us debug this:\n\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
-	print_lock_dependencies(backwards_match, 0);
+	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
+	printk(" and the holding lock:\n");
+	if (!save_trace(&prev_root->trace))
+		return 0;
+	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
-	printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
-	print_lock_dependencies(forwards_match, 0);
+	printk("\nthe dependencies between the lock to be acquired");
+	printk(" and %s-irq-unsafe lock:\n", irqclass);
+	if (!save_trace(&next_root->trace))
+		return 0;
+	print_shortest_lock_dependencies(forwards_entry, next_root);
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
@@ -1300,22 +1339,24 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 	    enum lock_usage_bit bit_forwards, const char *irqclass)
 {
 	int ret;
-	struct lock_list this;
+	struct lock_list this, that;
 	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list *uninitialized_var(target_entry1);
 
 	this.parent = NULL;
 
 	this.class = hlock_class(prev);
 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
 	BFS_PROCESS_RET(ret);
-	backwards_match = target_entry->class;
 
-	this.class = hlock_class(next);
-	ret = find_usage_forwards(&this, bit_forwards, &target_entry);
+	that.parent = NULL;
+	that.class = hlock_class(next);
+	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
 	BFS_PROCESS_RET(ret);
-	forwards_match = target_entry->class;
 
-	return print_bad_irq_dependency(curr, prev, next,
+	return print_bad_irq_dependency(curr, &this, &that,
+			target_entry, target_entry1,
+			prev, next,
 			bit_backwards, bit_forwards, irqclass);
 }
 
@@ -1944,7 +1985,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
  * print irq inversion bug:
  */
 static int
-print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
+print_irq_inversion_bug(struct task_struct *curr,
+			struct lock_list *root, struct lock_list *other,
 			struct held_lock *this, int forwards,
 			const char *irqclass)
 {
@@ -1962,17 +2004,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
 		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
-	print_lock_name(other);
+	print_lock_name(other->class);
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
 	printk("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe first lock's dependencies:\n");
-	print_lock_dependencies(hlock_class(this), 0);
-
-	printk("\nthe second lock's dependencies:\n");
-	print_lock_dependencies(other, 0);
+	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+	if (!save_trace(&root->trace))
+		return 0;
+	print_shortest_lock_dependencies(other, root);
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
@@ -1997,7 +2038,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 	ret = find_usage_forwards(&root, bit, &target_entry);
 	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, target_entry->class,
+	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
 }
 
@@ -2018,7 +2059,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	ret = find_usage_backwards(&root, bit, &target_entry);
 	BFS_PROCESS_RET(ret);
 
-	return print_irq_inversion_bug(curr, target_entry->class,
+	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
 }
 
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c2f6594..b115aaa 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -219,9 +219,9 @@ static inline struct lock_list *get_lock_parent(struct lock_list *child)
 	return child->parent;
 }
 
-static inline unsigned long get_lock_depth(struct lock_list *child)
+static inline int get_lock_depth(struct lock_list *child)
 {
-	unsigned long depth = 0;
+	int depth = 0;
 	struct lock_list *parent;
 
 	while ((parent = get_lock_parent(child))) {
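
The depth bookkeeping is the subtle part of the new printing helper:
get_lock_depth() recovers the distance to the search root by walking
the parent pointers BFS recorded, and the printing loop counts the
depth back down, bailing out if it reaches zero before the root (a
corrupted BFS tree). A stand-alone sketch under the same assumptions,
with hypothetical types:

	#include <stdio.h>

	struct entry {
		struct entry *parent;	/* filled in by the BFS */
		const char *name;
	};

	static int get_depth(struct entry *child)
	{
		int depth = 0;

		while (child->parent) {
			child = child->parent;
			depth++;
		}
		return depth;
	}

	/* Leaf first at the largest depth, root last at depth 0. */
	static void print_shortest(struct entry *leaf, struct entry *root)
	{
		struct entry *e = leaf;
		int depth = get_depth(leaf);

		do {
			printf("%*s-> %s\n", depth, "", e->name);
			if (depth == 0 && e != root) {
				printf("bad BFS generated tree\n");
				break;
			}
			e = e->parent;
			depth--;
		} while (e && depth >= 0);
	}

	int main(void)
	{
		struct entry root = { NULL, "lock-X" };
		struct entry mid  = { &root, "lock-Y" };
		struct entry leaf = { &mid,  "lock-Z" };

		print_shortest(&leaf, &root);
		return 0;
	}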

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Implement lockdep_count_*ward_deps by BFS
  2009-06-28 15:04             ` [RESEND PATCH 07/11] kernel:lockdep: implement lockdep_count_*ward_deps by BFS tom.leiming
  2009-06-28 15:04               ` [RESEND PATCH 08/11] kernel:lockdep: update memory usage introduced " tom.leiming
  2009-07-18 14:24               ` [tip:core/locking] lockdep: Implement lockdep_count_*ward_deps " tip-bot for Ming Lei
@ 2009-08-02 13:02               ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-08-02 13:02 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  ef681026ff85c49b7399ceec3eb62bbbcce605e8
Gitweb:     http://git.kernel.org/tip/ef681026ff85c49b7399ceec3eb62bbbcce605e8
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:49:56 +0200

lockdep: Implement lockdep_count_*ward_deps by BFS

Implement lockdep_count_{for,back}ward_deps() using BFS.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-8-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |   52 +++++++++++++++++++++++++---------------------------
 1 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 938dc50..b896f23 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1115,61 +1115,59 @@ static noinline int print_bfs_bug(int ret)
 	return 0;
 }
 
-unsigned long __lockdep_count_forward_deps(struct lock_class *class,
-					   unsigned int depth)
+static int noop_count(struct lock_list *entry, void *data)
 {
-	struct lock_list *entry;
-	unsigned long ret = 1;
+	(*(unsigned long *)data)++;
+	return 0;
+}
 
-	if (lockdep_dependency_visit(class, depth))
-		return 0;
+unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+{
+	unsigned long  count = 0;
+	struct lock_list *uninitialized_var(target_entry);
 
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_after, entry)
-		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
 
-	return ret;
+	return count;
 }
-
 unsigned long lockdep_count_forward_deps(struct lock_class *class)
 {
 	unsigned long ret, flags;
+	struct lock_list this;
+
+	this.parent = NULL;
+	this.class = class;
 
 	local_irq_save(flags);
 	__raw_spin_lock(&lockdep_lock);
-	ret = __lockdep_count_forward_deps(class, 0);
+	ret = __lockdep_count_forward_deps(&this);
 	__raw_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
 }
 
-unsigned long __lockdep_count_backward_deps(struct lock_class *class,
-					    unsigned int depth)
+unsigned long __lockdep_count_backward_deps(struct lock_list *this)
 {
-	struct lock_list *entry;
-	unsigned long ret = 1;
+	unsigned long  count = 0;
+	struct lock_list *uninitialized_var(target_entry);
 
-	if (lockdep_dependency_visit(class, depth))
-		return 0;
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_before, entry)
-		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
 
-	return ret;
+	return count;
 }
 
 unsigned long lockdep_count_backward_deps(struct lock_class *class)
 {
 	unsigned long ret, flags;
+	struct lock_list this;
+
+	this.parent = NULL;
+	this.class = class;
 
 	local_irq_save(flags);
 	__raw_spin_lock(&lockdep_lock);
-	ret = __lockdep_count_backward_deps(class, 0);
+	ret = __lockdep_count_backward_deps(&this);
 	__raw_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
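
The counting trick deserves a note: noop_count() is a match()
callback that never reports a match but bumps a counter, so the
search degenerates into a full traversal that counts every reachable
entry. A self-contained user-space sketch of the idea, with the graph
reduced to a simple chain and hypothetical names:

	#include <stdio.h>

	struct node {
		struct node *next;
	};

	/* Always "no match": forces the walk to visit everything. */
	static int noop_count(struct node *n, void *data)
	{
		(void)n;
		(*(unsigned long *)data)++;
		return 0;
	}

	static void visit_all(struct node *src, void *data,
			      int (*match)(struct node *n, void *data))
	{
		for (struct node *n = src; n; n = n->next)
			if (match(n, data))
				break;	/* never taken with noop_count */
	}

	int main(void)
	{
		struct node c = { NULL }, b = { &c }, a = { &b };
		unsigned long count = 0;

		visit_all(&a, &count, noop_count);
		printf("reachable: %lu\n", count);	/* 3 */
		return 0;
	}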
 

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Update memory usage introduced by BFS
  2009-06-28 15:04               ` [RESEND PATCH 08/11] kernel:lockdep: update memory usage introduced " tom.leiming
  2009-06-28 15:04                 ` [RESEND PATCH 09/11] kernel:lockdep:add statistics info for max bfs queue depth tom.leiming
  2009-07-18 14:24                 ` [tip:core/locking] lockdep: Update memory usage introduced by BFS tip-bot for Ming Lei
@ 2009-08-02 13:02                 ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-08-02 13:02 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  4dd861d6467007681991d8ec079d928db2018cbb
Gitweb:     http://git.kernel.org/tip/4dd861d6467007681991d8ec079d928db2018cbb
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:51:52 +0200

lockdep: Update memory usage introduced by BFS

Also account for the memory used by the BFS data structures.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
[ fix build for !PROVE_LOCKING ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-9-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c |    6 +++++-
 1 files changed, 5 insertions(+), 1 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b896f23..6358cf7 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3429,7 +3429,11 @@ void __init lockdep_info(void)
 		sizeof(struct list_head) * CLASSHASH_SIZE +
 		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
 		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
-		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
+		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024
+#ifdef CONFIG_PROVE_LOCKING
+		+ sizeof(struct circular_queue) + sizeof(bfs_accessed)
+#endif
+		);
 
 	printk(" per task-struct memory footprint: %lu bytes\n",
 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
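
For scale, assuming the constants visible elsewhere in this series (a
4096-element circular queue of unsigned longs plus two unsigned int
indices) and, purely for the sake of the example, an 8192-entry
bfs_accessed bitmap: on a 64-bit build this adds roughly
4096 * 8 + 8 = 32776 bytes for the queue and 8192 / 8 = 1024 bytes
for the bitmap, about 33 KiB of static data in total, hence the extra
terms in the printout above.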

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: Add statistics info for max bfs queue depth
  2009-06-28 15:04                 ` [RESEND PATCH 09/11] kernel:lockdep:add statistics info for max bfs queue depth tom.leiming
  2009-06-28 15:04                   ` [RESEND PATCH 10/11] BFS cleanup tom.leiming
  2009-07-18 14:24                   ` [tip:core/locking] lockdep: Add statistics info for max bfs queue depth tip-bot for Ming Lei
@ 2009-08-02 13:03                   ` tip-bot for Ming Lei
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Ming Lei @ 2009-08-02 13:03 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, tom.leiming, mingo

Commit-ID:  12f3dfd022d7e616757a94f0538d3d525d806a16
Gitweb:     http://git.kernel.org/tip/12f3dfd022d7e616757a94f0538d3d525d806a16
Author:     Ming Lei <tom.leiming@gmail.com>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:52:03 +0200

lockdep: Add statistics info for max bfs queue depth

Add BFS statistics to the existing lockdep stats.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-10-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/lockdep.c           |    6 +++++-
 kernel/lockdep_internals.h |    3 ++-
 kernel/lockdep_proc.c      |    2 ++
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 6358cf7..744da62 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -929,7 +929,7 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 
 unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
 static struct circular_queue  lock_cq;
-
+unsigned int max_bfs_queue_depth;
 static int __bfs(struct lock_list *source_entry,
 			void *data,
 			int (*match)(struct lock_list *entry, void *data),
@@ -975,6 +975,7 @@ static int __bfs(struct lock_list *source_entry,
 
 		list_for_each_entry(entry, head, entry) {
 			if (!lock_accessed(entry)) {
+				unsigned int cq_depth;
 				mark_lock_accessed(entry, lock);
 				if (match(entry, data)) {
 					*target_entry = entry;
@@ -986,6 +987,9 @@ static int __bfs(struct lock_list *source_entry,
 					ret = -1;
 					goto exit;
 				}
+				cq_depth = __cq_get_elem_count(cq);
+				if (max_bfs_queue_depth < cq_depth)
+					max_bfs_queue_depth = cq_depth;
 			}
 		}
 	}
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index b115aaa..6baa880 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -138,6 +138,7 @@ extern atomic_t nr_find_usage_backwards_recursions;
 #endif
 
 
+extern unsigned int max_bfs_queue_depth;
 extern unsigned long nr_list_entries;
 extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 extern unsigned long bfs_accessed[];
@@ -191,7 +192,7 @@ static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
 	return 0;
 }
 
-static inline int __cq_get_elem_count(struct circular_queue *cq)
+static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
 {
 	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
 }
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d7135aa..9a1bf34 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -411,6 +411,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 			max_lockdep_depth);
 	seq_printf(m, " max recursion depth:           %11u\n",
 			max_recursion_depth);
+	seq_printf(m, " max bfs queue depth:           %11u\n",
+			max_bfs_queue_depth);
 	lockdep_stats_debug_show(m);
 	seq_printf(m, " debug_locks:                   %11u\n",
 			debug_locks);
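
The masked subtraction in __cq_get_elem_count() is what makes this
statistic cheap: since the queue size is a power of two,
(rear - front) & (size - 1) yields the number of queued elements even
after the unsigned indices wrap around. A quick stand-alone check:

	#include <stdio.h>

	#define SIZE 4096U		/* power of two, as in the queue */
	#define MASK (SIZE - 1)

	int main(void)
	{
		unsigned int front = 4090, rear = 2;	/* rear wrapped */

		/* (2 - 4090) mod 4096 == 8 elements in flight */
		printf("%u\n", (rear - front) & MASK);
		return 0;
	}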

^ permalink raw reply related	[flat|nested] 54+ messages in thread

* [tip:core/locking] lockdep: BFS cleanup
  2009-06-28 15:04                   ` [RESEND PATCH 10/11] BFS cleanup tom.leiming
  2009-06-28 15:04                     ` [RESEND PATCH 11/11] kernel:lockdep:fix return value of check_usage*() tom.leiming
  2009-07-18 14:24                     ` [tip:core/locking] lockdep: BFS cleanup tip-bot for Peter Zijlstra
@ 2009-08-02 13:03                     ` tip-bot for Peter Zijlstra
  2 siblings, 0 replies; 54+ messages in thread
From: tip-bot for Peter Zijlstra @ 2009-08-02 13:03 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: linux-kernel, hpa, mingo, a.p.zijlstra, tglx, mingo

Commit-ID:  af012961450949ea297b209e091bd1a3805b8a0a
Gitweb:     http://git.kernel.org/tip/af012961450949ea297b209e091bd1a3805b8a0a
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: Thu, 16 Jul 2009 15:44:29 +0200
Committer:  Peter Zijlstra <a.p.zijlstra@chello.nl>
CommitDate: Fri, 24 Jul 2009 10:53:29 +0200

lockdep: BFS cleanup

Some cleanups of the lockdep code after the BFS series:

 - Remove the last traces of the generation id
 - Fix up the comment style
 - Move the BFS routines into lockdep.c
 - Clean up the BFS routines

[ tom.leiming@gmail.com: Fix crash ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-11-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 include/linux/lockdep.h    |    7 +-
 kernel/lockdep.c           |  284 ++++++++++++++++++++++++++------------------
 kernel/lockdep_internals.h |   97 +---------------
 3 files changed, 175 insertions(+), 213 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9ec026f..12aabfc 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -58,7 +58,6 @@ struct lock_class {
 
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
-	unsigned int			dep_gen_id;
 
 	/*
 	 * IRQ/softirq usage tracking bits:
@@ -150,9 +149,9 @@ struct lock_list {
 	struct stack_trace		trace;
 	int				distance;
 
-	/*The parent field is used to implement breadth-first search,and
-	 *the bit 0 is reused to indicate if the lock has been accessed
-	 *in BFS.
+	/*
+	 * The parent field is used to implement breadth-first search, and the
+	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
 	 */
 	struct lock_list		*parent;
 };
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 744da62..1cedb00 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,6 +43,7 @@
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
+
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
@@ -118,7 +119,7 @@ static inline int debug_locks_off_graph_unlock(void)
 static int lockdep_initialized;
 
 unsigned long nr_list_entries;
-struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -390,19 +391,6 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
-static unsigned int lockdep_dependency_gen_id;
-
-static bool lockdep_dependency_visit(struct lock_class *source,
-				     unsigned int depth)
-{
-	if (!depth)
-		lockdep_dependency_gen_id++;
-	if (source->dep_gen_id == lockdep_dependency_gen_id)
-		return true;
-	source->dep_gen_id = lockdep_dependency_gen_id;
-	return false;
-}
-
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -551,88 +539,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 	}
 }
 
-static void print_lock_class_header(struct lock_class *class, int depth)
-{
-	int bit;
-
-	printk("%*s->", depth, "");
-	print_lock_name(class);
-	printk(" ops: %lu", class->ops);
-	printk(" {\n");
-
-	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
-		if (class->usage_mask & (1 << bit)) {
-			int len = depth;
-
-			len += printk("%*s   %s", depth, "", usage_str[bit]);
-			len += printk(" at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
-		}
-	}
-	printk("%*s }\n", depth, "");
-
-	printk("%*s ... key      at: ",depth,"");
-	print_ip_sym((unsigned long)class->key);
-}
-
-/*
- * printk the shortest lock dependencies from @start to @end in reverse order:
- */
-static void __used
-print_shortest_lock_dependencies(struct lock_list *leaf,
-				struct lock_list *root)
-{
-	struct lock_list *entry = leaf;
-	int depth;
-
-	/*compute depth from generated tree by BFS*/
-	depth = get_lock_depth(leaf);
-
-	do {
-		print_lock_class_header(entry->class, depth);
-		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
-		printk("\n");
-
-		if (depth == 0 && (entry != root)) {
-			printk("lockdep:%s bad BFS generated tree\n", __func__);
-			break;
-		}
-
-		entry = get_lock_parent(entry);
-		depth--;
-	} while (entry && (depth >= 0));
-
-	return;
-}
-/*
- * printk all lock dependencies starting at <entry>:
- */
-static void __used
-print_lock_dependencies(struct lock_class *class, int depth)
-{
-	struct lock_list *entry;
-
-	if (lockdep_dependency_visit(class, depth))
-		return;
-
-	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
-		return;
-
-	print_lock_class_header(class, depth);
-
-	list_for_each_entry(entry, &class->locks_after, entry) {
-		if (DEBUG_LOCKS_WARN_ON(!entry->class))
-			return;
-
-		print_lock_dependencies(entry->class, depth + 1);
-
-		printk("%*s ... acquired at:\n",depth,"");
-		print_stack_trace(&entry->trace, 2);
-		printk("\n");
-	}
-}
-
 static void print_kernel_version(void)
 {
 	printk("%s %.*s\n", init_utsname()->release,
@@ -927,14 +833,106 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
-unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
-static struct circular_queue  lock_cq;
+/*For good efficiency of modular, we use power of 2*/
+#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
+#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
+
+/* The circular_queue and helpers is used to implement the
+ * breadth-first search(BFS)algorithem, by which we can build
+ * the shortest path from the next lock to be acquired to the
+ * previous held lock if there is a circular between them.
+ * */
+struct circular_queue {
+	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+	unsigned int  front, rear;
+};
+
+static struct circular_queue lock_cq;
+static unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
+
 unsigned int max_bfs_queue_depth;
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1) & CQ_MASK) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front) & CQ_MASK;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+					struct lock_list *parent)
+{
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	lock->parent = parent;
+	set_bit(nr, bfs_accessed);
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	return test_bit(nr, bfs_accessed);
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return child->parent;
+}
+
+static inline int get_lock_depth(struct lock_list *child)
+{
+	int depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}
+
 static int __bfs(struct lock_list *source_entry,
-			void *data,
-			int (*match)(struct lock_list *entry, void *data),
-			struct lock_list **target_entry,
-			int forward)
+		 void *data,
+		 int (*match)(struct lock_list *entry, void *data),
+		 struct lock_list **target_entry,
+		 int forward)
 {
 	struct lock_list *entry;
 	struct list_head *head;
@@ -1202,14 +1200,6 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
 
-
-#define   BFS_PROCESS_RET(ret)	do { \
-					if (ret < 0) \
-						return print_bfs_bug(ret); \
-					if (ret == 1) \
-						return 1; \
-				} while (0)
-
 static inline int usage_match(struct lock_list *entry, void *bit)
 {
 	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
@@ -1263,6 +1253,60 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 	return result;
 }
 
+static void print_lock_class_header(struct lock_class *class, int depth)
+{
+	int bit;
+
+	printk("%*s->", depth, "");
+	print_lock_name(class);
+	printk(" ops: %lu", class->ops);
+	printk(" {\n");
+
+	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+		if (class->usage_mask & (1 << bit)) {
+			int len = depth;
+
+			len += printk("%*s   %s", depth, "", usage_str[bit]);
+			len += printk(" at:\n");
+			print_stack_trace(class->usage_traces + bit, len);
+		}
+	}
+	printk("%*s }\n", depth, "");
+
+	printk("%*s ... key      at: ",depth,"");
+	print_ip_sym((unsigned long)class->key);
+}
+
+/*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+				struct lock_list *root)
+{
+	struct lock_list *entry = leaf;
+	int depth;
+
+	/*compute depth from generated tree by BFS*/
+	depth = get_lock_depth(leaf);
+
+	do {
+		print_lock_class_header(entry->class, depth);
+		printk("%*s ... acquired at:\n", depth, "");
+		print_stack_trace(&entry->trace, 2);
+		printk("\n");
+
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad BFS generated tree\n", __func__);
+			break;
+		}
+
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && (depth >= 0));
+
+	return;
+}
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
@@ -1349,12 +1393,18 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 	this.class = hlock_class(prev);
 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	that.parent = NULL;
 	that.class = hlock_class(next);
 	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	return print_bad_irq_dependency(curr, &this, &that,
 			target_entry, target_entry1,
@@ -2038,7 +2088,10 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_forwards(&root, bit, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
@@ -2059,7 +2112,10 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_backwards(&root, bit, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 6baa880..a2ee95a 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned int max_bfs_queue_depth;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
@@ -136,98 +138,3 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)		0
 #endif
-
-
-extern unsigned int max_bfs_queue_depth;
-extern unsigned long nr_list_entries;
-extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
-extern unsigned long bfs_accessed[];
-
-/*For good efficiency of modular, we use power of 2*/
-#define  MAX_CIRCULAR_QUE_SIZE	    4096UL
-
-/* The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
- * */
-struct circular_queue{
-	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
-	unsigned int  front, rear;
-};
-
-static inline void __cq_init(struct circular_queue *cq)
-{
-	cq->front = cq->rear = 0;
-	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
-}
-
-static inline int __cq_empty(struct circular_queue *cq)
-{
-	return (cq->front == cq->rear);
-}
-
-static inline int __cq_full(struct circular_queue *cq)
-{
-	return ((cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1))  == cq->front;
-}
-
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
-{
-	if (__cq_full(cq))
-		return -1;
-
-	cq->element[cq->rear] = elem;
-	cq->rear = (cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
-{
-	if (__cq_empty(cq))
-		return -1;
-
-	*elem = cq->element[cq->front];
-	cq->front = (cq->front + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
-{
-	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
-}
-
-static inline void mark_lock_accessed(struct lock_list *lock,
-					struct lock_list *parent)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	lock->parent = parent;
-	set_bit(nr, bfs_accessed);
-}
-
-static inline unsigned long lock_accessed(struct lock_list *lock)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	return test_bit(nr, bfs_accessed);
-}
-
-static inline struct lock_list *get_lock_parent(struct lock_list *child)
-{
-	return child->parent;
-}
-
-static inline int get_lock_depth(struct lock_list *child)
-{
-	int depth = 0;
-	struct lock_list *parent;
-
-	while ((parent = get_lock_parent(child))) {
-		child = parent;
-		depth++;
-	}
-	return depth;
-}
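
One detail worth noting in the moved helpers: mark_lock_accessed()
derives the bitmap index by pointer subtraction from the statically
sized list_entries[] array, so no per-entry "visited" field is
needed. A user-space sketch of the same trick, with hypothetical
names and a plain byte array standing in for the bitmap:

	#include <stdio.h>

	#define MAX_ENTRIES 64

	struct entry {
		int payload;
	};

	static struct entry entries[MAX_ENTRIES];
	static unsigned char accessed[MAX_ENTRIES];

	static void mark_accessed(struct entry *e)
	{
		unsigned long nr = e - entries;	/* index via pointer math */

		if (nr >= MAX_ENTRIES)
			return;		/* lockdep WARN_ONs here instead */
		accessed[nr] = 1;
	}

	int main(void)
	{
		mark_accessed(&entries[7]);
		printf("%d %d\n", accessed[7], accessed[8]);	/* 1 0 */
		return 0;
	}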

^ permalink raw reply related	[flat|nested] 54+ messages in thread

end of thread, other threads:[~2009-08-02 13:03 UTC | newest]

Thread overview: 54+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-06-28 15:04 [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS tom.leiming
2009-06-28 15:04 ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle tom.leiming
2009-06-28 15:04   ` [RESEND PATCH 02/11] kernel:lockdep:improve implementation of BFS tom.leiming
2009-06-28 15:04     ` [RESEND PATCH 03/11] kernel:lockdep: introduce match function to BFS tom.leiming
2009-06-28 15:04       ` [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() by BFS tom.leiming
2009-06-28 15:04         ` [RESEND PATCH 05/11] kernel:lockdep:implement find_usage_*wards " tom.leiming
2009-06-28 15:04           ` [RESEND PATCH 06/11] kernel:lockdep:introduce print_shortest_lock_dependencies tom.leiming
2009-06-28 15:04             ` [RESEND PATCH 07/11] kernel:lockdep: implement lockdep_count_*ward_deps by BFS tom.leiming
2009-06-28 15:04               ` [RESEND PATCH 08/11] kernel:lockdep: update memory usage introduced " tom.leiming
2009-06-28 15:04                 ` [RESEND PATCH 09/11] kernel:lockdep:add statistics info for max bfs queue depth tom.leiming
2009-06-28 15:04                   ` [RESEND PATCH 10/11] BFS cleanup tom.leiming
2009-06-28 15:04                     ` [RESEND PATCH 11/11] kernel:lockdep:fix return value of check_usage*() tom.leiming
2009-07-18 14:24                     ` [tip:core/locking] lockdep: BFS cleanup tip-bot for Peter Zijlstra
2009-07-18 17:23                       ` Peter Zijlstra
2009-08-02 13:03                     ` tip-bot for Peter Zijlstra
2009-07-18 14:24                   ` [tip:core/locking] lockdep: Add statistics info for max bfs queue depth tip-bot for Ming Lei
2009-08-02 13:03                   ` tip-bot for Ming Lei
2009-07-18 14:24                 ` [tip:core/locking] lockdep: Update memory usage introduced by BFS tip-bot for Ming Lei
2009-07-18 17:23                   ` Peter Zijlstra
2009-08-02 13:02                 ` tip-bot for Ming Lei
2009-07-18 14:24               ` [tip:core/locking] lockdep: Implement lockdep_count_*ward_deps " tip-bot for Ming Lei
2009-08-02 13:02               ` tip-bot for Ming Lei
2009-07-18 14:24             ` [tip:core/locking] lockdep: Introduce print_shortest_lock_dependencies tip-bot for Ming Lei
2009-08-02 13:02             ` tip-bot for Ming Lei
2009-07-18 14:23           ` [tip:core/locking] lockdep: Implement find_usage_*wards by BFS tip-bot for Ming Lei
2009-08-02 13:02           ` tip-bot for Ming Lei
2009-07-13  8:02         ` [RESEND PATCH 04/11] kernel:lockdep:implement check_noncircular() " Dave Young
2009-07-13  8:08           ` Dave Young
2009-07-21  3:33             ` Ming Lei
2009-07-18 14:23         ` [tip:core/locking] lockdep: Implement " tip-bot for Ming Lei
2009-08-02 13:02         ` tip-bot for Ming Lei
2009-07-18 14:23       ` [tip:core/locking] lockdep: Introduce match function to BFS tip-bot for Ming Lei
2009-08-02 13:01       ` tip-bot for Ming Lei
2009-07-18 14:23     ` [tip:core/locking] lockdep: Improve implementation of BFS tip-bot for Ming Lei
2009-08-02 13:01     ` tip-bot for Ming Lei
2009-07-11 21:30   ` [RESEND PATCH 01/11] kernel:lockdep:print the shortest dependency chain if finding a circle Frederic Weisbecker
2009-07-12  2:42     ` Ming Lei
2009-07-13  7:01       ` Ingo Molnar
2009-07-13  9:14         ` Peter Zijlstra
2009-07-13 13:56           ` Ming Lei
2009-07-13 13:51         ` Ming Lei
2009-07-13  9:01   ` Dave Young
2009-07-18 14:23   ` [tip:core/locking] lockdep: Print " tip-bot for Ming Lei
2009-08-02 13:01   ` tip-bot for Ming Lei
2009-07-11  0:43 ` [RESEND PATCH 0/11] kernel:lockdep:replace DFS with BFS Frederic Weisbecker
2009-07-11  3:25   ` Ming Lei
2009-07-11 21:09     ` Frederic Weisbecker
2009-07-12  2:29       ` Ming Lei
2009-07-13  7:02         ` Ingo Molnar
2009-07-13  9:16 ` Peter Zijlstra
2009-07-16  4:39   ` Ming Lei
2009-07-16  5:22     ` Peter Zijlstra
2009-07-16  7:12       ` Ming Lei
2009-07-16  9:54         ` Peter Zijlstra
