linux-kernel.vger.kernel.org archive mirror
* [PATCH 1/2] lockdep: lock_set_cmp_fn()
@ 2023-05-09 19:58 Kent Overstreet
  2023-05-09 19:58 ` [PATCH 2/2] bcache: Convert to lock_cmp_fn Kent Overstreet
                   ` (3 more replies)
  0 siblings, 4 replies; 13+ messages in thread
From: Kent Overstreet @ 2023-05-09 19:58 UTC (permalink / raw)
  To: linux-kernel, peterz; +Cc: Kent Overstreet, Ingo Molnar

This implements a new interface to lockdep, lock_set_cmp_fn(), for
defining an ordering when taking multiple locks of the same class.

This provides a more rigorous mechanism than the subclass mechanism;
it's hoped that this might eventually be able to replace subclasses.
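
As a rough usage sketch (hypothetical struct and fields, not part of
this patch; patch 2/2 has the real bcache conversion): the comparison
function is attached to a lock instance once, and lockdep then treats
nesting two locks of that class as valid only when cmp_fn(held, new)
returns a negative value, i.e. locks must be acquired in ascending cmp
order. A lock_print_fn may also be supplied to decorate lockdep
reports, as patch 2/2 does.

  /* hypothetical example; needs <linux/mutex.h> and <linux/lockdep.h> */
  struct foo {
	struct mutex	lock;
	unsigned long	idx;	/* foo locks nest in increasing idx order */
  };

  #ifdef CONFIG_PROVE_LOCKING
  static int foo_lock_cmp_fn(const struct lockdep_map *_a,
			     const struct lockdep_map *_b)
  {
	const struct foo *a = container_of(_a, struct foo, lock.dep_map);
	const struct foo *b = container_of(_b, struct foo, lock.dep_map);

	/* negative: 'a' may be held while acquiring 'b' */
	return a->idx < b->idx ? -1 : a->idx > b->idx ? 1 : 0;
  }
  #endif

  static void foo_init(struct foo *f, unsigned long idx)
  {
	f->idx = idx;
	mutex_init(&f->lock);
	lock_set_cmp_fn(&f->lock, foo_lock_cmp_fn, NULL);	/* no print_fn */
  }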

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Peter Zijlstra <peterz@infradead.org> (maintainer:LOCKING PRIMITIVES)
Cc: Ingo Molnar <mingo@redhat.com> (maintainer:LOCKING PRIMITIVES)
---
 include/linux/lockdep.h       |   8 +++
 include/linux/lockdep_types.h |   8 +++
 kernel/locking/lockdep.c      | 115 +++++++++++++++++++++++++---------
 3 files changed, 101 insertions(+), 30 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1023f349af..2b4497ee3d 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -379,6 +379,14 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
 		do { (void)(key); } while (0)
 #define lockdep_set_subclass(lock, sub)		do { } while (0)
 
+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);
+
+#define lock_set_cmp_fn(lock, ...)	lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
+#else
+#define lock_set_cmp_fn(lock, ...)	do {} while (0)
+#endif
+
 #define lockdep_set_novalidate_class(lock) do { } while (0)
 
 /*
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index d22430840b..8bf79c4e48 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -84,6 +84,11 @@ struct lock_trace;
 
 #define LOCKSTAT_POINTS		4
 
+struct lockdep_map;
+typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
+			   const struct lockdep_map *b);
+typedef void (*lock_print_fn)(const struct lockdep_map *map);
+
 /*
  * The lock-class itself. The order of the structure members matters.
  * reinit_class() zeroes the key member and all subsequent members.
@@ -109,6 +114,9 @@ struct lock_class {
 	struct list_head		locks_after, locks_before;
 
 	const struct lockdep_subclass_key *key;
+	lock_cmp_fn			cmp_fn;
+	lock_print_fn			print_fn;
+
 	unsigned int			subclass;
 	unsigned int			dep_gen_id;
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 50d4863974..7b8c16cbef 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -709,7 +709,7 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
 	usage[i] = '\0';
 }
 
-static void __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct held_lock *hlock, struct lock_class *class)
 {
 	char str[KSYM_NAME_LEN];
 	const char *name;
@@ -724,17 +724,19 @@ static void __print_lock_name(struct lock_class *class)
 			printk(KERN_CONT "#%d", class->name_version);
 		if (class->subclass)
 			printk(KERN_CONT "/%d", class->subclass);
+		if (hlock && class->print_fn)
+			class->print_fn(hlock->instance);
 	}
 }
 
-static void print_lock_name(struct lock_class *class)
+static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
 {
 	char usage[LOCK_USAGE_CHARS];
 
 	get_usage_chars(class, usage);
 
 	printk(KERN_CONT " (");
-	__print_lock_name(class);
+	__print_lock_name(hlock, class);
 	printk(KERN_CONT "){%s}-{%d:%d}", usage,
 			class->wait_type_outer ?: class->wait_type_inner,
 			class->wait_type_inner);
@@ -772,7 +774,7 @@ static void print_lock(struct held_lock *hlock)
 	}
 
 	printk(KERN_CONT "%px", hlock->instance);
-	print_lock_name(lock);
+	print_lock_name(hlock, lock);
 	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
 }
 
@@ -1868,7 +1870,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 	if (debug_locks_silent)
 		return;
 	printk("\n-> #%u", depth);
-	print_lock_name(target->class);
+	print_lock_name(NULL, target->class);
 	printk(KERN_CONT ":\n");
 	print_lock_trace(target->trace, 6);
 }
@@ -1897,11 +1899,11 @@ print_circular_lock_scenario(struct held_lock *src,
 	 */
 	if (parent != source) {
 		printk("Chain exists of:\n  ");
-		__print_lock_name(source);
+		__print_lock_name(src, source);
 		printk(KERN_CONT " --> ");
-		__print_lock_name(parent);
+		__print_lock_name(NULL, parent);
 		printk(KERN_CONT " --> ");
-		__print_lock_name(target);
+		__print_lock_name(tgt, target);
 		printk(KERN_CONT "\n\n");
 	}
 
@@ -1909,16 +1911,16 @@ print_circular_lock_scenario(struct held_lock *src,
 	printk("       CPU0                    CPU1\n");
 	printk("       ----                    ----\n");
 	printk("  lock(");
-	__print_lock_name(target);
+	__print_lock_name(tgt, target);
 	printk(KERN_CONT ");\n");
 	printk("                               lock(");
-	__print_lock_name(parent);
+	__print_lock_name(NULL, parent);
 	printk(KERN_CONT ");\n");
 	printk("                               lock(");
-	__print_lock_name(target);
+	__print_lock_name(tgt, target);
 	printk(KERN_CONT ");\n");
 	printk("  lock(");
-	__print_lock_name(source);
+	__print_lock_name(src, source);
 	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
@@ -2144,6 +2146,8 @@ check_path(struct held_lock *target, struct lock_list *src_entry,
 	return ret;
 }
 
+static void print_deadlock_bug(struct task_struct *, struct held_lock *, struct held_lock *);
+
 /*
  * Prove that the dependency graph starting at <src> can not
  * lead to <target>. If it can, there is a circle when adding
@@ -2175,7 +2179,10 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
 			*trace = save_trace();
 		}
 
-		print_circular_bug(&src_entry, target_entry, src, target);
+		if (src->class_idx == target->class_idx)
+			print_deadlock_bug(current, src, target);
+		else
+			print_circular_bug(&src_entry, target_entry, src, target);
 	}
 
 	return ret;
@@ -2331,7 +2338,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 	int bit;
 
 	printk("%*s->", depth, "");
-	print_lock_name(class);
+	print_lock_name(NULL, class);
 #ifdef CONFIG_DEBUG_LOCKDEP
 	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
 #endif
@@ -2513,11 +2520,11 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
 	 */
 	if (middle_class != unsafe_class) {
 		printk("Chain exists of:\n  ");
-		__print_lock_name(safe_class);
+		__print_lock_name(NULL, safe_class);
 		printk(KERN_CONT " --> ");
-		__print_lock_name(middle_class);
+		__print_lock_name(NULL, middle_class);
 		printk(KERN_CONT " --> ");
-		__print_lock_name(unsafe_class);
+		__print_lock_name(NULL, unsafe_class);
 		printk(KERN_CONT "\n\n");
 	}
 
@@ -2525,18 +2532,18 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
 	printk("       CPU0                    CPU1\n");
 	printk("       ----                    ----\n");
 	printk("  lock(");
-	__print_lock_name(unsafe_class);
+	__print_lock_name(NULL, unsafe_class);
 	printk(KERN_CONT ");\n");
 	printk("                               local_irq_disable();\n");
 	printk("                               lock(");
-	__print_lock_name(safe_class);
+	__print_lock_name(NULL, safe_class);
 	printk(KERN_CONT ");\n");
 	printk("                               lock(");
-	__print_lock_name(middle_class);
+	__print_lock_name(NULL, middle_class);
 	printk(KERN_CONT ");\n");
 	printk("  <Interrupt>\n");
 	printk("    lock(");
-	__print_lock_name(safe_class);
+	__print_lock_name(NULL, safe_class);
 	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
@@ -2573,20 +2580,20 @@ print_bad_irq_dependency(struct task_struct *curr,
 	pr_warn("\nand this task is already holding:\n");
 	print_lock(prev);
 	pr_warn("which would create a new lock dependency:\n");
-	print_lock_name(hlock_class(prev));
+	print_lock_name(prev, hlock_class(prev));
 	pr_cont(" ->");
-	print_lock_name(hlock_class(next));
+	print_lock_name(next, hlock_class(next));
 	pr_cont("\n");
 
 	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
-	print_lock_name(backwards_entry->class);
+	print_lock_name(NULL, backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
 	print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
-	print_lock_name(forwards_entry->class);
+	print_lock_name(NULL, forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
@@ -2956,10 +2963,10 @@ print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
 	printk("       CPU0\n");
 	printk("       ----\n");
 	printk("  lock(");
-	__print_lock_name(prev);
+	__print_lock_name(prv, prev);
 	printk(KERN_CONT ");\n");
 	printk("  lock(");
-	__print_lock_name(next);
+	__print_lock_name(nxt, next);
 	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 	printk(" May be due to missing lock nesting notation\n\n");
@@ -2969,6 +2976,8 @@ static void
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
 {
+	struct lock_class *class = hlock_class(prev);
+
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;
 
@@ -2983,6 +2992,10 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(prev);
 
+	if (class->cmp_fn)
+		pr_warn("and the lock comparison function returns %i:\n",
+			class->cmp_fn(prev->instance, next->instance));
+
 	pr_warn("\nother info that might help us debug this:\n");
 	print_deadlock_scenario(next, prev);
 	lockdep_print_held_locks(curr);
@@ -3004,6 +3017,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 static int
 check_deadlock(struct task_struct *curr, struct held_lock *next)
 {
+	struct lock_class *class;
 	struct held_lock *prev;
 	struct held_lock *nest = NULL;
 	int i;
@@ -3024,6 +3038,12 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
 		if ((next->read == 2) && prev->read)
 			continue;
 
+		class = hlock_class(prev);
+
+		if (class->cmp_fn &&
+		    class->cmp_fn(prev->instance, next->instance) < 0)
+			continue;
+
 		/*
 		 * We're holding the nest_lock, which serializes this lock's
 		 * nesting behaviour.
@@ -3085,6 +3105,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return 2;
 	}
 
+	if (prev->class_idx == next->class_idx) {
+		struct lock_class *class = hlock_class(prev);
+
+		if (class->cmp_fn &&
+		    class->cmp_fn(prev->instance, next->instance) < 0)
+			return 2;
+	}
+
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
 	 * create a circular dependency in the graph. (We do this by
@@ -3918,11 +3946,11 @@ static void print_usage_bug_scenario(struct held_lock *lock)
 	printk("       CPU0\n");
 	printk("       ----\n");
 	printk("  lock(");
-	__print_lock_name(class);
+	__print_lock_name(lock, class);
 	printk(KERN_CONT ");\n");
 	printk("  <Interrupt>\n");
 	printk("    lock(");
-	__print_lock_name(class);
+	__print_lock_name(lock, class);
 	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
@@ -4008,7 +4036,7 @@ print_irq_inversion_bug(struct task_struct *curr,
 		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
 		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
-	print_lock_name(other->class);
+	print_lock_name(NULL, other->class);
 	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
 	pr_warn("\nother info that might help us debug this:\n");
@@ -4866,6 +4894,33 @@ EXPORT_SYMBOL_GPL(lockdep_init_map_type);
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
 
+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn,
+			     lock_print_fn print_fn)
+{
+	struct lock_class *class = lock->class_cache[0];
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	lockdep_recursion_inc();
+
+	if (!class)
+		class = register_lock_class(lock, 0, 0);
+
+	if (class) {
+		WARN_ON(class->cmp_fn	&& class->cmp_fn != cmp_fn);
+		WARN_ON(class->print_fn && class->print_fn != print_fn);
+
+		class->cmp_fn	= cmp_fn;
+		class->print_fn = print_fn;
+	}
+
+	lockdep_recursion_finish();
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lockdep_set_lock_cmp_fn);
+#endif
+
 static void
 print_lock_nested_lock_not_held(struct task_struct *curr,
 				struct held_lock *hlock)
-- 
2.40.1



* [PATCH 2/2] bcache: Convert to lock_cmp_fn
  2023-05-09 19:58 [PATCH 1/2] lockdep: lock_set_cmp_fn() Kent Overstreet
@ 2023-05-09 19:58 ` Kent Overstreet
  2023-05-10  5:25   ` Coly Li
                     ` (3 more replies)
  2023-05-09 21:54 ` [PATCH 1/2] lockdep: lock_set_cmp_fn() kernel test robot
                   ` (2 subsequent siblings)
  3 siblings, 4 replies; 13+ messages in thread
From: Kent Overstreet @ 2023-05-09 19:58 UTC (permalink / raw)
  To: linux-kernel, peterz; +Cc: Kent Overstreet, Coly Li

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Coly Li <colyli@suse.de>
---
 drivers/md/bcache/btree.c | 23 ++++++++++++++++++++++-
 drivers/md/bcache/btree.h |  4 ++--
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 147c493a98..569f48958b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -559,6 +559,27 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
 	}
 }
 
+#define cmp_int(l, r)		((l > r) - (l < r))
+
+#ifdef CONFIG_PROVE_LOCKING
+static int btree_lock_cmp_fn(const struct lockdep_map *_a,
+			     const struct lockdep_map *_b)
+{
+	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
+	const struct btree *b = container_of(_b, struct btree, lock.dep_map);
+
+	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
+}
+
+static void btree_lock_print_fn(const struct lockdep_map *map)
+{
+	const struct btree *b = container_of(map, struct btree, lock.dep_map);
+
+	printk(KERN_CONT " l=%u %llu:%llu", b->level,
+	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
+}
+#endif
+
 static struct btree *mca_bucket_alloc(struct cache_set *c,
 				      struct bkey *k, gfp_t gfp)
 {
@@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
 		return NULL;
 
 	init_rwsem(&b->lock);
-	lockdep_set_novalidate_class(&b->lock);
+	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
 	mutex_init(&b->write_lock);
 	lockdep_set_novalidate_class(&b->write_lock);
 	INIT_LIST_HEAD(&b->list);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 1b5fdbc0d8..17b1d201ce 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -247,8 +247,8 @@ static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
-	w ? down_write_nested(&b->lock, level + 1)
-	  : down_read_nested(&b->lock, level + 1);
+	w ? down_write(&b->lock)
+	  : down_read(&b->lock);
 	if (w)
 		b->seq++;
 }
-- 
2.40.1



* Re: [PATCH 1/2] lockdep: lock_set_cmp_fn()
  2023-05-09 19:58 [PATCH 1/2] lockdep: lock_set_cmp_fn() Kent Overstreet
  2023-05-09 19:58 ` [PATCH 2/2] bcache: Convert to lock_cmp_fn Kent Overstreet
@ 2023-05-09 21:54 ` kernel test robot
  2023-05-09 23:26 ` kernel test robot
  2023-05-20 10:49 ` [tip: locking/core] lockdep: Add lock_set_cmp_fn() annotation tip-bot2 for Kent Overstreet
  3 siblings, 0 replies; 13+ messages in thread
From: kernel test robot @ 2023-05-09 21:54 UTC (permalink / raw)
  To: Kent Overstreet, linux-kernel; +Cc: oe-kbuild-all, Kent Overstreet, Ingo Molnar

Hi Kent,

kernel test robot noticed the following build warnings:

[auto build test WARNING on tip/locking/core]
[cannot apply to linus/master v6.4-rc1 next-20230509]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Kent-Overstreet/bcache-Convert-to-lock_cmp_fn/20230510-040030
base:   tip/locking/core
patch link:    https://lore.kernel.org/r/20230509195847.1745548-1-kent.overstreet%40linux.dev
patch subject: [PATCH 1/2] lockdep: lock_set_cmp_fn()
config: loongarch-allyesconfig (https://download.01.org/0day-ci/archive/20230510/202305100552.41Mi8Ctu-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/f265373c48cadb6f81e032df9893abffc370ab89
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Kent-Overstreet/bcache-Convert-to-lock_cmp_fn/20230510-040030
        git checkout f265373c48cadb6f81e032df9893abffc370ab89
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch SHELL=/bin/bash drivers/md/bcache/ kernel/locking/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202305100552.41Mi8Ctu-lkp@intel.com/

All warnings (new ones prefixed by >>):

   kernel/locking/lockdep.c: In function 'print_chain_keys_chain':
   kernel/locking/lockdep.c:3592:46: error: passing argument 1 of 'print_lock_name' from incompatible pointer type [-Werror=incompatible-pointer-types]
    3592 |                 print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
         |                                 ~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
         |                                              |
         |                                              struct lock_class *
   kernel/locking/lockdep.c:732:47: note: expected 'struct held_lock *' but argument is of type 'struct lock_class *'
     732 | static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
         |                             ~~~~~~~~~~~~~~~~~~^~~~~
   kernel/locking/lockdep.c:3592:17: error: too few arguments to function 'print_lock_name'
    3592 |                 print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
         |                 ^~~~~~~~~~~~~~~
   kernel/locking/lockdep.c:732:13: note: declared here
     732 | static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
         |             ^~~~~~~~~~~~~~~
   kernel/locking/lockdep.c: At top level:
>> kernel/locking/lockdep.c:4898:6: warning: no previous prototype for 'lockdep_set_lock_cmp_fn' [-Wmissing-prototypes]
    4898 | void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn,
         |      ^~~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors


vim +/lockdep_set_lock_cmp_fn +4898 kernel/locking/lockdep.c

  4896	
  4897	#ifdef CONFIG_PROVE_LOCKING
> 4898	void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn,
  4899				     lock_print_fn print_fn)
  4900	{
  4901		struct lock_class *class = lock->class_cache[0];
  4902		unsigned long flags;
  4903	
  4904		raw_local_irq_save(flags);
  4905		lockdep_recursion_inc();
  4906	
  4907		if (!class)
  4908			class = register_lock_class(lock, 0, 0);
  4909	
  4910		if (class) {
  4911			WARN_ON(class->cmp_fn	&& class->cmp_fn != cmp_fn);
  4912			WARN_ON(class->print_fn && class->print_fn != print_fn);
  4913	
  4914			class->cmp_fn	= cmp_fn;
  4915			class->print_fn = print_fn;
  4916		}
  4917	
  4918		lockdep_recursion_finish();
  4919		raw_local_irq_restore(flags);
  4920	}
  4921	EXPORT_SYMBOL_GPL(lockdep_set_lock_cmp_fn);
  4922	#endif
  4923	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests


* Re: [PATCH 1/2] lockdep: lock_set_cmp_fn()
  2023-05-09 19:58 [PATCH 1/2] lockdep: lock_set_cmp_fn() Kent Overstreet
  2023-05-09 19:58 ` [PATCH 2/2] bcache: Convert to lock_cmp_fn Kent Overstreet
  2023-05-09 21:54 ` [PATCH 1/2] lockdep: lock_set_cmp_fn() kernel test robot
@ 2023-05-09 23:26 ` kernel test robot
  2023-05-20 10:49 ` [tip: locking/core] lockdep: Add lock_set_cmp_fn() annotation tip-bot2 for Kent Overstreet
  3 siblings, 0 replies; 13+ messages in thread
From: kernel test robot @ 2023-05-09 23:26 UTC (permalink / raw)
  To: Kent Overstreet, linux-kernel, peterz
  Cc: oe-kbuild-all, Kent Overstreet, Ingo Molnar

Hi Kent,

kernel test robot noticed the following build errors:

[auto build test ERROR on tip/locking/core]
[cannot apply to linus/master v6.4-rc1 next-20230509]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Kent-Overstreet/bcache-Convert-to-lock_cmp_fn/20230510-040030
base:   tip/locking/core
patch link:    https://lore.kernel.org/r/20230509195847.1745548-1-kent.overstreet%40linux.dev
patch subject: [PATCH 1/2] lockdep: lock_set_cmp_fn()
config: loongarch-allyesconfig (https://download.01.org/0day-ci/archive/20230510/202305100719.eq9Z9e7H-lkp@intel.com/config)
compiler: loongarch64-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/f265373c48cadb6f81e032df9893abffc370ab89
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Kent-Overstreet/bcache-Convert-to-lock_cmp_fn/20230510-040030
        git checkout f265373c48cadb6f81e032df9893abffc370ab89
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=loongarch SHELL=/bin/bash drivers/md/ kernel/

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202305100719.eq9Z9e7H-lkp@intel.com/

All errors (new ones prefixed by >>):

   kernel/locking/lockdep.c: In function 'print_chain_keys_chain':
>> kernel/locking/lockdep.c:3592:46: error: passing argument 1 of 'print_lock_name' from incompatible pointer type [-Werror=incompatible-pointer-types]
    3592 |                 print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
         |                                 ~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
         |                                              |
         |                                              struct lock_class *
   kernel/locking/lockdep.c:732:47: note: expected 'struct held_lock *' but argument is of type 'struct lock_class *'
     732 | static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
         |                             ~~~~~~~~~~~~~~~~~~^~~~~
>> kernel/locking/lockdep.c:3592:17: error: too few arguments to function 'print_lock_name'
    3592 |                 print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
         |                 ^~~~~~~~~~~~~~~
   kernel/locking/lockdep.c:732:13: note: declared here
     732 | static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
         |             ^~~~~~~~~~~~~~~
   kernel/locking/lockdep.c: At top level:
   kernel/locking/lockdep.c:4898:6: warning: no previous prototype for 'lockdep_set_lock_cmp_fn' [-Wmissing-prototypes]
    4898 | void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn,
         |      ^~~~~~~~~~~~~~~~~~~~~~~
   cc1: some warnings being treated as errors


vim +/print_lock_name +3592 kernel/locking/lockdep.c

39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3580  
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3581  static void print_chain_keys_chain(struct lock_chain *chain)
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3582  {
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3583  	int i;
f6ec8829ac9d59 Yuyang Du                 2019-05-06  3584  	u64 chain_key = INITIAL_CHAIN_KEY;
f611e8cf98ec90 Boqun Feng                2020-08-07  3585  	u16 hlock_id;
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3586  
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3587  	printk("depth: %u\n", chain->depth);
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3588  	for (i = 0; i < chain->depth; i++) {
f611e8cf98ec90 Boqun Feng                2020-08-07  3589  		hlock_id = chain_hlocks[chain->base + i];
f611e8cf98ec90 Boqun Feng                2020-08-07  3590  		chain_key = print_chain_key_iteration(hlock_id, chain_key);
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3591  
28df029d53a2fd Cheng Jui Wang            2022-02-10 @3592  		print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3593  		printk("\n");
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3594  	}
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3595  }
39e2e173fb1f90 Alfredo Alvarez Fernandez 2016-03-30  3596  

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests


* Re: [PATCH 2/2] bcache: Convert to lock_cmp_fn
  2023-05-09 19:58 ` [PATCH 2/2] bcache: Convert to lock_cmp_fn Kent Overstreet
@ 2023-05-10  5:25   ` Coly Li
  2023-05-10 13:01   ` Peter Zijlstra
                     ` (2 subsequent siblings)
  3 siblings, 0 replies; 13+ messages in thread
From: Coly Li @ 2023-05-10  5:25 UTC (permalink / raw)
  To: Kent Overstreet; +Cc: linux-kernel, peterz



> On May 10, 2023 at 03:58, Kent Overstreet <kent.overstreet@linux.dev> wrote:
> 
> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
> Cc: Coly Li <colyli@suse.de>

Acked-by: Coly Li <colyli@suse.de <mailto:colyli@suse.de>>

Thanks for this change :-)

Coly Li


> ---
> drivers/md/bcache/btree.c | 23 ++++++++++++++++++++++-
> drivers/md/bcache/btree.h |  4 ++--
> 2 files changed, 24 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
> index 147c493a98..569f48958b 100644
> --- a/drivers/md/bcache/btree.c
> +++ b/drivers/md/bcache/btree.c
> @@ -559,6 +559,27 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
> }
> }
> 
> +#define cmp_int(l, r) ((l > r) - (l < r))
> +
> +#ifdef CONFIG_PROVE_LOCKING
> +static int btree_lock_cmp_fn(const struct lockdep_map *_a,
> +     const struct lockdep_map *_b)
> +{
> + const struct btree *a = container_of(_a, struct btree, lock.dep_map);
> + const struct btree *b = container_of(_b, struct btree, lock.dep_map);
> +
> + return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
> +}
> +
> +static void btree_lock_print_fn(const struct lockdep_map *map)
> +{
> + const struct btree *b = container_of(map, struct btree, lock.dep_map);
> +
> + printk(KERN_CONT " l=%u %llu:%llu", b->level,
> +       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
> +}
> +#endif
> +
> static struct btree *mca_bucket_alloc(struct cache_set *c,
>      struct bkey *k, gfp_t gfp)
> {
> @@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
> return NULL;
> 
> init_rwsem(&b->lock);
> - lockdep_set_novalidate_class(&b->lock);
> + lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
> mutex_init(&b->write_lock);
> lockdep_set_novalidate_class(&b->write_lock);
> INIT_LIST_HEAD(&b->list);
> diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
> index 1b5fdbc0d8..17b1d201ce 100644
> --- a/drivers/md/bcache/btree.h
> +++ b/drivers/md/bcache/btree.h
> @@ -247,8 +247,8 @@ static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
> 
> static inline void rw_lock(bool w, struct btree *b, int level)
> {
> - w ? down_write_nested(&b->lock, level + 1)
> -  : down_read_nested(&b->lock, level + 1);
> + w ? down_write(&b->lock)
> +  : down_read(&b->lock);
> if (w)
> b->seq++;
> }
> -- 
> 2.40.1
> 



* Re: [PATCH 2/2] bcache: Convert to lock_cmp_fn
  2023-05-09 19:58 ` [PATCH 2/2] bcache: Convert to lock_cmp_fn Kent Overstreet
  2023-05-10  5:25   ` Coly Li
@ 2023-05-10 13:01   ` Peter Zijlstra
  2023-05-10 17:05     ` Kent Overstreet
  2023-05-25  5:07     ` Kent Overstreet
       [not found]   ` <168457974565.404.16611061652498882569.tip-bot2@tip-bot2>
  2023-05-24 10:31   ` tip-bot2 for Kent Overstreet
  3 siblings, 2 replies; 13+ messages in thread
From: Peter Zijlstra @ 2023-05-10 13:01 UTC (permalink / raw)
  To: Kent Overstreet; +Cc: linux-kernel, Coly Li

On Tue, May 09, 2023 at 03:58:47PM -0400, Kent Overstreet wrote:
> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
> Cc: Coly Li <colyli@suse.de>
> ---
>  drivers/md/bcache/btree.c | 23 ++++++++++++++++++++++-
>  drivers/md/bcache/btree.h |  4 ++--
>  2 files changed, 24 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
> index 147c493a98..569f48958b 100644
> --- a/drivers/md/bcache/btree.c
> +++ b/drivers/md/bcache/btree.c
> @@ -559,6 +559,27 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
>  	}
>  }
>  
> +#define cmp_int(l, r)		((l > r) - (l < r))
> +
> +#ifdef CONFIG_PROVE_LOCKING
> +static int btree_lock_cmp_fn(const struct lockdep_map *_a,
> +			     const struct lockdep_map *_b)
> +{
> +	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
> +	const struct btree *b = container_of(_b, struct btree, lock.dep_map);
> +
> +	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
> +}
> +
> +static void btree_lock_print_fn(const struct lockdep_map *map)
> +{
> +	const struct btree *b = container_of(map, struct btree, lock.dep_map);
> +
> +	printk(KERN_CONT " l=%u %llu:%llu", b->level,
> +	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
> +}
> +#endif
> +
>  static struct btree *mca_bucket_alloc(struct cache_set *c,
>  				      struct bkey *k, gfp_t gfp)
>  {
> @@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
>  		return NULL;
>  
>  	init_rwsem(&b->lock);
> -	lockdep_set_novalidate_class(&b->lock);
> +	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
>  	mutex_init(&b->write_lock);
>  	lockdep_set_novalidate_class(&b->write_lock);

I can't help but notice you've got yet another novalidate_class usage
here. What does it take to get rid of that?


* Re: [PATCH 2/2] bcache: Convert to lock_cmp_fn
  2023-05-10 13:01   ` Peter Zijlstra
@ 2023-05-10 17:05     ` Kent Overstreet
  2023-05-25  5:07     ` Kent Overstreet
  1 sibling, 0 replies; 13+ messages in thread
From: Kent Overstreet @ 2023-05-10 17:05 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: linux-kernel, Coly Li

On Wed, May 10, 2023 at 03:01:51PM +0200, Peter Zijlstra wrote:
> On Tue, May 09, 2023 at 03:58:47PM -0400, Kent Overstreet wrote:
> > Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
> > Cc: Coly Li <colyli@suse.de>
> > ---
> >  drivers/md/bcache/btree.c | 23 ++++++++++++++++++++++-
> >  drivers/md/bcache/btree.h |  4 ++--
> >  2 files changed, 24 insertions(+), 3 deletions(-)
> > 
> > diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
> > index 147c493a98..569f48958b 100644
> > --- a/drivers/md/bcache/btree.c
> > +++ b/drivers/md/bcache/btree.c
> > @@ -559,6 +559,27 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
> >  	}
> >  }
> >  
> > +#define cmp_int(l, r)		((l > r) - (l < r))
> > +
> > +#ifdef CONFIG_PROVE_LOCKING
> > +static int btree_lock_cmp_fn(const struct lockdep_map *_a,
> > +			     const struct lockdep_map *_b)
> > +{
> > +	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
> > +	const struct btree *b = container_of(_b, struct btree, lock.dep_map);
> > +
> > +	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
> > +}
> > +
> > +static void btree_lock_print_fn(const struct lockdep_map *map)
> > +{
> > +	const struct btree *b = container_of(map, struct btree, lock.dep_map);
> > +
> > +	printk(KERN_CONT " l=%u %llu:%llu", b->level,
> > +	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
> > +}
> > +#endif
> > +
> >  static struct btree *mca_bucket_alloc(struct cache_set *c,
> >  				      struct bkey *k, gfp_t gfp)
> >  {
> > @@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
> >  		return NULL;
> >  
> >  	init_rwsem(&b->lock);
> > -	lockdep_set_novalidate_class(&b->lock);
> > +	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
> >  	mutex_init(&b->write_lock);
> >  	lockdep_set_novalidate_class(&b->write_lock);
> 
> I can't help but notice you've got yet another novalidate_class usage
> here. What does it take to get rid of that?

This is a tricky one, because the correct lock ordering involves
particular locks of different types: we take b->lock before
b->write_lock, for a given btree node.

And like b->lock, b->write_lock can be held simultaneously for multiple
nodes, with the same ordering that btree_lock_cmp_fn() defines.

Conceptually we'd need a lock_cmp_fn that can compare locks of different
types...

This patchset might be almost enough to do that, I'll give it a bit more
thought.
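
(For reference, a rough and untested sketch of what the same-class
annotation would look like for b->write_lock; it still wouldn't express
the cross-type "b->lock before b->write_lock" rule above, which is the
real blocker:)

  static int btree_write_lock_cmp_fn(const struct lockdep_map *_a,
				     const struct lockdep_map *_b)
  {
	const struct btree *a = container_of(_a, struct btree, write_lock.dep_map);
	const struct btree *b = container_of(_b, struct btree, write_lock.dep_map);

	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
  }

  ...
	mutex_init(&b->write_lock);
	/*
	 * btree_lock_cmp_fn()/btree_lock_print_fn() can't be reused as-is:
	 * they container_of() through lock.dep_map, not write_lock.dep_map
	 */
	lock_set_cmp_fn(&b->write_lock, btree_write_lock_cmp_fn, NULL);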


* [tip: locking/core] lockdep: Add lock_set_cmp_fn() annotation
  2023-05-09 19:58 [PATCH 1/2] lockdep: lock_set_cmp_fn() Kent Overstreet
                   ` (2 preceding siblings ...)
  2023-05-09 23:26 ` kernel test robot
@ 2023-05-20 10:49 ` tip-bot2 for Kent Overstreet
  3 siblings, 0 replies; 13+ messages in thread
From: tip-bot2 for Kent Overstreet @ 2023-05-20 10:49 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Kent Overstreet, Peter Zijlstra (Intel), x86, linux-kernel

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     eb1cfd09f788e39948a82be8063e54e40dd018d9
Gitweb:        https://git.kernel.org/tip/eb1cfd09f788e39948a82be8063e54e40dd018d9
Author:        Kent Overstreet <kent.overstreet@linux.dev>
AuthorDate:    Tue, 09 May 2023 15:58:46 -04:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Fri, 19 May 2023 12:35:10 +02:00

lockdep: Add lock_set_cmp_fn() annotation

This implements a new interface to lockdep, lock_set_cmp_fn(), for
defining a custom ordering when taking multiple locks of the same
class.

This is an alternative to subclasses, but cannot fully replace them,
since subclasses allow lock hierarchies with other classes
intertwined, while this relies on pure class nesting.

Specifically, if A is our nesting class then:

  A/0 <- B <- A/1

Would be a valid lock order with subclasses (each subclass really is a
full class from the validation PoV) but not with this annotation,
which requires all nesting to be consecutive.
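
For illustration (hypothetical locks a1/a2 in class A and b in class B;
the subclass numbers are what would be passed to the *_nested()
primitives):

  /* Accepted with subclasses, not expressible with a cmp_fn annotation: */
  mutex_lock_nested(&a1->lock, 0);	/* A/0 */
  mutex_lock(&b->lock);			/* B   */
  mutex_lock_nested(&a2->lock, 1);	/* A/1 */

  /* With lock_set_cmp_fn() on class A, same-class nesting is consecutive: */
  mutex_lock(&a1->lock);		/* cmp_fn(a1, a2) < 0 ...         */
  mutex_lock(&a2->lock);		/* ... so a1 -> a2 is allowed     */
  mutex_lock(&b->lock);			/* other classes around the A run */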

Example output:

| ============================================
| WARNING: possible recursive locking detected
| 6.2.0-rc8-00003-g7d81e591ca6a-dirty #15 Not tainted
| --------------------------------------------
| kworker/14:3/938 is trying to acquire lock:
| ffff8880143218c8 (&b->lock l=0 0:2803368){++++}-{3:3}, at: bch_btree_node_get.part.0+0x81/0x2b0
|
| but task is already holding lock:
| ffff8880143de8c8 (&b->lock l=1 1048575:9223372036854775807){++++}-{3:3}, at: __bch_btree_map_nodes+0xea/0x1e0
| and the lock comparison function returns 1:
|
| other info that might help us debug this:
|  Possible unsafe locking scenario:
|
|        CPU0
|        ----
|   lock(&b->lock l=1 1048575:9223372036854775807);
|   lock(&b->lock l=0 0:2803368);
|
|  *** DEADLOCK ***
|
|  May be due to missing lock nesting notation
|
| 3 locks held by kworker/14:3/938:
|  #0: ffff888005ea9d38 ((wq_completion)bcache){+.+.}-{0:0}, at: process_one_work+0x1ec/0x530
|  #1: ffff8880098c3e70 ((work_completion)(&cl->work)#3){+.+.}-{0:0}, at: process_one_work+0x1ec/0x530
|  #2: ffff8880143de8c8 (&b->lock l=1 1048575:9223372036854775807){++++}-{3:3}, at: __bch_btree_map_nodes+0xea/0x1e0

[peterz: extended changelog]
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230509195847.1745548-1-kent.overstreet@linux.dev
---
 include/linux/lockdep.h       |   8 ++-
 include/linux/lockdep_types.h |   8 ++-
 kernel/locking/lockdep.c      | 118 ++++++++++++++++++++++++---------
 3 files changed, 103 insertions(+), 31 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b32256e..3bac150 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -434,6 +434,14 @@ extern int lockdep_is_held(const void *);
 
 #endif /* !LOCKDEP */
 
+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *, lock_cmp_fn, lock_print_fn);
+
+#define lock_set_cmp_fn(lock, ...)	lockdep_set_lock_cmp_fn(&(lock)->dep_map, __VA_ARGS__)
+#else
+#define lock_set_cmp_fn(lock, ...)	do { } while (0)
+#endif
+
 enum xhlock_context_t {
 	XHLOCK_HARD,
 	XHLOCK_SOFT,
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index d224308..8bf79c4 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -84,6 +84,11 @@ struct lock_trace;
 
 #define LOCKSTAT_POINTS		4
 
+struct lockdep_map;
+typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
+			   const struct lockdep_map *b);
+typedef void (*lock_print_fn)(const struct lockdep_map *map);
+
 /*
  * The lock-class itself. The order of the structure members matters.
  * reinit_class() zeroes the key member and all subsequent members.
@@ -109,6 +114,9 @@ struct lock_class {
 	struct list_head		locks_after, locks_before;
 
 	const struct lockdep_subclass_key *key;
+	lock_cmp_fn			cmp_fn;
+	lock_print_fn			print_fn;
+
 	unsigned int			subclass;
 	unsigned int			dep_gen_id;
 
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index dcd1d5b..3e8950f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -709,7 +709,7 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
 	usage[i] = '\0';
 }
 
-static void __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct held_lock *hlock, struct lock_class *class)
 {
 	char str[KSYM_NAME_LEN];
 	const char *name;
@@ -724,17 +724,19 @@ static void __print_lock_name(struct lock_class *class)
 			printk(KERN_CONT "#%d", class->name_version);
 		if (class->subclass)
 			printk(KERN_CONT "/%d", class->subclass);
+		if (hlock && class->print_fn)
+			class->print_fn(hlock->instance);
 	}
 }
 
-static void print_lock_name(struct lock_class *class)
+static void print_lock_name(struct held_lock *hlock, struct lock_class *class)
 {
 	char usage[LOCK_USAGE_CHARS];
 
 	get_usage_chars(class, usage);
 
 	printk(KERN_CONT " (");
-	__print_lock_name(class);
+	__print_lock_name(hlock, class);
 	printk(KERN_CONT "){%s}-{%d:%d}", usage,
 			class->wait_type_outer ?: class->wait_type_inner,
 			class->wait_type_inner);
@@ -772,7 +774,7 @@ static void print_lock(struct held_lock *hlock)
 	}
 
 	printk(KERN_CONT "%px", hlock->instance);
-	print_lock_name(lock);
+	print_lock_name(hlock, lock);
 	printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
 }
 
@@ -1868,7 +1870,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 	if (debug_locks_silent)
 		return;
 	printk("\n-> #%u", depth);
-	print_lock_name(target->class);
+	print_lock_name(NULL, target->class);
 	printk(KERN_CONT ":\n");
 	print_lock_trace(target->trace, 6);
 }
@@ -1899,11 +1901,11 @@ print_circular_lock_scenario(struct held_lock *src,
 	 */
 	if (parent != source) {
 		printk("Chain exists of:\n  ");
-		__print_lock_name(source);
+		__print_lock_name(src, source);
 		printk(KERN_CONT " --> ");
-		__print_lock_name(parent);
+		__print_lock_name(NULL, parent);
 		printk(KERN_CONT " --> ");
-		__print_lock_name(target);
+		__print_lock_name(tgt, target);
 		printk(KERN_CONT "\n\n");
 	}
 
@@ -1914,13 +1916,13 @@ print_circular_lock_scenario(struct held_lock *src,
 		printk("  rlock(");
 	else
 		printk("  lock(");
-	__print_lock_name(target);
+	__print_lock_name(tgt, target);
 	printk(KERN_CONT ");\n");
 	printk("                               lock(");
-	__print_lock_name(parent);
+	__print_lock_name(NULL, parent);
 	printk(KERN_CONT ");\n");
 	printk("                               lock(");
-	__print_lock_name(target);
+	__print_lock_name(tgt, target);
 	printk(KERN_CONT ");\n");
 	if (src_read != 0)
 		printk("  rlock(");
@@ -1928,7 +1930,7 @@ print_circular_lock_scenario(struct held_lock *src,
 		printk("  sync(");
 	else
 		printk("  lock(");
-	__print_lock_name(source);
+	__print_lock_name(src, source);
 	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
@@ -2154,6 +2156,8 @@ check_path(struct held_lock *target, struct lock_list *src_entry,
 	return ret;
 }
 
+static void print_deadlock_bug(struct task_struct *, struct held_lock *, struct held_lock *);
+
 /*
  * Prove that the dependency graph starting at <src> can not
  * lead to <target>. If it can, there is a circle when adding
@@ -2185,7 +2189,10 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
 			*trace = save_trace();
 		}
 
-		print_circular_bug(&src_entry, target_entry, src, target);
+		if (src->class_idx == target->class_idx)
+			print_deadlock_bug(current, src, target);
+		else
+			print_circular_bug(&src_entry, target_entry, src, target);
 	}
 
 	return ret;
@@ -2341,7 +2348,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 	int bit;
 
 	printk("%*s->", depth, "");
-	print_lock_name(class);
+	print_lock_name(NULL, class);
 #ifdef CONFIG_DEBUG_LOCKDEP
 	printk(KERN_CONT " ops: %lu", debug_class_ops_read(class));
 #endif
@@ -2523,11 +2530,11 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
 	 */
 	if (middle_class != unsafe_class) {
 		printk("Chain exists of:\n  ");
-		__print_lock_name(safe_class);
+		__print_lock_name(NULL, safe_class);
 		printk(KERN_CONT " --> ");
-		__print_lock_name(middle_class);
+		__print_lock_name(NULL, middle_class);
 		printk(KERN_CONT " --> ");
-		__print_lock_name(unsafe_class);
+		__print_lock_name(NULL, unsafe_class);
 		printk(KERN_CONT "\n\n");
 	}
 
@@ -2535,18 +2542,18 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
 	printk("       CPU0                    CPU1\n");
 	printk("       ----                    ----\n");
 	printk("  lock(");
-	__print_lock_name(unsafe_class);
+	__print_lock_name(NULL, unsafe_class);
 	printk(KERN_CONT ");\n");
 	printk("                               local_irq_disable();\n");
 	printk("                               lock(");
-	__print_lock_name(safe_class);
+	__print_lock_name(NULL, safe_class);
 	printk(KERN_CONT ");\n");
 	printk("                               lock(");
-	__print_lock_name(middle_class);
+	__print_lock_name(NULL, middle_class);
 	printk(KERN_CONT ");\n");
 	printk("  <Interrupt>\n");
 	printk("    lock(");
-	__print_lock_name(safe_class);
+	__print_lock_name(NULL, safe_class);
 	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
@@ -2583,20 +2590,20 @@ print_bad_irq_dependency(struct task_struct *curr,
 	pr_warn("\nand this task is already holding:\n");
 	print_lock(prev);
 	pr_warn("which would create a new lock dependency:\n");
-	print_lock_name(hlock_class(prev));
+	print_lock_name(prev, hlock_class(prev));
 	pr_cont(" ->");
-	print_lock_name(hlock_class(next));
+	print_lock_name(next, hlock_class(next));
 	pr_cont("\n");
 
 	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
-	print_lock_name(backwards_entry->class);
+	print_lock_name(NULL, backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
 	print_lock_trace(backwards_entry->class->usage_traces[bit1], 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
-	print_lock_name(forwards_entry->class);
+	print_lock_name(NULL, forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
@@ -2966,10 +2973,10 @@ print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
 	printk("       CPU0\n");
 	printk("       ----\n");
 	printk("  lock(");
-	__print_lock_name(prev);
+	__print_lock_name(prv, prev);
 	printk(KERN_CONT ");\n");
 	printk("  lock(");
-	__print_lock_name(next);
+	__print_lock_name(nxt, next);
 	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 	printk(" May be due to missing lock nesting notation\n\n");
@@ -2979,6 +2986,8 @@ static void
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
 {
+	struct lock_class *class = hlock_class(prev);
+
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;
 
@@ -2993,6 +3002,11 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(prev);
 
+	if (class->cmp_fn) {
+		pr_warn("and the lock comparison function returns %i:\n",
+			class->cmp_fn(prev->instance, next->instance));
+	}
+
 	pr_warn("\nother info that might help us debug this:\n");
 	print_deadlock_scenario(next, prev);
 	lockdep_print_held_locks(curr);
@@ -3014,6 +3028,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 static int
 check_deadlock(struct task_struct *curr, struct held_lock *next)
 {
+	struct lock_class *class;
 	struct held_lock *prev;
 	struct held_lock *nest = NULL;
 	int i;
@@ -3034,6 +3049,12 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
 		if ((next->read == 2) && prev->read)
 			continue;
 
+		class = hlock_class(prev);
+
+		if (class->cmp_fn &&
+		    class->cmp_fn(prev->instance, next->instance) < 0)
+			continue;
+
 		/*
 		 * We're holding the nest_lock, which serializes this lock's
 		 * nesting behaviour.
@@ -3095,6 +3116,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return 2;
 	}
 
+	if (prev->class_idx == next->class_idx) {
+		struct lock_class *class = hlock_class(prev);
+
+		if (class->cmp_fn &&
+		    class->cmp_fn(prev->instance, next->instance) < 0)
+			return 2;
+	}
+
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
 	 * create a circular dependency in the graph. (We do this by
@@ -3571,7 +3600,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
 		hlock_id = chain_hlocks[chain->base + i];
 		chain_key = print_chain_key_iteration(hlock_id, chain_key);
 
-		print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
+		print_lock_name(NULL, lock_classes + chain_hlock_class_idx(hlock_id));
 		printk("\n");
 	}
 }
@@ -3928,11 +3957,11 @@ static void print_usage_bug_scenario(struct held_lock *lock)
 	printk("       CPU0\n");
 	printk("       ----\n");
 	printk("  lock(");
-	__print_lock_name(class);
+	__print_lock_name(lock, class);
 	printk(KERN_CONT ");\n");
 	printk("  <Interrupt>\n");
 	printk("    lock(");
-	__print_lock_name(class);
+	__print_lock_name(lock, class);
 	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
@@ -4018,7 +4047,7 @@ print_irq_inversion_bug(struct task_struct *curr,
 		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
 		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
-	print_lock_name(other->class);
+	print_lock_name(NULL, other->class);
 	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
 	pr_warn("\nother info that might help us debug this:\n");
@@ -4882,6 +4911,33 @@ EXPORT_SYMBOL_GPL(lockdep_init_map_type);
 struct lock_class_key __lockdep_no_validate__;
 EXPORT_SYMBOL_GPL(__lockdep_no_validate__);
 
+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn cmp_fn,
+			     lock_print_fn print_fn)
+{
+	struct lock_class *class = lock->class_cache[0];
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	lockdep_recursion_inc();
+
+	if (!class)
+		class = register_lock_class(lock, 0, 0);
+
+	if (class) {
+		WARN_ON(class->cmp_fn	&& class->cmp_fn != cmp_fn);
+		WARN_ON(class->print_fn && class->print_fn != print_fn);
+
+		class->cmp_fn	= cmp_fn;
+		class->print_fn = print_fn;
+	}
+
+	lockdep_recursion_finish();
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lockdep_set_lock_cmp_fn);
+#endif
+
 static void
 print_lock_nested_lock_not_held(struct task_struct *curr,
 				struct held_lock *hlock)


* Re: [tip: locking/core] bcache: Convert to lock_cmp_fn
       [not found]   ` <168457974565.404.16611061652498882569.tip-bot2@tip-bot2>
@ 2023-05-21 13:21     ` Coly Li
  2023-05-22  8:34       ` Peter Zijlstra
  0 siblings, 1 reply; 13+ messages in thread
From: Coly Li @ 2023-05-21 13:21 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Kent Overstreet, Peter Zijlstra (Intel),
	LKML, Coly Li <colyli@suse.de>,
	x86



> On May 20, 2023 at 18:49, tip-bot2 for Kent Overstreet <tip-bot2@linutronix.de> wrote:
> 
> The following commit has been merged into the locking/core branch of tip:
> 
> Commit-ID:     0ad397b556936a14052aa65d8fa958a9f3175add
> Gitweb:        https://git.kernel.org/tip/0ad397b556936a14052aa65d8fa958a9f3175add
> Author:        Kent Overstreet <kent.overstreet@linux.dev>
> AuthorDate:    Tue, 09 May 2023 15:58:47 -04:00
> Committer:     Peter Zijlstra <peterz@infradead.org>
> CommitterDate: Fri, 19 May 2023 12:35:10 +02:00
> 
> bcache: Convert to lock_cmp_fn
> 
> Replace one of bcache's lockdep_set_novalidate_class() usages with the
> newly introduced custom lock nesting annotation.
> 
> [peterz: changelog]
> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Acked-by: Coly Li <colyli@suse.de <mailto:colyli@suse.de>>


Can the above “<mailto:colyli@suse.de>” be removed from my Acked-by? It was automatically and invisibly added by the macOS email client, which just introduces chaos in this sort of use case.

Thanks.

Coly Li


> Link: https://lkml.kernel.org/r/20230509195847.1745548-2-kent.overstreet@linux.dev
> ---
> drivers/md/bcache/btree.c | 23 ++++++++++++++++++++++-
> drivers/md/bcache/btree.h |  4 ++--
> 2 files changed, 24 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
> index 147c493..569f489 100644
> --- a/drivers/md/bcache/btree.c
> +++ b/drivers/md/bcache/btree.c
> @@ -559,6 +559,27 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
> }
> }
> 
> +#define cmp_int(l, r) ((l > r) - (l < r))
> +
> +#ifdef CONFIG_PROVE_LOCKING
> +static int btree_lock_cmp_fn(const struct lockdep_map *_a,
> +     const struct lockdep_map *_b)
> +{
> + const struct btree *a = container_of(_a, struct btree, lock.dep_map);
> + const struct btree *b = container_of(_b, struct btree, lock.dep_map);
> +
> + return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
> +}
> +
> +static void btree_lock_print_fn(const struct lockdep_map *map)
> +{
> + const struct btree *b = container_of(map, struct btree, lock.dep_map);
> +
> + printk(KERN_CONT " l=%u %llu:%llu", b->level,
> +       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
> +}
> +#endif
> +
> static struct btree *mca_bucket_alloc(struct cache_set *c,
>      struct bkey *k, gfp_t gfp)
> {
> @@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
> return NULL;
> 
> init_rwsem(&b->lock);
> - lockdep_set_novalidate_class(&b->lock);
> + lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
> mutex_init(&b->write_lock);
> lockdep_set_novalidate_class(&b->write_lock);
> INIT_LIST_HEAD(&b->list);
> diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
> index 1b5fdbc..17b1d20 100644
> --- a/drivers/md/bcache/btree.h
> +++ b/drivers/md/bcache/btree.h
> @@ -247,8 +247,8 @@ static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
> 
> static inline void rw_lock(bool w, struct btree *b, int level)
> {
> - w ? down_write_nested(&b->lock, level + 1)
> -  : down_read_nested(&b->lock, level + 1);
> + w ? down_write(&b->lock)
> +  : down_read(&b->lock);
> if (w)
> b->seq++;
> }



* Re: [tip: locking/core] bcache: Convert to lock_cmp_fn
  2023-05-21 13:21     ` [tip: locking/core] " Coly Li
@ 2023-05-22  8:34       ` Peter Zijlstra
  0 siblings, 0 replies; 13+ messages in thread
From: Peter Zijlstra @ 2023-05-22  8:34 UTC (permalink / raw)
  To: Coly Li
  Cc: linux-tip-commits, Kent Overstreet, LKML,
	Coly Li <colyli@suse.de>,
	x86

On Sun, May 21, 2023 at 09:21:36PM +0800, Coly Li wrote:
> 
> 
> > On May 20, 2023 at 18:49, tip-bot2 for Kent Overstreet <tip-bot2@linutronix.de> wrote:
> > 
> > The following commit has been merged into the locking/core branch of tip:
> > 
> > Commit-ID:     0ad397b556936a14052aa65d8fa958a9f3175add
> > Gitweb:        https://git.kernel.org/tip/0ad397b556936a14052aa65d8fa958a9f3175add
> > Author:        Kent Overstreet <kent.overstreet@linux.dev>
> > AuthorDate:    Tue, 09 May 2023 15:58:47 -04:00
> > Committer:     Peter Zijlstra <peterz@infradead.org>
> > CommitterDate: Fri, 19 May 2023 12:35:10 +02:00
> > 
> > bcache: Convert to lock_cmp_fn
> > 
> > Replace one of bcache's lockdep_set_novalidate_class() usages with the
> > newly introduced custom lock nesting annotation.
> > 
> > [peterz: changelog]
> > Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > Acked-by: Coly Li <colyli@suse.de <mailto:colyli@suse.de>>
> 
> 
> Can the above “<mailto:colyli@suse.de>” be removed from my Acked-by? It was automatically and invisibly added by the macOS email client, which just introduces chaos in this sort of use case.
> 
> Thanks.

Urgh, something I should add to my script on a rainy day I suppose.

I'll have to rebase the tree, but I suppose I can do that. Let me go see
if I can remember the git incantations required.


* [tip: locking/core] bcache: Convert to lock_cmp_fn
  2023-05-09 19:58 ` [PATCH 2/2] bcache: Convert to lock_cmp_fn Kent Overstreet
                     ` (2 preceding siblings ...)
       [not found]   ` <168457974565.404.16611061652498882569.tip-bot2@tip-bot2>
@ 2023-05-24 10:31   ` tip-bot2 for Kent Overstreet
  3 siblings, 0 replies; 13+ messages in thread
From: tip-bot2 for Kent Overstreet @ 2023-05-24 10:31 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: Kent Overstreet, Peter Zijlstra (Intel), Coly Li, x86, linux-kernel

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     4c8a49244c6abc5fb829d81abaaf2435ad2a44bf
Gitweb:        https://git.kernel.org/tip/4c8a49244c6abc5fb829d81abaaf2435ad2a44bf
Author:        Kent Overstreet <kent.overstreet@linux.dev>
AuthorDate:    Tue, 09 May 2023 15:58:47 -04:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Wed, 24 May 2023 12:21:22 +02:00

bcache: Convert to lock_cmp_fn

Replace one of bcache's lockdep_set_novalidate_class() usages with the
newly introduced custom lock nesting annotation.

[peterz: changelog]
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Coly Li <colyli@suse.de>
Link: https://lkml.kernel.org/r/20230509195847.1745548-2-kent.overstreet@linux.dev
---
 drivers/md/bcache/btree.c | 23 ++++++++++++++++++++++-
 drivers/md/bcache/btree.h |  4 ++--
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 147c493..569f489 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -559,6 +559,27 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
 	}
 }
 
+#define cmp_int(l, r)		((l > r) - (l < r))
+
+#ifdef CONFIG_PROVE_LOCKING
+static int btree_lock_cmp_fn(const struct lockdep_map *_a,
+			     const struct lockdep_map *_b)
+{
+	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
+	const struct btree *b = container_of(_b, struct btree, lock.dep_map);
+
+	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
+}
+
+static void btree_lock_print_fn(const struct lockdep_map *map)
+{
+	const struct btree *b = container_of(map, struct btree, lock.dep_map);
+
+	printk(KERN_CONT " l=%u %llu:%llu", b->level,
+	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
+}
+#endif
+
 static struct btree *mca_bucket_alloc(struct cache_set *c,
 				      struct bkey *k, gfp_t gfp)
 {
@@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
 		return NULL;
 
 	init_rwsem(&b->lock);
-	lockdep_set_novalidate_class(&b->lock);
+	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
 	mutex_init(&b->write_lock);
 	lockdep_set_novalidate_class(&b->write_lock);
 	INIT_LIST_HEAD(&b->list);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 1b5fdbc..17b1d20 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -247,8 +247,8 @@ static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
-	w ? down_write_nested(&b->lock, level + 1)
-	  : down_read_nested(&b->lock, level + 1);
+	w ? down_write(&b->lock)
+	  : down_read(&b->lock);
 	if (w)
 		b->seq++;
 }


* Re: [PATCH 2/2] bcache: Convert to lock_cmp_fn
  2023-05-10 13:01   ` Peter Zijlstra
  2023-05-10 17:05     ` Kent Overstreet
@ 2023-05-25  5:07     ` Kent Overstreet
  1 sibling, 0 replies; 13+ messages in thread
From: Kent Overstreet @ 2023-05-25  5:07 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: linux-kernel, Coly Li

On Wed, May 10, 2023 at 03:01:51PM +0200, Peter Zijlstra wrote:
> >  static struct btree *mca_bucket_alloc(struct cache_set *c,
> >  				      struct bkey *k, gfp_t gfp)
> >  {
> > @@ -572,7 +593,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
> >  		return NULL;
> >  
> >  	init_rwsem(&b->lock);
> > -	lockdep_set_novalidate_class(&b->lock);
> > +	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
> >  	mutex_init(&b->write_lock);
> >  	lockdep_set_novalidate_class(&b->write_lock);
> 
> I can't help but notice you've got yet another novalidate_class usage
> here. What does it take to get rid of that?

A locking rework, probably switching bcache to six locks.

I'd rather prioritize getting bcachefs merged and then working on an
upgrade path from bcache -> bcachefs so we can deprecate bcache,
though...


* [PATCH 2/2] bcache: Convert to lock_cmp_fn
  2023-02-18  3:21 [PATCH 0/2] lockdep lock comparison function Kent Overstreet
@ 2023-02-18  3:21 ` Kent Overstreet
  0 siblings, 0 replies; 13+ messages in thread
From: Kent Overstreet @ 2023-02-18  3:21 UTC (permalink / raw)
  To: linux-kernel, peterz; +Cc: Kent Overstreet, mingo, stern, Coly Li

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Coly Li <colyli@suse.de> (maintainer:BCACHE (BLOCK LAYER CACHE))
---
 drivers/md/bcache/btree.c | 15 ++++++++++++++-
 drivers/md/bcache/btree.h |  4 ++--
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 147c493a98..db2bf2402c 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -559,6 +559,19 @@ static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
 	}
 }
 
+#define cmp_int(l, r)		((l > r) - (l < r))
+
+#ifdef CONFIG_PROVE_LOCKING
+static int btree_lock_cmp_fn(const struct lockdep_map *_a,
+			     const struct lockdep_map *_b)
+{
+	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
+	const struct btree *b = container_of(_b, struct btree, lock.dep_map);
+
+	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
+}
+#endif
+
 static struct btree *mca_bucket_alloc(struct cache_set *c,
 				      struct bkey *k, gfp_t gfp)
 {
@@ -572,7 +585,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
 		return NULL;
 
 	init_rwsem(&b->lock);
-	lockdep_set_novalidate_class(&b->lock);
+	lock_set_lock_cmp_fn(&b->lock, btree_lock_cmp_fn);
 	mutex_init(&b->write_lock);
 	lockdep_set_novalidate_class(&b->write_lock);
 	INIT_LIST_HEAD(&b->list);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 1b5fdbc0d8..17b1d201ce 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -247,8 +247,8 @@ static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
-	w ? down_write_nested(&b->lock, level + 1)
-	  : down_read_nested(&b->lock, level + 1);
+	w ? down_write(&b->lock)
+	  : down_read(&b->lock);
 	if (w)
 		b->seq++;
 }
-- 
2.39.2


