linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] lockdep: Pass lockdep expression to RCU lists
@ 2020-01-23 10:54 Amol Grover
  0 siblings, 0 replies; only message in thread
From: Amol Grover @ 2020-01-23 10:54 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar, Will Deacon
  Cc: linux-kernel, linux-kernel-mentees, Joel Fernandes,
	Madhuparna Bhowmik, Paul E . McKenney, Amol Grover

Data is traversed using hlist_for_each_entry_rcu outside an
RCU read-side critical section, but under the protection
of either lockdep_lock or disabled irqs.

Hence, add the corresponding lockdep expression to silence
false-positive lockdep warnings and harden RCU lists. Also add a
macro for the corresponding lockdep expression.

Two things to note:
- RCU traversals protected under both disabled irqs and the
graph lock have both checks in the lockdep expression.
- RCU traversals under the protection of just disabled irqs
don't have a corresponding lockdep expression as it is implicitly
checked for.

Signed-off-by: Amol Grover <frextrite@gmail.com>
---
 kernel/locking/lockdep.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 32282e7112d3..696ad5d4daed 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -85,6 +85,8 @@ module_param(lock_stat, int, 0644);
  * code to recurse back into the lockdep code...
  */
 static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+#define graph_lock_held() \
+	arch_spin_is_locked(&lockdep_lock)
 static struct task_struct *lockdep_selftest_task_struct;
 
 static int graph_lock(void)
@@ -1009,7 +1011,7 @@ static bool __check_data_structures(void)
 	/* Check the chain_key of all lock chains. */
 	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
 		head = chainhash_table + i;
-		hlist_for_each_entry_rcu(chain, head, entry) {
+		hlist_for_each_entry_rcu(chain, head, entry, graph_lock_held()) {
 			if (!check_lock_chain_key(chain))
 				return false;
 		}
@@ -1124,7 +1126,8 @@ void lockdep_register_key(struct lock_class_key *key)
 	raw_local_irq_save(flags);
 	if (!graph_lock())
 		goto restore_irqs;
-	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(k, hash_head, hash_entry,
+				 irqs_disabled() && graph_lock_held()) {
 		if (WARN_ON_ONCE(k == key))
 			goto out_unlock;
 	}
@@ -1203,7 +1206,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry,
+				 irqs_disabled() && graph_lock_held()) {
 		if (class->key == key)
 			goto out_unlock_set;
 	}
@@ -2858,7 +2862,7 @@ static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
 	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 
-	hlist_for_each_entry_rcu(chain, hash_head, entry) {
+	hlist_for_each_entry_rcu(chain, hash_head, entry, graph_lock_held()) {
 		if (READ_ONCE(chain->chain_key) == chain_key) {
 			debug_atomic_inc(chain_lookup_hits);
 			return chain;
@@ -4833,7 +4837,7 @@ static void remove_class_from_lock_chains(struct pending_free *pf,
 
 	for (i = 0; i < ARRAY_SIZE(chainhash_table); i++) {
 		head = chainhash_table + i;
-		hlist_for_each_entry_rcu(chain, head, entry) {
+		hlist_for_each_entry_rcu(chain, head, entry, graph_lock_held()) {
 			remove_class_from_lock_chain(pf, chain, class);
 		}
 	}
@@ -4993,7 +4997,7 @@ static void __lockdep_free_key_range(struct pending_free *pf, void *start,
 	/* Unhash all classes that were created by a module. */
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		hlist_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry, graph_lock_held()) {
 			if (!within(class->key, start, size) &&
 			    !within(class->name, start, size))
 				continue;
@@ -5076,7 +5080,7 @@ static bool lock_class_cache_is_registered(struct lockdep_map *lock)
 
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
-		hlist_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry, graph_lock_held()) {
 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
 				if (lock->class_cache[j] == class)
 					return true;
@@ -5181,7 +5185,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 		goto out_irq;
 
 	pf = get_pending_free();
-	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(k, hash_head, hash_entry,
+				 irqs_disabled() && graph_lock_held()) {
 		if (k == key) {
 			hlist_del_rcu(&k->hash_entry);
 			found = true;
-- 
2.24.1


^ permalink raw reply related	[flat|nested] only message in thread

only message in thread, other threads:[~2020-01-23 10:56 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-01-23 10:54 [PATCH] lockdep: Pass lockdep expression to RCU lists Amol Grover

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).