From: tip-bot for Bart Van Assche <tipbot@zytor.com>
To: linux-tip-commits@vger.kernel.org
Cc: hpa@zytor.com, johannes@sipsolutions.net, peterz@infradead.org,
	longman@redhat.com, akpm@linux-foundation.org,
	torvalds@linux-foundation.org, bvanassche@acm.org,
	paulmck@linux.vnet.ibm.com, tglx@linutronix.de,
	linux-kernel@vger.kernel.org, will.deacon@arm.com,
	mingo@kernel.org
Subject: [tip:locking/core] locking/lockdep: Add support for dynamic keys
Date: Wed, 27 Feb 2019 23:13:48 -0800	[thread overview]
Message-ID: <tip-108c14858b9ea224686e476c8f5ec345a0df9e27@git.kernel.org> (raw)
In-Reply-To: <20190214230058.196511-19-bvanassche@acm.org>

Commit-ID:  108c14858b9ea224686e476c8f5ec345a0df9e27
Gitweb:     https://git.kernel.org/tip/108c14858b9ea224686e476c8f5ec345a0df9e27
Author:     Bart Van Assche <bvanassche@acm.org>
AuthorDate: Thu, 14 Feb 2019 15:00:53 -0800
Committer:  Ingo Molnar <mingo@kernel.org>
CommitDate: Thu, 28 Feb 2019 07:55:47 +0100

locking/lockdep: Add support for dynamic keys

A shortcoming of the current lockdep implementation is that it requires
lock keys to be allocated statically. That forces all instances of lock
objects that occur in a given data structure to share a lock key. Since
lock dependency analysis groups lock objects per key, sharing lock keys
can cause false positive lockdep reports. Make it possible to avoid
such false positive reports by allowing lock keys to be allocated
dynamically. Require that dynamically allocated lock keys be
registered before use by calling lockdep_register_key(). Complain about
attempts to register the same lock key pointer twice without calling
lockdep_unregister_key() between successive registration calls.

The purpose of the new lock_keys_hash[] data structure, which keeps
track of all dynamic keys, is twofold:

  - Verify whether the lockdep_register_key() and lockdep_unregister_key()
    functions are used correctly.

  - Prevent lockdep_init_map() from complaining when it encounters a
    dynamically allocated key.

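As a usage sketch (not part of this patch; "struct foo" and its helper
functions are hypothetical names), a per-instance key would be registered
before the lock it classifies is initialized, and unregistered only after
the lock has been destroyed and before the key memory is freed:

  #include <linux/lockdep.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>

  struct foo {
          struct lock_class_key key;      /* one lock class per instance */
          struct mutex lock;
  };

  static struct foo *foo_create(void)
  {
          struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

          if (!f)
                  return NULL;
          /* Dynamically allocated keys must be registered before first use. */
          lockdep_register_key(&f->key);
          __mutex_init(&f->lock, "foo->lock", &f->key);
          return f;
  }

  static void foo_destroy(struct foo *f)
  {
          mutex_destroy(&f->lock);
          /* Unregister before freeing the key memory; this call may sleep. */
          lockdep_unregister_key(&f->key);
          kfree(f);
  }
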
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20190214230058.196511-19-bvanassche@acm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 include/linux/lockdep.h  |  21 ++++++--
 kernel/locking/lockdep.c | 121 +++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 131 insertions(+), 11 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 619ec3f26cdc..43fb35bd7baf 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -46,15 +46,19 @@ extern int lock_stat;
 #define NR_LOCKDEP_CACHING_CLASSES	2
 
 /*
- * Lock-classes are keyed via unique addresses, by embedding the
- * lockclass-key into the kernel (or module) .data section. (For
- * static locks we use the lock address itself as the key.)
+ * A lockdep key is associated with each lock object. For static locks we use
+ * the lock address itself as the key. Dynamically allocated lock objects can
+ * have a statically or dynamically allocated key. Dynamically allocated lock
+ * keys must be registered before being used and must be unregistered before
+ * the key memory is freed.
  */
 struct lockdep_subclass_key {
 	char __one_byte;
 } __attribute__ ((__packed__));
 
+/* hash_entry is used to keep track of dynamically allocated keys. */
 struct lock_class_key {
+	struct hlist_node		hash_entry;
 	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
 };
 
@@ -273,6 +277,9 @@ extern void lockdep_set_selftest_task(struct task_struct *task);
 extern void lockdep_off(void);
 extern void lockdep_on(void);
 
+extern void lockdep_register_key(struct lock_class_key *key);
+extern void lockdep_unregister_key(struct lock_class_key *key);
+
 /*
  * These methods are used by specific locking variants (spinlocks,
  * rwlocks, mutexes and rwsems) to pass init/acquire/release events
@@ -434,6 +441,14 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
  */
 struct lock_class_key { };
 
+static inline void lockdep_register_key(struct lock_class_key *key)
+{
+}
+
+static inline void lockdep_unregister_key(struct lock_class_key *key)
+{
+}
+
 /*
  * The lockdep_map takes no space if lockdep is disabled:
  */
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 84427441824e..c73bc4334bee 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -143,6 +143,9 @@ static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
  * nr_lock_classes is the number of elements of lock_classes[] that is
  * in use.
  */
+#define KEYHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
+#define KEYHASH_SIZE		(1UL << KEYHASH_BITS)
+static struct hlist_head lock_keys_hash[KEYHASH_SIZE];
 unsigned long nr_lock_classes;
 #ifndef CONFIG_DEBUG_LOCKDEP
 static
@@ -641,7 +644,7 @@ static int very_verbose(struct lock_class *class)
  * Is this the address of a static object:
  */
 #ifdef __KERNEL__
-static int static_obj(void *obj)
+static int static_obj(const void *obj)
 {
 	unsigned long start = (unsigned long) &_stext,
 		      end   = (unsigned long) &_end,
@@ -975,6 +978,71 @@ static void init_data_structures_once(void)
 	}
 }
 
+static inline struct hlist_head *keyhashentry(const struct lock_class_key *key)
+{
+	unsigned long hash = hash_long((uintptr_t)key, KEYHASH_BITS);
+
+	return lock_keys_hash + hash;
+}
+
+/* Register a dynamically allocated key. */
+void lockdep_register_key(struct lock_class_key *key)
+{
+	struct hlist_head *hash_head;
+	struct lock_class_key *k;
+	unsigned long flags;
+
+	if (WARN_ON_ONCE(static_obj(key)))
+		return;
+	hash_head = keyhashentry(key);
+
+	raw_local_irq_save(flags);
+	if (!graph_lock())
+		goto restore_irqs;
+	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+		if (WARN_ON_ONCE(k == key))
+			goto out_unlock;
+	}
+	hlist_add_head_rcu(&key->hash_entry, hash_head);
+out_unlock:
+	graph_unlock();
+restore_irqs:
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lockdep_register_key);
+
+/* Check whether a key has been registered as a dynamic key. */
+static bool is_dynamic_key(const struct lock_class_key *key)
+{
+	struct hlist_head *hash_head;
+	struct lock_class_key *k;
+	bool found = false;
+
+	if (WARN_ON_ONCE(static_obj(key)))
+		return false;
+
+	/*
+	 * If lock debugging is disabled lock_keys_hash[] may contain
+	 * pointers to memory that has already been freed. Avoid triggering
+	 * a use-after-free in that case by returning early.
+	 */
+	if (!debug_locks)
+		return true;
+
+	hash_head = keyhashentry(key);
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+		if (k == key) {
+			found = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return found;
+}
+
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -996,7 +1064,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	if (!lock->key) {
 		if (!assign_lock_key(lock))
 			return NULL;
-	} else if (!static_obj(lock->key)) {
+	} else if (!static_obj(lock->key) && !is_dynamic_key(lock->key)) {
 		return NULL;
 	}
 
@@ -3378,13 +3446,12 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
 	/*
-	 * Sanity check, the lock-class key must be persistent:
+	 * Sanity check, the lock-class key must either have been allocated
+	 * statically or must have been registered as a dynamic key.
 	 */
-	if (!static_obj(key)) {
-		printk("BUG: key %px not in .data!\n", key);
-		/*
-		 * What it says above ^^^^^, I suggest you read it.
-		 */
+	if (!static_obj(key) && !is_dynamic_key(key)) {
+		if (debug_locks)
+			printk(KERN_ERR "BUG: key %px has not been registered!\n", key);
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
@@ -4795,6 +4862,44 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		lockdep_reset_lock_reg(lock);
 }
 
+/* Unregister a dynamically allocated key. */
+void lockdep_unregister_key(struct lock_class_key *key)
+{
+	struct hlist_head *hash_head = keyhashentry(key);
+	struct lock_class_key *k;
+	struct pending_free *pf;
+	unsigned long flags;
+	bool found = false;
+
+	might_sleep();
+
+	if (WARN_ON_ONCE(static_obj(key)))
+		return;
+
+	raw_local_irq_save(flags);
+	if (!graph_lock())
+		goto out_irq;
+
+	pf = get_pending_free();
+	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
+		if (k == key) {
+			hlist_del_rcu(&k->hash_entry);
+			found = true;
+			break;
+		}
+	}
+	WARN_ON_ONCE(!found);
+	__lockdep_free_key_range(pf, key, 1);
+	call_rcu_zapped(pf);
+	graph_unlock();
+out_irq:
+	raw_local_irq_restore(flags);
+
+	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
+	synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(lockdep_unregister_key);
+
 void __init lockdep_init(void)
 {
 	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");


Thread overview: 59+ messages
2019-02-14 23:00 [PATCH v7 00/23] locking/lockdep: Add support for dynamic keys Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 01/23] locking/lockdep: Fix two 32-bit compiler warnings Bart Van Assche
2019-02-28  7:02   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 02/23] locking/lockdep: Fix reported required memory size (1/2) Bart Van Assche
2019-02-28  7:03   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 03/23] locking/lockdep: Fix reported required memory size (2/2) Bart Van Assche
2019-02-28  7:03   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 04/23] locking/lockdep: Avoid that add_chain_cache() adds an invalid chain to the cache Bart Van Assche
2019-02-28  7:04   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 05/23] locking/lockdep: Reorder struct lock_class members Bart Van Assche
2019-02-28  7:05   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 06/23] locking/lockdep: Make zap_class() remove all matching lock order entries Bart Van Assche
2019-02-28  7:05   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 07/23] locking/lockdep: Initialize the locks_before and locks_after lists earlier Bart Van Assche
2019-02-28  7:06   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 08/23] locking/lockdep: Split lockdep_free_key_range() and lockdep_reset_lock() Bart Van Assche
2019-02-28  7:07   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 09/23] locking/lockdep: Make it easy to detect whether or not inside a selftest Bart Van Assche
2019-02-28  7:07   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 10/23] locking/lockdep: Update two outdated comments Bart Van Assche
2019-02-28  7:08   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 11/23] locking/lockdep: Free lock classes that are no longer in use Bart Van Assche
2019-02-28  7:09   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 12/23] locking/lockdep: Reuse list entries " Bart Van Assche
2019-02-28  7:09   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 13/23] locking/lockdep: Introduce lockdep_next_lockchain() and lock_chain_count() Bart Van Assche
2019-02-28  7:10   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 14/23] locking/lockdep: Fix a comment in add_chain_cache() Bart Van Assche
2019-02-28  7:11   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 15/23] locking/lockdep: Reuse lock chains that have been freed Bart Van Assche
2019-02-28  7:11   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 16/23] locking/lockdep: Check data structure consistency Bart Van Assche
2019-02-28  7:12   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 17/23] locking/lockdep: Verify whether lock objects are small enough to be used as class keys Bart Van Assche
2019-02-28  7:13   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 18/23] locking/lockdep: Add support for dynamic keys Bart Van Assche
2019-02-26 17:17   ` Peter Zijlstra
2019-02-28  7:13   ` tip-bot for Bart Van Assche [this message]
2019-02-14 23:00 ` [PATCH v7 19/23] kernel/workqueue: Use dynamic lockdep keys for workqueues Bart Van Assche
2019-02-28  7:14   ` [tip:locking/core] " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 20/23] locking/spinlock: Introduce spin_lock_init_key() Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 21/23] block: Avoid that flushing triggers a lockdep complaint Bart Van Assche
2019-02-15  2:26   ` Ming Lei
2019-02-15 16:08     ` Bart Van Assche
2019-02-17 13:23       ` Ming Lei
2019-02-26 18:08     ` Peter Zijlstra
2019-02-27  1:35       ` Ming Lei
2019-02-27 14:24         ` Peter Zijlstra
2019-02-27 15:53           ` Ming Lei
2019-02-26 17:24   ` Peter Zijlstra
2019-02-26 17:48     ` Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 22/23] lockdep tests: Fix run_tests.sh Bart Van Assche
2019-02-28  7:15   ` [tip:locking/core] lockdep/lib/tests: " tip-bot for Bart Van Assche
2019-02-14 23:00 ` [PATCH v7 23/23] lockdep tests: Test dynamic key registration Bart Van Assche
2019-02-28  7:15   ` [tip:locking/core] lockdep/lib/tests: " tip-bot for Bart Van Assche
2019-02-21 22:02 ` [PATCH v7 00/23] locking/lockdep: Add support for dynamic keys Bart Van Assche
2019-02-22 16:26   ` Peter Zijlstra
2019-02-22 17:20     ` Bart Van Assche
2019-02-22 22:13       ` Peter Zijlstra
