All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/3] lockdep: Allow checking a read-only lock
@ 2018-01-17 15:14 Matthew Wilcox
  2018-01-17 15:14 ` [PATCH 1/3] lockdep: Assign lock keys on registration Matthew Wilcox
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Matthew Wilcox @ 2018-01-17 15:14 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar
  Cc: David S. Miller, Matthew Wilcox, Thomas Gleixner, linux-kernel

From: Matthew Wilcox <mawilcox@microsoft.com>

I am not for one moment suggesting that the concept of a read-only lock
makes sense.  You can't sensibly put one in ROM or in read-only mappings.
What does make sense is some APIs want to specify a const pointer to
indicate that they do not modify the object being pointed to.  One example
we have of this today is in the networking stack; tcp_md5_do_lookup() takes
a const struct sock * argument and wants to ensure that the caller either
took the socket lock or the rcu lock.

At the moment, tcp_md5_do_lookup() is actually lying to its callers;
lockdep_sock_is_held() casts away the constness of the pointer because
lockdep actually does modify the lock when checking whether it's held
(under rare and unnecessary conditions).

Fix this situation by (patch 1) only assigning a lock key on registration,
not on check, (patch 2) marking the pointers in the lockdep check path
as const and (patch 3) converting a few of the callers to themselves
be const, removing the nasty hack in lockdep_sock_is_held().

Matthew Wilcox (3):
  lockdep: Assign lock keys on registration
  lockdep: Make lockdep checking constant
  lockdep: Convert some users to const

 include/linux/backing-dev.h |  2 +-
 include/linux/lockdep.h     |  4 +-
 include/linux/srcu.h        |  4 +-
 include/net/sock.h          |  4 +-
 kernel/locking/lockdep.c    | 89 ++++++++++++++++++++++++---------------------
 5 files changed, 53 insertions(+), 50 deletions(-)

-- 
2.15.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 1/3] lockdep: Assign lock keys on registration
  2018-01-17 15:14 [PATCH 0/3] lockdep: Allow checking a read-only lock Matthew Wilcox
@ 2018-01-17 15:14 ` Matthew Wilcox
  2018-01-18 11:00   ` [tip:locking/core] " tip-bot for Matthew Wilcox
  2018-01-17 15:14 ` [PATCH 2/3] lockdep: Make lockdep checking constant Matthew Wilcox
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 8+ messages in thread
From: Matthew Wilcox @ 2018-01-17 15:14 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar
  Cc: David S. Miller, Matthew Wilcox, Thomas Gleixner, linux-kernel

From: Matthew Wilcox <mawilcox@microsoft.com>

Lockdep was assigning lock keys when a lock was looked up.  This is
unnecessary; if the lock has never been registered then we know it is
not locked.  It also complicates the calling convention.  Switch to
assigning the lock key in register_lock_class().

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 kernel/locking/lockdep.c | 76 +++++++++++++++++++++++++-----------------------
 1 file changed, 40 insertions(+), 36 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5fa1324a4f29..472547dd45c3 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -647,18 +647,12 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
 static inline struct lock_class *
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
-	bool is_static = false;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -671,24 +665,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	}
 
 	/*
-	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself. If the lock is in the per cpu area,
-	 * the canonical address of the lock (per cpu offset removed) is
-	 * used.
+	 * If it is not initialised then it has never been locked,
+	 * so it won't be present in the hash table.
 	 */
-	if (unlikely(!lock->key)) {
-		unsigned long can_addr, addr = (unsigned long)lock;
-
-		if (__is_kernel_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (__is_module_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (static_obj(lock))
-			lock->key = (void *)lock;
-		else
-			return ERR_PTR(-EINVAL);
-		is_static = true;
-	}
+	if (unlikely(!lock->key))
+		return NULL;
 
 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -720,7 +701,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}
 
-	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+	return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+	unsigned long can_addr, addr = (unsigned long)lock;
+
+	if (__is_kernel_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (__is_module_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (static_obj(lock))
+		lock->key = (void *)lock;
+	else {
+		/* Debug-check: all keys must be persistent! */
+		debug_locks_off();
+		pr_err("INFO: trying to register non-static key.\n");
+		pr_err("the code is fine but needs lockdep annotation.\n");
+		pr_err("turning off the locking correctness validator.\n");
+		dump_stack();
+		return false;
+	}
+
+	return true;
 }
 
 /*
@@ -738,18 +747,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
-	if (likely(!IS_ERR_OR_NULL(class)))
+	if (likely(class))
 		goto out_set_class_cache;
 
-	/*
-	 * Debug-check: all keys must be persistent!
-	 */
-	if (IS_ERR(class)) {
-		debug_locks_off();
-		printk("INFO: trying to register non-static key.\n");
-		printk("the code is fine but needs lockdep annotation.\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
+	if (!lock->key) {
+		if (!assign_lock_key(lock))
+			return NULL;
+	} else if (!static_obj(lock->key)) {
 		return NULL;
 	}
 
@@ -3498,7 +3502,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		 * Clearly if the lock hasn't been acquired _ever_, we're not
 		 * holding it either, so report failure.
 		 */
-		if (IS_ERR_OR_NULL(class))
+		if (!class)
 			return 0;
 
 		/*
@@ -4294,7 +4298,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		 * If the class exists we look it up and zap it:
 		 */
 		class = look_up_lock_class(lock, j);
-		if (!IS_ERR_OR_NULL(class))
+		if (class)
 			zap_class(class);
 	}
 	/*
-- 
2.15.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 2/3] lockdep: Make lockdep checking constant
  2018-01-17 15:14 [PATCH 0/3] lockdep: Allow checking a read-only lock Matthew Wilcox
  2018-01-17 15:14 ` [PATCH 1/3] lockdep: Assign lock keys on registration Matthew Wilcox
@ 2018-01-17 15:14 ` Matthew Wilcox
  2018-01-18 11:01   ` [tip:locking/core] " tip-bot for Matthew Wilcox
  2018-01-17 15:14 ` [PATCH 3/3] lockdep: Convert some users to const Matthew Wilcox
  2018-01-17 16:33 ` [PATCH 0/3] lockdep: Allow checking a read-only lock Peter Zijlstra
  3 siblings, 1 reply; 8+ messages in thread
From: Matthew Wilcox @ 2018-01-17 15:14 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar
  Cc: David S. Miller, Matthew Wilcox, Thomas Gleixner, linux-kernel

From: Matthew Wilcox <mawilcox@microsoft.com>

There are several places in the kernel which would like to pass a const
pointer to lockdep_is_held().  Constify the entire path so nobody has to
trick the compiler.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 include/linux/lockdep.h  |  4 ++--
 kernel/locking/lockdep.c | 13 +++++++------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 3251d9c0d313..864d6fc60fa6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
 /*
  * Same "read" as for lock_acquire(), except -1 means any.
  */
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
 
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
 {
 	return lock_is_held_type(lock, -1);
 }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 472547dd45c3..b7a307b53704 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -648,7 +648,7 @@ static int count_matching_names(struct lock_class *new_class)
 }
 
 static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
@@ -3276,7 +3276,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3485,13 +3485,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	return 0;
 }
 
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+					const struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache[0];
+		const struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3727,7 +3728,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3941,7 +3942,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;
-- 
2.15.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 3/3] lockdep: Convert some users to const
  2018-01-17 15:14 [PATCH 0/3] lockdep: Allow checking a read-only lock Matthew Wilcox
  2018-01-17 15:14 ` [PATCH 1/3] lockdep: Assign lock keys on registration Matthew Wilcox
  2018-01-17 15:14 ` [PATCH 2/3] lockdep: Make lockdep checking constant Matthew Wilcox
@ 2018-01-17 15:14 ` Matthew Wilcox
  2018-01-18 11:01   ` [tip:locking/core] " tip-bot for Matthew Wilcox
  2018-01-17 16:33 ` [PATCH 0/3] lockdep: Allow checking a read-only lock Peter Zijlstra
  3 siblings, 1 reply; 8+ messages in thread
From: Matthew Wilcox @ 2018-01-17 15:14 UTC (permalink / raw)
  To: Peter Zijlstra, Ingo Molnar
  Cc: David S. Miller, Matthew Wilcox, Thomas Gleixner, linux-kernel

From: Matthew Wilcox <mawilcox@microsoft.com>

These users of lockdep_is_held() either wanted lockdep_is_held to
take a const pointer, or would benefit from providing a const pointer.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
 include/linux/backing-dev.h | 2 +-
 include/linux/srcu.h        | 4 ++--
 include/net/sock.h          | 4 +---
 3 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e54e7e0033eb..3e4ce54d84ab 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
  * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
  * associated wb's list_lock.
  */
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
 {
 #ifdef CONFIG_LOCKDEP
 	WARN_ON_ONCE(debug_locks &&
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 62be8966e837..33c1c698df09 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
  * relies on normal RCU, it can be called from the CPU which
  * is in the idle loop from an RCU point of view or offline.
  */
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 {
 	return 1;
 }
diff --git a/include/net/sock.h b/include/net/sock.h
index 7a7b14e9628a..c4a424fe6fdd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1445,10 +1445,8 @@ do {									\
 } while (0)
 
 #ifdef CONFIG_LOCKDEP
-static inline bool lockdep_sock_is_held(const struct sock *csk)
+static inline bool lockdep_sock_is_held(const struct sock *sk)
 {
-	struct sock *sk = (struct sock *)csk;
-
 	return lockdep_is_held(&sk->sk_lock) ||
 	       lockdep_is_held(&sk->sk_lock.slock);
 }
-- 
2.15.1

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 0/3] lockdep: Allow checking a read-only lock
  2018-01-17 15:14 [PATCH 0/3] lockdep: Allow checking a read-only lock Matthew Wilcox
                   ` (2 preceding siblings ...)
  2018-01-17 15:14 ` [PATCH 3/3] lockdep: Convert some users to const Matthew Wilcox
@ 2018-01-17 16:33 ` Peter Zijlstra
  3 siblings, 0 replies; 8+ messages in thread
From: Peter Zijlstra @ 2018-01-17 16:33 UTC (permalink / raw)
  To: Matthew Wilcox
  Cc: Ingo Molnar, David S. Miller, Matthew Wilcox, Thomas Gleixner,
	linux-kernel

On Wed, Jan 17, 2018 at 07:14:11AM -0800, Matthew Wilcox wrote:
> From: Matthew Wilcox <mawilcox@microsoft.com>
> 
> I am not for one moment suggesting that the concept of a read-only lock
> makes sense.  You can't sensibly put one in ROM or in read-only mappings.
> What does make sense is some APIs want to specify a const pointer to
> indicate that they do not modify the object being pointed to.  One example
> we have of this today is in the networking stack; tcp_md5_do_lookup takes
> a const struct sock * argument and wants to ensure that the caller either
> took the socket lock or the rcu lock.
> 
> At the moment, tcp_md5_do_lookup() is actually lying to its callers;
> lockdep_sock_is_held() casts away the constness of the pointer because
> lockdep actually does modify the lock when checking whether it's held
> (under rare and unnecessary conditions).
> 
> Fix this situation by (patch 1) only assigning a lock key on registration,
> not on check, (patch 2) marking the pointers in the lockdep check path
> as const and (patch 3) converting a few of the callers to themselves
> be const, removing the nasty hack in lockdep_sock_is_held().
> 

Seems OK.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>

Ingo can you make that happen?

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [tip:locking/core] lockdep: Assign lock keys on registration
  2018-01-17 15:14 ` [PATCH 1/3] lockdep: Assign lock keys on registration Matthew Wilcox
@ 2018-01-18 11:00   ` tip-bot for Matthew Wilcox
  0 siblings, 0 replies; 8+ messages in thread
From: tip-bot for Matthew Wilcox @ 2018-01-18 11:00 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: peterz, davem, hpa, tglx, mawilcox, mingo, linux-kernel

Commit-ID:  64f29d1bc9fb8196df3d0f1df694245230e208c0
Gitweb:     https://git.kernel.org/tip/64f29d1bc9fb8196df3d0f1df694245230e208c0
Author:     Matthew Wilcox <mawilcox@microsoft.com>
AuthorDate: Wed, 17 Jan 2018 07:14:12 -0800
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 18 Jan 2018 11:56:48 +0100

lockdep: Assign lock keys on registration

Lockdep was assigning lock keys when a lock was looked up.  This is
unnecessary; if the lock has never been registered then it is known that it
is not locked.  It also complicates the calling convention.  Switch to
assigning the lock key in register_lock_class().

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "David S. Miller" <davem@davemloft.net>
Link: https://lkml.kernel.org/r/20180117151414.23686-2-willy@infradead.org

---
 kernel/locking/lockdep.c | 76 +++++++++++++++++++++++++-----------------------
 1 file changed, 40 insertions(+), 36 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5fa1324..472547d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -647,18 +647,12 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
 static inline struct lock_class *
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
-	bool is_static = false;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -671,24 +665,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	}
 
 	/*
-	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself. If the lock is in the per cpu area,
-	 * the canonical address of the lock (per cpu offset removed) is
-	 * used.
+	 * If it is not initialised then it has never been locked,
+	 * so it won't be present in the hash table.
 	 */
-	if (unlikely(!lock->key)) {
-		unsigned long can_addr, addr = (unsigned long)lock;
-
-		if (__is_kernel_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (__is_module_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (static_obj(lock))
-			lock->key = (void *)lock;
-		else
-			return ERR_PTR(-EINVAL);
-		is_static = true;
-	}
+	if (unlikely(!lock->key))
+		return NULL;
 
 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -720,7 +701,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}
 
-	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+	return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+	unsigned long can_addr, addr = (unsigned long)lock;
+
+	if (__is_kernel_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (__is_module_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (static_obj(lock))
+		lock->key = (void *)lock;
+	else {
+		/* Debug-check: all keys must be persistent! */
+		debug_locks_off();
+		pr_err("INFO: trying to register non-static key.\n");
+		pr_err("the code is fine but needs lockdep annotation.\n");
+		pr_err("turning off the locking correctness validator.\n");
+		dump_stack();
+		return false;
+	}
+
+	return true;
 }
 
 /*
@@ -738,18 +747,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
-	if (likely(!IS_ERR_OR_NULL(class)))
+	if (likely(class))
 		goto out_set_class_cache;
 
-	/*
-	 * Debug-check: all keys must be persistent!
-	 */
-	if (IS_ERR(class)) {
-		debug_locks_off();
-		printk("INFO: trying to register non-static key.\n");
-		printk("the code is fine but needs lockdep annotation.\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
+	if (!lock->key) {
+		if (!assign_lock_key(lock))
+			return NULL;
+	} else if (!static_obj(lock->key)) {
 		return NULL;
 	}
 
@@ -3498,7 +3502,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		 * Clearly if the lock hasn't been acquired _ever_, we're not
 		 * holding it either, so report failure.
 		 */
-		if (IS_ERR_OR_NULL(class))
+		if (!class)
 			return 0;
 
 		/*
@@ -4294,7 +4298,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		 * If the class exists we look it up and zap it:
 		 */
 		class = look_up_lock_class(lock, j);
-		if (!IS_ERR_OR_NULL(class))
+		if (class)
 			zap_class(class);
 	}
 	/*

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [tip:locking/core] lockdep: Make lockdep checking constant
  2018-01-17 15:14 ` [PATCH 2/3] lockdep: Make lockdep checking constant Matthew Wilcox
@ 2018-01-18 11:01   ` tip-bot for Matthew Wilcox
  0 siblings, 0 replies; 8+ messages in thread
From: tip-bot for Matthew Wilcox @ 2018-01-18 11:01 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: peterz, linux-kernel, hpa, tglx, mawilcox, davem, mingo

Commit-ID:  08f36ff642342fb058212099757cb5d40f158c2a
Gitweb:     https://git.kernel.org/tip/08f36ff642342fb058212099757cb5d40f158c2a
Author:     Matthew Wilcox <mawilcox@microsoft.com>
AuthorDate: Wed, 17 Jan 2018 07:14:13 -0800
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 18 Jan 2018 11:56:48 +0100

lockdep: Make lockdep checking constant

There are several places in the kernel which would like to pass a const
pointer to lockdep_is_held().  Constify the entire path so nobody has to
trick the compiler.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "David S. Miller" <davem@davemloft.net>
Link: https://lkml.kernel.org/r/20180117151414.23686-3-willy@infradead.org

---
 include/linux/lockdep.h  |  4 ++--
 kernel/locking/lockdep.c | 13 +++++++------
 2 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 3251d9c..864d6fc 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
 /*
  * Same "read" as for lock_acquire(), except -1 means any.
  */
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
 
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
 {
 	return lock_is_held_type(lock, -1);
 }
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 472547d..b7a307b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -648,7 +648,7 @@ static int count_matching_names(struct lock_class *new_class)
 }
 
 static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
@@ -3276,7 +3276,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3485,13 +3485,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	return 0;
 }
 
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+					const struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache[0];
+		const struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3727,7 +3728,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3941,7 +3942,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [tip:locking/core] lockdep: Convert some users to const
  2018-01-17 15:14 ` [PATCH 3/3] lockdep: Convert some users to const Matthew Wilcox
@ 2018-01-18 11:01   ` tip-bot for Matthew Wilcox
  0 siblings, 0 replies; 8+ messages in thread
From: tip-bot for Matthew Wilcox @ 2018-01-18 11:01 UTC (permalink / raw)
  To: linux-tip-commits; +Cc: peterz, mingo, mawilcox, davem, linux-kernel, tglx, hpa

Commit-ID:  05b93801a23c21a6f355f4c492c51715d6ccc96d
Gitweb:     https://git.kernel.org/tip/05b93801a23c21a6f355f4c492c51715d6ccc96d
Author:     Matthew Wilcox <mawilcox@microsoft.com>
AuthorDate: Wed, 17 Jan 2018 07:14:14 -0800
Committer:  Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 18 Jan 2018 11:56:49 +0100

lockdep: Convert some users to const

These users of lockdep_is_held() either wanted lockdep_is_held to
take a const pointer, or would benefit from providing a const pointer.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "David S. Miller" <davem@davemloft.net>
Link: https://lkml.kernel.org/r/20180117151414.23686-4-willy@infradead.org

---
 include/linux/backing-dev.h | 2 +-
 include/linux/srcu.h        | 4 ++--
 include/net/sock.h          | 4 +---
 3 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e54e7e0..3e4ce54 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
  * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
  * associated wb's list_lock.
  */
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
 {
 #ifdef CONFIG_LOCKDEP
 	WARN_ON_ONCE(debug_locks &&
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 62be896..33c1c69 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
  * relies on normal RCU, it can be called from the CPU which
  * is in the idle loop from an RCU point of view or offline.
  */
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 {
 	return 1;
 }
diff --git a/include/net/sock.h b/include/net/sock.h
index 7a7b14e..c4a424f 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1445,10 +1445,8 @@ do {									\
 } while (0)
 
 #ifdef CONFIG_LOCKDEP
-static inline bool lockdep_sock_is_held(const struct sock *csk)
+static inline bool lockdep_sock_is_held(const struct sock *sk)
 {
-	struct sock *sk = (struct sock *)csk;
-
 	return lockdep_is_held(&sk->sk_lock) ||
 	       lockdep_is_held(&sk->sk_lock.slock);
 }

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2018-01-18 11:04 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-01-17 15:14 [PATCH 0/3] lockdep: Allow checking a read-only lock Matthew Wilcox
2018-01-17 15:14 ` [PATCH 1/3] lockdep: Assign lock keys on registration Matthew Wilcox
2018-01-18 11:00   ` [tip:locking/core] " tip-bot for Matthew Wilcox
2018-01-17 15:14 ` [PATCH 2/3] lockdep: Make lockdep checking constant Matthew Wilcox
2018-01-18 11:01   ` [tip:locking/core] " tip-bot for Matthew Wilcox
2018-01-17 15:14 ` [PATCH 3/3] lockdep: Convert some users to const Matthew Wilcox
2018-01-18 11:01   ` [tip:locking/core] " tip-bot for Matthew Wilcox
2018-01-17 16:33 ` [PATCH 0/3] lockdep: Allow checking a read-only lock Peter Zijlstra

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.