From: Frederic Weisbecker <frederic@kernel.org>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Frederic Weisbecker <frederic@kernel.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Peter Zijlstra <peterz@infradead.org>,
	Mauro Carvalho Chehab <mchehab@s-opensource.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	"David S . Miller" <davem@davemloft.net>,
	Thomas Gleixner <tglx@linutronix.de>,
	"Paul E . McKenney" <paulmck@linux.vnet.ibm.com>,
	Frederic Weisbecker <fweisbec@gmail.com>,
	Pavan Kondeti <pkondeti@codeaurora.org>,
	Ingo Molnar <mingo@kernel.org>,
	Joel Fernandes <joel@joelfernandes.org>
Subject: [PATCH 01/32] locking/lockdep: Use expanded masks on find_usage_*() functions
Date: Tue, 12 Feb 2019 18:13:52 +0100
Message-ID: <20190212171423.8308-2-frederic@kernel.org>
In-Reply-To: <20190212171423.8308-1-frederic@kernel.org>

In order to perform softirq vector-finegrained locking validation, we'll
need to check multiple vector usages at once. Prepare the low-level
usage-mask check functions for that purpose.
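
For illustration (a sketch only, not part of the diff below), the expanded
mask lets a single forward BFS test several usage bits at once, for example
both hardirq and softirq usage, mirroring what check_usage_forwards() does
today with a single bit:

	struct lock_list root, *target_entry;
	u64 usage_mask = BIT(LOCK_USED_IN_HARDIRQ) | BIT(LOCK_USED_IN_SOFTIRQ);
	int ret;

	root.parent = NULL;
	root.class = hlock_class(this);
	/* One BFS pass matches either usage bit, instead of one walk per bit. */
	ret = find_usage_forwards(&root, usage_mask, &target_entry);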

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Mauro Carvalho Chehab <mchehab@s-opensource.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Pavan Kondeti <pkondeti@codeaurora.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---
 kernel/locking/lockdep.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 608f74ed8bb9..6127cef4f8fb 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1335,9 +1335,9 @@ check_redundant(struct lock_list *root, struct lock_class *target,
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
 
-static inline int usage_match(struct lock_list *entry, void *bit)
+static inline int usage_match(struct lock_list *entry, void *mask)
 {
-	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+	return entry->class->usage_mask & *(u64 *)mask;
 }
 
 
@@ -1353,14 +1353,14 @@ static inline int usage_match(struct lock_list *entry, void *bit)
  * Return <0 on error.
  */
 static int
-find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_forwards(struct lock_list *root, u64 usage_mask,
 			struct lock_list **target_entry)
 {
 	int result;
 
 	debug_atomic_inc(nr_find_usage_forwards_checks);
 
-	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+	result = __bfs_forwards(root, &usage_mask, usage_match, target_entry);
 
 	return result;
 }
@@ -1376,14 +1376,14 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
  * Return <0 on error.
  */
 static int
-find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+find_usage_backwards(struct lock_list *root, u64 usage_mask,
 			struct lock_list **target_entry)
 {
 	int result;
 
 	debug_atomic_inc(nr_find_usage_backwards_checks);
 
-	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
+	result = __bfs_backwards(root, &usage_mask, usage_match, target_entry);
 
 	return result;
 }
@@ -1588,7 +1588,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 	this.parent = NULL;
 
 	this.class = hlock_class(prev);
-	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+	ret = find_usage_backwards(&this, BIT(bit_backwards), &target_entry);
 	if (ret < 0)
 		return print_bfs_bug(ret);
 	if (ret == 1)
@@ -1596,7 +1596,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 	that.parent = NULL;
 	that.class = hlock_class(next);
-	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
+	ret = find_usage_forwards(&that, BIT(bit_forwards), &target_entry1);
 	if (ret < 0)
 		return print_bfs_bug(ret);
 	if (ret == 1)
@@ -2553,7 +2553,7 @@ print_irq_inversion_bug(struct task_struct *curr,
  */
 static int
 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
-		     enum lock_usage_bit bit, const char *irqclass)
+		     u64 usage_mask, const char *irqclass)
 {
 	int ret;
 	struct lock_list root;
@@ -2561,7 +2561,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 
 	root.parent = NULL;
 	root.class = hlock_class(this);
-	ret = find_usage_forwards(&root, bit, &target_entry);
+	ret = find_usage_forwards(&root, usage_mask, &target_entry);
 	if (ret < 0)
 		return print_bfs_bug(ret);
 	if (ret == 1)
@@ -2577,7 +2577,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
  */
 static int
 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
-		      enum lock_usage_bit bit, const char *irqclass)
+		      u64 usage_mask, const char *irqclass)
 {
 	int ret;
 	struct lock_list root;
@@ -2585,7 +2585,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
 	root.parent = NULL;
 	root.class = hlock_class(this);
-	ret = find_usage_backwards(&root, bit, &target_entry);
+	ret = find_usage_backwards(&root, usage_mask, &target_entry);
 	if (ret < 0)
 		return print_bfs_bug(ret);
 	if (ret == 1)
@@ -2644,7 +2644,7 @@ static inline int state_verbose(enum lock_usage_bit bit,
 }
 
 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
-			     enum lock_usage_bit bit, const char *name);
+			     u64 usage_mask, const char *name);
 
 static int
 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
@@ -2676,7 +2676,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 	 * states.
 	 */
 	if ((!read || !dir || STRICT_READ_CHECKS) &&
-			!usage(curr, this, excl_bit, state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
+	    !usage(curr, this, BIT(excl_bit), state_name(new_bit & ~LOCK_USAGE_READ_MASK)))
 		return 0;
 
 	/*
@@ -2687,7 +2687,7 @@ mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 			return 0;
 
 		if (STRICT_READ_CHECKS &&
-			!usage(curr, this, excl_bit + LOCK_USAGE_READ_MASK,
+		    !usage(curr, this, BIT(excl_bit + LOCK_USAGE_READ_MASK),
 				state_name(new_bit + LOCK_USAGE_READ_MASK)))
 			return 0;
 	}
-- 
2.17.1


Thread overview: 51+ messages
2019-02-12 17:13 [PATCH 00/32] softirq: Per vector masking v2 Frederic Weisbecker
2019-02-12 17:13 ` Frederic Weisbecker [this message]
2019-02-12 17:35   ` [PATCH 01/32] locking/lockdep: Use expanded masks on find_usage_*() functions Linus Torvalds
2019-02-12 17:13 ` [PATCH 02/32] locking/lockdep: Introduce struct lock_usage Frederic Weisbecker
2019-02-12 17:38   ` Linus Torvalds
2019-02-13 14:56     ` Frederic Weisbecker
2019-02-12 17:13 ` [PATCH 03/32] locking/lockdep: Convert usage_mask to u64 Frederic Weisbecker
2019-02-12 17:40   ` Linus Torvalds
2019-02-13 14:51     ` Frederic Weisbecker
2019-02-12 17:13 ` [PATCH 04/32] locking/lockdep: Test all incompatible scenario at once in check_irq_usage() Frederic Weisbecker
2019-02-12 17:13 ` [PATCH 05/32] locking/lockdep: Prepare valid_state() to handle plain masks Frederic Weisbecker
2019-02-12 17:45   ` Linus Torvalds
2019-02-13 15:16     ` Frederic Weisbecker
2019-02-13 19:47       ` Linus Torvalds
2019-02-21  3:53         ` Frederic Weisbecker
2019-02-12 17:13 ` [PATCH 06/32] locking/lockdep: Prepare check_usage_*() " Frederic Weisbecker
2019-02-12 17:13 ` [PATCH 07/32] locking/lockdep: Prepare state_verbose() to handle all softirqs Frederic Weisbecker
2019-02-12 17:13 ` [PATCH 08/32] locking/lockdep: Make mark_lock() fastpath to work with multiple usage at once Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 09/32] locking/lockdep: Save stack trace for each softirq vector involved Frederic Weisbecker
2019-02-12 17:47   ` Linus Torvalds
2019-02-13 15:18     ` Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 10/32] locking/lockdep: Make mark_lock() verbosity aware of vector Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 11/32] softirq: Macrofy softirq vectors Frederic Weisbecker
2019-02-27  9:54   ` Sebastian Andrzej Siewior
2019-02-27 23:08     ` Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 12/32] locking/lockdep: Define per vector softirq lock usage states Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 13/32] softirq: Pass softirq vector number to lockdep on vector execution Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 14/32] x86: Revert "x86/irq: Demote irq_cpustat_t::__softirq_pending to u16" Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 15/32] arch/softirq: Rename softirq_pending fields to softirq_data Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 16/32] softirq: Normalize softirq_pending naming scheme Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 17/32] softirq: Convert softirq_pending_*() to set/clear mask scheme Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 18/32] softirq: Introduce disabled softirq vectors bits Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 19/32] softirq: Rename _local_bh_enable() to local_bh_enable_no_softirq() Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 20/32] softirq: Move vectors bits to bottom_half.h Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 21/32] x86: Init softirq enabled field Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 22/32] softirq: Check enabled vectors before processing Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 23/32] softirq: Remove stale comment Frederic Weisbecker
2019-02-27 11:04   ` Sebastian Andrzej Siewior
2019-02-27 23:09     ` Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 24/32] softirq: Uninline !CONFIG_TRACE_IRQFLAGS __local_bh_disable_ip() Frederic Weisbecker
2019-02-27 11:14   ` Sebastian Andrzej Siewior
2019-02-27 23:14     ` Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 25/32] softirq: Prepare for mixing all/per-vector masking Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 26/32] softirq: Support per vector masking Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 27/32] locking/lockdep: Remove redundant softirqs on check Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 28/32] locking/lockdep: Update check_flags() according to new layout Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 29/32] locking/lockdep: Branch the new vec-finegrained softirq masking to lockdep Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 30/32] softirq: Allow to soft interrupt vector-specific masked contexts Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 31/32] locking: Introduce spin_[un]lock_bh_mask() Frederic Weisbecker
2019-02-12 17:14 ` [PATCH 32/32] net: Make softirq vector masking finegrained on release_sock() Frederic Weisbecker
2019-02-12 18:29 ` [PATCH 00/32] softirq: Per vector masking v2 David Miller
