From: Frederic Weisbecker <frederic@kernel.org>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Frederic Weisbecker <frederic@kernel.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Peter Zijlstra <peterz@infradead.org>,
	"David S . Miller" <davem@davemloft.net>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Mauro Carvalho Chehab <mchehab+samsung@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	"Paul E . McKenney" <paulmck@linux.vnet.ibm.com>,
	Frederic Weisbecker <fweisbec@gmail.com>,
	Pavan Kondeti <pkondeti@codeaurora.org>,
	Ingo Molnar <mingo@kernel.org>,
	Joel Fernandes <joel@joelfernandes.org>
Subject: [PATCH 36/37] locking: Introduce spin_[un]lock_bh_mask()
Date: Thu, 28 Feb 2019 18:12:41 +0100
Message-ID: <20190228171242.32144-37-frederic@kernel.org>
In-Reply-To: <20190228171242.32144-1-frederic@kernel.org>

This pair of helpers extends the coverage of fine-grained vector
masking to softirq-safe locking. This is especially interesting for
networking, which makes extensive use of such locking.

They work the same way as local_bh_disable_mask(): the lock side
returns the vectors that were previously disabled, and that value must
be handed back on unlock:

    bh = spin_lock_bh_mask(lock, BIT(NET_RX_SOFTIRQ));
    [...]
    spin_unlock_bh_mask(lock, bh);

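To make the calling convention concrete, here is a hedged sketch of a
caller that only needs to shut out the network receive vector while
holding the lock. struct frob and frob_account_rx() are made up for
this example; BIT() and NET_RX_SOFTIRQ are the usual kernel symbols:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    /* Hypothetical object whose fields are shared with NET_RX_SOFTIRQ. */
    struct frob {
    	spinlock_t lock;
    	unsigned long rx_packets;
    };

    static void frob_account_rx(struct frob *f)
    {
    	unsigned int bh;

    	/*
    	 * Mask only NET_RX_SOFTIRQ around the critical section;
    	 * timers, tasklets and the other vectors stay serviceable
    	 * on this CPU. The return value records the vectors that
    	 * were already disabled, so that nested masked sections
    	 * restore the right state on unlock.
    	 */
    	bh = spin_lock_bh_mask(&f->lock, BIT(NET_RX_SOFTIRQ));
    	f->rx_packets++;
    	spin_unlock_bh_mask(&f->lock, bh);
    }
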
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Pavan Kondeti <pkondeti@codeaurora.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---
 include/linux/spinlock.h         | 14 ++++++++++++++
 include/linux/spinlock_api_smp.h | 27 +++++++++++++++++++++++++++
 include/linux/spinlock_api_up.h  | 13 +++++++++++++
 kernel/locking/spinlock.c        | 18 ++++++++++++++++++
 4 files changed, 72 insertions(+)

diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e089157dcf97..57dd73ed202d 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -270,6 +270,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 
 #define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
+#define raw_spin_lock_bh_mask(lock, mask)	_raw_spin_lock_bh_mask(lock, mask)
 #define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
 #define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
 
@@ -279,6 +280,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 		_raw_spin_unlock_irqrestore(lock, flags);	\
 	} while (0)
 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
+#define raw_spin_unlock_bh_mask(lock, mask)	_raw_spin_unlock_bh_mask(lock, mask)
 
 #define raw_spin_trylock_bh(lock) \
 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
@@ -334,6 +336,12 @@ static __always_inline void spin_lock_bh(spinlock_t *lock)
 	raw_spin_lock_bh(&lock->rlock);
 }
 
+static __always_inline unsigned int spin_lock_bh_mask(spinlock_t *lock,
+						      unsigned int mask)
+{
+	return raw_spin_lock_bh_mask(&lock->rlock, mask);
+}
+
 static __always_inline int spin_trylock(spinlock_t *lock)
 {
 	return raw_spin_trylock(&lock->rlock);
@@ -374,6 +382,12 @@ static __always_inline void spin_unlock_bh(spinlock_t *lock)
 	raw_spin_unlock_bh(&lock->rlock);
 }
 
+static __always_inline void spin_unlock_bh_mask(spinlock_t *lock,
+						unsigned int mask)
+{
+	raw_spin_unlock_bh_mask(&lock->rlock, mask);
+}
+
 static __always_inline void spin_unlock_irq(spinlock_t *lock)
 {
 	raw_spin_unlock_irq(&lock->rlock);
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab89e740..473641abbc5c 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -26,6 +26,8 @@ void __lockfunc
 _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
 								__acquires(lock);
 void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)		__acquires(lock);
+unsigned int __lockfunc
+_raw_spin_lock_bh_mask(raw_spinlock_t *lock, unsigned int mask)	__acquires(lock);
 void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
 								__acquires(lock);
 
@@ -38,6 +40,9 @@ int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
 int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
 void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
 void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
+void __lockfunc
+_raw_spin_unlock_bh_mask(raw_spinlock_t *lock, unsigned int mask)
+								__releases(lock);
 void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
 void __lockfunc
 _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
@@ -49,6 +54,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_BH
 #define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
+#define _raw_spin_lock_bh_mask(lock, mask) __raw_spin_lock_bh_mask(lock, mask)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
@@ -73,6 +79,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
 #define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
+#define _raw_spin_unlock_bh_mask(lock, mask) __raw_spin_unlock_bh_mask(lock, mask)
 #endif
 
 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
@@ -136,6 +143,18 @@ static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
 	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 
+static inline unsigned int __raw_spin_lock_bh_mask(raw_spinlock_t *lock,
+						   unsigned int mask)
+{
+	unsigned int old_mask;
+
+	old_mask = local_bh_disable_mask(_RET_IP_, SOFTIRQ_LOCK_OFFSET, mask);
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
+
+	return old_mask;
+}
+
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	preempt_disable();
@@ -176,6 +195,14 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
 	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 }
 
+static inline void __raw_spin_unlock_bh_mask(raw_spinlock_t *lock,
+					     unsigned int mask)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	do_raw_spin_unlock(lock);
+	local_bh_enable_mask(_RET_IP_, SOFTIRQ_LOCK_OFFSET, mask);
+}
+
 static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
 {
 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d0d188861ad6..b900dcf46b26 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -33,6 +33,13 @@
 #define __LOCK_BH(lock) \
   do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
 
+#define __LOCK_BH_MASK(lock, mask) ({							\
+	unsigned int ____old_mask;							\
+	____old_mask = local_bh_disable_mask(_THIS_IP_, SOFTIRQ_LOCK_OFFSET, mask);	\
+	___LOCK(lock);									\
+	____old_mask;									\
+})
+
 #define __LOCK_IRQ(lock) \
   do { local_irq_disable(); __LOCK(lock); } while (0)
 
@@ -49,6 +56,10 @@
   do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
        ___UNLOCK(lock); } while (0)
 
+#define __UNLOCK_BH_MASK(lock, mask)				   \
+  do { local_bh_enable_mask(_THIS_IP_, SOFTIRQ_LOCK_OFFSET, mask); \
+       ___UNLOCK(lock); } while (0)
+
 #define __UNLOCK_IRQ(lock) \
   do { local_irq_enable(); __UNLOCK(lock); } while (0)
 
@@ -60,6 +71,7 @@
 #define _raw_read_lock(lock)			__LOCK(lock)
 #define _raw_write_lock(lock)			__LOCK(lock)
 #define _raw_spin_lock_bh(lock)			__LOCK_BH(lock)
+#define _raw_spin_lock_bh_mask(lock, mask)	__LOCK_BH_MASK(lock, mask)
 #define _raw_read_lock_bh(lock)			__LOCK_BH(lock)
 #define _raw_write_lock_bh(lock)		__LOCK_BH(lock)
 #define _raw_spin_lock_irq(lock)		__LOCK_IRQ(lock)
@@ -76,6 +88,7 @@
 #define _raw_read_unlock(lock)			__UNLOCK(lock)
 #define _raw_write_unlock(lock)			__UNLOCK(lock)
 #define _raw_spin_unlock_bh(lock)		__UNLOCK_BH(lock)
+#define _raw_spin_unlock_bh_mask(lock, mask)	__UNLOCK_BH_MASK(lock, mask)
 #define _raw_write_unlock_bh(lock)		__UNLOCK_BH(lock)
 #define _raw_read_unlock_bh(lock)		__UNLOCK_BH(lock)
 #define _raw_spin_unlock_irq(lock)		__UNLOCK_IRQ(lock)
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 936f3d14dd6b..4245cb3cda5a 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -170,6 +170,15 @@ void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
 EXPORT_SYMBOL(_raw_spin_lock_bh);
 #endif
 
+#ifndef CONFIG_INLINE_SPIN_LOCK_BH
+unsigned int __lockfunc _raw_spin_lock_bh_mask(raw_spinlock_t *lock,
+					       unsigned int mask)
+{
+	return __raw_spin_lock_bh_mask(lock, mask);
+}
+EXPORT_SYMBOL(_raw_spin_lock_bh_mask);
+#endif
+
 #ifdef CONFIG_UNINLINE_SPIN_UNLOCK
 void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
 {
@@ -202,6 +211,15 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
+void __lockfunc _raw_spin_unlock_bh_mask(raw_spinlock_t *lock,
+					 unsigned int mask)
+{
+	__raw_spin_unlock_bh_mask(lock, mask);
+}
+EXPORT_SYMBOL(_raw_spin_unlock_bh_mask);
+#endif
+
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
 {
-- 
2.21.0


Thread overview: 44+ messages
2019-02-28 17:12 [PATCH 00/37] softirq: Per vector masking v3 Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 01/37] locking/lockdep: Move valid_state() inside CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 02/37] locking/lockdep: Use expanded masks on find_usage_*() functions Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 03/37] locking/lockdep: Introduce struct lock_usage Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 04/37] locking/lockdep: Convert usage_mask to u64 Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 05/37] locking/lockdep: Introduce lock usage mask iterator Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 06/37] locking/lockdep: Test all incompatible scenario at once in check_irq_usage() Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 07/37] locking/lockdep: Prepare valid_state() to handle plain masks Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 08/37] locking/lockdep: Prepare check_usage_*() " Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 09/37] locking/lockdep: Prepare state_verbose() to handle all softirqs Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 10/37] locking/lockdep: Make mark_lock() fastpath to work with multiple usage at once Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 11/37] locking/lockdep: Save stack trace for each softirq vector involved Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 12/37] locking/lockdep: Report all usages on mark_lock() verbosity mode Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 13/37] softirq: Macrofy softirq vectors Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 14/37] locking/lockdep: Define per vector softirq lock usage states Frederic Weisbecker
2019-04-09 12:03   ` Peter Zijlstra
2019-02-28 17:12 ` [PATCH 15/37] softirq: Pass softirq vector number to lockdep on vector execution Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 16/37] x86: Revert "x86/irq: Demote irq_cpustat_t::__softirq_pending to u16" Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 17/37] arch/softirq: Rename softirq_pending fields to softirq_data Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 18/37] softirq: Normalize softirq_pending naming scheme Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 19/37] softirq: Convert softirq_pending_*() to set/clear mask scheme Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 20/37] softirq: Introduce disabled softirq vectors bits Frederic Weisbecker
2019-03-01 11:29   ` Sebastian Andrzej Siewior
2019-02-28 17:12 ` [PATCH 21/37] softirq: Rename _local_bh_enable() to local_bh_enable_no_softirq() Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 22/37] softirq: Move vectors bits to bottom_half.h Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 23/37] x86: Init softirq enabled field Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 24/37] parisc: " Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 25/37] powerpc: " Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 26/37] softirq: Init softirq enabled field for default irq_stat definition Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 27/37] softirq: Check enabled vectors before processing Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 28/37] softirq: Remove stale comment Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 29/37] softirq: Uninline !CONFIG_TRACE_IRQFLAGS __local_bh_disable_ip() Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 30/37] softirq: Prepare for mixing all/per-vector masking Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 31/37] softirq: Support per vector masking Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 32/37] locking/lockdep: Remove redundant softirqs on check Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 33/37] locking/lockdep: Update check_flags() according to new layout Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 34/37] locking/lockdep: Branch the new vec-finegrained softirq masking to lockdep Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 35/37] softirq: Allow to soft interrupt vector-specific masked contexts Frederic Weisbecker
2019-02-28 17:12 ` Frederic Weisbecker [this message]
2019-02-28 17:12 ` [PATCH 37/37] net: Make softirq vector masking finegrained on release_sock() Frederic Weisbecker
2019-02-28 17:33 ` [PATCH 00/37] softirq: Per vector masking v3 Linus Torvalds
2019-03-01  3:45   ` Frederic Weisbecker
2019-03-01 16:51     ` Linus Torvalds
2019-03-08 15:30       ` Frederic Weisbecker
