linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Frederic Weisbecker <frederic@kernel.org>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Frederic Weisbecker <frederic@kernel.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Peter Zijlstra <peterz@infradead.org>,
	"David S . Miller" <davem@davemloft.net>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	Mauro Carvalho Chehab <mchehab+samsung@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	"Paul E . McKenney" <paulmck@linux.vnet.ibm.com>,
	Frederic Weisbecker <fweisbec@gmail.com>,
	Pavan Kondeti <pkondeti@codeaurora.org>,
	Ingo Molnar <mingo@kernel.org>,
	Joel Fernandes <joel@joelfernandes.org>
Subject: [PATCH 31/37] softirq: Support per vector masking
Date: Thu, 28 Feb 2019 18:12:36 +0100	[thread overview]
Message-ID: <20190228171242.32144-32-frederic@kernel.org> (raw)
In-Reply-To: <20190228171242.32144-1-frederic@kernel.org>

Provide the low-level APIs to support per-vector masking. In order
to allow these to nest properly with themselves and with the full
softirq masking APIs, we provide two mechanisms:

1) Self nesting: use a caller stack saved/restored state model similar to
  that of local_irq_save() and local_irq_restore():

      bh = local_bh_disable_mask(BIT(NET_RX_SOFTIRQ));
      [...]
          bh2 = local_bh_disable_mask(BIT(TIMER_SOFTIRQ));
          [...]
          local_bh_enable_mask(bh2);
      local_bh_enable_mask(bh);

2) Nest against full masking: save the per-vector disabled state prior
   to the first full disable operation and restore it on the last full
   enable operation:

      bh = local_bh_disable_mask(BIT(NET_RX_SOFTIRQ));
      [...]
          local_bh_disable() <---- save state with NET_RX_SOFTIRQ disabled
          [...]
          local_bh_enable() <---- restore state with NET_RX_SOFTIRQ disabled
      local_bh_enable_mask(bh);

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Pavan Kondeti <pkondeti@codeaurora.org>
Cc: Paul E . McKenney <paulmck@linux.vnet.ibm.com>
Cc: David S . Miller <davem@davemloft.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
---
 include/linux/bottom_half.h |  7 +++
 kernel/softirq.c            | 85 +++++++++++++++++++++++++++++++------
 2 files changed, 80 insertions(+), 12 deletions(-)

diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index ef9e4c752f56..a6996e3f4526 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -35,6 +35,10 @@ static inline void local_bh_disable(void)
 	__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
 }
 
+extern unsigned int local_bh_disable_mask(unsigned long ip,
+					  unsigned int cnt, unsigned int mask);
+
+
 extern void local_bh_enable_no_softirq(void);
 extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
 
@@ -48,4 +52,7 @@ static inline void local_bh_enable(void)
 	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
 }
 
+extern void local_bh_enable_mask(unsigned long ip, unsigned int cnt,
+				 unsigned int mask);
+
 #endif /* _LINUX_BH_H */
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2cddaaff3bfa..bb841e5d9951 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -61,6 +61,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 struct softirq_nesting {
 	unsigned int disabled_all;
+	unsigned int enabled_vector;
 };
 
 static DEFINE_PER_CPU(struct softirq_nesting, softirq_nesting);
@@ -110,8 +111,10 @@ static bool ksoftirqd_running(unsigned long pending)
  * softirq and whether we just have bh disabled.
  */
 
-void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+static unsigned int local_bh_disable_common(unsigned long ip, unsigned int cnt,
+					    bool per_vec, unsigned int vec_mask)
 {
+	unsigned int enabled;
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned long flags;
 
@@ -127,10 +130,31 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 	 */
 	__preempt_count_add(cnt);
 
-	if (__this_cpu_inc_return(softirq_nesting.disabled_all) == 1) {
-		softirq_enabled_clear_mask(SOFTIRQ_ALL_MASK);
-		trace_softirqs_off(ip);
-	}
+	enabled = local_softirq_enabled();
+
+	/*
+	 * Handle nesting of full/per-vector masking. Per vector masking
+	 * takes effect only if full masking hasn't taken place yet.
+	 */
+	if (!__this_cpu_read(softirq_nesting.disabled_all)) {
+		if (enabled & vec_mask) {
+			softirq_enabled_clear_mask(vec_mask);
+			if (!local_softirq_enabled())
+				trace_softirqs_off(ip);
+		}
+
+		/*
+		 * Save the state prior to full masking. We'll restore it
+		 * on next non-nesting full unmasking in case some vectors
+		 * have been individually disabled before (case of full masking
+		 * nesting inside per-vector masked code).
+		 */
+		if (!per_vec)
+			__this_cpu_write(softirq_nesting.enabled_vector, enabled);
+	}
+
+	if (!per_vec)
+		__this_cpu_inc(softirq_nesting.disabled_all);
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 	raw_local_irq_restore(flags);
@@ -142,15 +166,38 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 #endif
 		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
 	}
+
+	return enabled;
+}
+
+void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+{
+	local_bh_disable_common(ip, cnt, false, SOFTIRQ_ALL_MASK);
 }
 EXPORT_SYMBOL(__local_bh_disable_ip);
 
-static void local_bh_enable_common(unsigned long ip, unsigned int cnt)
+unsigned int local_bh_disable_mask(unsigned long ip, unsigned int cnt,
+				   unsigned int vec_mask)
 {
-	if (__this_cpu_dec_return(softirq_nesting.disabled_all))
-		return;
+	return local_bh_disable_common(ip, cnt, true, vec_mask);
+}
+EXPORT_SYMBOL(local_bh_disable_mask);
 
-	softirq_enabled_set(SOFTIRQ_ALL_MASK);
+static void local_bh_enable_common(unsigned long ip, unsigned int cnt,
+				   bool per_vec, unsigned int mask)
+{
+	/*
+	 * Restore the previous softirq mask state. If this was the last
+	 * full unmasking, restore what was saved.
+	 */
+	if (!per_vec) {
+		if (__this_cpu_dec_return(softirq_nesting.disabled_all))
+			return;
+		else
+			mask = __this_cpu_read(softirq_nesting.enabled_vector);
+	}
+
+	softirq_enabled_set(mask);
 	trace_softirqs_on(ip);
 }
 
@@ -161,7 +208,7 @@ static void __local_bh_enable_no_softirq(unsigned int cnt)
 	if (preempt_count() == cnt)
 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
 
-	local_bh_enable_common(_RET_IP_, cnt);
+	local_bh_enable_common(_RET_IP_, cnt, false, SOFTIRQ_ALL_MASK);
 
 	__preempt_count_sub(cnt);
 }
@@ -177,14 +224,15 @@ void local_bh_enable_no_softirq(void)
 }
 EXPORT_SYMBOL(local_bh_enable_no_softirq);
 
-void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+static void local_bh_enable_ip_mask(unsigned long ip, unsigned int cnt,
+				    bool per_vec, unsigned int mask)
 {
 	WARN_ON_ONCE(in_irq());
 	lockdep_assert_irqs_enabled();
 #ifdef CONFIG_TRACE_IRQFLAGS
 	local_irq_disable();
 #endif
-	local_bh_enable_common(ip, cnt);
+	local_bh_enable_common(ip, cnt, per_vec, mask);
 
 	/*
 	 * Keep preemption disabled until we are done with
@@ -206,8 +254,21 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 #endif
 	preempt_check_resched();
 }
+
+void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+{
+	local_bh_enable_ip_mask(ip, cnt, false, SOFTIRQ_ALL_MASK);
+}
 EXPORT_SYMBOL(__local_bh_enable_ip);
 
+void local_bh_enable_mask(unsigned long ip, unsigned int cnt,
+			  unsigned int mask)
+{
+	local_bh_enable_ip_mask(ip, cnt, true, mask);
+}
+EXPORT_SYMBOL(local_bh_enable_mask);
+
+
 /*
  * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
  * but break the loop if need_resched() is set or after 2 ms.
-- 
2.21.0


  parent reply	other threads:[~2019-02-28 17:14 UTC|newest]

Thread overview: 44+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-02-28 17:12 [PATCH 00/37] softirq: Per vector masking v3 Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 01/37] locking/lockdep: Move valid_state() inside CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 02/37] locking/lockdep: Use expanded masks on find_usage_*() functions Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 03/37] locking/lockdep: Introduce struct lock_usage Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 04/37] locking/lockdep: Convert usage_mask to u64 Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 05/37] locking/lockdep: Introduce lock usage mask iterator Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 06/37] locking/lockdep: Test all incompatible scenario at once in check_irq_usage() Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 07/37] locking/lockdep: Prepare valid_state() to handle plain masks Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 08/37] locking/lockdep: Prepare check_usage_*() " Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 09/37] locking/lockdep: Prepare state_verbose() to handle all softirqs Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 10/37] locking/lockdep: Make mark_lock() fastpath to work with multiple usage at once Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 11/37] locking/lockdep: Save stack trace for each softirq vector involved Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 12/37] locking/lockdep: Report all usages on mark_lock() verbosity mode Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 13/37] softirq: Macrofy softirq vectors Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 14/37] locking/lockdep: Define per vector softirq lock usage states Frederic Weisbecker
2019-04-09 12:03   ` Peter Zijlstra
2019-02-28 17:12 ` [PATCH 15/37] softirq: Pass softirq vector number to lockdep on vector execution Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 16/37] x86: Revert "x86/irq: Demote irq_cpustat_t::__softirq_pending to u16" Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 17/37] arch/softirq: Rename softirq_pending fields to softirq_data Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 18/37] softirq: Normalize softirq_pending naming scheme Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 19/37] softirq: Convert softirq_pending_*() to set/clear mask scheme Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 20/37] softirq: Introduce disabled softirq vectors bits Frederic Weisbecker
2019-03-01 11:29   ` Sebastian Andrzej Siewior
2019-02-28 17:12 ` [PATCH 21/37] softirq: Rename _local_bh_enable() to local_bh_enable_no_softirq() Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 22/37] softirq: Move vectors bits to bottom_half.h Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 23/37] x86: Init softirq enabled field Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 24/37] parisc: " Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 25/37] powerpc: " Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 26/37] softirq: Init softirq enabled field for default irq_stat definition Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 27/37] softirq: Check enabled vectors before processing Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 28/37] softirq: Remove stale comment Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 29/37] softirq: Uninline !CONFIG_TRACE_IRQFLAGS __local_bh_disable_ip() Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 30/37] softirq: Prepare for mixing all/per-vector masking Frederic Weisbecker
2019-02-28 17:12 ` Frederic Weisbecker [this message]
2019-02-28 17:12 ` [PATCH 32/37] locking/lockdep: Remove redundant softirqs on check Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 33/37] locking/lockdep: Update check_flags() according to new layout Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 34/37] locking/lockdep: Branch the new vec-finegrained softirq masking to lockdep Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 35/37] softirq: Allow to soft interrupt vector-specific masked contexts Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 36/37] locking: Introduce spin_[un]lock_bh_mask() Frederic Weisbecker
2019-02-28 17:12 ` [PATCH 37/37] net: Make softirq vector masking finegrained on release_sock() Frederic Weisbecker
2019-02-28 17:33 ` [PATCH 00/37] softirq: Per vector masking v3 Linus Torvalds
2019-03-01  3:45   ` Frederic Weisbecker
2019-03-01 16:51     ` Linus Torvalds
2019-03-08 15:30       ` Frederic Weisbecker

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190228171242.32144-32-frederic@kernel.org \
    --to=frederic@kernel.org \
    --cc=bigeasy@linutronix.de \
    --cc=davem@davemloft.net \
    --cc=fweisbec@gmail.com \
    --cc=joel@joelfernandes.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mchehab+samsung@kernel.org \
    --cc=mingo@kernel.org \
    --cc=paulmck@linux.vnet.ibm.com \
    --cc=peterz@infradead.org \
    --cc=pkondeti@codeaurora.org \
    --cc=tglx@linutronix.de \
    --cc=torvalds@linux-foundation.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).