From: Joel Fernandes <joel@joelfernandes.org>
To: Thomas Gleixner <tglx@linutronix.de>
Cc: LKML <linux-kernel@vger.kernel.org>,
	x86@kernel.org, "Paul E. McKenney" <paulmck@kernel.org>,
	Andy Lutomirski <luto@kernel.org>,
	Alexandre Chartre <alexandre.chartre@oracle.com>,
	Frederic Weisbecker <frederic@kernel.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Sean Christopherson <sean.j.christopherson@intel.com>,
	Masami Hiramatsu <mhiramat@kernel.org>,
	Petr Mladek <pmladek@suse.com>,
	Steven Rostedt <rostedt@goodmis.org>,
	Boris Ostrovsky <boris.ostrovsky@oracle.com>,
	Juergen Gross <jgross@suse.com>, Brian Gerst <brgerst@gmail.com>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	Josh Poimboeuf <jpoimboe@redhat.com>,
	Will Deacon <will@kernel.org>,
	vineethrp@gmail.com
Subject: Re: [patch V4 part 1 25/36] rcu/tree: Mark the idle relevant functions noinstr
Date: Tue, 19 May 2020 15:48:07 -0400
Message-ID: <20200519194807.GA17484@google.com>
In-Reply-To: <20200505134100.575356107@linutronix.de>

Hi Thomas,

On Tue, May 05, 2020 at 03:16:27PM +0200, Thomas Gleixner wrote:
> These functions are invoked from context tracking and other places in the
> low level entry code. Move them into the .noinstr.text section to exclude
> them from instrumentation.
> 
> Mark the places which are safe to invoke traceable functions with
> instr_begin/end() so objtool won't complain.
> 
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> ---
>  kernel/rcu/tree.c        |   85 +++++++++++++++++++++++++----------------------
>  kernel/rcu/tree_plugin.h |    4 +-
>  kernel/rcu/update.c      |    7 +--
>  3 files changed, 52 insertions(+), 44 deletions(-)
[...]

Just a few nits/questions but otherwise LGTM.
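
(For anyone else reading along: my understanding of the pattern this
patch applies is roughly the sketch below. It is illustrative only, and
do_traceable_work() is a made-up placeholder, not an actual kernel
function.)

/* The whole function lives in .noinstr.text: no tracing, no kprobes. */
static noinstr void some_entry_func(void)
{
	/* Must not call anything instrumentable out here. */

	instr_begin();
	/*
	 * Within this window, calls into traceable / probe-able code
	 * are allowed and objtool will not warn about them.
	 */
	do_traceable_work();	/* made-up placeholder */
	instr_end();

	/* Back to instrumentation-free code. */
}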

> @@ -299,7 +294,7 @@ static void rcu_dynticks_eqs_online(void
>   *
>   * No ordering, as we are sampling CPU-local information.
>   */
> -static bool rcu_dynticks_curr_cpu_in_eqs(void)
> +static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
>  {
>  	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
>  
> @@ -566,7 +561,7 @@ EXPORT_SYMBOL_GPL(rcutorture_get_gp_data
>   * the possibility of usermode upcalls having messed up our count
>   * of interrupt nesting level during the prior busy period.
>   */
> -static void rcu_eqs_enter(bool user)
> +static noinstr void rcu_eqs_enter(bool user)
>  {
>  	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
>  
> @@ -581,12 +576,14 @@ static void rcu_eqs_enter(bool user)
>  	}
>  
>  	lockdep_assert_irqs_disabled();
> +	instr_begin();
>  	trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
>  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
>  	rdp = this_cpu_ptr(&rcu_data);
>  	do_nocb_deferred_wakeup(rdp);
>  	rcu_prepare_for_idle();
>  	rcu_preempt_deferred_qs(current);
> +	instr_end();
>  	WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
>  	// RCU is watching here ...
>  	rcu_dynticks_eqs_enter();
> @@ -623,7 +620,7 @@ void rcu_idle_enter(void)
>   * If you add or remove a call to rcu_user_enter(), be sure to test with
>   * CONFIG_RCU_EQS_DEBUG=y.
>   */
> -void rcu_user_enter(void)
> +noinstr void rcu_user_enter(void)
>  {
>  	lockdep_assert_irqs_disabled();
>  	rcu_eqs_enter(true);
> @@ -656,19 +653,23 @@ static __always_inline void rcu_nmi_exit
>  	 * leave it in non-RCU-idle state.
>  	 */
>  	if (rdp->dynticks_nmi_nesting != 1) {
> +		instr_begin();
>  		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
>  				  atomic_read(&rdp->dynticks));
>  		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
>  			   rdp->dynticks_nmi_nesting - 2);
> +		instr_end();

Can instr_end() be moved to just before the WRITE_ONCE()? AFAICS the
plain store itself does not need to be in the instrumentable section.
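
i.e. inside that if () block, something like this (untested sketch,
just to show the ordering I mean):

		instr_begin();
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting,
				  rdp->dynticks_nmi_nesting - 2,
				  atomic_read(&rdp->dynticks));
		instr_end();
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);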

> @@ -737,7 +738,7 @@ void rcu_irq_exit_irqson(void)
>   * allow for the possibility of usermode upcalls messing up our count of
>   * interrupt nesting level during the busy period that is just now starting.
>   */
> -static void rcu_eqs_exit(bool user)
> +static void noinstr rcu_eqs_exit(bool user)
>  {
>  	struct rcu_data *rdp;
>  	long oldval;
> @@ -755,12 +756,14 @@ static void rcu_eqs_exit(bool user)
>  	// RCU is not watching here ...
>  	rcu_dynticks_eqs_exit();
>  	// ... but is watching here.
> +	instr_begin();
>  	rcu_cleanup_after_idle();
>  	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
>  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
>  	WRITE_ONCE(rdp->dynticks_nesting, 1);
>  	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
>  	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
> +	instr_end();

And here I think instr_end() could be moved up to just after the
trace_rcu_dyntick() call?
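
i.e. roughly (untested; assuming the WARN_ON_ONCE()s are fine outside
the instrumentable section now that patch 23 of this series marks WARN
as noinstr-safe):

	instr_begin();
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1,
			  atomic_read(&rdp->dynticks));
	instr_end();
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdp->dynticks_nesting, 1);
	WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
	WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);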

>  }
>  
>  /**
> @@ -791,7 +794,7 @@ void rcu_idle_exit(void)
>   * If you add or remove a call to rcu_user_exit(), be sure to test with
>   * CONFIG_RCU_EQS_DEBUG=y.
>   */
> -void rcu_user_exit(void)
> +void noinstr rcu_user_exit(void)
>  {
>  	rcu_eqs_exit(1);
>  }
> @@ -839,28 +842,35 @@ static __always_inline void rcu_nmi_ente
>  			rcu_cleanup_after_idle();
>  
>  		incby = 1;
> -	} else if (irq && tick_nohz_full_cpu(rdp->cpu) &&
> -		   rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
> -		   READ_ONCE(rdp->rcu_urgent_qs) &&
> -		   !READ_ONCE(rdp->rcu_forced_tick)) {
> -		// We get here only if we had already exited the extended
> -		// quiescent state and this was an interrupt (not an NMI).
> -		// Therefore, (1) RCU is already watching and (2) The fact
> -		// that we are in an interrupt handler and that the rcu_node
> -		// lock is an irq-disabled lock prevents self-deadlock.
> -		// So we can safely recheck under the lock.
> -		raw_spin_lock_rcu_node(rdp->mynode);
> -		if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
> -			// A nohz_full CPU is in the kernel and RCU
> -			// needs a quiescent state.  Turn on the tick!
> -			WRITE_ONCE(rdp->rcu_forced_tick, true);
> -			tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
> +	} else if (irq) {
> +		instr_begin();
> +		if (tick_nohz_full_cpu(rdp->cpu) &&

Not a huge fan of the extra indentation but don't see a better way :)

> +		    rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
> +		    READ_ONCE(rdp->rcu_urgent_qs) &&
> +		    !READ_ONCE(rdp->rcu_forced_tick)) {
> +			// We get here only if we had already exited the
> +			// extended quiescent state and this was an
> +			// interrupt (not an NMI).  Therefore, (1) RCU is
> +			// already watching and (2) The fact that we are in
> +			// an interrupt handler and that the rcu_node lock
> +			// is an irq-disabled lock prevents self-deadlock.
> +			// So we can safely recheck under the lock.
> +			raw_spin_lock_rcu_node(rdp->mynode);
> +			if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
> +				// A nohz_full CPU is in the kernel and RCU
> +				// needs a quiescent state.  Turn on the tick!
> +				WRITE_ONCE(rdp->rcu_forced_tick, true);
> +				tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
> +			}
> +			raw_spin_unlock_rcu_node(rdp->mynode);
>  		}
> -		raw_spin_unlock_rcu_node(rdp->mynode);
> +		instr_end();
>  	}
> +	instr_begin();
>  	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
>  			  rdp->dynticks_nmi_nesting,
>  			  rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
> +	instr_end();
>  	WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
>  		   rdp->dynticks_nmi_nesting + incby);
>  	barrier();
> @@ -869,11 +879,10 @@ static __always_inline void rcu_nmi_ente
>  /**
>   * rcu_nmi_enter - inform RCU of entry to NMI context
>   */
> -void rcu_nmi_enter(void)
> +noinstr void rcu_nmi_enter(void)
>  {
>  	rcu_nmi_enter_common(false);
>  }
> -NOKPROBE_SYMBOL(rcu_nmi_enter);
>  
>  /**
>   * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
> @@ -897,7 +906,7 @@ NOKPROBE_SYMBOL(rcu_nmi_enter);
>   * If you add or remove a call to rcu_irq_enter(), be sure to test with
>   * CONFIG_RCU_EQS_DEBUG=y.
>   */
> -void rcu_irq_enter(void)
> +noinstr void rcu_irq_enter(void)

Just checking: does rcu_irq_enter_irqson() need to be noinstr as well?
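
IIRC that function is just a thin irq-save wrapper, something like
(quoting from memory, may not match the tree exactly):

	void rcu_irq_enter_irqson(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		rcu_irq_enter();
		local_irq_restore(flags);
	}

so if it can be reached from non-instrumentable code, it would
presumably want the same treatment.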

Also, is the noinstr checking (the objtool check mentioned in the
changelog) enforced as part of the regular kernel build, or is the
developer expected to run it manually?

Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>

thanks,

 - Joel


>  {
>  	lockdep_assert_irqs_disabled();
>  	rcu_nmi_enter_common(true);
> @@ -942,7 +951,7 @@ static void rcu_disable_urgency_upon_qs(
>   * if the current CPU is not in its idle loop or is in an interrupt or
>   * NMI handler, return true.
>   */
> -bool notrace rcu_is_watching(void)
> +noinstr bool rcu_is_watching(void)
>  {
>  	bool ret;
>  
> @@ -986,7 +995,7 @@ void rcu_request_urgent_qs_task(struct t
>   * RCU on an offline processor during initial boot, hence the check for
>   * rcu_scheduler_fully_active.
>   */
> -bool rcu_lockdep_current_cpu_online(void)
> +noinstr bool rcu_lockdep_current_cpu_online(void)
>  {
>  	struct rcu_data *rdp;
>  	struct rcu_node *rnp;
> @@ -994,12 +1003,12 @@ bool rcu_lockdep_current_cpu_online(void
>  
>  	if (in_nmi() || !rcu_scheduler_fully_active)
>  		return true;
> -	preempt_disable();
> +	preempt_disable_notrace();
>  	rdp = this_cpu_ptr(&rcu_data);
>  	rnp = rdp->mynode;
>  	if (rdp->grpmask & rcu_rnp_online_cpus(rnp))
>  		ret = true;
> -	preempt_enable();
> +	preempt_enable_notrace();
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
> --- a/kernel/rcu/tree_plugin.h
> +++ b/kernel/rcu/tree_plugin.h
> @@ -2553,7 +2553,7 @@ static void rcu_bind_gp_kthread(void)
>  }
>  
>  /* Record the current task on dyntick-idle entry. */
> -static void rcu_dynticks_task_enter(void)
> +static void noinstr rcu_dynticks_task_enter(void)
>  {
>  #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
>  	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
> @@ -2561,7 +2561,7 @@ static void rcu_dynticks_task_enter(void
>  }
>  
>  /* Record no current task on dyntick-idle exit. */
> -static void rcu_dynticks_task_exit(void)
> +static void noinstr rcu_dynticks_task_exit(void)
>  {
>  #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
>  	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
> --- a/kernel/rcu/update.c
> +++ b/kernel/rcu/update.c
> @@ -95,7 +95,7 @@ module_param(rcu_normal_after_boot, int,
>   * Similarly, we avoid claiming an RCU read lock held if the current
>   * CPU is offline.
>   */
> -static bool rcu_read_lock_held_common(bool *ret)
> +static noinstr bool rcu_read_lock_held_common(bool *ret)
>  {
>  	if (!debug_lockdep_rcu_enabled()) {
>  		*ret = 1;
> @@ -112,7 +112,7 @@ static bool rcu_read_lock_held_common(bo
>  	return false;
>  }
>  
> -int rcu_read_lock_sched_held(void)
> +noinstr int rcu_read_lock_sched_held(void)
>  {
>  	bool ret;
>  
> @@ -270,13 +270,12 @@ struct lockdep_map rcu_callback_map =
>  	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
>  EXPORT_SYMBOL_GPL(rcu_callback_map);
>  
> -int notrace debug_lockdep_rcu_enabled(void)
> +noinstr int notrace debug_lockdep_rcu_enabled(void)
>  {
>  	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
>  	       current->lockdep_recursion == 0;
>  }
>  EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
> -NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
>  
>  /**
>   * rcu_read_lock_held() - might we be in RCU read-side critical section?
> 

Thread overview: 178+ messages
2020-05-05 13:16 [patch V4 part 1 00/36] x86/entry: Entry/exception code rework, preparatory patches Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 01/36] rcu: Add comments marking transitions between RCU watching and not Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 02/36] x86/hw_breakpoint: Prevent data breakpoints on cpu_entry_area Thomas Gleixner
2020-05-06  8:14   ` Borislav Petkov
2020-05-06 12:11   ` Alexandre Chartre
2020-05-09  9:00   ` Lai Jiangshan
2020-05-09  9:23   ` Lai Jiangshan
2020-05-09 19:08     ` Andy Lutomirski
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Andy Lutomirski
2020-05-05 13:16 ` [patch V4 part 1 03/36] sched: Clean up scheduler_ipi() Thomas Gleixner
2020-05-06  8:32   ` Thomas Gleixner
2020-05-06  8:40   ` Borislav Petkov
2020-05-06  9:12     ` Thomas Gleixner
2020-05-06 10:02       ` Borislav Petkov
2020-05-06 12:37   ` Alexandre Chartre
2020-05-06 15:03     ` Thomas Gleixner
2020-05-06 15:33     ` Peter Zijlstra
2020-05-06 18:28       ` Paul E. McKenney
2020-05-06 18:37         ` Peter Zijlstra
2020-05-06 18:46           ` Paul E. McKenney
2020-05-12 15:13   ` [tip: sched/core] " tip-bot2 for Peter Zijlstra (Intel)
2020-05-05 13:16 ` [patch V4 part 1 04/36] sched: Make scheduler_ipi inline Thomas Gleixner
2020-05-06 12:42   ` Alexandre Chartre
2020-05-12 15:13   ` [tip: sched/core] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 05/36] x86/entry: Flip _TIF_SIGPENDING and _TIF_NOTIFY_RESUME handling Thomas Gleixner
2020-05-06 11:53   ` Miroslav Benes
2020-05-06 12:06     ` Thomas Gleixner
2020-05-06 15:35     ` Peter Zijlstra
2020-05-06 13:06   ` Alexandre Chartre
2020-05-06 16:26   ` Borislav Petkov
2020-05-07 17:35   ` Andy Lutomirski
2020-05-13 20:56   ` Mathieu Desnoyers
2020-05-13 21:10     ` Steven Rostedt
2020-05-13 22:48       ` Mathieu Desnoyers
2020-05-14  0:12       ` Thomas Gleixner
2020-05-14  0:37         ` Steven Rostedt
2020-05-14  0:49           ` Thomas Gleixner
2020-05-14  1:22         ` Andy Lutomirski
2020-05-14  2:51         ` Mathieu Desnoyers
2020-05-14  9:19           ` Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 06/36] compiler: Simple READ/WRITE_ONCE() implementations Thomas Gleixner
2020-05-06 13:11   ` Alexandre Chartre
2020-05-06 13:33   ` Will Deacon
2020-05-06 15:36     ` Peter Zijlstra
2020-05-06 16:33   ` Borislav Petkov
2020-05-05 13:16 ` [patch V4 part 1 07/36] locking/atomics: Flip fallbacks and instrumentation Thomas Gleixner
2020-05-05 16:04   ` Mark Rutland
2020-05-07 23:41   ` Steven Rostedt
2020-05-08  8:40     ` Peter Zijlstra
2020-05-12 14:36   ` [tip: locking/kcsan] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 08/36] x86/doublefault: Remove memmove() call Thomas Gleixner
2020-05-06 13:47   ` Alexandre Chartre
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 09/36] x86/entry/64: Avoid pointless code when CONTEXT_TRACKING=n Thomas Gleixner
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 10/36] x86/entry: Remove the unused LOCKDEP_SYSEXIT cruft Thomas Gleixner
2020-05-06 13:52   ` Alexandre Chartre
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 11/36] x86/kvm: Handle async page faults directly through do_page_fault() Thomas Gleixner
2020-05-06  7:00   ` Paolo Bonzini
2020-05-06 14:05   ` Alexandre Chartre
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Andy Lutomirski
2020-05-05 13:16 ` [patch V4 part 1 12/36] x86/kvm: Sanitize kvm_async_pf_task_wait() Thomas Gleixner
2020-05-05 17:54   ` Paul E. McKenney
2020-05-05 21:50     ` Thomas Gleixner
2020-05-06  7:00   ` Paolo Bonzini
2020-05-06 12:53     ` Steven Rostedt
2020-05-06 15:13   ` Alexandre Chartre
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 13/36] x86/kvm: Restrict ASYNC_PF to user space Thomas Gleixner
2020-05-06  7:00   ` Paolo Bonzini
2020-05-06 15:29   ` Alexandre Chartre
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 14/36] x86/entry: Get rid of ist_begin/end_non_atomic() Thomas Gleixner
2020-05-06 15:34   ` Alexandre Chartre
2020-05-07 17:46   ` Andy Lutomirski
2020-05-13 22:57   ` Mathieu Desnoyers
2020-05-14  0:13     ` Steven Rostedt
2020-05-15  9:34     ` Thomas Gleixner
2020-05-15 13:11       ` Mathieu Desnoyers
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 15/36] kprobes: Lock kprobe_mutex while showing kprobe_blacklist Thomas Gleixner
2020-05-06 15:38   ` Alexandre Chartre
2020-05-12 15:18   ` [tip: core/kprobes] " tip-bot2 for Masami Hiramatsu
2020-05-05 13:16 ` [patch V4 part 1 16/36] kprobes: Support __kprobes blacklist in modules Thomas Gleixner
2020-05-06 15:47   ` Alexandre Chartre
2020-05-12 15:18   ` [tip: core/kprobes] " tip-bot2 for Masami Hiramatsu
2020-05-05 13:16 ` [patch V4 part 1 17/36] kprobes: Support NOKPROBE_SYMBOL() " Thomas Gleixner
2020-05-06 15:54   ` Alexandre Chartre
2020-05-12 15:18   ` [tip: core/kprobes] " tip-bot2 for Masami Hiramatsu
2020-05-05 13:16 ` [patch V4 part 1 18/36] samples/kprobes: Add __kprobes and NOKPROBE_SYMBOL() for handlers Thomas Gleixner
2020-05-06 15:57   ` Alexandre Chartre
2020-05-12 15:18   ` [tip: core/kprobes] " tip-bot2 for Masami Hiramatsu
2020-05-05 13:16 ` [patch V4 part 1 19/36] x86/entry: Exclude low level entry code from sanitizing Thomas Gleixner
2020-05-05 20:39   ` Brian Gerst
2020-05-06 15:42     ` Peter Zijlstra
2020-05-06 16:03   ` Alexandre Chartre
2020-05-13 22:58     ` Mathieu Desnoyers
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 20/36] vmlinux.lds.h: Create section for protection against instrumentation Thomas Gleixner
2020-05-06 16:08   ` Sean Christopherson
2020-05-06 16:28     ` Peter Zijlstra
2020-05-06 16:57       ` Thomas Gleixner
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 21/36] kprobes: Prevent probes in .noinstr.text section Thomas Gleixner
2020-05-08  6:30   ` Masami Hiramatsu
2020-05-19 19:52   ` [tip: core/kprobes] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 22/36] tracing: Provide lockdep less trace_hardirqs_on/off() variants Thomas Gleixner
2020-05-07 17:55   ` Andy Lutomirski
2020-05-07 18:52     ` Thomas Gleixner
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 23/36] bug: Annotate WARN/BUG/stackfail as noinstr safe Thomas Gleixner
2020-05-13 23:12   ` Mathieu Desnoyers
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Thomas Gleixner
2020-05-05 13:16 ` [patch V4 part 1 24/36] lockdep: Prepare for noinstr sections Thomas Gleixner
2020-05-08  0:23   ` Steven Rostedt
2020-05-08  8:44     ` Peter Zijlstra
2020-05-19 19:58   ` [tip: x86/entry] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 25/36] rcu/tree: Mark the idle relevant functions noinstr Thomas Gleixner
2020-05-05 18:07   ` Paul E. McKenney
2020-05-19 19:48   ` Joel Fernandes [this message]
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Thomas Gleixner
2020-09-28 22:22     ` Kim Phillips
2020-09-28 22:55       ` Paul E. McKenney
2020-09-29  7:25       ` Peter Zijlstra
2020-09-29 11:25     ` Peter Zijlstra
2020-09-29 14:34       ` Steven Rostedt
2020-09-29 14:52         ` Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 26/36] printk: Prepare for nested printk_nmi_enter() Thomas Gleixner
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Petr Mladek
2020-05-05 13:16 ` [patch V4 part 1 27/36] arm64: Prepare arch_nmi_enter() for recursion Thomas Gleixner
2020-05-13 23:28   ` Mathieu Desnoyers
2020-05-15 14:04     ` Frederic Weisbecker
2020-05-15 15:45       ` Will Deacon
2020-05-15 16:01         ` Mathieu Desnoyers
2020-05-15 21:29   ` Thomas Gleixner
2020-05-15 21:31     ` Frederic Weisbecker
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Frederic Weisbecker
2020-05-05 13:16 ` [patch V4 part 1 28/36] hardirq/nmi: Allow nested nmi_enter() Thomas Gleixner
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 29/36] x86/mce: Send #MC singal from task work Thomas Gleixner
2020-05-07 18:02   ` Andy Lutomirski
2020-05-08  8:48     ` Peter Zijlstra
2020-05-08 21:30       ` Andy Lutomirski
2020-05-14 14:16     ` Borislav Petkov
2020-05-13 23:42   ` Mathieu Desnoyers
2020-05-14 17:38     ` Thomas Gleixner
2020-05-14 17:42       ` Mathieu Desnoyers
2020-05-14 14:17   ` Borislav Petkov
2020-05-14 16:03     ` Mathieu Desnoyers
2020-05-14 16:19       ` Andy Lutomirski
2020-05-14 16:39       ` Borislav Petkov
2020-05-14 17:05         ` Mathieu Desnoyers
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 30/36] lockdep: Always inline lockdep_{off,on}() Thomas Gleixner
2020-05-13 23:46   ` Mathieu Desnoyers
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 31/36] printk: Disallow instrumenting print_nmi_enter() Thomas Gleixner
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 32/36] sh/ftrace: Move arch_ftrace_nmi_{enter,exit} into nmi exception Thomas Gleixner
2020-05-08  0:34   ` Steven Rostedt
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 33/36] x86,tracing: Robustify ftrace_nmi_enter() Thomas Gleixner
2020-05-08  6:19   ` Masami Hiramatsu
2020-05-05 13:16 ` [patch V4 part 1 34/36] sched,rcu,tracing: Avoid tracing before in_nmi() is correct Thomas Gleixner
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 35/36] x86: Replace ist_enter() with nmi_enter() Thomas Gleixner
2020-05-07 18:04   ` Andy Lutomirski
2020-05-07 18:17     ` Mathieu Desnoyers
2020-05-08  8:50       ` Peter Zijlstra
2020-05-08 17:12         ` Josh Poimboeuf
2020-05-14  0:12   ` Mathieu Desnoyers
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Peter Zijlstra
2020-05-05 13:16 ` [patch V4 part 1 36/36] rcu: Make RCU IRQ enter/exit functions rely on in_nmi() Thomas Gleixner
2020-05-05 18:13   ` Paul E. McKenney
2020-05-06 17:09   ` Alexandre Chartre
2020-05-19 19:52   ` [tip: core/rcu] " tip-bot2 for Paul E. McKenney
2020-05-07 18:05 ` [patch V4 part 1 00/36] x86/entry: Entry/exception code rework, preparatory patches Andy Lutomirski
