From: Peter Zijlstra <peterz@infradead.org>
To: gor@linux.ibm.com, jpoimboe@redhat.com, jikos@kernel.org,
	mbenes@suse.cz, pmladek@suse.com, mingo@kernel.org
Cc: linux-kernel@vger.kernel.org, peterz@infradead.org,
	joe.lawrence@redhat.com, fweisbec@gmail.com, tglx@linutronix.de,
	hca@linux.ibm.com, svens@linux.ibm.com, sumanthk@linux.ibm.com,
	live-patching@vger.kernel.org, paulmck@kernel.org,
	rostedt@goodmis.org, x86@kernel.org
Subject: [RFC][PATCH v2 06/11] context_tracking: Prefix user_{enter,exit}*()
Date: Wed, 29 Sep 2021 17:17:29 +0200
Message-ID: <20210929152428.887923187@infradead.org>
In-Reply-To: <20210929151723.162004989@infradead.org>

Put a ct_ prefix on the context_tracking user enter/exit functions for
better namespacing / greppability: user_enter(), user_exit(),
user_enter_irqoff() and user_exit_irqoff() become ct_user_enter(),
ct_user_exit(), ct_user_enter_irqoff() and ct_user_exit_irqoff().
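
Purely as illustration (not part of this patch), the change at each
call site is mechanical. A sketch modelled on the kernel/entry/common.c
hunk below, using a hypothetical helper name:

  static __always_inline void sketch_enter_from_user_mode(struct pt_regs *regs)
  {
  	lockdep_hardirqs_off(CALLER_ADDR0);
  	CT_WARN_ON(ct_state() != CONTEXT_USER);
  	ct_user_exit_irqoff();		/* was: user_exit_irqoff() */
  	trace_hardirqs_off_finish();
  }

The common ct_ prefix also makes every user/kernel transition point
trivially greppable, which is the motivation stated above.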

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/Kconfig                            |    6 +++---
 arch/arm64/kernel/entry-common.c        |    4 ++--
 arch/mips/kernel/ptrace.c               |    6 +++---
 arch/mips/kernel/signal.c               |    4 ++--
 arch/powerpc/include/asm/interrupt.h    |    2 +-
 arch/powerpc/kernel/interrupt.c         |   10 +++++-----
 arch/sparc/kernel/ptrace_64.c           |    6 +++---
 arch/sparc/kernel/signal_64.c           |    4 ++--
 include/linux/context_tracking.h        |   16 ++++++++--------
 include/linux/context_tracking_state.h  |    2 +-
 include/trace/events/context_tracking.h |    8 ++++----
 kernel/context_tracking.c               |    6 +++---
 kernel/entry/common.c                   |    4 ++--
 kernel/livepatch/transition.c           |    2 +-
 kernel/sched/core.c                     |    4 ++--
 kernel/trace/ftrace.c                   |    4 ++--
 16 files changed, 44 insertions(+), 44 deletions(-)

--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -760,7 +760,7 @@ config HAVE_CONTEXT_TRACKING
 	help
 	  Provide kernel/user boundaries probes necessary for subsystems
 	  that need it, such as userspace RCU extended quiescent state.
-	  Syscalls need to be wrapped inside user_exit()-user_enter(), either
+	  Syscalls need to be wrapped inside ct_user_exit()-ct_user_enter(), either
 	  optimized behind static key or through the slow path using TIF_NOHZ
 	  flag. Exceptions handlers must be wrapped as well. Irqs are already
 	  protected inside rcu_irq_enter/rcu_irq_exit() but preemption or signal
@@ -774,7 +774,7 @@ config HAVE_CONTEXT_TRACKING_OFFSTACK
 	  preempt_schedule_irq() can't be called in a preemptible section
 	  while context tracking is CONTEXT_USER. This feature reflects a sane
 	  entry implementation where the following requirements are met on
-	  critical entry code, ie: before user_exit() or after user_enter():
+	  critical entry code, ie: before ct_user_exit() or after ct_user_enter():
 
 	  - Critical entry code isn't preemptible (or better yet:
 	    not interruptible).
@@ -787,7 +787,7 @@ config HAVE_TIF_NOHZ
 	bool
 	help
 	  Arch relies on TIF_NOHZ and syscall slow path to implement context
-	  tracking calls to user_enter()/user_exit().
+	  tracking calls to ct_user_enter()/ct_user_exit().
 
 config HAVE_VIRT_CPU_ACCOUNTING
 	bool
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -100,7 +100,7 @@ static __always_inline void __enter_from
 {
 	lockdep_hardirqs_off(CALLER_ADDR0);
 	CT_WARN_ON(ct_state() != CONTEXT_USER);
-	user_exit_irqoff();
+	ct_user_exit_irqoff();
 	trace_hardirqs_off_finish();
 }
 
@@ -118,7 +118,7 @@ static __always_inline void __exit_to_us
 {
 	trace_hardirqs_on_prepare();
 	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	user_enter_irqoff();
+	ct_user_enter_irqoff();
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -1312,7 +1312,7 @@ long arch_ptrace(struct task_struct *chi
  */
 asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
 {
-	user_exit();
+	ct_user_exit();
 
 	current_thread_info()->syscall = syscall;
 
@@ -1368,7 +1368,7 @@ asmlinkage void syscall_trace_leave(stru
 	 * or do_notify_resume(), in which case we can be in RCU
 	 * user mode.
 	 */
-	user_exit();
+	ct_user_exit();
 
 	audit_syscall_exit(regs);
 
@@ -1378,5 +1378,5 @@ asmlinkage void syscall_trace_leave(stru
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		tracehook_report_syscall_exit(regs, 0);
 
-	user_enter();
+	ct_user_enter();
 }
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -897,7 +897,7 @@ asmlinkage void do_notify_resume(struct
 {
 	local_irq_enable();
 
-	user_exit();
+	ct_user_exit();
 
 	if (thread_info_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
@@ -911,7 +911,7 @@ asmlinkage void do_notify_resume(struct
 		rseq_handle_notify_resume(NULL, regs);
 	}
 
-	user_enter();
+	ct_user_enter();
 }
 
 #if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -154,7 +154,7 @@ static inline void interrupt_enter_prepa
 
 	if (user_mode(regs)) {
 		CT_WARN_ON(ct_state() != CONTEXT_USER);
-		user_exit_irqoff();
+		ct_user_exit_irqoff();
 
 		account_cpu_user_entry();
 		account_stolen_time();
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -91,7 +91,7 @@ notrace long system_call_exception(long
 	trace_hardirqs_off(); /* finish reconciling */
 
 	CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
-	user_exit_irqoff();
+	ct_user_exit_irqoff();
 
 	BUG_ON(regs_is_unrecoverable(regs));
 	BUG_ON(!(regs->msr & MSR_PR));
@@ -388,9 +388,9 @@ interrupt_exit_user_prepare_main(unsigne
 
 	check_return_regs_valid(regs);
 
-	user_enter_irqoff();
+	ct_user_enter_irqoff();
 	if (!prep_irq_for_enabled_exit(true)) {
-		user_exit_irqoff();
+		ct_user_exit_irqoff();
 		local_irq_enable();
 		local_irq_disable();
 		goto again;
@@ -489,7 +489,7 @@ notrace unsigned long syscall_exit_resta
 #endif
 
 	trace_hardirqs_off();
-	user_exit_irqoff();
+	ct_user_exit_irqoff();
 	account_cpu_user_entry();
 
 	BUG_ON(!user_mode(regs));
@@ -638,7 +638,7 @@ notrace unsigned long interrupt_exit_use
 #endif
 
 	trace_hardirqs_off();
-	user_exit_irqoff();
+	ct_user_exit_irqoff();
 	account_cpu_user_entry();
 
 	BUG_ON(!user_mode(regs));
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -1092,7 +1092,7 @@ asmlinkage int syscall_trace_enter(struc
 	secure_computing_strict(regs->u_regs[UREG_G1]);
 
 	if (test_thread_flag(TIF_NOHZ))
-		user_exit();
+		ct_user_exit();
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE))
 		ret = tracehook_report_syscall_entry(regs);
@@ -1110,7 +1110,7 @@ asmlinkage int syscall_trace_enter(struc
 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 {
 	if (test_thread_flag(TIF_NOHZ))
-		user_exit();
+		ct_user_exit();
 
 	audit_syscall_exit(regs);
 
@@ -1121,7 +1121,7 @@ asmlinkage void syscall_trace_leave(stru
 		tracehook_report_syscall_exit(regs, 0);
 
 	if (test_thread_flag(TIF_NOHZ))
-		user_enter();
+		ct_user_enter();
 }
 
 /**
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -546,14 +546,14 @@ static void do_signal(struct pt_regs *re
 
 void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
 {
-	user_exit();
+	ct_user_exit();
 	if (thread_info_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
 	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 		do_signal(regs, orig_i0);
 	if (thread_info_flags & _TIF_NOTIFY_RESUME)
 		tracehook_notify_resume(regs);
-	user_enter();
+	ct_user_enter();
 }
 
 /*
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -22,26 +22,26 @@ extern void context_tracking_exit(enum c
 extern void context_tracking_user_enter(void);
 extern void context_tracking_user_exit(void);
 
-static inline void user_enter(void)
+static inline void ct_user_enter(void)
 {
 	if (context_tracking_enabled())
 		context_tracking_enter(CONTEXT_USER);
 
 }
-static inline void user_exit(void)
+static inline void ct_user_exit(void)
 {
 	if (context_tracking_enabled())
 		context_tracking_exit(CONTEXT_USER);
 }
 
 /* Called with interrupts disabled.  */
-static __always_inline void user_enter_irqoff(void)
+static __always_inline void ct_user_enter_irqoff(void)
 {
 	if (context_tracking_enabled())
 		__context_tracking_enter(CONTEXT_USER);
 
 }
-static __always_inline void user_exit_irqoff(void)
+static __always_inline void ct_user_exit_irqoff(void)
 {
 	if (context_tracking_enabled())
 		__context_tracking_exit(CONTEXT_USER);
@@ -98,10 +98,10 @@ static __always_inline enum ctx_state ct
 		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
 }
 #else
-static inline void user_enter(void) { }
-static inline void user_exit(void) { }
-static inline void user_enter_irqoff(void) { }
-static inline void user_exit_irqoff(void) { }
+static inline void ct_user_enter(void) { }
+static inline void ct_user_exit(void) { }
+static inline void ct_user_enter_irqoff(void) { }
+static inline void ct_user_exit_irqoff(void) { }
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -9,7 +9,7 @@ struct context_tracking {
 	/*
 	 * When active is false, probes are unset in order
 	 * to minimize overhead: TIF flags are cleared
-	 * and calls to user_enter/exit are ignored. This
+	 * and calls to ct_user_enter/exit are ignored. This
 	 * may be further optimized using static keys.
 	 */
 	bool active;
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -73,7 +73,7 @@ void noinstr __context_tracking_enter(en
 			 * At this stage, only low level arch entry code remains and
 			 * then we'll run in userspace. We can assume there won't be
 			 * any RCU read-side critical section until the next call to
-			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
+			 * ct_user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
 			 * on the tick.
 			 */
 			if (state == CONTEXT_USER) {
@@ -127,7 +127,7 @@ EXPORT_SYMBOL_GPL(context_tracking_enter
 
 void context_tracking_user_enter(void)
 {
-	user_enter();
+	ct_user_enter();
 }
 NOKPROBE_SYMBOL(context_tracking_user_enter);
 
@@ -184,7 +184,7 @@ EXPORT_SYMBOL_GPL(context_tracking_exit)
 
 void context_tracking_user_exit(void)
 {
-	user_exit();
+	ct_user_exit();
 }
 NOKPROBE_SYMBOL(context_tracking_user_exit);
 
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -19,7 +19,7 @@ static __always_inline void __enter_from
 	lockdep_hardirqs_off(CALLER_ADDR0);
 
 	CT_WARN_ON(ct_state() != CONTEXT_USER);
-	user_exit_irqoff();
+	ct_user_exit_irqoff();
 
 	instrumentation_begin();
 	trace_hardirqs_off_finish();
@@ -127,7 +127,7 @@ static __always_inline void __exit_to_us
 	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
 	instrumentation_end();
 
-	user_enter_irqoff();
+	ct_user_enter_irqoff();
 	arch_exit_to_user_mode();
 	lockdep_hardirqs_on(CALLER_ADDR0);
 }
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -51,7 +51,7 @@ static void klp_sync(struct work_struct
 
 /*
  * We allow to patch also functions where RCU is not watching,
- * e.g. before user_exit(). We can not rely on the RCU infrastructure
+ * e.g. before ct_user_exit(). We can not rely on the RCU infrastructure
  * to do the synchronization. Instead hard force the sched synchronization.
  *
  * This approach allows to use RCU functions for manipulating func_stack
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6444,7 +6444,7 @@ EXPORT_STATIC_CALL_TRAMP(preempt_schedul
  * recursion and tracing preempt enabling caused by the tracing
  * infrastructure itself. But as tracing can happen in areas coming
  * from userspace or just about to enter userspace, a preempt enable
- * can occur before user_exit() is called. This will cause the scheduler
+ * can occur before ct_user_exit() is called. This will cause the scheduler
  * to be called when the system is still in usermode.
  *
  * To prevent this, the preempt_enable_notrace will use this function
@@ -6475,7 +6475,7 @@ asmlinkage __visible void __sched notrac
 		preempt_disable_notrace();
 		preempt_latency_start(1);
 		/*
-		 * Needs preempt disabled in case user_exit() is traced
+		 * Needs preempt disabled in case ct_user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
 		 * an infinite recursion.
 		 */
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2996,7 +2996,7 @@ int ftrace_shutdown(struct ftrace_ops *o
 		 * We need to do a hard force of sched synchronization.
 		 * This is because we use preempt_disable() to do RCU, but
 		 * the function tracers can be called where RCU is not watching
-		 * (like before user_exit()). We can not rely on the RCU
+		 * (like before ct_user_exit()). We can not rely on the RCU
 		 * infrastructure to do the synchronization, thus we must do it
 		 * ourselves.
 		 */
@@ -5981,7 +5981,7 @@ ftrace_graph_release(struct inode *inode
 		 * We need to do a hard force of sched synchronization.
 		 * This is because we use preempt_disable() to do RCU, but
 		 * the function tracers can be called where RCU is not watching
-		 * (like before user_exit()). We can not rely on the RCU
+		 * (like before ct_user_exit()). We can not rely on the RCU
 		 * infrastructure to do the synchronization, thus we must do it
 		 * ourselves.
 		 */


