* [RFC PATCH 1/1] smp: Change function signatures to use call_single_data_t
From: Leonardo Bras @ 2023-08-31  6:31 UTC
  To: Steven Rostedt, Masami Hiramatsu, Peter Zijlstra, Josh Poimboeuf,
	Guo Ren, Valentin Schneider, Leonardo Bras, Paul E. McKenney,
	Juergen Gross, Yury Norov, Imran Khan
  Cc: linux-kernel, linux-trace-kernel

call_single_data_t is a size-aligned typedef of struct __call_single_data.

This alignment is desirable so that smp_call_function*() avoids bouncing
an extra cacheline in case of an unaligned csd, which would hurt
performance.

Since the removal of struct request->csd in commit 660e802c76c8
("blk-mq: use percpu csd to remote complete instead of per-rq csd"),
there are no remaining users of smp_call_function*() with an unaligned csd.

Change every 'struct __call_single_data' function parameter to
'call_single_data_t', so that we get warnings if any new code tries to
introduce an smp_call_function*() call with an unaligned csd.

Signed-off-by: Leonardo Bras <leobras@redhat.com>
---
 include/linux/smp.h        |  2 +-
 include/trace/events/csd.h |  8 ++++----
 kernel/smp.c               | 26 +++++++++++++-------------
 kernel/up.c                |  2 +-
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 91ea4a67f8ca..e87520dc2959 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 			   void *info, bool wait, const struct cpumask *mask);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
 /*
  * Cpus stopping functions in panic. All have default weak definitions.
diff --git a/include/trace/events/csd.h b/include/trace/events/csd.h
index 67e9d01f80c2..58cc83b99c34 100644
--- a/include/trace/events/csd.h
+++ b/include/trace/events/csd.h
@@ -12,7 +12,7 @@ TRACE_EVENT(csd_queue_cpu,
 	TP_PROTO(const unsigned int cpu,
 		unsigned long callsite,
 		smp_call_func_t func,
-		struct __call_single_data *csd),
+		call_single_data_t *csd),
 
 	TP_ARGS(cpu, callsite, func, csd),
 
@@ -39,7 +39,7 @@ TRACE_EVENT(csd_queue_cpu,
  */
 DECLARE_EVENT_CLASS(csd_function,
 
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 
 	TP_ARGS(func, csd),
 
@@ -57,12 +57,12 @@ DECLARE_EVENT_CLASS(csd_function,
 );
 
 DEFINE_EVENT(csd_function, csd_function_entry,
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 	TP_ARGS(func, csd)
 );
 
 DEFINE_EVENT(csd_function, csd_function_exit,
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 	TP_ARGS(func, csd)
 );
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 8455a53465af..8c714583786b 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -127,7 +127,7 @@ send_call_function_ipi_mask(struct cpumask *mask)
 }
 
 static __always_inline void
-csd_do_func(smp_call_func_t func, void *info, struct __call_single_data *csd)
+csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
 {
 	trace_csd_function_entry(func, csd);
 	func(info);
@@ -174,7 +174,7 @@ module_param(csd_lock_timeout, ulong, 0444);
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
 
 /* Record current CSD work for current CPU, NULL to erase. */
-static void __csd_lock_record(struct __call_single_data *csd)
+static void __csd_lock_record(call_single_data_t *csd)
 {
 	if (!csd) {
 		smp_mb(); /* NULL cur_csd after unlock. */
@@ -189,13 +189,13 @@ static void __csd_lock_record(struct __call_single_data *csd)
 		  /* Or before unlock, as the case may be. */
 }
 
-static __always_inline void csd_lock_record(struct __call_single_data *csd)
+static __always_inline void csd_lock_record(call_single_data_t *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled))
 		__csd_lock_record(csd);
 }
 
-static int csd_lock_wait_getcpu(struct __call_single_data *csd)
+static int csd_lock_wait_getcpu(call_single_data_t *csd)
 {
 	unsigned int csd_type;
 
@@ -210,7 +210,7 @@ static int csd_lock_wait_getcpu(struct __call_single_data *csd)
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
  */
-static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
 	int cpu = -1;
 	int cpux;
@@ -276,7 +276,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static void __csd_lock_wait(struct __call_single_data *csd)
+static void __csd_lock_wait(call_single_data_t *csd)
 {
 	int bug_id = 0;
 	u64 ts0, ts1;
@@ -290,7 +290,7 @@ static void __csd_lock_wait(struct __call_single_data *csd)
 	smp_acquire__after_ctrl_dep();
 }
 
-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled)) {
 		__csd_lock_wait(csd);
@@ -300,17 +300,17 @@ static __always_inline void csd_lock_wait(struct __call_single_data *csd)
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #else
-static void csd_lock_record(struct __call_single_data *csd)
+static void csd_lock_record(call_single_data_t *csd)
 {
 }
 
-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #endif
 
-static __always_inline void csd_lock(struct __call_single_data *csd)
+static __always_inline void csd_lock(call_single_data_t *csd)
 {
 	csd_lock_wait(csd);
 	csd->node.u_flags |= CSD_FLAG_LOCK;
@@ -323,7 +323,7 @@ static __always_inline void csd_lock(struct __call_single_data *csd)
 	smp_wmb();
 }
 
-static __always_inline void csd_unlock(struct __call_single_data *csd)
+static __always_inline void csd_unlock(call_single_data_t *csd)
 {
 	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
 
@@ -376,7 +376,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static int generic_exec_single(int cpu, struct __call_single_data *csd)
+static int generic_exec_single(int cpu, call_single_data_t *csd)
 {
 	if (cpu == smp_processor_id()) {
 		smp_call_func_t func = csd->func;
@@ -667,7 +667,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  *
  * Return: %0 on success or negative errno value on error
  */
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	int err = 0;
 
diff --git a/kernel/up.c b/kernel/up.c
index a38b8b095251..df50828cc2f0 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	unsigned long flags;
 
-- 
2.42.0
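
For illustration, a minimal caller under the new prototype might look
like the sketch below. This is not part of the patch: my_csd,
my_ipi_func and my_kick_cpu are invented names, and an SMP build with
per-CPU csd storage (as in the blk-mq commit cited above) is assumed.

#include <linux/smp.h>
#include <linux/percpu.h>

/*
 * Per-CPU csd storage; the call_single_data_t typedef provides the
 * size-based alignment discussed in the changelog.
 */
static DEFINE_PER_CPU(call_single_data_t, my_csd);

static void my_ipi_func(void *info)
{
	/* Runs on the CPU targeted by the async call. */
}

static int my_kick_cpu(int cpu)
{
	call_single_data_t *csd = &per_cpu(my_csd, cpu);

	csd->func = my_ipi_func;
	csd->info = NULL;

	/*
	 * The parameter type is now call_single_data_t *, matching the
	 * aligned storage above; returns 0 on success or a negative
	 * errno, per the kerneldoc in the diff.
	 */
	return smp_call_function_single_async(cpu, csd);
}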



* Re: [RFC PATCH 1/1] smp: Change function signatures to use call_single_data_t
From: Guo Ren @ 2023-08-31  6:58 UTC
  To: Leonardo Bras
  Cc: Steven Rostedt, Masami Hiramatsu, Peter Zijlstra, Josh Poimboeuf,
	Valentin Schneider, Paul E. McKenney, Juergen Gross, Yury Norov,
	Imran Khan, linux-kernel, linux-trace-kernel

On Thu, Aug 31, 2023 at 2:31 PM Leonardo Bras <leobras@redhat.com> wrote:
>
> call_single_data_t is a size-aligned typedef of struct __call_single_data.
>
> This alignment is desirable so that smp_call_function*() avoids bouncing
> an extra cacheline in case of an unaligned csd, which would hurt
> performance.
>
> Since the removal of struct request->csd in commit 660e802c76c8
> ("blk-mq: use percpu csd to remote complete instead of per-rq csd"),
> there are no remaining users of smp_call_function*() with an unaligned csd.
>
> Change every 'struct __call_single_data' function parameter to
> 'call_single_data_t', so that we get warnings if any new code tries to
> introduce an smp_call_function*() call with an unaligned csd.
I agree with preventing direct __call_single_data usage.

Reviewed-by: Guo Ren <guoren@kernel.org>

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
        struct __call_single_node node;
        smp_call_func_t func;
        void *info;
};

#define CSD_INIT(_func, _info) \
        (struct __call_single_data){ .func = (_func), .info = (_info), }

/* Use __aligned() to avoid to use 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
        __aligned(sizeof(struct __call_single_data));
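
To make the cacheline comment concrete, a hypothetical sketch (not from
this thread) follows; it assumes the common 64-bit layout where the
struct is 32 bytes and cachelines are 64 bytes, and both wrapper
structs are invented:

#include <linux/smp.h>
#include <linux/build_bug.h>

struct bad_wrapper {
	char pad[48];			/* pointer alignment permits offset 48 */
	struct __call_single_data csd;	/* bytes 48..79: spans two 64-byte lines */
};

struct good_wrapper {
	char pad[48];
	call_single_data_t csd;		/* 32-byte alignment pads to offset 64: one line */
};

/* Holds by construction of the typedef: */
static_assert(__alignof__(call_single_data_t) ==
	      sizeof(struct __call_single_data));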



-- 
Best Regards
 Guo Ren


* Re: [RFC PATCH 1/1] smp: Change function signatures to use call_single_data_t
From: Leonardo Bras Soares Passos @ 2023-09-01  6:17 UTC
  To: Guo Ren
  Cc: Steven Rostedt, Masami Hiramatsu, Peter Zijlstra, Josh Poimboeuf,
	Valentin Schneider, Paul E. McKenney, Juergen Gross, Yury Norov,
	Imran Khan, linux-kernel, linux-trace-kernel

On Thu, Aug 31, 2023 at 3:58 AM Guo Ren <guoren@kernel.org> wrote:
>
> On Thu, Aug 31, 2023 at 2:31 PM Leonardo Bras <leobras@redhat.com> wrote:
> >
> > call_single_data_t is a size-aligned typedef of struct __call_single_data.
> >
> > This alignment is desirable so that smp_call_function*() avoids bouncing
> > an extra cacheline in case of an unaligned csd, which would hurt
> > performance.
> >
> > Since the removal of struct request->csd in commit 660e802c76c8
> > ("blk-mq: use percpu csd to remote complete instead of per-rq csd"),
> > there are no remaining users of smp_call_function*() with an unaligned csd.
> >
> > Change every 'struct __call_single_data' function parameter to
> > 'call_single_data_t', so that we get warnings if any new code tries to
> > introduce an smp_call_function*() call with an unaligned csd.
> I agree with preventing direct __call_single_data usage.
>
> Reviewed-by: Guo Ren <guoren@kernel.org>

Thanks for reviewing!




* Re: [RFC PATCH 1/1] smp: Change function signatures to use call_single_data_t
From: Peter Zijlstra @ 2023-09-06 14:09 UTC
  To: Leonardo Bras
  Cc: Steven Rostedt, Masami Hiramatsu, Josh Poimboeuf, Guo Ren,
	Valentin Schneider, Paul E. McKenney, Juergen Gross, Yury Norov,
	Imran Khan, linux-kernel, linux-trace-kernel

On Thu, Aug 31, 2023 at 03:31:28AM -0300, Leonardo Bras wrote:
> call_single_data_t is a size-aligned typedef of struct __call_single_data.
>
> This alignment is desirable so that smp_call_function*() avoids bouncing
> an extra cacheline in case of an unaligned csd, which would hurt
> performance.
>
> Since the removal of struct request->csd in commit 660e802c76c8
> ("blk-mq: use percpu csd to remote complete instead of per-rq csd"),
> there are no remaining users of smp_call_function*() with an unaligned csd.
>
> Change every 'struct __call_single_data' function parameter to
> 'call_single_data_t', so that we get warnings if any new code tries to
> introduce an smp_call_function*() call with an unaligned csd.
> 
> Signed-off-by: Leonardo Bras <leobras@redhat.com>

Fair enough, I'll go queue it somewhere.


* Re: [RFC PATCH 1/1] smp: Change function signatures to use call_single_data_t
From: Leonardo Bras Soares Passos @ 2023-09-06 16:15 UTC
  To: Peter Zijlstra
  Cc: Steven Rostedt, Masami Hiramatsu, Josh Poimboeuf, Guo Ren,
	Valentin Schneider, Paul E. McKenney, Juergen Gross, Yury Norov,
	Imran Khan, linux-kernel, linux-trace-kernel

On Wed, Sep 6, 2023 at 11:10 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Thu, Aug 31, 2023 at 03:31:28AM -0300, Leonardo Bras wrote:
> > call_single_data_t is a size-aligned typedef of struct __call_single_data.
> >
> > This alignment is desirable so that smp_call_function*() avoids bouncing
> > an extra cacheline in case of an unaligned csd, which would hurt
> > performance.
> >
> > Since the removal of struct request->csd in commit 660e802c76c8
> > ("blk-mq: use percpu csd to remote complete instead of per-rq csd"),
> > there are no remaining users of smp_call_function*() with an unaligned csd.
> >
> > Change every 'struct __call_single_data' function parameter to
> > 'call_single_data_t', so that we get warnings if any new code tries to
> > introduce an smp_call_function*() call with an unaligned csd.
> >
> > Signed-off-by: Leonardo Bras <leobras@redhat.com>
>
> Fair enough, I'll go queue it somewhere.
>

Thanks Peter!



* [tip: smp/core] smp: Change function signatures to use call_single_data_t
From: tip-bot2 for Leonardo Bras @ 2023-09-12 10:29 UTC
  To: linux-tip-commits
  Cc: Leonardo Bras, Guo Ren, Peter Zijlstra (Intel), x86, linux-kernel

The following commit has been merged into the smp/core branch of tip:

Commit-ID:     c3ba53a7f6ddcdd08562f609237c587e2ae13bbb
Gitweb:        https://git.kernel.org/tip/c3ba53a7f6ddcdd08562f609237c587e2ae13bbb
Author:        Leonardo Bras <leobras@redhat.com>
AuthorDate:    Thu, 31 Aug 2023 03:31:28 -03:00
Committer:     root <root@noisy.programming.kicks-ass.net>
CommitterDate: Sat, 09 Sep 2023 15:10:30 +02:00

smp: Change function signatures to use call_single_data_t

call_single_data_t is a size-aligned typedef of struct __call_single_data.

This alignment is desirable so that smp_call_function*() avoids bouncing
an extra cacheline in case of an unaligned csd, which would hurt
performance.

Since the removal of struct request->csd in commit 660e802c76c8
("blk-mq: use percpu csd to remote complete instead of per-rq csd"),
there are no remaining users of smp_call_function*() with an unaligned csd.

Change every 'struct __call_single_data' function parameter to
'call_single_data_t', so that we get warnings if any new code tries to
introduce an smp_call_function*() call with an unaligned csd.

Signed-off-by: Leonardo Bras <leobras@redhat.com>
Reviewed-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230831063129.335425-1-leobras@redhat.com
---
 include/linux/smp.h        |  2 +-
 include/trace/events/csd.h |  8 ++++----
 kernel/smp.c               | 26 +++++++++++++-------------
 kernel/up.c                |  2 +-
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/include/linux/smp.h b/include/linux/smp.h
index 91ea4a6..e87520d 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
 			   void *info, bool wait, const struct cpumask *mask);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
 
 /*
  * Cpus stopping functions in panic. All have default weak definitions.
diff --git a/include/trace/events/csd.h b/include/trace/events/csd.h
index 67e9d01..58cc83b 100644
--- a/include/trace/events/csd.h
+++ b/include/trace/events/csd.h
@@ -12,7 +12,7 @@ TRACE_EVENT(csd_queue_cpu,
 	TP_PROTO(const unsigned int cpu,
 		unsigned long callsite,
 		smp_call_func_t func,
-		struct __call_single_data *csd),
+		call_single_data_t *csd),
 
 	TP_ARGS(cpu, callsite, func, csd),
 
@@ -39,7 +39,7 @@ TRACE_EVENT(csd_queue_cpu,
  */
 DECLARE_EVENT_CLASS(csd_function,
 
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 
 	TP_ARGS(func, csd),
 
@@ -57,12 +57,12 @@ DECLARE_EVENT_CLASS(csd_function,
 );
 
 DEFINE_EVENT(csd_function, csd_function_entry,
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 	TP_ARGS(func, csd)
 );
 
 DEFINE_EVENT(csd_function, csd_function_exit,
-	TP_PROTO(smp_call_func_t func, struct __call_single_data *csd),
+	TP_PROTO(smp_call_func_t func, call_single_data_t *csd),
 	TP_ARGS(func, csd)
 );
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 385179d..822fabb 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -125,7 +125,7 @@ send_call_function_ipi_mask(struct cpumask *mask)
 }
 
 static __always_inline void
-csd_do_func(smp_call_func_t func, void *info, struct __call_single_data *csd)
+csd_do_func(smp_call_func_t func, void *info, call_single_data_t *csd)
 {
 	trace_csd_function_entry(func, csd);
 	func(info);
@@ -172,7 +172,7 @@ module_param(csd_lock_timeout, ulong, 0444);
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
 
 /* Record current CSD work for current CPU, NULL to erase. */
-static void __csd_lock_record(struct __call_single_data *csd)
+static void __csd_lock_record(call_single_data_t *csd)
 {
 	if (!csd) {
 		smp_mb(); /* NULL cur_csd after unlock. */
@@ -187,13 +187,13 @@ static void __csd_lock_record(struct __call_single_data *csd)
 		  /* Or before unlock, as the case may be. */
 }
 
-static __always_inline void csd_lock_record(struct __call_single_data *csd)
+static __always_inline void csd_lock_record(call_single_data_t *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled))
 		__csd_lock_record(csd);
 }
 
-static int csd_lock_wait_getcpu(struct __call_single_data *csd)
+static int csd_lock_wait_getcpu(call_single_data_t *csd)
 {
 	unsigned int csd_type;
 
@@ -208,7 +208,7 @@ static int csd_lock_wait_getcpu(struct __call_single_data *csd)
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
  * so waiting on other types gets much less information.
  */
-static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
+static bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
 {
 	int cpu = -1;
 	int cpux;
@@ -272,7 +272,7 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
  * previous function call. For multi-cpu calls its even more interesting
  * as we'll have to ensure no other cpu is observing our csd.
  */
-static void __csd_lock_wait(struct __call_single_data *csd)
+static void __csd_lock_wait(call_single_data_t *csd)
 {
 	int bug_id = 0;
 	u64 ts0, ts1;
@@ -286,7 +286,7 @@ static void __csd_lock_wait(struct __call_single_data *csd)
 	smp_acquire__after_ctrl_dep();
 }
 
-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	if (static_branch_unlikely(&csdlock_debug_enabled)) {
 		__csd_lock_wait(csd);
@@ -296,17 +296,17 @@ static __always_inline void csd_lock_wait(struct __call_single_data *csd)
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #else
-static void csd_lock_record(struct __call_single_data *csd)
+static void csd_lock_record(call_single_data_t *csd)
 {
 }
 
-static __always_inline void csd_lock_wait(struct __call_single_data *csd)
+static __always_inline void csd_lock_wait(call_single_data_t *csd)
 {
 	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 #endif
 
-static __always_inline void csd_lock(struct __call_single_data *csd)
+static __always_inline void csd_lock(call_single_data_t *csd)
 {
 	csd_lock_wait(csd);
 	csd->node.u_flags |= CSD_FLAG_LOCK;
@@ -319,7 +319,7 @@ static __always_inline void csd_lock(struct __call_single_data *csd)
 	smp_wmb();
 }
 
-static __always_inline void csd_unlock(struct __call_single_data *csd)
+static __always_inline void csd_unlock(call_single_data_t *csd)
 {
 	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));
 
@@ -372,7 +372,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static int generic_exec_single(int cpu, struct __call_single_data *csd)
+static int generic_exec_single(int cpu, call_single_data_t *csd)
 {
 	if (cpu == smp_processor_id()) {
 		smp_call_func_t func = csd->func;
@@ -658,7 +658,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  *
  * Return: %0 on success or negative errno value on error
  */
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	int err = 0;
 
diff --git a/kernel/up.c b/kernel/up.c
index a38b8b0..df50828 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
+int smp_call_function_single_async(int cpu, call_single_data_t *csd)
 {
 	unsigned long flags;
 


* [tip: smp/core] smp: Change function signatures to use call_single_data_t
From: tip-bot2 for Leonardo Bras @ 2023-09-13 13:03 UTC
  To: linux-tip-commits
  Cc: Leonardo Bras, Guo Ren, Peter Zijlstra (Intel),
	Ingo Molnar, x86, linux-kernel

The following commit has been merged into the smp/core branch of tip:

Commit-ID:     d090ec0df81e56556af3a2bf04a7e89347ae5784
Gitweb:        https://git.kernel.org/tip/d090ec0df81e56556af3a2bf04a7e89347ae5784
Author:        Leonardo Bras <leobras@redhat.com>
AuthorDate:    Thu, 31 Aug 2023 03:31:28 -03:00
Committer:     Ingo Molnar <mingo@kernel.org>
CommitterDate: Wed, 13 Sep 2023 14:59:24 +02:00

smp: Change function signatures to use call_single_data_t

call_single_data_t is a size-aligned typedef of struct __call_single_data.

This alignment is desirable so that smp_call_function*() avoids bouncing
an extra cacheline in case of an unaligned csd, which would hurt
performance.

Since the removal of struct request->csd in commit 660e802c76c8
("blk-mq: use percpu csd to remote complete instead of per-rq csd"),
there are no remaining users of smp_call_function*() with an unaligned csd.

Change every 'struct __call_single_data' function parameter to
'call_single_data_t', so that we get warnings if any new code tries to
introduce an smp_call_function*() call with an unaligned csd.

Signed-off-by: Leonardo Bras <leobras@redhat.com>
Reviewed-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230831063129.335425-1-leobras@redhat.com
---
 include/linux/smp.h        |  2 +-
 include/trace/events/csd.h |  8 ++++----
 kernel/smp.c               | 26 +++++++++++++-------------
 kernel/up.c                |  2 +-
 4 files changed, 19 insertions(+), 19 deletions(-)

[diff identical to the 2023-09-12 tip-bot message above; snipped]


