From: Anna-Maria Behnsen <anna-maria@linutronix.de>
To: linux-kernel@vger.kernel.org
Cc: Peter Zijlstra <peterz@infradead.org>,
	John Stultz <jstultz@google.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Eric Dumazet <edumazet@google.com>,
	"Rafael J . Wysocki" <rafael.j.wysocki@intel.com>,
	Arjan van de Ven <arjan@infradead.org>,
	"Paul E . McKenney" <paulmck@kernel.org>,
	Frederic Weisbecker <fweisbec@gmail.com>,
	Rik van Riel <riel@surriel.com>,
	Anna-Maria Behnsen <anna-maria@linutronix.de>
Subject: [PATCH v5 17/18] timer_migration: Add tracepoints
Date: Wed,  1 Mar 2023 15:17:43 +0100
Message-ID: <20230301141744.16063-18-anna-maria@linutronix.de>
In-Reply-To: <20230301141744.16063-1-anna-maria@linutronix.de>

The timer pull logic needs proper debugging aids. Add tracepoints so the
hierarchical idle machinery can be diagnosed.
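
For example, once applied, the new events can be enabled and observed at
runtime via tracefs (assuming the usual mount point /sys/kernel/tracing;
older setups may have it at /sys/kernel/debug/tracing):

  # enable all timer_migration events and read the live trace
  echo 1 > /sys/kernel/tracing/events/timer_migration/enable
  cat /sys/kernel/tracing/trace_pipe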

Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
---
 include/trace/events/timer_migration.h | 277 +++++++++++++++++++++++++
 kernel/time/timer_migration.c          |  24 +++
 2 files changed, 301 insertions(+)
 create mode 100644 include/trace/events/timer_migration.h

diff --git a/include/trace/events/timer_migration.h b/include/trace/events/timer_migration.h
new file mode 100644
index 000000000000..0c4824056930
--- /dev/null
+++ b/include/trace/events/timer_migration.h
@@ -0,0 +1,277 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timer_migration
+
+#if !defined(_TRACE_TIMER_MIGRATION_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_TIMER_MIGRATION_H
+
+#include <linux/tracepoint.h>
+
+/* Group events */
+TRACE_EVENT(tmigr_group_set,
+
+	TP_PROTO(struct tmigr_group *group),
+
+	TP_ARGS(group),
+
+	TP_STRUCT__entry(
+		__field( void *,	group		)
+		__field( unsigned int,	lvl		)
+		__field( unsigned int,	numa_node	)
+	),
+
+	TP_fast_assign(
+		__entry->group		= group;
+		__entry->lvl		= group->level;
+		__entry->numa_node	= group->numa_node;
+	),
+
+	TP_printk("group=%p lvl=%d numa=%d",
+		  __entry->group, __entry->lvl, __entry->numa_node)
+);
+
+TRACE_EVENT(tmigr_connect_child_parent,
+
+	TP_PROTO(struct tmigr_group *child),
+
+	TP_ARGS(child),
+
+	TP_STRUCT__entry(
+		__field( void *,	child		)
+		__field( void *,	parent		)
+		__field( unsigned int,	lvl		)
+		__field( unsigned int,	numa_node	)
+		__field( unsigned int,	num_childs	)
+		__field( u32,		childmask	)
+	),
+
+	TP_fast_assign(
+		__entry->child		= child;
+		__entry->parent		= child->parent;
+		__entry->lvl		= child->parent->level;
+		__entry->numa_node	= child->parent->numa_node;
+		__entry->num_childs	= child->parent->num_childs;
+		__entry->childmask	= child->childmask;
+	),
+
+	TP_printk("group=%p childmask=%0x parent=%p lvl=%d numa=%d num_childs=%d",
+		  __entry->child,  __entry->childmask, __entry->parent,
+		  __entry->lvl, __entry->numa_node, __entry->num_childs)
+);
+
+TRACE_EVENT(tmigr_connect_cpu_parent,
+
+	TP_PROTO(struct tmigr_cpu *tmc),
+
+	TP_ARGS(tmc),
+
+	TP_STRUCT__entry(
+		__field( void *,	parent		)
+		__field( unsigned int,	cpu		)
+		__field( unsigned int,	lvl		)
+		__field( unsigned int,	numa_node	)
+		__field( unsigned int,	num_childs	)
+		__field( u32,		childmask	)
+	),
+
+	TP_fast_assign(
+		__entry->parent		= tmc->tmgroup;
+		__entry->cpu		= tmc->cpuevt.cpu;
+		__entry->lvl		= tmc->tmgroup->level;
+		__entry->numa_node	= tmc->tmgroup->numa_node;
+		__entry->num_childs	= tmc->tmgroup->num_childs;
+		__entry->childmask	= tmc->childmask;
+	),
+
+	TP_printk("cpu=%d childmask=%0x parent=%p lvl=%d numa=%d num_childs=%d",
+		  __entry->cpu,	 __entry->childmask, __entry->parent,
+		  __entry->lvl, __entry->numa_node, __entry->num_childs)
+);
+
+DECLARE_EVENT_CLASS(tmigr_group_and_cpu,
+
+	TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),
+
+	TP_ARGS(group, state, childmask),
+
+	TP_STRUCT__entry(
+		__field( void *,	group		)
+		__field( void *,	parent		)
+		__field( unsigned int,	lvl		)
+		__field( unsigned int,	numa_node	)
+		__field( u8,		active		)
+		__field( u8,		migrator	)
+		__field( u32,		childmask	)
+	),
+
+	TP_fast_assign(
+		__entry->group		= group;
+		__entry->parent		= group->parent;
+		__entry->lvl		= group->level;
+		__entry->numa_node	= group->numa_node;
+		__entry->active		= state.active;
+		__entry->migrator	= state.migrator;
+		__entry->childmask	= childmask;
+	),
+
+	TP_printk("group=%p lvl=%d numa=%d active=%0x migrator=%0x "
+		  "parent=%p childmask=%0x",
+		  __entry->group, __entry->lvl, __entry->numa_node,
+		  __entry->active, __entry->migrator,
+		  __entry->parent, __entry->childmask)
+);
+
+DEFINE_EVENT(tmigr_group_and_cpu, tmigr_group_set_cpu_inactive,
+
+	TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),
+
+	TP_ARGS(group, state, childmask)
+);
+
+DEFINE_EVENT(tmigr_group_and_cpu, tmigr_group_set_cpu_active,
+
+	TP_PROTO(struct tmigr_group *group, union tmigr_state state, u32 childmask),
+
+	TP_ARGS(group, state, childmask)
+);
+
+/* CPU events */
+DECLARE_EVENT_CLASS(tmigr_cpugroup,
+
+	TP_PROTO(struct tmigr_cpu *tmc),
+
+	TP_ARGS(tmc),
+
+	TP_STRUCT__entry(
+		__field( void *,	parent)
+		__field( unsigned int,	cpu)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= tmc->cpuevt.cpu;
+		__entry->parent		= tmc->tmgroup;
+	),
+
+	TP_printk("cpu=%d parent=%p", __entry->cpu, __entry->parent)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_new_timer,
+
+	TP_PROTO(struct tmigr_cpu *tmc),
+
+	TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_active,
+
+	TP_PROTO(struct tmigr_cpu *tmc),
+
+	TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_online,
+
+	TP_PROTO(struct tmigr_cpu *tmc),
+
+	TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_cpu_offline,
+
+	TP_PROTO(struct tmigr_cpu *tmc),
+
+	TP_ARGS(tmc)
+);
+
+DEFINE_EVENT(tmigr_cpugroup, tmigr_handle_remote_cpu,
+
+	TP_PROTO(struct tmigr_cpu *tmc),
+
+	TP_ARGS(tmc)
+);
+
+TRACE_EVENT(tmigr_cpu_idle,
+
+	TP_PROTO(struct tmigr_cpu *tmc, u64 nextevt),
+
+	TP_ARGS(tmc, nextevt),
+
+	TP_STRUCT__entry(
+		__field( void *,	parent)
+		__field( unsigned int,	cpu)
+		__field( u64,		nextevt)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= tmc->cpuevt.cpu;
+		__entry->parent		= tmc->tmgroup;
+		__entry->nextevt	= nextevt;
+	),
+
+	TP_printk("cpu=%d parent=%p nextevt=%llu",
+		  __entry->cpu, __entry->parent, __entry->nextevt)
+);
+
+TRACE_EVENT(tmigr_update_events,
+
+	TP_PROTO(struct tmigr_group *child, struct tmigr_group *group,
+		 union tmigr_state childstate,	union tmigr_state groupstate,
+		 u64 nextevt),
+
+	TP_ARGS(child, group, childstate, groupstate, nextevt),
+
+	TP_STRUCT__entry(
+		__field( void *,	child			)
+		__field( void *,	group			)
+		__field( u64,		nextevt			)
+		__field( u64,		group_next_expiry	)
+		__field( unsigned int,	group_lvl		)
+		__field( u8,		child_active		)
+		__field( u8,		group_active		)
+		__field( unsigned int,	child_evtcpu		)
+		__field( u64,		child_evt_expiry	)
+	),
+
+	TP_fast_assign(
+		__entry->child			= child;
+		__entry->group			= group;
+		__entry->nextevt		= nextevt;
+		__entry->group_next_expiry	= group->next_expiry;
+		__entry->group_lvl		= group->level;
+		__entry->child_active		= childstate.active;
+		__entry->group_active		= groupstate.active;
+		__entry->child_evtcpu		= child ? child->groupevt.cpu : 0;
+		__entry->child_evt_expiry	= child ? child->groupevt.nextevt.expires : 0;
+	),
+
+	TP_printk("child=%p group=%p group_lvl=%d child_active=%0x group_active=%0x "
+		  "nextevt=%llu next_expiry=%llu child_evt_expiry=%llu child_evtcpu=%d",
+		  __entry->child, __entry->group, __entry->group_lvl, __entry->child_active,
+		  __entry->group_active,
+		  __entry->nextevt, __entry->group_next_expiry, __entry->child_evt_expiry,
+		  __entry->child_evtcpu)
+);
+
+TRACE_EVENT(tmigr_handle_remote,
+
+	TP_PROTO(struct tmigr_group *group),
+
+	TP_ARGS(group),
+
+	TP_STRUCT__entry(
+		__field( void * ,	group	)
+		__field( unsigned int ,	lvl	)
+	),
+
+	TP_fast_assign(
+		__entry->group		= group;
+		__entry->lvl		= group->level;
+	),
+
+	TP_printk("group=%p lvl=%d",
+		   __entry->group, __entry->lvl)
+);
+
+#endif /* _TRACE_TIMER_MIGRATION_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/time/timer_migration.c b/kernel/time/timer_migration.c
index 5a600de3623b..5a371bc252d4 100644
--- a/kernel/time/timer_migration.c
+++ b/kernel/time/timer_migration.c
@@ -13,6 +13,9 @@
 #include "timer_migration.h"
 #include "tick-internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/timer_migration.h>
+
 /*
  * The timer migration mechanism is built on a hierarchy of groups. The
  * lowest level group contains CPUs, the next level groups of CPU groups
@@ -320,6 +323,8 @@ static bool tmigr_active_up(struct tmigr_group *group,
 	 */
 	group->groupevt.ignore = 1;
 
+	trace_tmigr_group_set_cpu_active(group, newstate, childmask);
+
 	return walk_done;
 }
 
@@ -344,6 +349,7 @@ void tmigr_cpu_activate(void)
 	raw_spin_lock(&tmc->lock);
 	tmc->idle = 0;
 	tmc->wakeup = KTIME_MAX;
+	trace_tmigr_cpu_active(tmc);
 	__tmigr_cpu_activate(tmc);
 	raw_spin_unlock(&tmc->lock);
 }
@@ -450,6 +456,9 @@ static bool tmigr_update_events(struct tmigr_group *group,
 		data->nextexp = tmigr_next_groupevt_expires(group);
 	}
 
+	trace_tmigr_update_events(child, group, data->childstate,
+				  data->groupstate, nextexp);
+
 unlock:
 	raw_spin_unlock(&group->lock);
 
@@ -493,6 +502,8 @@ static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp)
 	if (tmc->remote)
 		return KTIME_MAX;
 
+	trace_tmigr_cpu_new_timer(tmc);
+
 	tmc->cpuevt.ignore = 0;
 
 	data.groupstate.state = atomic_read(&tmc->tmgroup->migr_state);
@@ -593,6 +604,8 @@ static bool tmigr_inactive_up(struct tmigr_group *group,
 		}
 	}
 
+	trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
+
 	return walk_done;
 }
 
@@ -669,6 +682,7 @@ u64 tmigr_cpu_deactivate(u64 nextexp)
 	tmc->idle = 1;
 
 unlock:
+	trace_tmigr_cpu_idle(tmc, ret);
 	raw_spin_unlock(&tmc->lock);
 	return ret;
 }
@@ -695,6 +709,8 @@ static u64 tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
 		return next;
 	}
 
+	trace_tmigr_handle_remote_cpu(tmc);
+
 	tmc->remote = 1;
 
 	/* Drop the lock to allow the remote CPU to exit idle */
@@ -746,6 +762,7 @@ static bool tmigr_handle_remote_up(struct tmigr_group *group,
 
 	childmask = data->childmask;
 
+	trace_tmigr_handle_remote(group);
 again:
 	/*
 	 * Handle the group only if @childmask is the migrator or if the
@@ -979,6 +996,7 @@ static struct tmigr_group *tmigr_get_group(unsigned int cpu, unsigned int node,
 	tmigr_init_group(group, lvl, node, migr_state);
 	/* Setup successful. Add it to the hierarchy */
 	list_add(&group->list, &tmigr_level_list[lvl]);
+	trace_tmigr_group_set(group);
 	return group;
 }
 
@@ -997,6 +1015,8 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
 	raw_spin_unlock(&parent->lock);
 	raw_spin_unlock_irqrestore(&child->lock, flags);
 
+	trace_tmigr_connect_child_parent(child);
+
 	/*
 	 * To prevent inconsistent states, active childs needs to be active
 	 * in new parent as well. Inactive childs are already marked
@@ -1083,6 +1103,8 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
 
 			raw_spin_unlock_irqrestore(&group->lock, flags);
 
+			trace_tmigr_connect_cpu_parent(tmc);
+
 			/* There are no childs that needs to be connected */
 			continue;
 		} else {
@@ -1151,6 +1173,7 @@ static int tmigr_cpu_online(unsigned int cpu)
 		tmc->wakeup = KTIME_MAX;
 	}
 	raw_spin_lock_irqsave(&tmc->lock, flags);
+	trace_tmigr_cpu_online(tmc);
 	__tmigr_cpu_activate(tmc);
 	tmc->online = 1;
 	raw_spin_unlock_irqrestore(&tmc->lock, flags);
@@ -1164,6 +1187,7 @@ static int tmigr_cpu_offline(unsigned int cpu)
 	raw_spin_lock_irq(&tmc->lock);
 	tmc->online = 0;
 	__tmigr_cpu_deactivate(tmc, KTIME_MAX);
+	trace_tmigr_cpu_offline(tmc);
 	raw_spin_unlock_irq(&tmc->lock);
 
 	return 0;
-- 
2.30.2

