All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] irq_work: Add event-tracing points for irq_work
@ 2022-04-14  6:38 Zqiang
  2022-04-14  9:32 ` Peter Zijlstra
  2022-04-14 13:47 ` Thomas Gleixner
  0 siblings, 2 replies; 5+ messages in thread
From: Zqiang @ 2022-04-14  6:38 UTC (permalink / raw)
  To: akpm, peterz; +Cc: linux-kernel

Add an irq_work_queue_on tracepoint to allow tracing when and how
an irq_work is queued; the irq_work_execute_start/end tracepoints
make it possible to know when an irq_work is executed and how long
the execution takes.

Signed-off-by: Zqiang <qiang1.zhang@intel.com>
---
 include/trace/events/irq_work.h | 79 +++++++++++++++++++++++++++++++++
 kernel/irq_work.c               |  9 +++-
 2 files changed, 87 insertions(+), 1 deletion(-)
 create mode 100644 include/trace/events/irq_work.h

diff --git a/include/trace/events/irq_work.h b/include/trace/events/irq_work.h
new file mode 100644
index 000000000000..823cb48666c4
--- /dev/null
+++ b/include/trace/events/irq_work.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq_work
+
+#if !defined(_TRACE_IRQ_WORK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IRQ_WORK_H
+
+#include <linux/tracepoint.h>
+#include <linux/irq_work.h>
+
+TRACE_EVENT(irq_work_queue_on,
+
+	TP_PROTO(unsigned int req_cpu, struct irq_work *work),
+
+	TP_ARGS(req_cpu, work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+		__field( unsigned int,	req_cpu	)
+		__field( int,		flags	)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+		__entry->req_cpu	= req_cpu;
+		__entry->flags		= atomic_read(&work->node.a_flags);
+	),
+
+	TP_printk("irq_work=%p func=%ps req_cpu=%u flags=%x",
+			__entry->work, __entry->function, __entry->req_cpu, __entry->flags)
+);
+
+TRACE_EVENT(irq_work_execute_start,
+
+	TP_PROTO(struct irq_work *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+		__field( int,		flags	)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+		__entry->flags		= atomic_read(&work->node.a_flags);
+	),
+
+	TP_printk("irq_work=%p: func=%ps flags=%x",
+			__entry->work, __entry->function,  __entry->flags)
+);
+
+TRACE_EVENT(irq_work_execute_end,
+
+	TP_PROTO(struct irq_work *work),
+
+	TP_ARGS(work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+		__field( int,		flags	)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+		__entry->flags		= atomic_read(&work->node.a_flags);
+	),
+
+	TP_printk("irq_work=%p: func=%ps flags=%x",
+			__entry->work, __entry->function, __entry->flags)
+);
+#endif
+#include <trace/define_trace.h>
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 7afa40fe5cc4..edad992556d0 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -22,6 +22,9 @@
 #include <asm/processor.h>
 #include <linux/kasan.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/irq_work.h>
+
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
 static DEFINE_PER_CPU(struct task_struct *, irq_workd);
@@ -81,7 +84,9 @@ static void __irq_work_queue_local(struct irq_work *work)
 	bool rt_lazy_work = false;
 	bool lazy_work = false;
 	int work_flags;
+	int cpu = smp_processor_id();
 
+	trace_irq_work_queue_on(cpu, work);
 	work_flags = atomic_read(&work->node.a_flags);
 	if (work_flags & IRQ_WORK_LAZY)
 		lazy_work = true;
@@ -143,7 +148,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-
+		trace_irq_work_queue_on(cpu, work);
 		/*
 		 * On PREEMPT_RT the items which are not marked as
 		 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
@@ -208,7 +213,9 @@ void irq_work_single(void *arg)
 	smp_mb();
 
 	lockdep_irq_work_enter(flags);
+	trace_irq_work_execute_start(work);
 	work->func(work);
+	trace_irq_work_execute_end(work);
 	lockdep_irq_work_exit(flags);
 
 	/*
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2022-04-15  3:02 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-04-14  6:38 [PATCH] irq_work: Add event-tracing points for irq_work Zqiang
2022-04-14  9:32 ` Peter Zijlstra
2022-04-15  2:56   ` Zhang, Qiang1
2022-04-14 13:47 ` Thomas Gleixner
2022-04-15  3:02   ` Zhang, Qiang1

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.