[v2] kthread: Add kthread_work tracepoints

Message ID 20201012173629.264991-1-robdclark@gmail.com
State New, archived
Series
  • [v2] kthread: Add kthread_work tracepoints

Commit Message

Rob Clark Oct. 12, 2020, 5:36 p.m. UTC
From: Rob Clark <robdclark@chromium.org>

While migrating some code from a workqueue to a kthread_worker, I found
that I missed the execute_start/end tracepoints.  So add similar
tracepoints for kthread_work.  And, for completeness, add a queue_work
tracepoint (although this one differs slightly from the matching
workqueue tracepoint).

Signed-off-by: Rob Clark <robdclark@chromium.org>
---
v2: Use DECLARE_EVENT_CLASS (Steven Rostedt)

 include/trace/events/sched.h | 74 ++++++++++++++++++++++++++++++++++++
 kernel/kthread.c             |  9 +++++
 2 files changed, 83 insertions(+)
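
Not part of the patch itself, just for illustration: a minimal sketch of a
caller that would hit the new tracepoints, assuming a kernel with this patch
applied (the example_* names are hypothetical):

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/module.h>

	static void example_work_fn(struct kthread_work *work)
	{
		/*
		 * sched_kthread_work_execute_start fires just before this
		 * callback runs, and sched_kthread_work_execute_end fires
		 * right after it returns.
		 */
		pr_info("kthread_work executed\n");
	}

	static struct kthread_worker *example_worker;
	static struct kthread_work example_work;

	static int __init example_init(void)
	{
		example_worker = kthread_create_worker(0, "example-kworker");
		if (IS_ERR(example_worker))
			return PTR_ERR(example_worker);

		kthread_init_work(&example_work, example_work_fn);

		/* sched_kthread_work_queue_work fires in kthread_insert_work() */
		kthread_queue_work(example_worker, &example_work);
		return 0;
	}

	static void __exit example_exit(void)
	{
		kthread_flush_worker(example_worker);
		kthread_destroy_worker(example_worker);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

With the events enabled (they belong to the "sched" trace system, so they
should show up under /sys/kernel/tracing/events/sched/ once the patch is
applied), queuing the work should emit sched_kthread_work_queue_work from
kthread_insert_work(), and the execute_start/end pair should bracket the
callback invocation in kthread_worker_fn().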

Patch

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index fec25b9cfbaf..c89dc6626f01 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -5,6 +5,7 @@ 
 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_SCHED_H
 
+#include <linux/kthread.h>
 #include <linux/sched/numa_balancing.h>
 #include <linux/tracepoint.h>
 #include <linux/binfmts.h>
@@ -51,6 +52,79 @@  TRACE_EVENT(sched_kthread_stop_ret,
 	TP_printk("ret=%d", __entry->ret)
 );
 
+/**
+ * sched_kthread_work_queue_work - called when a work gets queued
+ * @worker:	pointer to the kthread_worker
+ * @work:	pointer to struct kthread_work
+ *
+ * This event occurs when a work is queued immediately or once a
+ * delayed work is actually queued (i.e. once the delay has been
+ * reached).
+ */
+TRACE_EVENT(sched_kthread_work_queue_work,
+
+	TP_PROTO(struct kthread_worker *worker,
+		 struct kthread_work *work),
+
+	TP_ARGS(worker, work),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+		__field( void *,	worker)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= work->func;
+		__entry->worker		= worker;
+	),
+
+	TP_printk("work struct=%p function=%ps worker=%p",
+		  __entry->work, __entry->function, __entry->worker)
+);
+
+DECLARE_EVENT_CLASS(sched_kthread_work_execute_template,
+
+	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
+
+	TP_ARGS(work, function),
+
+	TP_STRUCT__entry(
+		__field( void *,	work	)
+		__field( void *,	function)
+	),
+
+	TP_fast_assign(
+		__entry->work		= work;
+		__entry->function	= function;
+	),
+
+	TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
+);
+
+/**
+ * sched_kthread_work_execute_start - called immediately before the work callback
+ * @work:	pointer to struct kthread_work
+ * @function:   pointer to worker function
+ *
+ * Allows tracking of kthread work execution.
+ */
+DEFINE_EVENT(sched_kthread_work_execute_template, sched_kthread_work_execute_start,
+	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
+	TP_ARGS(work, function));
+
+/**
+ * sched_kthread_work_execute_end - called immediately after the work callback
+ * @work:	pointer to struct kthread_work
+ * @function:   pointer to worker function
+ *
+ * Allows tracking of kthread work execution.
+ */
+DEFINE_EVENT(sched_kthread_work_execute_template, sched_kthread_work_execute_end,
+	TP_PROTO(struct kthread_work *work, kthread_work_func_t function),
+	TP_ARGS(work, function));
+
 /*
  * Tracepoint for waking up a task:
  */
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3edaa380dc7b..7da6758283cd 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -704,8 +704,15 @@  int kthread_worker_fn(void *worker_ptr)
 	raw_spin_unlock_irq(&worker->lock);
 
 	if (work) {
+		kthread_work_func_t func = work->func;
 		__set_current_state(TASK_RUNNING);
+		trace_sched_kthread_work_execute_start(work, work->func);
 		work->func(work);
+		/*
+		 * Avoid dereferencing work after this point.  The trace
+		 * event only cares about the address.
+		 */
+		trace_sched_kthread_work_execute_end(work, func);
 	} else if (!freezing(current))
 		schedule();
 
@@ -834,6 +841,8 @@  static void kthread_insert_work(struct kthread_worker *worker,
 {
 	kthread_insert_work_sanity_check(worker, work);
 
+	trace_sched_kthread_work_queue_work(worker, work);
+
 	list_add_tail(&work->node, pos);
 	work->worker = worker;
 	if (!worker->current_work && likely(worker->task))