From: Tom Zanussi <tom.zanussi@linux.intel.com>
To: rostedt@goodmis.org
Cc: tglx@linutronix.de, mhiramat@kernel.org, namhyung@kernel.org,
	vedang.patel@intel.com, bigeasy@linutronix.de,
	joel.opensrc@gmail.com, joelaf@google.com,
	mathieu.desnoyers@efficios.com, baohong.liu@intel.com,
	linux-kernel@vger.kernel.org, linux-rt-users@vger.kernel.org,
	Tom Zanussi <tom.zanussi@linux.intel.com>
Subject: [PATCH v2 27/40] tracing: Add support for 'synthetic' events
Date: Tue,  5 Sep 2017 16:57:39 -0500
Message-ID: <9cae5fa47b29d06521c12626333158dd5dd86d66.1504642143.git.tom.zanussi@linux.intel.com>
In-Reply-To: <cover.1504642143.git.tom.zanussi@linux.intel.com>

Synthetic events are user-defined events generated from hist trigger
variables saved from one or more other events.

To define a synthetic event, the user writes a simple specification,
consisting of the name of the new event along with one or more fields
and their types, to the tracing/synthetic_events file.

For instance, the following creates a new event named 'wakeup_latency'
with 3 fields: lat, pid, and prio:

    # echo 'wakeup_latency u64 lat; pid_t pid; int prio' >> \
      /sys/kernel/debug/tracing/synthetic_events

Reading the tracing/synthetic_events file lists all the
currently-defined synthetic events, in this case the event we defined
above:

    # cat /sys/kernel/debug/tracing/synthetic_events
    wakeup_latency u64 lat; pid_t pid; int prio
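
An existing event can also be removed, provided nothing is still
referencing it, by writing its name prefixed with '!' to the same file
(this is the '!<event_name>' form handled by create_synth_event()
below):

    # echo '!wakeup_latency' >> /sys/kernel/debug/tracing/synthetic_events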

At this point, the synthetic event is ready to use, and a histogram
can be defined using it:

    # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
    /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger

The new event is created under the tracing/events/synthetic/ directory
and looks and behaves just like any other event:

    # ls /sys/kernel/debug/tracing/events/synthetic/wakeup_latency
      enable  filter  format  hist  id  trigger

Although a histogram can be defined for it, nothing will show up in it
until some action actually traces the event via the trace_synth()
function. trace_synth() is very similar to the other trace_*
invocations spread throughout the kernel, except that in this case the
trace_ function and its corresponding tracepoint aren't statically
generated but are instead defined by the user at run-time.
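
As a rough sketch of the calling convention only (the function below is
hypothetical and not part of this patch), a caller that has resolved a
synth_event and gathered the saved variable values would simply do:

    static void fire_synth_event(struct synth_event *event,
                                 u64 *var_ref_vals,
                                 unsigned int var_ref_idx)
    {
            /* var_ref_vals[var_ref_idx] onward holds one value per field */
            trace_synth(event, var_ref_vals, var_ref_idx);
    }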

How this can be automatically hooked up via a hist trigger 'action' is
discussed in a subsequent patch.

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
---
 kernel/trace/trace_events_hist.c | 863 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 863 insertions(+)

diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index c3edf7a..2906b92 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -20,10 +20,16 @@
 #include <linux/slab.h>
 #include <linux/stacktrace.h>
 #include <linux/rculist.h>
+#include <linux/tracefs.h>
 
 #include "tracing_map.h"
 #include "trace.h"
 
+#define SYNTH_SYSTEM		"synthetic"
+#define SYNTH_FIELDS_MAX	16
+
+#define STR_VAR_LEN_MAX		32 /* must be multiple of sizeof(u64) */
+
 struct hist_field;
 
 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
@@ -270,6 +276,26 @@ struct hist_trigger_data {
 	unsigned int			n_actions;
 };
 
+struct synth_field {
+	char *type;
+	char *name;
+	size_t size;
+	bool is_signed;
+	bool is_string;
+};
+
+struct synth_event {
+	struct list_head			list;
+	int					ref;
+	char					*name;
+	struct synth_field			**fields;
+	unsigned int				n_fields;
+	unsigned int				n_u64;
+	struct trace_event_class		class;
+	struct trace_event_call			call;
+	struct tracepoint			*tp;
+};
+
 struct action_data;
 
 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
@@ -282,6 +308,798 @@ struct action_data {
 	unsigned int		var_ref_idx;
 };
 
+static LIST_HEAD(synth_event_list);
+static DEFINE_MUTEX(synth_event_mutex);
+
+struct synth_trace_event {
+	struct trace_entry	ent;
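+	/* one u64 per numeric field; string fields are stored inline */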
+	u64			fields[];
+};
+
+static int synth_event_define_fields(struct trace_event_call *call)
+{
+	struct synth_trace_event trace;
+	int offset = offsetof(typeof(trace), fields);
+	struct synth_event *event = call->data;
+	unsigned int i, size, n_u64;
+	char *name, *type;
+	bool is_signed;
+	int ret = 0;
+
+	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+		size = event->fields[i]->size;
+		is_signed = event->fields[i]->is_signed;
+		type = event->fields[i]->type;
+		name = event->fields[i]->name;
+		ret = trace_define_field(call, type, name, offset, size,
+					 is_signed, FILTER_OTHER);
+		if (ret)
+			break;
+
+		if (event->fields[i]->is_string) {
+			offset += STR_VAR_LEN_MAX;
+			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+		} else {
+			offset += sizeof(u64);
+			n_u64++;
+		}
+	}
+
+	event->n_u64 = n_u64;
+
+	return ret;
+}
+
+static bool synth_field_signed(char *type)
+{
+	if (strncmp(type, "u", 1) == 0)
+		return false;
+
+	return true;
+}
+
+static int synth_field_is_string(char *type)
+{
+	if (strstr(type, "char[") != NULL)
+		return true;
+
+	return false;
+}
+
+static int synth_field_string_size(char *type)
+{
+	char buf[4], *end, *start;
+	unsigned int len;
+	int size, err;
+
+	start = strstr(type, "char[");
+	if (start == NULL)
+		return -EINVAL;
+	start += strlen("char[");
+
+	end = strchr(type, ']');
+	if (!end || end < start)
+		return -EINVAL;
+
+	len = end - start;
+	if (len > 2)
+		return -EINVAL;
+
+	strncpy(buf, start, len);
+	buf[len] = '\0';
+
+	err = kstrtouint(buf, 0, &size);
+	if (err)
+		return err;
+
+	if (size > STR_VAR_LEN_MAX)
+		return -EINVAL;
+
+	return size;
+}
+
+static int synth_field_size(char *type)
+{
+	int size = 0;
+
+	if (strcmp(type, "s64") == 0)
+		size = sizeof(s64);
+	else if (strcmp(type, "u64") == 0)
+		size = sizeof(u64);
+	else if (strcmp(type, "s32") == 0)
+		size = sizeof(s32);
+	else if (strcmp(type, "u32") == 0)
+		size = sizeof(u32);
+	else if (strcmp(type, "s16") == 0)
+		size = sizeof(s16);
+	else if (strcmp(type, "u16") == 0)
+		size = sizeof(u16);
+	else if (strcmp(type, "s8") == 0)
+		size = sizeof(s8);
+	else if (strcmp(type, "u8") == 0)
+		size = sizeof(u8);
+	else if (strcmp(type, "char") == 0)
+		size = sizeof(char);
+	else if (strcmp(type, "unsigned char") == 0)
+		size = sizeof(unsigned char);
+	else if (strcmp(type, "int") == 0)
+		size = sizeof(int);
+	else if (strcmp(type, "unsigned int") == 0)
+		size = sizeof(unsigned int);
+	else if (strcmp(type, "long") == 0)
+		size = sizeof(long);
+	else if (strcmp(type, "unsigned long") == 0)
+		size = sizeof(unsigned long);
+	else if (strcmp(type, "pid_t") == 0)
+		size = sizeof(pid_t);
+	else if (synth_field_is_string(type))
+		size = synth_field_string_size(type);
+
+	return size;
+}
+
+static const char *synth_field_fmt(char *type)
+{
+	const char *fmt = "%llu";
+
+	if (strcmp(type, "s64") == 0)
+		fmt = "%lld";
+	else if (strcmp(type, "u64") == 0)
+		fmt = "%llu";
+	else if (strcmp(type, "s32") == 0)
+		fmt = "%d";
+	else if (strcmp(type, "u32") == 0)
+		fmt = "%u";
+	else if (strcmp(type, "s16") == 0)
+		fmt = "%d";
+	else if (strcmp(type, "u16") == 0)
+		fmt = "%u";
+	else if (strcmp(type, "s8") == 0)
+		fmt = "%d";
+	else if (strcmp(type, "u8") == 0)
+		fmt = "%u";
+	else if (strcmp(type, "char") == 0)
+		fmt = "%d";
+	else if (strcmp(type, "unsigned char") == 0)
+		fmt = "%u";
+	else if (strcmp(type, "int") == 0)
+		fmt = "%d";
+	else if (strcmp(type, "unsigned int") == 0)
+		fmt = "%u";
+	else if (strcmp(type, "long") == 0)
+		fmt = "%ld";
+	else if (strcmp(type, "unsigned long") == 0)
+		fmt = "%lu";
+	else if (strcmp(type, "pid_t") == 0)
+		fmt = "%d";
+	else if (strstr(type, "[") != NULL)
+		fmt = "%s";
+
+	return fmt;
+}
+
+static enum print_line_t print_synth_event(struct trace_iterator *iter,
+					   int flags,
+					   struct trace_event *event)
+{
+	struct trace_array *tr = iter->tr;
+	struct trace_seq *s = &iter->seq;
+	struct synth_trace_event *entry;
+	struct synth_event *se;
+	unsigned int i, n_u64;
+	char print_fmt[32];
+	const char *fmt;
+
+	entry = (struct synth_trace_event *)iter->ent;
+	se = container_of(event, struct synth_event, call.event);
+
+	trace_seq_printf(s, "%s: ", se->name);
+
+	for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
+		if (trace_seq_has_overflowed(s))
+			goto end;
+
+		fmt = synth_field_fmt(se->fields[i]->type);
+
+		/* parameter types */
+		if (tr->trace_flags & TRACE_ITER_VERBOSE)
+			trace_seq_printf(s, "%s ", fmt);
+
+		sprintf(print_fmt, "%%s=%s%%s", fmt);
+
+		/* parameter values */
+		if (se->fields[i]->is_string) {
+			trace_seq_printf(s, print_fmt, se->fields[i]->name,
+					 (char *)&entry->fields[n_u64],
+					 i == se->n_fields - 1 ? "" : " ");
+			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+		} else {
+			trace_seq_printf(s, print_fmt, se->fields[i]->name,
+					 entry->fields[n_u64],
+					 i == se->n_fields - 1 ? "" : " ");
+			n_u64++;
+		}
+	}
+end:
+	trace_seq_putc(s, '\n');
+
+	return trace_handle_return(s);
+}
+
+static struct trace_event_functions synth_event_funcs = {
+	.trace		= print_synth_event
+};
+
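+/*
+ * The probe registered on a synthetic event's tracepoint: pack the
+ * passed-in variable values into a synth_trace_event and commit it to
+ * the ring buffer.
+ */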
+static notrace void trace_event_raw_event_synth(void *__data,
+						u64 *var_ref_vals,
+						unsigned int var_ref_idx)
+{
+	struct trace_event_file *trace_file = __data;
+	struct synth_trace_event *entry;
+	struct trace_event_buffer fbuffer;
+	struct synth_event *event;
+	unsigned int i, n_u64;
+	int fields_size = 0;
+
+	event = trace_file->event_call->data;
+
+	if (trace_trigger_soft_disabled(trace_file))
+		return;
+
+	fields_size = event->n_u64 * sizeof(u64);
+
+	entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+					   sizeof(*entry) + fields_size);
+	if (!entry)
+		return;
+
+	for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+		if (event->fields[i]->is_string) {
+			char *str_val = (char *)var_ref_vals[var_ref_idx + i];
+			char *str_field = (char *)&entry->fields[n_u64];
+
+			strncpy(str_field, str_val, STR_VAR_LEN_MAX);
+			n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
+		} else {
+			entry->fields[n_u64] = var_ref_vals[var_ref_idx + i];
+			n_u64++;
+		}
+	}
+
+	trace_event_buffer_commit(&fbuffer);
+}
+
+static void free_synth_event_print_fmt(struct trace_event_call *call)
+{
+	if (call)
+		kfree(call->print_fmt);
+}
+
+static int __set_synth_event_print_fmt(struct synth_event *event,
+				       char *buf, int len)
+{
+	const char *fmt;
+	int pos = 0;
+	int i;
+
+	/* When len=0, we just calculate the needed length */
+#define LEN_OR_ZERO (len ? len - pos : 0)
+
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
+	for (i = 0; i < event->n_fields; i++) {
+		fmt = synth_field_fmt(event->fields[i]->type);
+		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
+				event->fields[i]->name, fmt,
+				i == event->n_fields - 1 ? "" : ", ");
+	}
+	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
+
+	for (i = 0; i < event->n_fields; i++) {
+		pos += snprintf(buf + pos, LEN_OR_ZERO,
+				", REC->%s", event->fields[i]->name);
+	}
+
+#undef LEN_OR_ZERO
+
+	/* return the length of print_fmt */
+	return pos;
+}
+
+static int set_synth_event_print_fmt(struct trace_event_call *call)
+{
+	struct synth_event *event = call->data;
+	char *print_fmt;
+	int len;
+
+	/* First: called with 0 length to calculate the needed length */
+	len = __set_synth_event_print_fmt(event, NULL, 0);
+
+	print_fmt = kmalloc(len + 1, GFP_KERNEL);
+	if (!print_fmt)
+		return -ENOMEM;
+
+	/* Second: actually write the @print_fmt */
+	__set_synth_event_print_fmt(event, print_fmt, len + 1);
+	call->print_fmt = print_fmt;
+
+	return 0;
+}
+
+static void free_synth_field(struct synth_field *field)
+{
+	kfree(field->type);
+	kfree(field->name);
+	kfree(field);
+}
+
+static struct synth_field *parse_synth_field(char *field_type,
+					     char *field_name)
+{
+	struct synth_field *field;
+	int len, ret = 0;
+	char *array;
+
+	if (field_type[0] == ';')
+		field_type++;
+
+	len = strlen(field_name);
+	if (field_name[len - 1] == ';')
+		field_name[len - 1] = '\0';
+
+	field = kzalloc(sizeof(*field), GFP_KERNEL);
+	if (!field)
+		return ERR_PTR(-ENOMEM);
+
+	len = strlen(field_type) + 1;
+	array = strchr(field_name, '[');
+	if (array)
+		len += strlen(array);
+	field->type = kzalloc(len, GFP_KERNEL);
+	if (!field->type) {
+		ret = -ENOMEM;
+		goto free;
+	}
+	strcat(field->type, field_type);
+	if (array) {
+		strcat(field->type, array);
+		*array = '\0';
+	}
+
+	field->size = synth_field_size(field->type);
+	if (!field->size) {
+		ret = -EINVAL;
+		goto free;
+	}
+
+	if (synth_field_is_string(field->type))
+		field->is_string = true;
+
+	field->is_signed = synth_field_signed(field->type);
+
+	field->name = kstrdup(field_name, GFP_KERNEL);
+	if (!field->name) {
+		ret = -ENOMEM;
+		goto free;
+	}
+ out:
+	return field;
+ free:
+	free_synth_field(field);
+	field = ERR_PTR(ret);
+	goto out;
+}
+
+static void free_synth_tracepoint(struct tracepoint *tp)
+{
+	if (!tp)
+		return;
+
+	kfree(tp->name);
+	kfree(tp);
+}
+
+static struct tracepoint *alloc_synth_tracepoint(char *name)
+{
+	struct tracepoint *tp;
+	int ret = 0;
+
+	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+	if (!tp) {
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	tp->name = kstrdup(name, GFP_KERNEL);
+	if (!tp->name) {
+		ret = -ENOMEM;
+		goto free;
+	}
+
+	tp->dynamic = true;
+
+	return tp;
+ free:
+	free_synth_tracepoint(tp);
+
+	return ERR_PTR(ret);
+}
+
+typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
+				    unsigned int var_ref_idx);
+
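+/*
+ * Dynamic equivalent of a statically generated trace_<event>() call:
+ * if the tracepoint is enabled, invoke each registered probe with the
+ * collected variable values.
+ */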
+static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
+			       unsigned int var_ref_idx)
+{
+	struct tracepoint *tp = event->tp;
+
+	if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
+		struct tracepoint_func *probe_func_ptr;
+		synth_probe_func_t probe_func;
+		void *__data;
+
+		if (!(cpu_online(raw_smp_processor_id())))
+			return;
+
+		probe_func_ptr = rcu_dereference_sched((tp)->funcs);
+		if (probe_func_ptr) {
+			do {
+				probe_func = (probe_func_ptr)->func;
+				__data = (probe_func_ptr)->data;
+				probe_func(__data, var_ref_vals, var_ref_idx);
+			} while ((++probe_func_ptr)->func);
+		}
+	}
+}
+
+static struct synth_event *find_synth_event(const char *name)
+{
+	struct synth_event *event;
+
+	list_for_each_entry(event, &synth_event_list, list) {
+		if (strcmp(event->name, name) == 0)
+			return event;
+	}
+
+	return NULL;
+}
+
+static int register_synth_event(struct synth_event *event)
+{
+	struct trace_event_call *call = &event->call;
+	int ret = 0;
+
+	event->call.class = &event->class;
+	event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
+	if (!event->class.system) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	event->tp = alloc_synth_tracepoint(event->name);
+	if (IS_ERR(event->tp)) {
+		ret = PTR_ERR(event->tp);
+		event->tp = NULL;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&call->class->fields);
+	call->event.funcs = &synth_event_funcs;
+	call->class->define_fields = synth_event_define_fields;
+
+	ret = register_trace_event(&call->event);
+	if (!ret) {
+		ret = -ENODEV;
+		goto out;
+	}
+	call->flags = TRACE_EVENT_FL_TRACEPOINT;
+	call->class->reg = trace_event_reg;
+	call->class->probe = trace_event_raw_event_synth;
+	call->data = event;
+	call->tp = event->tp;
+
+	mutex_unlock(&synth_event_mutex);
+	ret = trace_add_event_call(call);
+	mutex_lock(&synth_event_mutex);
+	if (ret) {
+		pr_warn("Failed to register synthetic event: %s\n",
+			trace_event_name(call));
+		goto err;
+	}
+
+	ret = set_synth_event_print_fmt(call);
+	if (ret < 0) {
+		mutex_unlock(&synth_event_mutex);
+		trace_remove_event_call(call);
+		mutex_lock(&synth_event_mutex);
+		goto err;
+	}
+ out:
+	return ret;
+ err:
+	unregister_trace_event(&call->event);
+	goto out;
+}
+
+static int unregister_synth_event(struct synth_event *event)
+{
+	struct trace_event_call *call = &event->call;
+	int ret;
+
+	mutex_unlock(&synth_event_mutex);
+	ret = trace_remove_event_call(call);
+	mutex_lock(&synth_event_mutex);
+	if (ret) {
+		pr_warn("Failed to remove synthetic event: %s\n",
+			trace_event_name(call));
+		free_synth_event_print_fmt(call);
+		unregister_trace_event(&call->event);
+	}
+
+	return ret;
+}
+
+static void remove_synth_event(struct synth_event *event)
+{
+	unregister_synth_event(event);
+	list_del(&event->list);
+}
+
+static int add_synth_event(struct synth_event *event)
+{
+	int ret;
+
+	ret = register_synth_event(event);
+	if (ret)
+		return ret;
+
+	list_add(&event->list, &synth_event_list);
+
+	return 0;
+}
+
+static void free_synth_event(struct synth_event *event)
+{
+	unsigned int i;
+
+	if (!event)
+		return;
+
+	for (i = 0; i < event->n_fields; i++)
+		free_synth_field(event->fields[i]);
+
+	kfree(event->fields);
+	kfree(event->name);
+	kfree(event->class.system);
+	free_synth_tracepoint(event->tp);
+	free_synth_event_print_fmt(&event->call);
+	kfree(event);
+}
+
+static struct synth_event *alloc_synth_event(char *event_name, int n_fields,
+					     struct synth_field **fields)
+{
+	struct synth_event *event;
+	unsigned int i;
+
+	event = kzalloc(sizeof(*event), GFP_KERNEL);
+	if (!event) {
+		event = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	event->name = kstrdup(event_name, GFP_KERNEL);
+	if (!event->name) {
+		kfree(event);
+		event = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
+	if (!event->fields) {
+		free_synth_event(event);
+		event = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	for (i = 0; i < n_fields; i++)
+		event->fields[i] = fields[i];
+
+	event->n_fields = n_fields;
+ out:
+	return event;
+}
+
+static int create_synth_event(int argc, char **argv)
+{
+	struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
+	struct synth_event *event = NULL;
+	bool delete_event = false;
+	int i, n_fields = 0, ret = 0;
+	char *name;
+
+	mutex_lock(&synth_event_mutex);
+
+	/*
+	 * Argument syntax:
+	 *  - Add synthetic event: <event_name> field[;field] ...
+	 *  - Remove synthetic event: !<event_name> field[;field] ...
+	 *      where 'field' = type field_name
+	 */
+	if (argc < 1) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	name = argv[0];
+	if (name[0] == '!') {
+		delete_event = true;
+		name++;
+	}
+
+	event = find_synth_event(name);
+	if (event) {
+		if (delete_event) {
+			if (event->ref) {
+				ret = -EBUSY;
+				goto out;
+			}
+			remove_synth_event(event);
+			free_synth_event(event);
+			goto out;
+		}
+		ret = -EEXIST;
+		goto out;
+	} else if (delete_event) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (argc < 2) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (i = 1; i < argc - 1; i++) {
+		if (strcmp(argv[i], ";") == 0)
+			continue;
+		if (n_fields == SYNTH_FIELDS_MAX) {
+			ret = -EINVAL;
+			goto err;
+		}
+
+		field = parse_synth_field(argv[i], argv[i + 1]);
+		if (IS_ERR(field)) {
+			ret = PTR_ERR(field);
+			goto err;
+		}
+		fields[n_fields] = field;
+		i++; n_fields++;
+	}
+
+	if (i < argc) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	event = alloc_synth_event(name, n_fields, fields);
+	if (IS_ERR(event)) {
+		ret = PTR_ERR(event);
+		event = NULL;
+		goto err;
+	}
+
+	add_synth_event(event);
+ out:
+	mutex_unlock(&synth_event_mutex);
+
+	return ret;
+ err:
+	for (i = 0; i < n_fields; i++)
+		free_synth_field(fields[i]);
+	free_synth_event(event);
+
+	goto out;
+}
+
+static int release_all_synth_events(void)
+{
+	struct synth_event *event, *e;
+	int ret = 0;
+
+	mutex_lock(&synth_event_mutex);
+
+	list_for_each_entry(event, &synth_event_list, list) {
+		if (event->ref) {
+			ret = -EBUSY;
+			goto out;
+		}
+	}
+
+	list_for_each_entry_safe(event, e, &synth_event_list, list) {
+		remove_synth_event(event);
+		free_synth_event(event);
+	}
+ out:
+	mutex_unlock(&synth_event_mutex);
+
+	return ret;
+}
+
+
+static void *synth_events_seq_start(struct seq_file *m, loff_t *pos)
+{
+	mutex_lock(&synth_event_mutex);
+
+	return seq_list_start(&synth_event_list, *pos);
+}
+
+static void *synth_events_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	return seq_list_next(v, &synth_event_list, pos);
+}
+
+static void synth_events_seq_stop(struct seq_file *m, void *v)
+{
+	mutex_unlock(&synth_event_mutex);
+}
+
+static int synth_events_seq_show(struct seq_file *m, void *v)
+{
+	struct synth_field *field;
+	struct synth_event *event = v;
+	unsigned int i;
+
+	seq_printf(m, "%s\t", event->name);
+
+	for (i = 0; i < event->n_fields; i++) {
+		field = event->fields[i];
+
+		/* parameter values */
+		seq_printf(m, "%s %s%s", field->type, field->name,
+			   i == event->n_fields - 1 ? "" : "; ");
+	}
+
+	seq_putc(m, '\n');
+
+	return 0;
+}
+
+static const struct seq_operations synth_events_seq_op = {
+	.start  = synth_events_seq_start,
+	.next   = synth_events_seq_next,
+	.stop   = synth_events_seq_stop,
+	.show   = synth_events_seq_show
+};
+
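+/* Opening with O_TRUNC (e.g. 'echo > synthetic_events') removes all events */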
+static int synth_events_open(struct inode *inode, struct file *file)
+{
+	int ret;
+
+	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+		ret = release_all_synth_events();
+		if (ret < 0)
+			return ret;
+	}
+
+	return seq_open(file, &synth_events_seq_op);
+}
+
+static ssize_t synth_events_write(struct file *file,
+				  const char __user *buffer,
+				  size_t count, loff_t *ppos)
+{
+	return trace_parse_run_command(file, buffer, count, ppos,
+				       create_synth_event);
+}
+
+static const struct file_operations synth_events_fops = {
+	.open           = synth_events_open,
+	.write		= synth_events_write,
+	.read           = seq_read,
+	.llseek         = seq_lseek,
+	.release        = seq_release,
+};
+
 static u64 hist_field_timestamp(struct hist_field *hist_field,
 				struct tracing_map_elt *elt,
 				struct ring_buffer_event *rbe,
@@ -2933,6 +3751,8 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
 	struct hist_trigger_attrs *attrs;
 	struct event_trigger_ops *trigger_ops;
 	struct hist_trigger_data *hist_data;
+	struct synth_event *se;
+	const char *se_name;
 	bool remove = false;
 	char *trigger;
 	int ret = 0;
@@ -2991,6 +3811,14 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
 		}
 
 		cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
+
+		mutex_lock(&synth_event_mutex);
+		se_name = trace_event_name(file->event_call);
+		se = find_synth_event(se_name);
+		if (se)
+			se->ref--;
+		mutex_unlock(&synth_event_mutex);
+
 		ret = 0;
 		goto out_free;
 	}
@@ -3008,6 +3836,13 @@ static int event_hist_trigger_func(struct event_command *cmd_ops,
 	} else if (ret < 0)
 		goto out_free;
 
+	mutex_lock(&synth_event_mutex);
+	se_name = trace_event_name(file->event_call);
+	se = find_synth_event(se_name);
+	if (se)
+		se->ref++;
+	mutex_unlock(&synth_event_mutex);
+
 	if (get_named_trigger_data(trigger_data))
 		goto enable;
 
@@ -3198,3 +4033,31 @@ __init int register_trigger_hist_enable_disable_cmds(void)
 
 	return ret;
 }
+
+static __init int trace_events_hist_init(void)
+{
+	struct dentry *entry = NULL;
+	struct dentry *d_tracer;
+	int err = 0;
+
+	d_tracer = tracing_init_dentry();
+	if (IS_ERR(d_tracer)) {
+		err = PTR_ERR(d_tracer);
+		goto err;
+	}
+
+	entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
+				    NULL, &synth_events_fops);
+	if (!entry) {
+		err = -ENODEV;
+		goto err;
+	}
+
+	return err;
+ err:
+	pr_warn("Could not create tracefs 'synthetic_events' entry\n");
+
+	return err;
+}
+
+fs_initcall(trace_events_hist_init);
-- 
1.9.3
