* [for-next][PATCH 0/4] tracing: Updates for 5.10
@ 2020-09-29  1:11 Steven Rostedt
From: Steven Rostedt @ 2020-09-29  1:11 UTC
  To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton

  git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace.git
for-next

Head SHA1: fdb46faeab2f3fa2b43a55059b33b8f98b2e1442


Masami Hiramatsu (1):
      tracing/boot: Initialize per-instance event list in early boot

Steven Rostedt (VMware) (3):
      tracepoints: Add helper to test if tracepoint is enabled in a header
      mm/page_ref: Convert the open coded tracepoint enabled to the new helper
      x86: Use tracepoint_enabled() for msr tracepoints instead of open coding it

----
 Documentation/trace/tracepoints.rst | 27 ++++++++++++++++++++++++
 arch/x86/include/asm/msr.h          | 20 ++++++++----------
 include/linux/page_ref.h            | 42 ++++++++++++++++++-------------------
 include/linux/tracepoint-defs.h     | 34 ++++++++++++++++++++++++++++++
 kernel/trace/trace.c                |  3 ++-
 kernel/trace/trace.h                |  1 +
 kernel/trace/trace_events.c         | 30 +++++++++++++-------------
 7 files changed, 110 insertions(+), 47 deletions(-)


* [for-next][PATCH 1/4] tracing/boot: Initialize per-instance event list in early boot
From: Steven Rostedt @ 2020-09-29  1:11 UTC
  To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton, Masami Hiramatsu

From: Masami Hiramatsu <mhiramat@kernel.org>

Initialize the per-instance event list early in boot (before the
instance directory is created on tracefs). This fixes boot-time tracing
so that it correctly handles the boot-time per-instance settings.

Link: https://lkml.kernel.org/r/160096560826.182763.17110991546046128881.stgit@devnote2

Fixes: 4114fbfd02f1 ("tracing: Enable creating new instance early boot")
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 kernel/trace/trace.c        |  3 ++-
 kernel/trace/trace.h        |  1 +
 kernel/trace/trace_events.c | 30 ++++++++++++++++--------------
 3 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6211a13b3327..3f2533adae72 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8700,7 +8700,8 @@ static struct trace_array *trace_array_create(const char *name)
 		ret = trace_array_create_dir(tr);
 		if (ret)
 			goto out_free_tr;
-	}
+	} else
+		__trace_early_add_events(tr);
 
 	list_add(&tr->list, &ftrace_trace_arrays);
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 525434145eea..5b0e797cacdd 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1658,6 +1658,7 @@ extern void trace_event_enable_tgid_record(bool enable);
 extern int event_trace_init(void);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
+extern void __trace_early_add_events(struct trace_array *tr);
 
 extern struct trace_event_file *__find_event_file(struct trace_array *tr,
 						  const char *system,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 42c0e7df6e70..851ab37058dd 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -3131,14 +3131,13 @@ static inline int register_event_cmds(void) { return 0; }
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /*
- * The top level array has already had its trace_event_file
- * descriptors created in order to allow for early events to
- * be recorded. This function is called after the tracefs has been
- * initialized, and we now have to create the files associated
- * to the events.
+ * The top level array and trace arrays created by boot-time tracing
+ * have already had its trace_event_file descriptors created in order
+ * to allow for early events to be recorded.
+ * This function is called after the tracefs has been initialized,
+ * and we now have to create the files associated to the events.
  */
-static __init void
-__trace_early_add_event_dirs(struct trace_array *tr)
+static void __trace_early_add_event_dirs(struct trace_array *tr)
 {
 	struct trace_event_file *file;
 	int ret;
@@ -3153,13 +3152,12 @@ __trace_early_add_event_dirs(struct trace_array *tr)
 }
 
 /*
- * For early boot up, the top trace array requires to have
- * a list of events that can be enabled. This must be done before
- * the filesystem is set up in order to allow events to be traced
- * early.
+ * For early boot up, the top trace array and the trace arrays created
+ * by boot-time tracing require to have a list of events that can be
+ * enabled. This must be done before the filesystem is set up in order
+ * to allow events to be traced early.
  */
-static __init void
-__trace_early_add_events(struct trace_array *tr)
+void __trace_early_add_events(struct trace_array *tr)
 {
 	struct trace_event_call *call;
 	int ret;
@@ -3290,7 +3288,11 @@ int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
 		goto out;
 
 	down_write(&trace_event_sem);
-	__trace_add_event_dirs(tr);
+	/* If tr already has the event list, it is initialized in early boot. */
+	if (unlikely(!list_empty(&tr->events)))
+		__trace_early_add_event_dirs(tr);
+	else
+		__trace_add_event_dirs(tr);
 	up_write(&trace_event_sem);
 
  out:
-- 
2.28.0




* [for-next][PATCH 2/4] tracepoints: Add helper to test if tracepoint is enabled in a header
From: Steven Rostedt @ 2020-09-29  1:11 UTC
  To: linux-kernel; +Cc: Ingo Molnar, Andrew Morton

From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>

Tracepoints are discouraged in headers: they can cause side effects if
other tracepoints are in headers, and they bloat the kernel because the
trace_<tracepoint>() function is not a small inline. The common
workaround is to have the header call a wrapper function in a C file
that then calls the tracepoint. But as function calls add overhead, the
wrapper should only be called when the tracepoint in question is
enabled. To avoid this overhead, a static_branch can be used so that
the tracepoint wrapper only gets called when the tracepoint is enabled.

Add a tracepoint_enabled(tp) macro that is passed the name of the
tracepoint. It becomes a static_branch that is enabled when the
tracepoint is enabled and is a nop when the tracepoint is disabled.
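
A condensed sketch of the resulting pattern, using the hypothetical
foo_bar tracepoint from the documentation added below (the argument
type here is only illustrative):

	/* In a C file, where the tracepoint itself is visible: */
	void do_trace_foo_bar_wrapper(int arg)
	{
		trace_foo_bar(arg);
	}

	/* In the header, only tracepoint-defs.h is required: */
	DECLARE_TRACEPOINT(foo_bar);

	static inline void some_inline_function(int arg)
	{
		/* static_branch: the wrapper call is a nop unless the tracepoint is enabled */
		if (tracepoint_enabled(foo_bar))
			do_trace_foo_bar_wrapper(arg);
	}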

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 Documentation/trace/tracepoints.rst | 27 +++++++++++++++++++++++
 include/linux/tracepoint-defs.h     | 34 +++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)

diff --git a/Documentation/trace/tracepoints.rst b/Documentation/trace/tracepoints.rst
index 6e3ce3bf3593..0cb8d9ca3d60 100644
--- a/Documentation/trace/tracepoints.rst
+++ b/Documentation/trace/tracepoints.rst
@@ -146,3 +146,30 @@ with jump labels and avoid conditional branches.
       define tracepoints. Check http://lwn.net/Articles/379903,
       http://lwn.net/Articles/381064 and http://lwn.net/Articles/383362
       for a series of articles with more details.
+
+If you require calling a tracepoint from a header file, it is not
+recommended to call one directly or to use the trace_<tracepoint>_enabled()
+function call, as tracepoints in header files can have side effects if a
+header is included from a file that has CREATE_TRACE_POINTS set, as
+well as the trace_<tracepoint>() is not that small of an inline
+and can bloat the kernel if used by other inlined functions. Instead,
+include tracepoint-defs.h and use tracepoint_enabled().
+
+In a C file::
+
+	void do_trace_foo_bar_wrapper(args)
+	{
+		trace_foo_bar(args);
+	}
+
+In the header file::
+
+	DECLARE_TRACEPOINT(foo_bar);
+
+	static inline void some_inline_function()
+	{
+		[..]
+		if (tracepoint_enabled(foo_bar))
+			do_trace_foo_bar_wrapper(args);
+		[..]
+	}
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index b29950a19205..60625973faaf 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -48,4 +48,38 @@ struct bpf_raw_event_map {
 	u32			writable_size;
 } __aligned(32);
 
+/*
+ * If a tracepoint needs to be called from a header file, it is not
+ * recommended to call it directly, as tracepoints in header files
+ * may cause side-effects and bloat the kernel. Instead, use
+ * tracepoint_enabled() to test if the tracepoint is enabled, then if
+ * it is, call a wrapper function defined in a C file that will then
+ * call the tracepoint.
+ *
+ * For "trace_foo_bar()", you would need to create a wrapper function
+ * in a C file to call trace_foo_bar():
+ *   void do_trace_foo_bar(args) { trace_foo_bar(args); }
+ * Then in the header file, declare the tracepoint:
+ *   DECLARE_TRACEPOINT(foo_bar);
+ * And call your wrapper:
+ *   static inline void some_inlined_function() {
+ *            [..]
+ *            if (tracepoint_enabled(foo_bar))
+ *                    do_trace_foo_bar(args);
+ *            [..]
+ *   }
+ *
+ * Note: tracepoint_enabled(foo_bar) is equivalent to trace_foo_bar_enabled()
+ *   but is safe to have in headers, where trace_foo_bar_enabled() is not.
+ */
+#define DECLARE_TRACEPOINT(tp) \
+	extern struct tracepoint __tracepoint_##tp
+
+#ifdef CONFIG_TRACEPOINTS
+# define tracepoint_enabled(tp) \
+	static_key_false(&(__tracepoint_##tp).key)
+#else
+# define tracepoint_enabled(tracepoint) false
+#endif
+
 #endif
-- 
2.28.0




* [for-next][PATCH 3/4] mm/page_ref: Convert the open coded tracepoint enabled to the new helper
From: Steven Rostedt @ 2020-09-29  1:11 UTC
  To: linux-kernel
  Cc: Ingo Molnar, Andrew Morton, Joonsoo Kim, Michal Nazarewicz,
	Vlastimil Babka, Minchan Kim, Mel Gorman, Kirill A. Shutemov,
	Sergey Senozhatsky, Arnd Bergmann

From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>

As more use cases of checking whether a tracepoint is enabled from a
header are coming to fruition, a helper macro, tracepoint_enabled(),
has been added. It tests whether a tracepoint is enabled and can be
used with minimal header requirements (avoiding "include hell").
Convert the page_ref logic over to the new helper macro.
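
A condensed before/after of what the conversion below amounts to,
using page_ref_mod as the example tracepoint:

	/* Before: the header pokes at tracepoint internals directly */
	extern struct tracepoint __tracepoint_page_ref_mod;
	#define page_ref_tracepoint_active(t) static_key_false(&(t).key)

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);

	/* After: the internals stay hidden behind tracepoint-defs.h */
	DECLARE_TRACEPOINT(page_ref_mod);
	#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, nr);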

Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 include/linux/page_ref.h | 42 ++++++++++++++++++++--------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index d27701199a4d..f3318f34fc54 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -7,13 +7,13 @@
 #include <linux/page-flags.h>
 #include <linux/tracepoint-defs.h>
 
-extern struct tracepoint __tracepoint_page_ref_set;
-extern struct tracepoint __tracepoint_page_ref_mod;
-extern struct tracepoint __tracepoint_page_ref_mod_and_test;
-extern struct tracepoint __tracepoint_page_ref_mod_and_return;
-extern struct tracepoint __tracepoint_page_ref_mod_unless;
-extern struct tracepoint __tracepoint_page_ref_freeze;
-extern struct tracepoint __tracepoint_page_ref_unfreeze;
+DECLARE_TRACEPOINT(page_ref_set);
+DECLARE_TRACEPOINT(page_ref_mod);
+DECLARE_TRACEPOINT(page_ref_mod_and_test);
+DECLARE_TRACEPOINT(page_ref_mod_and_return);
+DECLARE_TRACEPOINT(page_ref_mod_unless);
+DECLARE_TRACEPOINT(page_ref_freeze);
+DECLARE_TRACEPOINT(page_ref_unfreeze);
 
 #ifdef CONFIG_DEBUG_PAGE_REF
 
@@ -24,7 +24,7 @@ extern struct tracepoint __tracepoint_page_ref_unfreeze;
  *
  * See trace_##name##_enabled(void) in include/linux/tracepoint.h
  */
-#define page_ref_tracepoint_active(t) static_key_false(&(t).key)
+#define page_ref_tracepoint_active(t) tracepoint_enabled(t)
 
 extern void __page_ref_set(struct page *page, int v);
 extern void __page_ref_mod(struct page *page, int v);
@@ -75,7 +75,7 @@ static inline int page_count(struct page *page)
 static inline void set_page_count(struct page *page, int v)
 {
 	atomic_set(&page->_refcount, v);
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
+	if (page_ref_tracepoint_active(page_ref_set))
 		__page_ref_set(page, v);
 }
 
@@ -91,14 +91,14 @@ static inline void init_page_count(struct page *page)
 static inline void page_ref_add(struct page *page, int nr)
 {
 	atomic_add(nr, &page->_refcount);
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+	if (page_ref_tracepoint_active(page_ref_mod))
 		__page_ref_mod(page, nr);
 }
 
 static inline void page_ref_sub(struct page *page, int nr)
 {
 	atomic_sub(nr, &page->_refcount);
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+	if (page_ref_tracepoint_active(page_ref_mod))
 		__page_ref_mod(page, -nr);
 }
 
@@ -106,7 +106,7 @@ static inline int page_ref_sub_return(struct page *page, int nr)
 {
 	int ret = atomic_sub_return(nr, &page->_refcount);
 
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+	if (page_ref_tracepoint_active(page_ref_mod_and_return))
 		__page_ref_mod_and_return(page, -nr, ret);
 	return ret;
 }
@@ -114,14 +114,14 @@ static inline int page_ref_sub_return(struct page *page, int nr)
 static inline void page_ref_inc(struct page *page)
 {
 	atomic_inc(&page->_refcount);
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+	if (page_ref_tracepoint_active(page_ref_mod))
 		__page_ref_mod(page, 1);
 }
 
 static inline void page_ref_dec(struct page *page)
 {
 	atomic_dec(&page->_refcount);
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
+	if (page_ref_tracepoint_active(page_ref_mod))
 		__page_ref_mod(page, -1);
 }
 
@@ -129,7 +129,7 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
 {
 	int ret = atomic_sub_and_test(nr, &page->_refcount);
 
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+	if (page_ref_tracepoint_active(page_ref_mod_and_test))
 		__page_ref_mod_and_test(page, -nr, ret);
 	return ret;
 }
@@ -138,7 +138,7 @@ static inline int page_ref_inc_return(struct page *page)
 {
 	int ret = atomic_inc_return(&page->_refcount);
 
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+	if (page_ref_tracepoint_active(page_ref_mod_and_return))
 		__page_ref_mod_and_return(page, 1, ret);
 	return ret;
 }
@@ -147,7 +147,7 @@ static inline int page_ref_dec_and_test(struct page *page)
 {
 	int ret = atomic_dec_and_test(&page->_refcount);
 
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
+	if (page_ref_tracepoint_active(page_ref_mod_and_test))
 		__page_ref_mod_and_test(page, -1, ret);
 	return ret;
 }
@@ -156,7 +156,7 @@ static inline int page_ref_dec_return(struct page *page)
 {
 	int ret = atomic_dec_return(&page->_refcount);
 
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
+	if (page_ref_tracepoint_active(page_ref_mod_and_return))
 		__page_ref_mod_and_return(page, -1, ret);
 	return ret;
 }
@@ -165,7 +165,7 @@ static inline int page_ref_add_unless(struct page *page, int nr, int u)
 {
 	int ret = atomic_add_unless(&page->_refcount, nr, u);
 
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
+	if (page_ref_tracepoint_active(page_ref_mod_unless))
 		__page_ref_mod_unless(page, nr, ret);
 	return ret;
 }
@@ -174,7 +174,7 @@ static inline int page_ref_freeze(struct page *page, int count)
 {
 	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
 
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
+	if (page_ref_tracepoint_active(page_ref_freeze))
 		__page_ref_freeze(page, count, ret);
 	return ret;
 }
@@ -185,7 +185,7 @@ static inline void page_ref_unfreeze(struct page *page, int count)
 	VM_BUG_ON(count == 0);
 
 	atomic_set_release(&page->_refcount, count);
-	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
+	if (page_ref_tracepoint_active(page_ref_unfreeze))
 		__page_ref_unfreeze(page, count);
 }
 
-- 
2.28.0




* [for-next][PATCH 4/4] x86: Use tracepoint_enabled() for msr tracepoints instead of open coding it
From: Steven Rostedt @ 2020-09-29  1:12 UTC
  To: linux-kernel
  Cc: Ingo Molnar, Andrew Morton, Andi Kleen, Peter Zijlstra, Thomas Gleixner

From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>

Commit 7f47d8cc039f ("x86, tracing, perf: Add trace point for MSR
accesses") added tracing of MSR reads and writes. But because of the
complexity of having tracepoints in headers, even more so in a core
header like msr.h, not to mention the bloat a tracepoint adds to inline
functions, a helper function needs to be called from the header
instead.

Use the new tracepoint_enabled() macro from tracepoint-defs.h to test
whether the tracepoint is active before calling the helper function,
instead of open coding the same logic, which requires knowing the
internals of a tracepoint.
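
In short, the open-coded check and the helper are equivalent; e.g. for
the read_msr tracepoint the conversion below amounts to:

	/* Open coded: the header must know the tracepoint layout */
	extern struct tracepoint __tracepoint_read_msr;
	#define msr_tracepoint_active(t) static_key_false(&(t).key)

	if (msr_tracepoint_active(__tracepoint_read_msr))
		do_trace_read_msr(msr, val, 0);

	/* With the helper from tracepoint-defs.h */
	DECLARE_TRACEPOINT(read_msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);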

Cc: Andi Kleen <ak@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
---
 arch/x86/include/asm/msr.h | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 86f20d520a07..0b4920a7238e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -60,22 +60,20 @@ struct saved_msrs {
 #define EAX_EDX_RET(val, low, high)	"=A" (val)
 #endif
 
-#ifdef CONFIG_TRACEPOINTS
 /*
  * Be very careful with includes. This header is prone to include loops.
  */
 #include <asm/atomic.h>
 #include <linux/tracepoint-defs.h>
 
-extern struct tracepoint __tracepoint_read_msr;
-extern struct tracepoint __tracepoint_write_msr;
-extern struct tracepoint __tracepoint_rdpmc;
-#define msr_tracepoint_active(t) static_key_false(&(t).key)
+#ifdef CONFIG_TRACEPOINTS
+DECLARE_TRACEPOINT(read_msr);
+DECLARE_TRACEPOINT(write_msr);
+DECLARE_TRACEPOINT(rdpmc);
 extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
 extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
 extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
 #else
-#define msr_tracepoint_active(t) false
 static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
 static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
 static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
@@ -128,7 +126,7 @@ static inline unsigned long long native_read_msr(unsigned int msr)
 
 	val = __rdmsr(msr);
 
-	if (msr_tracepoint_active(__tracepoint_read_msr))
+	if (tracepoint_enabled(read_msr))
 		do_trace_read_msr(msr, val, 0);
 
 	return val;
@@ -150,7 +148,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 		     _ASM_EXTABLE(2b, 3b)
 		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
 		     : "c" (msr), [fault] "i" (-EIO));
-	if (msr_tracepoint_active(__tracepoint_read_msr))
+	if (tracepoint_enabled(read_msr))
 		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
 	return EAX_EDX_VAL(val, low, high);
 }
@@ -161,7 +159,7 @@ native_write_msr(unsigned int msr, u32 low, u32 high)
 {
 	__wrmsr(msr, low, high);
 
-	if (msr_tracepoint_active(__tracepoint_write_msr))
+	if (tracepoint_enabled(write_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
@@ -181,7 +179,7 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high)
 		     : "c" (msr), "0" (low), "d" (high),
 		       [fault] "i" (-EIO)
 		     : "memory");
-	if (msr_tracepoint_active(__tracepoint_write_msr))
+	if (tracepoint_enabled(write_msr))
 		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
 	return err;
 }
@@ -248,7 +246,7 @@ static inline unsigned long long native_read_pmc(int counter)
 	DECLARE_ARGS(val, low, high);
 
 	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
-	if (msr_tracepoint_active(__tracepoint_rdpmc))
+	if (tracepoint_enabled(rdpmc))
 		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
 	return EAX_EDX_VAL(val, low, high);
 }
-- 
2.28.0



