linux-kernel.vger.kernel.org archive mirror
* Re: [PATCH] trace: circular dependency for trace_types_lock and event_mutex
       [not found] <0101016ecc80a1fa-27cc3e87-c16d-4cd6-b3c6-9c893010fdef-000000@us-west-2.amazonses.com>
@ 2019-12-03 16:27 ` Steven Rostedt
  2019-12-04  4:44   ` Prateek Sood
  0 siblings, 1 reply; 5+ messages in thread
From: Steven Rostedt @ 2019-12-03 16:27 UTC (permalink / raw)
  To: Prateek Sood; +Cc: mingo, linux-kernel, kaushalk


Hi Prateek,

Thanks for the patch. Note, I think a better subject is:

 tracing: Fix lock inversion caused by trace_event_enable_tgid_record()


On Tue, 3 Dec 2019 16:03:32 +0000
Prateek Sood <prsood@codeaurora.org> wrote:

>        Task T2                             Task T3
> trace_options_core_write()            subsystem_open()
> 
>  mutex_lock(trace_types_lock)           mutex_lock(event_mutex)
> 
>  set_tracer_flag()
> 
>    trace_event_enable_tgid_record()       mutex_lock(trace_types_lock)
> 
>     mutex_lock(event_mutex)
> 
> This gives a circular dependency deadlock between trace_types_lock and
> event_mutex. To fix this, invert the order in which trace_types_lock and
> event_mutex are taken in trace_options_core_write(). This keeps the lock
> ordering consistent.
> 
> Change-Id: I3752a77c59555852c2241f7775ec4a1960c15c15

What's this Change-Id? Something internal?

I'll be adding:

 Link: http://lkml.kernel.org/r/0101016ecc80a1fa-27cc3e87-c16d-4cd6-b3c6-9c893010fdef-000000@us-west-2.amazonses.com

I'll test this out, and probably even add a stable tag.

-- Steve



> Signed-off-by: Prateek Sood <prsood@codeaurora.org>
> ---
>  kernel/trace/trace.c              | 6 ++++++
>  kernel/trace/trace_events.c       | 8 ++++----
>  kernel/trace/trace_irqsoff.c      | 4 ++++
>  kernel/trace/trace_sched_wakeup.c | 4 ++++
>  4 files changed, 18 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index 6a0ee91..2c09aa1 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -4590,6 +4590,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
>  
>  int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
>  {
> +	lockdep_assert_held(&event_mutex);
> +
>  	/* do nothing if flag is already set */
>  	if (!!(tr->trace_flags & mask) == !!enabled)
>  		return 0;
> @@ -4657,6 +4659,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
>  
>  	cmp += len;
>  
> +	mutex_lock(&event_mutex);
>  	mutex_lock(&trace_types_lock);
>  
>  	ret = match_string(trace_options, -1, cmp);
> @@ -4667,6 +4670,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
>  		ret = set_tracer_flag(tr, 1 << ret, !neg);
>  
>  	mutex_unlock(&trace_types_lock);
> +	mutex_unlock(&event_mutex);
>  
>  	/*
>  	 * If the first trailing whitespace is replaced with '\0' by strstrip,
> @@ -7972,9 +7976,11 @@ static void get_tr_index(void *data, struct trace_array **ptr,
>  	if (val != 0 && val != 1)
>  		return -EINVAL;
>  
> +	mutex_lock(&event_mutex);
>  	mutex_lock(&trace_types_lock);
>  	ret = set_tracer_flag(tr, 1 << index, val);
>  	mutex_unlock(&trace_types_lock);
> +	mutex_unlock(&event_mutex);
>  
>  	if (ret < 0)
>  		return ret;
> diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
> index fba87d1..995061b 100644
> --- a/kernel/trace/trace_events.c
> +++ b/kernel/trace/trace_events.c
> @@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable)
>  	struct trace_event_file *file;
>  	struct trace_array *tr;
>  
> -	mutex_lock(&event_mutex);
> +	lockdep_assert_held(&event_mutex);
> +
>  	do_for_each_event_file(tr, file) {
>  
>  		if (!(file->flags & EVENT_FILE_FL_ENABLED))
> @@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable)
>  			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
>  		}
>  	} while_for_each_event_file();
> -	mutex_unlock(&event_mutex);
>  }
>  
>  void trace_event_enable_tgid_record(bool enable)
> @@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable)
>  	struct trace_event_file *file;
>  	struct trace_array *tr;
>  
> -	mutex_lock(&event_mutex);
> +	lockdep_assert_held(&event_mutex);
> +
>  	do_for_each_event_file(tr, file) {
>  		if (!(file->flags & EVENT_FILE_FL_ENABLED))
>  			continue;
> @@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable)
>  				  &file->flags);
>  		}
>  	} while_for_each_event_file();
> -	mutex_unlock(&event_mutex);
>  }
>  
>  static int __ftrace_event_enable_disable(struct trace_event_file *file,
> diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
> index a745b0c..887cdb5 100644
> --- a/kernel/trace/trace_irqsoff.c
> +++ b/kernel/trace/trace_irqsoff.c
> @@ -560,8 +560,10 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
>  	save_flags = tr->trace_flags;
>  
>  	/* non overwrite screws up the latency tracers */
> +	mutex_lock(&event_mutex);
>  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
>  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
> +	mutex_unlock(&event_mutex);
>  
>  	tr->max_latency = 0;
>  	irqsoff_trace = tr;
> @@ -586,8 +588,10 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
>  
>  	stop_irqsoff_tracer(tr, is_graph(tr));
>  
> +	mutex_lock(&event_mutex);
>  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
>  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
> +	mutex_unlock(&event_mutex);
>  	ftrace_reset_array_ops(tr);
>  
>  	irqsoff_busy = false;
> diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
> index 5e43b96..0bc67d1 100644
> --- a/kernel/trace/trace_sched_wakeup.c
> +++ b/kernel/trace/trace_sched_wakeup.c
> @@ -671,8 +671,10 @@ static int __wakeup_tracer_init(struct trace_array *tr)
>  	save_flags = tr->trace_flags;
>  
>  	/* non overwrite screws up the latency tracers */
> +	mutex_lock(&event_mutex);
>  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
>  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
> +	mutex_unlock(&event_mutex);
>  
>  	tr->max_latency = 0;
>  	wakeup_trace = tr;
> @@ -722,8 +724,10 @@ static void wakeup_tracer_reset(struct trace_array *tr)
>  	/* make sure we put back any tasks we are tracing */
>  	wakeup_reset(tr);
>  
> +	mutex_lock(&event_mutex);
>  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
>  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
> +	mutex_unlock(&event_mutex);
>  	ftrace_reset_array_ops(tr);
>  	wakeup_busy = false;
>  }
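
The patch quoted above does two related things: it makes trace_options_core_write()
and trace_set_options() take event_mutex before trace_types_lock, and it converts
trace_event_enable_cmd_record()/trace_event_enable_tgid_record() from taking
event_mutex themselves to asserting that the caller already holds it. Below is a
rough, kernel-style sketch of that "caller locks, callee asserts" pattern; the
names demo_mutex, demo_walk_files() and demo_set_flag() are made up for
illustration, and only DEFINE_MUTEX(), mutex_lock()/mutex_unlock() and
lockdep_assert_held() are real kernel primitives.

/*
 * Sketch of the "caller locks, callee asserts" pattern (illustrative names).
 */
#include <linux/mutex.h>
#include <linux/lockdep.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_mutex);

/*
 * The callee no longer takes the mutex itself; it documents the requirement
 * and, when lockdep is enabled, warns if a caller forgets to hold it.
 */
static void demo_walk_files(bool enable)
{
	lockdep_assert_held(&demo_mutex);
	/* ... iterate state protected by demo_mutex ... */
}

/*
 * The caller takes demo_mutex once, in a fixed order relative to any other
 * locks it needs, before calling into the helper.
 */
static void demo_set_flag(bool enable)
{
	mutex_lock(&demo_mutex);
	demo_walk_files(enable);
	mutex_unlock(&demo_mutex);
}

Pushing the locking out to the callers is what lets the option-write paths pick a
single, consistent acquisition order (event_mutex, then trace_types_lock),
matching subsystem_open().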



* Re: [PATCH] trace: circular dependency for trace_types_lock and event_mutex
  2019-12-03 16:27 ` [PATCH] trace: circular dependency for trace_types_lock and event_mutex Steven Rostedt
@ 2019-12-04  4:44   ` Prateek Sood
       [not found]     ` <1575435333-21626-1-git-send-email-prsood@codeaurora.org>
  0 siblings, 1 reply; 5+ messages in thread
From: Prateek Sood @ 2019-12-04  4:44 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: mingo, linux-kernel, kaushalk

On 12/3/19 9:57 PM, Steven Rostedt wrote:
> 
> Hi Prateek,
> 
> Thanks for the patch. Note, I think a better subject is:
> 
>  tracing: Fix lock inversion caused by trace_event_enable_tgid_record()
OK, I will update the subject and resend.

> 
> 
> On Tue, 3 Dec 2019 16:03:32 +0000
> Prateek Sood <prsood@codeaurora.org> wrote:
> 
>>        Task T2                             Task T3
>> trace_options_core_write()            subsystem_open()
>>
>>  mutex_lock(trace_types_lock)           mutex_lock(event_mutex)
>>
>>  set_tracer_flag()
>>
>>    trace_event_enable_tgid_record()       mutex_lock(trace_types_lock)
>>
>>     mutex_lock(event_mutex)
>>
>> This gives a circular dependency deadlock between trace_types_lock and
>> event_mutex. To fix this, invert the order in which trace_types_lock and
>> event_mutex are taken in trace_options_core_write(). This keeps the lock
>> ordering consistent.
>>
>> Change-Id: I3752a77c59555852c2241f7775ec4a1960c15c15
> 
> What's this Change-Id? Something internal?

Apologies, this is something internal that I missed removing.

> 
> I'll be adding:
> 
>  Link: http://lkml.kernel.org/r/0101016ecc80a1fa-27cc3e87-c16d-4cd6-b3c6-9c893010fdef-000000@us-west-2.amazonses.com
> 
> I'll test this out, and probably even add a stable tag.

Ok

Thanks for reviewing it!!

> 
> -- Steve
> 
> 
> 
>> Signed-off-by: Prateek Sood <prsood@codeaurora.org>
>> ---
>>  kernel/trace/trace.c              | 6 ++++++
>>  kernel/trace/trace_events.c       | 8 ++++----
>>  kernel/trace/trace_irqsoff.c      | 4 ++++
>>  kernel/trace/trace_sched_wakeup.c | 4 ++++
>>  4 files changed, 18 insertions(+), 4 deletions(-)
>>
>> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
>> index 6a0ee91..2c09aa1 100644
>> --- a/kernel/trace/trace.c
>> +++ b/kernel/trace/trace.c
>> @@ -4590,6 +4590,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
>>  
>>  int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
>>  {
>> +	lockdep_assert_held(&event_mutex);
>> +
>>  	/* do nothing if flag is already set */
>>  	if (!!(tr->trace_flags & mask) == !!enabled)
>>  		return 0;
>> @@ -4657,6 +4659,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
>>  
>>  	cmp += len;
>>  
>> +	mutex_lock(&event_mutex);
>>  	mutex_lock(&trace_types_lock);
>>  
>>  	ret = match_string(trace_options, -1, cmp);
>> @@ -4667,6 +4670,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
>>  		ret = set_tracer_flag(tr, 1 << ret, !neg);
>>  
>>  	mutex_unlock(&trace_types_lock);
>> +	mutex_unlock(&event_mutex);
>>  
>>  	/*
>>  	 * If the first trailing whitespace is replaced with '\0' by strstrip,
>> @@ -7972,9 +7976,11 @@ static void get_tr_index(void *data, struct trace_array **ptr,
>>  	if (val != 0 && val != 1)
>>  		return -EINVAL;
>>  
>> +	mutex_lock(&event_mutex);
>>  	mutex_lock(&trace_types_lock);
>>  	ret = set_tracer_flag(tr, 1 << index, val);
>>  	mutex_unlock(&trace_types_lock);
>> +	mutex_unlock(&event_mutex);
>>  
>>  	if (ret < 0)
>>  		return ret;
>> diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
>> index fba87d1..995061b 100644
>> --- a/kernel/trace/trace_events.c
>> +++ b/kernel/trace/trace_events.c
>> @@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable)
>>  	struct trace_event_file *file;
>>  	struct trace_array *tr;
>>  
>> -	mutex_lock(&event_mutex);
>> +	lockdep_assert_held(&event_mutex);
>> +
>>  	do_for_each_event_file(tr, file) {
>>  
>>  		if (!(file->flags & EVENT_FILE_FL_ENABLED))
>> @@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable)
>>  			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
>>  		}
>>  	} while_for_each_event_file();
>> -	mutex_unlock(&event_mutex);
>>  }
>>  
>>  void trace_event_enable_tgid_record(bool enable)
>> @@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable)
>>  	struct trace_event_file *file;
>>  	struct trace_array *tr;
>>  
>> -	mutex_lock(&event_mutex);
>> +	lockdep_assert_held(&event_mutex);
>> +
>>  	do_for_each_event_file(tr, file) {
>>  		if (!(file->flags & EVENT_FILE_FL_ENABLED))
>>  			continue;
>> @@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable)
>>  				  &file->flags);
>>  		}
>>  	} while_for_each_event_file();
>> -	mutex_unlock(&event_mutex);
>>  }
>>  
>>  static int __ftrace_event_enable_disable(struct trace_event_file *file,
>> diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
>> index a745b0c..887cdb5 100644
>> --- a/kernel/trace/trace_irqsoff.c
>> +++ b/kernel/trace/trace_irqsoff.c
>> @@ -560,8 +560,10 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
>>  	save_flags = tr->trace_flags;
>>  
>>  	/* non overwrite screws up the latency tracers */
>> +	mutex_lock(&event_mutex);
>>  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
>>  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
>> +	mutex_unlock(&event_mutex);
>>  
>>  	tr->max_latency = 0;
>>  	irqsoff_trace = tr;
>> @@ -586,8 +588,10 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
>>  
>>  	stop_irqsoff_tracer(tr, is_graph(tr));
>>  
>> +	mutex_lock(&event_mutex);
>>  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
>>  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
>> +	mutex_unlock(&event_mutex);
>>  	ftrace_reset_array_ops(tr);
>>  
>>  	irqsoff_busy = false;
>> diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
>> index 5e43b96..0bc67d1 100644
>> --- a/kernel/trace/trace_sched_wakeup.c
>> +++ b/kernel/trace/trace_sched_wakeup.c
>> @@ -671,8 +671,10 @@ static int __wakeup_tracer_init(struct trace_array *tr)
>>  	save_flags = tr->trace_flags;
>>  
>>  	/* non overwrite screws up the latency tracers */
>> +	mutex_lock(&event_mutex);
>>  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
>>  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
>> +	mutex_unlock(&event_mutex);
>>  
>>  	tr->max_latency = 0;
>>  	wakeup_trace = tr;
>> @@ -722,8 +724,10 @@ static void wakeup_tracer_reset(struct trace_array *tr)
>>  	/* make sure we put back any tasks we are tracing */
>>  	wakeup_reset(tr);
>>  
>> +	mutex_lock(&event_mutex);
>>  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
>>  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
>> +	mutex_unlock(&event_mutex);
>>  	ftrace_reset_array_ops(tr);
>>  	wakeup_busy = false;
>>  }
> 


-- 
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project


* [PATCH v2] tracing: Fix lock inversion in trace_event_enable_tgid_record()
       [not found]     ` <1575435333-21626-1-git-send-email-prsood@codeaurora.org>
@ 2019-12-04  9:36       ` Prateek Sood
  2019-12-06 12:31         ` Prateek Sood
  0 siblings, 1 reply; 5+ messages in thread
From: Prateek Sood @ 2019-12-04  9:36 UTC (permalink / raw)
  To: rostedt, mingo; +Cc: linux-kernel, kaushalk

        Task T2                             Task T3
trace_options_core_write()            subsystem_open()

  mutex_lock(trace_types_lock)           mutex_lock(event_mutex)

  set_tracer_flag()

    trace_event_enable_tgid_record()       mutex_lock(trace_types_lock)

     mutex_lock(event_mutex)

This gives a circular dependency deadlock between trace_types_lock and
event_mutex. To fix this, invert the order in which trace_types_lock and
event_mutex are taken in trace_options_core_write(). This keeps the lock
ordering consistent.
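
For illustration, the same ABBA pattern in a minimal, self-contained user-space
sketch; lock_a and lock_b are hypothetical stand-ins for event_mutex and
trace_types_lock, and the function names are made up (this is not kernel code):

/*
 * gcc -Wall abba-sketch.c && ./a.out
 */
#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* "event_mutex" */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* "trace_types_lock" */

/* Old option-write path: takes B, then A nested inside it (B -> A). */
static void old_option_write_path(void)
{
	pthread_mutex_lock(&lock_b);
	pthread_mutex_lock(&lock_a);
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
}

/* The open path takes A, then B (A -> B) -- the opposite order. */
static void subsystem_open_path(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

/* Fixed option-write path: also takes A before B, so no cycle can form. */
static void fixed_option_write_path(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

int main(void)
{
	/*
	 * Single-threaded calls never hang; the deadlock needs two tasks
	 * interleaving the two opposite orders, as in the T2/T3 trace above.
	 */
	old_option_write_path();
	subsystem_open_path();
	fixed_option_write_path();
	return 0;
}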

Signed-off-by: Prateek Sood <prsood@codeaurora.org>
---
  kernel/trace/trace.c              | 6 ++++++
  kernel/trace/trace_events.c       | 8 ++++----
  kernel/trace/trace_irqsoff.c      | 4 ++++
  kernel/trace/trace_sched_wakeup.c | 4 ++++
  4 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6a0ee91..2c09aa1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4590,6 +4590,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
  
  int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
  {
+	lockdep_assert_held(&event_mutex);
+
  	/* do nothing if flag is already set */
  	if (!!(tr->trace_flags & mask) == !!enabled)
  		return 0;
@@ -4657,6 +4659,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
  
  	cmp += len;
  
+	mutex_lock(&event_mutex);
  	mutex_lock(&trace_types_lock);
  
  	ret = match_string(trace_options, -1, cmp);
@@ -4667,6 +4670,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
  		ret = set_tracer_flag(tr, 1 << ret, !neg);
  
  	mutex_unlock(&trace_types_lock);
+	mutex_unlock(&event_mutex);
  
  	/*
  	 * If the first trailing whitespace is replaced with '\0' by strstrip,
@@ -7972,9 +7976,11 @@ static void get_tr_index(void *data, struct trace_array **ptr,
  	if (val != 0 && val != 1)
  		return -EINVAL;
  
+	mutex_lock(&event_mutex);
  	mutex_lock(&trace_types_lock);
  	ret = set_tracer_flag(tr, 1 << index, val);
  	mutex_unlock(&trace_types_lock);
+	mutex_unlock(&event_mutex);
  
  	if (ret < 0)
  		return ret;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index fba87d1..995061b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable)
  	struct trace_event_file *file;
  	struct trace_array *tr;
  
-	mutex_lock(&event_mutex);
+	lockdep_assert_held(&event_mutex);
+
  	do_for_each_event_file(tr, file) {
  
  		if (!(file->flags & EVENT_FILE_FL_ENABLED))
@@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable)
  			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
  		}
  	} while_for_each_event_file();
-	mutex_unlock(&event_mutex);
  }
  
  void trace_event_enable_tgid_record(bool enable)
@@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable)
  	struct trace_event_file *file;
  	struct trace_array *tr;
  
-	mutex_lock(&event_mutex);
+	lockdep_assert_held(&event_mutex);
+
  	do_for_each_event_file(tr, file) {
  		if (!(file->flags & EVENT_FILE_FL_ENABLED))
  			continue;
@@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable)
  				  &file->flags);
  		}
  	} while_for_each_event_file();
-	mutex_unlock(&event_mutex);
  }
  
  static int __ftrace_event_enable_disable(struct trace_event_file *file,
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a745b0c..887cdb5 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -560,8 +560,10 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
  	save_flags = tr->trace_flags;
  
  	/* non overwrite screws up the latency tracers */
+	mutex_lock(&event_mutex);
  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+	mutex_unlock(&event_mutex);
  
  	tr->max_latency = 0;
  	irqsoff_trace = tr;
@@ -586,8 +588,10 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
  
  	stop_irqsoff_tracer(tr, is_graph(tr));
  
+	mutex_lock(&event_mutex);
  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	mutex_unlock(&event_mutex);
  	ftrace_reset_array_ops(tr);
  
  	irqsoff_busy = false;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5e43b96..0bc67d1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -671,8 +671,10 @@ static int __wakeup_tracer_init(struct trace_array *tr)
  	save_flags = tr->trace_flags;
  
  	/* non overwrite screws up the latency tracers */
+	mutex_lock(&event_mutex);
  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+	mutex_unlock(&event_mutex);
  
  	tr->max_latency = 0;
  	wakeup_trace = tr;
@@ -722,8 +724,10 @@ static void wakeup_tracer_reset(struct trace_array *tr)
  	/* make sure we put back any tasks we are tracing */
  	wakeup_reset(tr);
  
+	mutex_lock(&event_mutex);
  	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
  	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	mutex_unlock(&event_mutex);
  	ftrace_reset_array_ops(tr);
  	wakeup_busy = false;
  }
-- 
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project.



* Re: [PATCH v2] tracing: Fix lock inversion in trace_event_enable_tgid_record()
  2019-12-04  9:36       ` [PATCH v2] tracing: Fix lock inversion in trace_event_enable_tgid_record() Prateek Sood
@ 2019-12-06 12:31         ` Prateek Sood
  0 siblings, 0 replies; 5+ messages in thread
From: Prateek Sood @ 2019-12-06 12:31 UTC (permalink / raw)
  To: rostedt, mingo; +Cc: linux-kernel, kaushalk

Steve,

Does the deadlock described here look valid?

Any outcome from your local verification?
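
For local verification, one option might be to run two tasks that exercise the
two paths concurrently on a kernel built with CONFIG_PROVE_LOCKING, and then
look for a lockdep circular-dependency report in dmesg. The sketch below is an
assumption-laden illustration, not something taken from this thread: it assumes
tracefs is mounted at /sys/kernel/tracing and picks the sched subsystem
arbitrarily.

/*
 * Exercise trace_options_core_write() (via the record-tgid option) and
 * subsystem_open() (via a subsystem filter file) at the same time, so that
 * lockdep can observe both lock orders.  Paths below are assumptions.
 * Build with: gcc -Wall -pthread lock-order-exerciser.c
 */
#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>

#define OPT_PATH	"/sys/kernel/tracing/options/record-tgid"
#define FILTER_PATH	"/sys/kernel/tracing/events/sched/filter"

/* Writer: toggling options/record-tgid goes through set_tracer_flag(). */
static void *toggle_tgid_option(void *arg)
{
	char val = '1';

	for (int i = 0; i < 1000; i++) {
		int fd = open(OPT_PATH, O_WRONLY);

		if (fd < 0)
			break;
		if (write(fd, &val, 1) != 1) {
			close(fd);
			break;
		}
		val = (val == '1') ? '0' : '1';
		close(fd);
	}
	return NULL;
}

/* Reader: opening a subsystem filter file goes through subsystem_open(). */
static void *open_subsystem_filter(void *arg)
{
	for (int i = 0; i < 1000; i++) {
		int fd = open(FILTER_PATH, O_RDONLY);

		if (fd >= 0)
			close(fd);
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, toggle_tgid_option, NULL);
	pthread_create(&t2, NULL, open_subsystem_filter, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;	/* then check dmesg for a lockdep report */
}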


Regards

Prateek


On 12/4/2019 3:06 PM, Prateek Sood wrote:
>        Task T2                             Task T3
> trace_options_core_write()            subsystem_open()
>
>  mutex_lock(trace_types_lock)           mutex_lock(event_mutex)
>
>  set_tracer_flag()
>
>    trace_event_enable_tgid_record()       mutex_lock(trace_types_lock)
>
>     mutex_lock(event_mutex)
>
> This gives a circular dependency deadlock between trace_types_lock and
> event_mutex. To fix this, invert the order in which trace_types_lock and
> event_mutex are taken in trace_options_core_write(). This keeps the lock
> ordering consistent.
>
> Signed-off-by: Prateek Sood <prsood@codeaurora.org>
> ---
>  kernel/trace/trace.c              | 6 ++++++
>  kernel/trace/trace_events.c       | 8 ++++----
>  kernel/trace/trace_irqsoff.c      | 4 ++++
>  kernel/trace/trace_sched_wakeup.c | 4 ++++
>  4 files changed, 18 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index 6a0ee91..2c09aa1 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -4590,6 +4590,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
>
>  int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
>  {
> +    lockdep_assert_held(&event_mutex);
> +
>      /* do nothing if flag is already set */
>      if (!!(tr->trace_flags & mask) == !!enabled)
>          return 0;
> @@ -4657,6 +4659,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
>
>      cmp += len;
>
> +    mutex_lock(&event_mutex);
>      mutex_lock(&trace_types_lock);
>
>      ret = match_string(trace_options, -1, cmp);
> @@ -4667,6 +4670,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
>          ret = set_tracer_flag(tr, 1 << ret, !neg);
>
>      mutex_unlock(&trace_types_lock);
> +    mutex_unlock(&event_mutex);
>
>      /*
>       * If the first trailing whitespace is replaced with '\0' by strstrip,
> @@ -7972,9 +7976,11 @@ static void get_tr_index(void *data, struct trace_array **ptr,
>      if (val != 0 && val != 1)
>          return -EINVAL;
>
> +    mutex_lock(&event_mutex);
>      mutex_lock(&trace_types_lock);
>      ret = set_tracer_flag(tr, 1 << index, val);
>      mutex_unlock(&trace_types_lock);
> +    mutex_unlock(&event_mutex);
>
>      if (ret < 0)
>          return ret;
> diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
> index fba87d1..995061b 100644
> --- a/kernel/trace/trace_events.c
> +++ b/kernel/trace/trace_events.c
> @@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable)
>      struct trace_event_file *file;
>      struct trace_array *tr;
>
> -    mutex_lock(&event_mutex);
> +    lockdep_assert_held(&event_mutex);
> +
>      do_for_each_event_file(tr, file) {
>
>          if (!(file->flags & EVENT_FILE_FL_ENABLED))
> @@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable)
>              clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
>          }
>      } while_for_each_event_file();
> -    mutex_unlock(&event_mutex);
>  }
>
>  void trace_event_enable_tgid_record(bool enable)
> @@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable)
>      struct trace_event_file *file;
>      struct trace_array *tr;
>
> -    mutex_lock(&event_mutex);
> +    lockdep_assert_held(&event_mutex);
> +
>      do_for_each_event_file(tr, file) {
>          if (!(file->flags & EVENT_FILE_FL_ENABLED))
>              continue;
> @@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable)
>                    &file->flags);
>          }
>      } while_for_each_event_file();
> -    mutex_unlock(&event_mutex);
>  }
>
>  static int __ftrace_event_enable_disable(struct trace_event_file *file,
> diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
> index a745b0c..887cdb5 100644
> --- a/kernel/trace/trace_irqsoff.c
> +++ b/kernel/trace/trace_irqsoff.c
> @@ -560,8 +560,10 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
>      save_flags = tr->trace_flags;
>
>      /* non overwrite screws up the latency tracers */
> +    mutex_lock(&event_mutex);
>      set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
>      set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
> +    mutex_unlock(&event_mutex);
>
>      tr->max_latency = 0;
>      irqsoff_trace = tr;
> @@ -586,8 +588,10 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
>
>      stop_irqsoff_tracer(tr, is_graph(tr));
>
> +    mutex_lock(&event_mutex);
>      set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
>      set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
> +    mutex_unlock(&event_mutex);
>      ftrace_reset_array_ops(tr);
>
>      irqsoff_busy = false;
> diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
> index 5e43b96..0bc67d1 100644
> --- a/kernel/trace/trace_sched_wakeup.c
> +++ b/kernel/trace/trace_sched_wakeup.c
> @@ -671,8 +671,10 @@ static int __wakeup_tracer_init(struct trace_array *tr)
>      save_flags = tr->trace_flags;
>
>      /* non overwrite screws up the latency tracers */
> +    mutex_lock(&event_mutex);
>      set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
>      set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
> +    mutex_unlock(&event_mutex);
>
>      tr->max_latency = 0;
>      wakeup_trace = tr;
> @@ -722,8 +724,10 @@ static void wakeup_tracer_reset(struct trace_array *tr)
>      /* make sure we put back any tasks we are tracing */
>      wakeup_reset(tr);
>
> +    mutex_lock(&event_mutex);
>      set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
>      set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
> +    mutex_unlock(&event_mutex);
>      ftrace_reset_array_ops(tr);
>      wakeup_busy = false;
>  }
> -- 
> Qualcomm India Private Limited, on behalf of Qualcomm Innovation
> Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
> Collaborative Project.
>
-- 
Qualcomm India Private Limited, on behalf of Qualcomm Innovation
Center, Inc., is a member of Code Aurora Forum, a Linux Foundation
Collaborative Project



* [PATCH] trace: circular dependency for trace_types_lock and event_mutex
@ 2019-12-03 16:03 Prateek Sood
  0 siblings, 0 replies; 5+ messages in thread
From: Prateek Sood @ 2019-12-03 16:03 UTC (permalink / raw)
  To: rostedt, mingo; +Cc: linux-kernel, kaushalk, Prateek Sood

       Task T2                             Task T3
trace_options_core_write()            subsystem_open()

 mutex_lock(trace_types_lock)           mutex_lock(event_mutex)

 set_tracer_flag()

   trace_event_enable_tgid_record()       mutex_lock(trace_types_lock)

    mutex_lock(event_mutex)

This gives a circular dependency deadlock between trace_types_lock and
event_mutex. To fix this, invert the order in which trace_types_lock and
event_mutex are taken in trace_options_core_write(). This keeps the lock
ordering consistent.

Change-Id: I3752a77c59555852c2241f7775ec4a1960c15c15
Signed-off-by: Prateek Sood <prsood@codeaurora.org>
---
 kernel/trace/trace.c              | 6 ++++++
 kernel/trace/trace_events.c       | 8 ++++----
 kernel/trace/trace_irqsoff.c      | 4 ++++
 kernel/trace/trace_sched_wakeup.c | 4 ++++
 4 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6a0ee91..2c09aa1 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4590,6 +4590,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
 
 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
 {
+	lockdep_assert_held(&event_mutex);
+
 	/* do nothing if flag is already set */
 	if (!!(tr->trace_flags & mask) == !!enabled)
 		return 0;
@@ -4657,6 +4659,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
 
 	cmp += len;
 
+	mutex_lock(&event_mutex);
 	mutex_lock(&trace_types_lock);
 
 	ret = match_string(trace_options, -1, cmp);
@@ -4667,6 +4670,7 @@ static int trace_set_options(struct trace_array *tr, char *option)
 		ret = set_tracer_flag(tr, 1 << ret, !neg);
 
 	mutex_unlock(&trace_types_lock);
+	mutex_unlock(&event_mutex);
 
 	/*
 	 * If the first trailing whitespace is replaced with '\0' by strstrip,
@@ -7972,9 +7976,11 @@ static void get_tr_index(void *data, struct trace_array **ptr,
 	if (val != 0 && val != 1)
 		return -EINVAL;
 
+	mutex_lock(&event_mutex);
 	mutex_lock(&trace_types_lock);
 	ret = set_tracer_flag(tr, 1 << index, val);
 	mutex_unlock(&trace_types_lock);
+	mutex_unlock(&event_mutex);
 
 	if (ret < 0)
 		return ret;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index fba87d1..995061b 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -320,7 +320,8 @@ void trace_event_enable_cmd_record(bool enable)
 	struct trace_event_file *file;
 	struct trace_array *tr;
 
-	mutex_lock(&event_mutex);
+	lockdep_assert_held(&event_mutex);
+
 	do_for_each_event_file(tr, file) {
 
 		if (!(file->flags & EVENT_FILE_FL_ENABLED))
@@ -334,7 +335,6 @@ void trace_event_enable_cmd_record(bool enable)
 			clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
 		}
 	} while_for_each_event_file();
-	mutex_unlock(&event_mutex);
 }
 
 void trace_event_enable_tgid_record(bool enable)
@@ -342,7 +342,8 @@ void trace_event_enable_tgid_record(bool enable)
 	struct trace_event_file *file;
 	struct trace_array *tr;
 
-	mutex_lock(&event_mutex);
+	lockdep_assert_held(&event_mutex);
+
 	do_for_each_event_file(tr, file) {
 		if (!(file->flags & EVENT_FILE_FL_ENABLED))
 			continue;
@@ -356,7 +357,6 @@ void trace_event_enable_tgid_record(bool enable)
 				  &file->flags);
 		}
 	} while_for_each_event_file();
-	mutex_unlock(&event_mutex);
 }
 
 static int __ftrace_event_enable_disable(struct trace_event_file *file,
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index a745b0c..887cdb5 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -560,8 +560,10 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
 	save_flags = tr->trace_flags;
 
 	/* non overwrite screws up the latency tracers */
+	mutex_lock(&event_mutex);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+	mutex_unlock(&event_mutex);
 
 	tr->max_latency = 0;
 	irqsoff_trace = tr;
@@ -586,8 +588,10 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
 
 	stop_irqsoff_tracer(tr, is_graph(tr));
 
+	mutex_lock(&event_mutex);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	mutex_unlock(&event_mutex);
 	ftrace_reset_array_ops(tr);
 
 	irqsoff_busy = false;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5e43b96..0bc67d1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -671,8 +671,10 @@ static int __wakeup_tracer_init(struct trace_array *tr)
 	save_flags = tr->trace_flags;
 
 	/* non overwrite screws up the latency tracers */
+	mutex_lock(&event_mutex);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+	mutex_unlock(&event_mutex);
 
 	tr->max_latency = 0;
 	wakeup_trace = tr;
@@ -722,8 +724,10 @@ static void wakeup_tracer_reset(struct trace_array *tr)
 	/* make sure we put back any tasks we are tracing */
 	wakeup_reset(tr);
 
+	mutex_lock(&event_mutex);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+	mutex_unlock(&event_mutex);
 	ftrace_reset_array_ops(tr);
 	wakeup_busy = false;
 }
-- 
Qualcomm India Private Limited, on behalf of Qualcomm Innovation Center, Inc., 
is a member of Code Aurora Forum, a Linux Foundation Collaborative Project.

