All of lore.kernel.org
 help / color / mirror / Atom feed
From: Waiman Long <longman@redhat.com>
To: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>, Will Deacon <will@kernel.org>,
	Boqun Feng <boqun.feng@gmail.com>,
	linux-kernel@vger.kernel.org
Subject: Re: [PATCH 2/2] tracing: Use proper do_arch_spin_lock() API
Date: Wed, 21 Sep 2022 20:50:27 -0400	[thread overview]
Message-ID: <d9870730-2f4c-83a1-e083-a8b6da368436@redhat.com> (raw)
In-Reply-To: <20220921181721.3a51afe9@gandalf.local.home>

On 9/21/22 18:17, Steven Rostedt wrote:
> On Wed, 21 Sep 2022 09:21:52 -0400
> Waiman Long <longman@redhat.com> wrote:
>
>> It was found that some tracing functions acquire an arch_spinlock_t with
>> preemption and irqs enabled.  That can be problematic in case preemption
>> happens after acquiring the lock. Use the proper do_arch_spin_lock()
>> API with preemption disabled to make sure that this won't happen unless
>> it is obvious that either preemption or irqs have been disabled.
>>
>> Signed-off-by: Waiman Long <longman@redhat.com>
>> ---
>>   kernel/trace/trace.c | 52 ++++++++++++++++++++------------------------
>>   1 file changed, 24 insertions(+), 28 deletions(-)
>>
>> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
>> index d3005279165d..cbb8520842ad 100644
>> --- a/kernel/trace/trace.c
>> +++ b/kernel/trace/trace.c
>> @@ -1193,12 +1193,12 @@ void *tracing_cond_snapshot_data(struct trace_array *tr)
>>   {
>>   	void *cond_data = NULL;
>>   
>> -	arch_spin_lock(&tr->max_lock);
>> +	do_arch_spin_lock(&tr->max_lock);
> This should actually disable interrupts and not preemption.
>
>>   
>>   	if (tr->cond_snapshot)
>>   		cond_data = tr->cond_snapshot->cond_data;
>>   
>> -	arch_spin_unlock(&tr->max_lock);
>> +	do_arch_spin_unlock(&tr->max_lock);
>>   
>>   	return cond_data;
>>   }
>> @@ -1334,9 +1334,9 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
>>   		goto fail_unlock;
>>   	}
>>   
>> -	arch_spin_lock(&tr->max_lock);
>> +	do_arch_spin_lock(&tr->max_lock);
> Same here.
>
>>   	tr->cond_snapshot = cond_snapshot;
>> -	arch_spin_unlock(&tr->max_lock);
>> +	do_arch_spin_unlock(&tr->max_lock);
>>   
>>   	mutex_unlock(&trace_types_lock);
>>   
>> @@ -1363,7 +1363,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
>>   {
>>   	int ret = 0;
>>   
>> -	arch_spin_lock(&tr->max_lock);
>> +	do_arch_spin_lock(&tr->max_lock);
> And here.
>
>>   
>>   	if (!tr->cond_snapshot)
>>   		ret = -EINVAL;
>> @@ -1372,7 +1372,7 @@ int tracing_snapshot_cond_disable(struct trace_array *tr)
>>   		tr->cond_snapshot = NULL;
>>   	}
>>   
>> -	arch_spin_unlock(&tr->max_lock);
>> +	do_arch_spin_unlock(&tr->max_lock);
>>   
>>   	return ret;
>>   }
>> @@ -1819,7 +1819,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
>>   		return;
>>   	}
>>   
>> -	arch_spin_lock(&tr->max_lock);
>> +	do_arch_spin_lock(&tr->max_lock);
> Nothing here is needed, as interrupts had better be disabled when this
> function is called. And there's already a:
>
> 	WARN_ON_ONCE(!irqs_disabled());

Sorry, I missed that.


>
>>   
>>   	/* Inherit the recordable setting from array_buffer */
>>   	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
>> @@ -1836,7 +1836,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
>>   	__update_max_tr(tr, tsk, cpu);
>>   
>>    out_unlock:
>> -	arch_spin_unlock(&tr->max_lock);
>> +	do_arch_spin_unlock(&tr->max_lock);
> Nothing needs to be done here.
Right.
>
>>   }
>>   
>>   /**
>> @@ -1862,7 +1862,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
>>   		return;
>>   	}
>>   
>> -	arch_spin_lock(&tr->max_lock);
>> +	do_arch_spin_lock(&tr->max_lock);
> Same here. Interrupts had better be disabled in this function.
>
>>   
>>   	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
>>   
>> @@ -1880,7 +1880,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
>>   	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
>>   
>>   	__update_max_tr(tr, tsk, cpu);
>> -	arch_spin_unlock(&tr->max_lock);
>> +	do_arch_spin_unlock(&tr->max_lock);
> Nothing to do here.
>
>>   }
>>   #endif /* CONFIG_TRACER_MAX_TRACE */
>>   
>> @@ -2413,7 +2413,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
>>   	 * nor do we want to disable interrupts,
>>   	 * so if we miss here, then better luck next time.
>>   	 */
>> -	if (!arch_spin_trylock(&trace_cmdline_lock))
>> +	if (!do_arch_spin_trylock(&trace_cmdline_lock))
> This is called within the scheduler and wake up, so interrupts had better
> be disabled (run queue lock is held).
>
>>   		return 0;
>>   
>>   	idx = savedcmd->map_pid_to_cmdline[tpid];
>> @@ -2427,7 +2427,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
>>   	savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
>>   	set_cmdline(idx, tsk->comm);
>>   
>> -	arch_spin_unlock(&trace_cmdline_lock);
>> +	do_arch_spin_unlock(&trace_cmdline_lock);
> Nothing to do here.
>
>>   
>>   	return 1;
>>   }
>> @@ -2461,13 +2461,11 @@ static void __trace_find_cmdline(int pid, char comm[])
>>   
>>   void trace_find_cmdline(int pid, char comm[])
>>   {
>> -	preempt_disable();
>> -	arch_spin_lock(&trace_cmdline_lock);
>> +	do_arch_spin_lock(&trace_cmdline_lock);
> Keep this as is (with the open coded preempt_disable()).
OK.
>
>>   
>>   	__trace_find_cmdline(pid, comm);
>>   
>> -	arch_spin_unlock(&trace_cmdline_lock);
>> -	preempt_enable();
>> +	do_arch_spin_unlock(&trace_cmdline_lock);
>>   }
>>   
>>   static int *trace_find_tgid_ptr(int pid)
>> @@ -5829,8 +5827,7 @@ static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
>>   	void *v;
>>   	loff_t l = 0;
>>   
>> -	preempt_disable();
>> -	arch_spin_lock(&trace_cmdline_lock);
>> +	do_arch_spin_lock(&trace_cmdline_lock);
> This too.
>
>>   
>>   	v = &savedcmd->map_cmdline_to_pid[0];
>>   	while (l <= *pos) {
>> @@ -5844,8 +5841,7 @@ static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
>>   
>>   static void saved_cmdlines_stop(struct seq_file *m, void *v)
>>   {
>> -	arch_spin_unlock(&trace_cmdline_lock);
>> -	preempt_enable();
>> +	do_arch_spin_unlock(&trace_cmdline_lock);
> And this.
>
>>   }
>>   
>>   static int saved_cmdlines_show(struct seq_file *m, void *v)
>> @@ -5890,9 +5886,9 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
>>   	char buf[64];
>>   	int r;
>>   
>> -	arch_spin_lock(&trace_cmdline_lock);
>> +	do_arch_spin_lock(&trace_cmdline_lock);
> Yeah, we should add preempt_disable() here.
>
>>   	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
>> -	arch_spin_unlock(&trace_cmdline_lock);
>> +	do_arch_spin_unlock(&trace_cmdline_lock);
>>   
>>   	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
>>   }
>> @@ -5917,10 +5913,10 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
>>   		return -ENOMEM;
>>   	}
>>   
>> -	arch_spin_lock(&trace_cmdline_lock);
>> +	do_arch_spin_lock(&trace_cmdline_lock);
> And here.
>
>>   	savedcmd_temp = savedcmd;
>>   	savedcmd = s;
>> -	arch_spin_unlock(&trace_cmdline_lock);
>> +	do_arch_spin_unlock(&trace_cmdline_lock);
>>   	free_saved_cmdlines_buffer(savedcmd_temp);
>>   
>>   	return 0;
>> @@ -6373,10 +6369,10 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
>>   
>>   #ifdef CONFIG_TRACER_SNAPSHOT
>>   	if (t->use_max_tr) {
>> -		arch_spin_lock(&tr->max_lock);
>> +		do_arch_spin_lock(&tr->max_lock);
> Add preemption disabling.
>
>>   		if (tr->cond_snapshot)
>>   			ret = -EBUSY;
>> -		arch_spin_unlock(&tr->max_lock);
>> +		do_arch_spin_unlock(&tr->max_lock);
>>   		if (ret)
>>   			goto out;
>>   	}
>> @@ -7436,10 +7432,10 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
>>   		goto out;
>>   	}
>>   
>> -	arch_spin_lock(&tr->max_lock);
>> +	do_arch_spin_lock(&tr->max_lock);
> And this should disable interrupts first.
>
> -- Steve

Thanks for the comments, will modify the patch as suggested.

Cheers,
Longman


  reply	other threads:[~2022-09-22  0:50 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-09-21 13:21 [PATCH 0/2] locking, tracing: Fix incorrect use of arch_spin_lock() Waiman Long
2022-09-21 13:21 ` [PATCH 1/2] locking: Provide a low overhead do_arch_spin_lock() API Waiman Long
2022-09-21 22:06   ` Steven Rostedt
2022-09-22  0:51     ` Waiman Long
2022-09-21 13:21 ` [PATCH 2/2] tracing: Use proper " Waiman Long
2022-09-21 22:17   ` Steven Rostedt
2022-09-22  0:50     ` Waiman Long [this message]
     [not found]     ` <3789bf4d-d699-63d4-c97b-c8d9524cbc2f@redhat.com>
2022-09-22  2:08       ` Steven Rostedt
2022-09-22  7:55     ` Peter Zijlstra
2022-09-22 12:25       ` Waiman Long
2022-09-22 12:40       ` Steven Rostedt

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=d9870730-2f4c-83a1-e083-a8b6da368436@redhat.com \
    --to=longman@redhat.com \
    --cc=boqun.feng@gmail.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=peterz@infradead.org \
    --cc=rostedt@goodmis.org \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.