From: "Liang, Kan" <kan.liang@linux.intel.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: acme@redhat.com, mingo@kernel.org, linux-kernel@vger.kernel.org,
	jolsa@kernel.org, eranian@google.com,
	alexander.shishkin@linux.intel.com, ak@linux.intel.com
Subject: Re: [PATCH V6 07/14] perf/x86/intel: Generic support for hardware TopDown metrics
Date: Tue, 21 Jul 2020 10:05:55 -0400	[thread overview]
Message-ID: <3a6b082e-7906-9df1-28b9-c7639127e8a7@linux.intel.com> (raw)
In-Reply-To: <20200721094327.GW10769@hirez.programming.kicks-ass.net>



On 7/21/2020 5:43 AM, Peter Zijlstra wrote:
> On Fri, Jul 17, 2020 at 07:05:47AM -0700, kan.liang@linux.intel.com wrote:
>> @@ -1031,6 +1034,35 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
>>   	return unsched ? -EINVAL : 0;
>>   }
>>   
>> +static int add_nr_metric_event(struct cpu_hw_events *cpuc,
>> +			       struct perf_event *event,
>> +			       int *max_count, bool sibling)
>> +{
>> +	/* The TopDown metrics events cannot be shared. */
>> +	if (is_metric_event(event) &&
>> +	    (++cpuc->n_metric_event > INTEL_TD_METRIC_NUM)) {
>> +		cpuc->n_metric_event--;
>> +		return -EINVAL;
>> +	}
>> +
>> +	/*
>> +	 * Take the accepted metrics events into account for leader event.
>> +	 */
>> +	if (!sibling)
>> +		*max_count += cpuc->n_metric_event;
>> +	else if (is_metric_event(event))
>> +		(*max_count)++;
>> +
>> +	return 0;
>> +}
>> +
>> +static void del_nr_metric_event(struct cpu_hw_events *cpuc,
>> +				struct perf_event *event)
>> +{
>> +	if (is_metric_event(event))
>> +		cpuc->n_metric_event--;
>> +}
>> +
>>   /*
>>    * dogrp: true if must collect siblings events (group)
>>    * returns total number of events and error code
>> @@ -1066,6 +1098,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
>>   		cpuc->pebs_output = is_pebs_pt(leader) + 1;
>>   	}
>>   
>> +	if (x86_pmu.intel_cap.perf_metrics &&
>> +	    add_nr_metric_event(cpuc, leader, &max_count, false))
>> +		return -EINVAL;
>> +
>>   	if (is_x86_event(leader)) {
>>   		if (n >= max_count)
>>   			return -EINVAL;
>> @@ -1082,6 +1118,10 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
>>   		    event->state <= PERF_EVENT_STATE_OFF)
>>   			continue;
>>   
>> +		if (x86_pmu.intel_cap.perf_metrics &&
>> +		    add_nr_metric_event(cpuc, event, &max_count, true))
>> +			return -EINVAL;
>> +
>>   		if (n >= max_count)
>>   			return -EINVAL;
>>   
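
For reference, the accounting in the hunk above can be exercised with a minimal
standalone sketch. The struct, the helpers, and the value of INTEL_TD_METRIC_NUM
below are mock stand-ins for illustration only, not the kernel definitions:

#include <stdbool.h>
#include <stdio.h>

/* Mock stand-ins for this sketch only -- not the kernel definitions. */
#define INTEL_TD_METRIC_NUM	4	/* assumed cap on metric events */

struct perf_event {
	bool metric;			/* stand-in for is_metric_event() */
};

struct cpu_hw_events {
	int n_metric_event;		/* number of accepted metric events */
};

static bool is_metric_event(struct perf_event *event)
{
	return event->metric;
}

/* Same shape as the hunk above: enforce the cap, then widen max_count. */
static int add_nr_metric_event(struct cpu_hw_events *cpuc, struct perf_event *event,
			       int *max_count, bool sibling)
{
	/* The TopDown metrics events cannot be shared. */
	if (is_metric_event(event) && ++cpuc->n_metric_event > INTEL_TD_METRIC_NUM) {
		cpuc->n_metric_event--;
		return -1;
	}

	if (!sibling)
		*max_count += cpuc->n_metric_event;	/* leader: count all accepted metrics */
	else if (is_metric_event(event))
		(*max_count)++;				/* sibling: one extra slot for this metric */

	return 0;
}

int main(void)
{
	struct cpu_hw_events cpuc = { 0 };
	struct perf_event slots = { .metric = false };
	struct perf_event metric = { .metric = true };
	int max_count = 8, i;

	add_nr_metric_event(&cpuc, &slots, &max_count, false);	/* leader */
	for (i = 0; i < 5; i++) {				/* metric siblings */
		int ret = add_nr_metric_event(&cpuc, &metric, &max_count, true);
		printf("metric sibling %d: %s (max_count=%d)\n",
		       i, ret ? "rejected" : "accepted", max_count);
	}
	return 0;
}

With a non-metric leader and five metric siblings, the first four metrics are
accepted and each widens max_count by one; the fifth trips the cap and is
rejected. That per-leader/per-sibling bookkeeping is what the rework below
folds into a single check.
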
> 
> Something like so perhaps ?
> 
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -1035,24 +1035,14 @@ int x86_schedule_events(struct cpu_hw_ev
>   }
>   
>   static int add_nr_metric_event(struct cpu_hw_events *cpuc,
> -			       struct perf_event *event,
> -			       int *max_count, bool sibling)
> +			       struct perf_event *event)
>   {
> -	/* The TopDown metrics events cannot be shared. */
> -	if (is_metric_event(event) &&
> -	    (++cpuc->n_metric_event > INTEL_TD_METRIC_NUM)) {
> -		cpuc->n_metric_event--;
> -		return -EINVAL;
> +	if (is_metric_event(event)) {
> +		if (cpuc->n_metric == INTEL_TD_METRIC_NUM)
> +			return -EINVAL;
> +		cpuc->n_metric++;
>   	}
>   
> -	/*
> -	 * Take the accepted metrics events into account for leader event.
> -	 */
> -	if (!sibling)
> -		*max_count += cpuc->n_metric_event;
> -	else if (is_metric_event(event))
> -		(*max_count)++;
> -
>   	return 0;
>   }
>   
> @@ -1060,7 +1050,24 @@ static void del_nr_metric_event(struct c
>   				struct perf_event *event)
>   {
>   	if (is_metric_event(event))
> -		cpuc->n_metric_event--;
> +		cpuc->n_metric--;
> +}
> +
> +static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
> +			 int max_count, int n)
> +{
> +
> +	if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event))
> +		return -EINVAL;
> +
> +	if (n >= max_count + cpuc->n_metric)
> +		return -EINVAL;
> +
> +	cpuc->event_list[n] = event;
> +	if (is_counter_pair(&event->hw))
> +		cpuc->n_pair++;
> +
> +	return 0;
>   }
>   
>   /*
> @@ -1098,37 +1105,20 @@ static int collect_events(struct cpu_hw_
>   		cpuc->pebs_output = is_pebs_pt(leader) + 1;
>   	}
>   
> -	if (x86_pmu.intel_cap.perf_metrics &&
> -	    add_nr_metric_event(cpuc, leader, &max_count, false))
> +	if (is_x86_event(leader) && collect_event(cpuc, leader, max_count, n))
>   		return -EINVAL;
> +	n++;

If the leader is not an x86 event, n will still be mistakenly incremented.
But is it possible for the leader not to be an x86 event here?

It seems impossible for now. A SW event cannot be the leader of a mixed
group, and we don't allow the core PMU and a perf_invalid_context PMU in
the same group.
If so, I think this deserves a comment, in case the situation changes
later, e.g.,

  +	if (is_x86_event(leader) && collect_event(cpuc, leader, max_count, n))
   		return -EINVAL;
  +	/*
  +	 * Currently, for an x86 core event group, the leader must be an
  +	 * x86 core event. A SW event cannot be the leader of a mixed
  +	 * group. We don't allow the core PMU and the perf_invalid_context
  +	 * PMU in the same group.
  +	 */
  +	n++;
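
To illustrate, here is a standalone mock of how the reworked path would read
with that comment folded in. The types, the MAX_EVENTS/INTEL_TD_METRIC_NUM
values, and the helpers are assumptions made for this sketch, and the
perf_metrics-capability and is_counter_pair() handling is left out; it is not
the kernel code:

#include <stdbool.h>
#include <stdio.h>

#define INTEL_TD_METRIC_NUM	4	/* assumed cap on metric events */
#define MAX_EVENTS		64	/* stand-in for the event_list size */

struct perf_event {
	bool metric;			/* stand-in for is_metric_event() */
	bool x86;			/* stand-in for is_x86_event() */
	bool off;			/* stand-in for state <= PERF_EVENT_STATE_OFF */
};

struct cpu_hw_events {
	int n_metric;			/* number of accepted metric events */
	struct perf_event *event_list[MAX_EVENTS];
};

static bool is_metric_event(struct perf_event *e) { return e->metric; }
static bool is_x86_event(struct perf_event *e) { return e->x86; }

static int add_nr_metric_event(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	if (is_metric_event(event)) {
		if (cpuc->n_metric == INTEL_TD_METRIC_NUM)
			return -1;
		cpuc->n_metric++;
	}
	return 0;
}

static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
			 int max_count, int n)
{
	if (add_nr_metric_event(cpuc, event))
		return -1;

	/* metric events sit on top of the normal counters, so widen the limit */
	if (n >= max_count + cpuc->n_metric)
		return -1;

	cpuc->event_list[n] = event;
	return 0;
}

static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
			  struct perf_event **siblings, int nr_siblings, int max_count)
{
	int n = 0, i;

	if (is_x86_event(leader) && collect_event(cpuc, leader, max_count, n))
		return -1;
	/*
	 * Currently, for an x86 core event group, the leader must be an
	 * x86 core event. A SW event cannot be the leader of a mixed
	 * group, and the core PMU and a perf_invalid_context PMU are not
	 * allowed in the same group.
	 */
	n++;

	for (i = 0; i < nr_siblings; i++) {
		struct perf_event *event = siblings[i];

		if (!is_x86_event(event) || event->off)
			continue;
		if (collect_event(cpuc, event, max_count, n))
			return -1;
		n++;
	}
	return n;
}

int main(void)
{
	struct cpu_hw_events cpuc = { 0 };
	struct perf_event slots = { .x86 = true };
	struct perf_event metric = { .metric = true, .x86 = true };
	struct perf_event *siblings[] = { &metric, &metric, &metric, &metric, &metric };
	int n = collect_events(&cpuc, &slots, siblings, 5, 8);

	printf("collect_events() = %d, n_metric = %d\n", n, cpuc.n_metric);
	return 0;
}

In the main() above, the fifth metric sibling hits the INTEL_TD_METRIC_NUM
check in add_nr_metric_event() and the whole group is rejected, which matches
the -EINVAL path in the real collect_events().
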


Thanks,
Kan
>   
> -	if (is_x86_event(leader)) {
> -		if (n >= max_count)
> -			return -EINVAL;
> -		cpuc->event_list[n] = leader;
> -		n++;
> -		if (is_counter_pair(&leader->hw))
> -			cpuc->n_pair++;
> -	}
>   	if (!dogrp)
>   		return n;
>   
>   	for_each_sibling_event(event, leader) {
> -		if (!is_x86_event(event) ||
> -		    event->state <= PERF_EVENT_STATE_OFF)
> +		if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF)
>   			continue;
>   
> -		if (x86_pmu.intel_cap.perf_metrics &&
> -		    add_nr_metric_event(cpuc, event, &max_count, true))
> -			return -EINVAL;
> -
> -		if (n >= max_count)
> +		if (collect_event(cpuc, event, max_count, n))
>   			return -EINVAL;
> -
> -		cpuc->event_list[n] = event;
>   		n++;
> -		if (is_counter_pair(&event->hw))
> -			cpuc->n_pair++;
>   	}
>   	return n;
>   }
> --- a/arch/x86/events/perf_event.h
> +++ b/arch/x86/events/perf_event.h
> @@ -313,7 +313,7 @@ struct cpu_hw_events {
>   	 * Perf Metrics
>   	 */
>   	/* number of accepted metrics events */
> -	int				n_metric_event;
> +	int				n_metric;
>   
>   	/*
>   	 * AMD specific bits
> 
