[v4,04/10] perf stat: factor out event handling loop into a function

Message ID 5f42c6c5-c301-accd-928e-4304fb1c15d0@linux.intel.com
State New
Series
  • perf: support enable and disable commands in stat and record modes

Commit Message

Alexey Budankov May 25, 2020, 2:19 p.m. UTC
Factor out the event handling loop into a dispatch_events() function.
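
In condensed form (excerpted from the hunk below, with the declarations
and the sleep setup elided), the new helper replaces both open-coded
loops in __run_perf_stat() with one polling routine:

	static int dispatch_events(pid_t pid, struct perf_stat_config *config)
	{
		/* time_to_sleep: config->interval or config->timeout ms, else 1 s */
		do {
			if (pid != -1)
				child = waitpid(pid, &status, WNOHANG);
			else if (!stop)
				stop = !is_target_alive(&target, evsel_list->core.threads);

			if (child || stop || done)
				break;

			nanosleep(&time_to_sleep, NULL);
			stop = process_timeout(config->timeout, config->interval, &times);
		} while (1);

		return status;
	}

The forking path passes the workload's child_pid so the loop can reap it
with waitpid(WNOHANG); the attach path passes -1 and relies on
is_target_alive() instead.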

Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
 tools/perf/builtin-stat.c | 93 ++++++++++++++++++++++++---------------
 1 file changed, 58 insertions(+), 35 deletions(-)

Comments

Jiri Olsa May 31, 2020, 6:19 p.m. UTC | #1
On Mon, May 25, 2020 at 05:19:45PM +0300, Alexey Budankov wrote:

SNIP

> @@ -544,12 +598,10 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
>  static int __run_perf_stat(int argc, const char **argv, int run_idx)
>  {
>  	int interval = stat_config.interval;
> -	int times = stat_config.times;
>  	int timeout = stat_config.timeout;
>  	char msg[BUFSIZ];
>  	unsigned long long t0, t1;
>  	struct evsel *counter;
> -	struct timespec ts;
>  	size_t l;
>  	int status = 0;
>  	const bool forks = (argc > 0);
> @@ -558,17 +610,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>  	int i, cpu;
>  	bool second_pass = false;
>  
> -	if (interval) {
> -		ts.tv_sec  = interval / USEC_PER_MSEC;
> -		ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
> -	} else if (timeout) {
> -		ts.tv_sec  = timeout / USEC_PER_MSEC;
> -		ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
> -	} else {
> -		ts.tv_sec  = 1;
> -		ts.tv_nsec = 0;
> -	}
> -
>  	if (forks) {
>  		if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
>  						  workload_exec_failed_signal) < 0) {
> @@ -725,16 +766,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>  		perf_evlist__start_workload(evsel_list);
>  		enable_counters();
>  
> -		if (interval || timeout) {
> -			while (!waitpid(child_pid, &status, WNOHANG)) {
> -				nanosleep(&ts, NULL);
> -				if (timeout)
> -					break;
> -				process_interval();
> -				if (interval_count && !(--times))
> -					break;
> -			}
> -		}
> +		if (interval || timeout)
> +			dispatch_events(child_pid, &stat_config);
> +
>  		if (child_pid != -1) {
>  			if (timeout)
>  				kill(child_pid, SIGTERM);
> @@ -751,18 +785,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>  			psignal(WTERMSIG(status), argv[0]);
>  	} else {
>  		enable_counters();
> -		while (!done) {
> -			nanosleep(&ts, NULL);
> -			if (!is_target_alive(&target, evsel_list->core.threads))
> -				break;
> -			if (timeout)
> -				break;
> -			if (interval) {
> -				process_interval();
> -				if (interval_count && !(--times))
> -					break;
> -			}
> -		}
> +		dispatch_events(-1, &stat_config);

hum, from the discussion we had on v3 I expected more, smaller patches
with easy changes, so the change is more transparent and easier to review

as I said before this part really makes me worried and needs to be as clear
as possible.. please introduce the new function first and replace the factored
places separately, also a more verbose changelog would help ;-)

thanks,
jirka


>  	}
>  
>  	disable_counters();
> -- 
> 2.24.1
>
Alexey Budankov June 1, 2020, 7:38 a.m. UTC | #2
On 31.05.2020 21:19, Jiri Olsa wrote:
> On Mon, May 25, 2020 at 05:19:45PM +0300, Alexey Budankov wrote:
> 
> SNIP
> 
>> @@ -544,12 +598,10 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
>>  static int __run_perf_stat(int argc, const char **argv, int run_idx)
>>  {
>>  	int interval = stat_config.interval;
>> -	int times = stat_config.times;
>>  	int timeout = stat_config.timeout;
>>  	char msg[BUFSIZ];
>>  	unsigned long long t0, t1;
>>  	struct evsel *counter;
>> -	struct timespec ts;
>>  	size_t l;
>>  	int status = 0;
>>  	const bool forks = (argc > 0);
>> @@ -558,17 +610,6 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>>  	int i, cpu;
>>  	bool second_pass = false;
>>  
>> -	if (interval) {
>> -		ts.tv_sec  = interval / USEC_PER_MSEC;
>> -		ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
>> -	} else if (timeout) {
>> -		ts.tv_sec  = timeout / USEC_PER_MSEC;
>> -		ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
>> -	} else {
>> -		ts.tv_sec  = 1;
>> -		ts.tv_nsec = 0;
>> -	}
>> -
>>  	if (forks) {
>>  		if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
>>  						  workload_exec_failed_signal) < 0) {
>> @@ -725,16 +766,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>>  		perf_evlist__start_workload(evsel_list);
>>  		enable_counters();
>>  
>> -		if (interval || timeout) {
>> -			while (!waitpid(child_pid, &status, WNOHANG)) {
>> -				nanosleep(&ts, NULL);
>> -				if (timeout)
>> -					break;
>> -				process_interval();
>> -				if (interval_count && !(--times))
>> -					break;
>> -			}
>> -		}
>> +		if (interval || timeout)
>> +			dispatch_events(child_pid, &stat_config);
>> +
>>  		if (child_pid != -1) {
>>  			if (timeout)
>>  				kill(child_pid, SIGTERM);
>> @@ -751,18 +785,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
>>  			psignal(WTERMSIG(status), argv[0]);
>>  	} else {
>>  		enable_counters();
>> -		while (!done) {
>> -			nanosleep(&ts, NULL);
>> -			if (!is_target_alive(&target, evsel_list->core.threads))
>> -				break;
>> -			if (timeout)
>> -				break;
>> -			if (interval) {
>> -				process_interval();
>> -				if (interval_count && !(--times))
>> -					break;
>> -			}
>> -		}
>> +		dispatch_events(-1, &stat_config);
> 
> hum, from the discussion we had on v3 I expected more, smaller patches
> with easy changes, so the change is more transparent and easier to review
> 
> as I said before this part really makes me worried and needs to be as clear
> as possible.. please introduce the new function first and replace the factored
> places separately, also a more verbose changelog would help ;-)

Ok. Will try to reshape the patch that way.

~Alexey
Alexey Budankov June 1, 2020, 4:10 p.m. UTC | #3
On 01.06.2020 10:38, Alexey Budankov wrote:
> 
> On 31.05.2020 21:19, Jiri Olsa wrote:
>> On Mon, May 25, 2020 at 05:19:45PM +0300, Alexey Budankov wrote:
>>
>> SNIP
>>
>>> @@ -544,12 +598,10 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
<SNIP>
>>>  static int __run_perf_stat(int argc, const char **argv, int run_idx)
>>> +		dispatch_events(-1, &stat_config);
>>
>> hum, from the discussion we had on v3 I expected more, smaller patches
>> with easy changes, so the change is more transparent and easier to review
>>
>> as I said before this part really makes me worried and needs to be as clear
>> as possible.. please introduce the new function first and replace the factored
>> places separately, also a more verbose changelog would help ;-)
> 
> Ok. Will try to reshape the patch that way.

Please see v5.
It splits this refactoring into several smaller consecutive changes to make
review and possible bisecting easier. Let me know if some other parts
of the patch set also require a similar breakdown.

~Alexey

Patch

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c43ba6080691..ef2682a87491 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -421,6 +421,24 @@  static void process_interval(void)
 	print_counters(&rs, 0, NULL);
 }
 
+static bool print_interval(unsigned int interval, int *times)
+{
+	if (interval) {
+		process_interval();
+		if (interval_count && !(--(*times)))
+			return true;
+	}
+	return false;
+}
+
+static bool process_timeout(bool timeout, unsigned int interval, int *times)
+{
+	if (timeout)
+		return true;
+	else
+		return print_interval(interval, times);
+}
+
 static void enable_counters(void)
 {
 	if (stat_config.initial_delay)
@@ -486,6 +504,42 @@  static bool is_target_alive(struct target *_target,
 	return false;
 }
 
+static int dispatch_events(pid_t pid, struct perf_stat_config *config)
+{
+	pid_t child = 0;
+	bool stop = false;
+	struct timespec time_to_sleep;
+	int sleep_time, status = 0;
+	int times = config->times;
+
+	if (config->interval)
+		sleep_time = config->interval;
+	else if (config->timeout)
+		sleep_time = config->timeout;
+	else
+		sleep_time = 1000;
+
+	time_to_sleep.tv_sec  = sleep_time / MSEC_PER_SEC;
+	time_to_sleep.tv_nsec = (sleep_time % MSEC_PER_SEC) * NSEC_PER_MSEC;
+
+	do {
+		if (pid != -1) {
+			child = waitpid(pid, &status, WNOHANG);
+		} else {
+			if (!stop)
+				stop = !is_target_alive(&target, evsel_list->core.threads);
+		}
+
+		if (child || stop || done)
+			break;
+
+		nanosleep(&time_to_sleep, NULL);
+		stop = process_timeout(config->timeout, config->interval, &times);
+	} while (1);
+
+	return status;
+}
+
 enum counter_recovery {
 	COUNTER_SKIP,
 	COUNTER_RETRY,
@@ -544,12 +598,10 @@  static enum counter_recovery stat_handle_error(struct evsel *counter)
 static int __run_perf_stat(int argc, const char **argv, int run_idx)
 {
 	int interval = stat_config.interval;
-	int times = stat_config.times;
 	int timeout = stat_config.timeout;
 	char msg[BUFSIZ];
 	unsigned long long t0, t1;
 	struct evsel *counter;
-	struct timespec ts;
 	size_t l;
 	int status = 0;
 	const bool forks = (argc > 0);
@@ -558,17 +610,6 @@  static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	int i, cpu;
 	bool second_pass = false;
 
-	if (interval) {
-		ts.tv_sec  = interval / USEC_PER_MSEC;
-		ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
-	} else if (timeout) {
-		ts.tv_sec  = timeout / USEC_PER_MSEC;
-		ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
-	} else {
-		ts.tv_sec  = 1;
-		ts.tv_nsec = 0;
-	}
-
 	if (forks) {
 		if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
 						  workload_exec_failed_signal) < 0) {
@@ -725,16 +766,9 @@  static int __run_perf_stat(int argc, const char **argv, int run_idx)
 		perf_evlist__start_workload(evsel_list);
 		enable_counters();
 
-		if (interval || timeout) {
-			while (!waitpid(child_pid, &status, WNOHANG)) {
-				nanosleep(&ts, NULL);
-				if (timeout)
-					break;
-				process_interval();
-				if (interval_count && !(--times))
-					break;
-			}
-		}
+		if (interval || timeout)
+			dispatch_events(child_pid, &stat_config);
+
 		if (child_pid != -1) {
 			if (timeout)
 				kill(child_pid, SIGTERM);
@@ -751,18 +785,7 @@  static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			psignal(WTERMSIG(status), argv[0]);
 	} else {
 		enable_counters();
-		while (!done) {
-			nanosleep(&ts, NULL);
-			if (!is_target_alive(&target, evsel_list->core.threads))
-				break;
-			if (timeout)
-				break;
-			if (interval) {
-				process_interval();
-				if (interval_count && !(--times))
-					break;
-			}
-		}
+		dispatch_events(-1, &stat_config);
 	}
 
 	disable_counters();
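
For reference, the non-blocking wait pattern that dispatch_events() builds
on can be exercised outside perf with a small standalone program
(hypothetical example, not part of the patch):

	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	int main(void)
	{
		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
		int status = 0;
		pid_t pid = fork();

		if (pid < 0) {
			perror("fork");
			return 1;
		}
		if (pid == 0) {
			/* Child: stand-in for the profiled workload. */
			execlp("sleep", "sleep", "3", (char *)NULL);
			_exit(127);
		}

		/*
		 * Parent: poll the child once per second without blocking,
		 * doing per-interval work between checks, the way
		 * dispatch_events() calls process_interval().
		 */
		while (!waitpid(pid, &status, WNOHANG)) {
			nanosleep(&ts, NULL);
			printf("interval tick (child %d)\n", (int)pid);
		}

		printf("child exited, status %d\n", WEXITSTATUS(status));
		return 0;
	}

waitpid() with WNOHANG returns 0 while the child is still running, so the
loop keeps sleeping and doing per-interval work until the child exits,
which is exactly the shape the forks branch of __run_perf_stat() had
open-coded before this change.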