linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] scheduler: conditional statement cleanup
@ 2018-09-21 20:22 PierceGriffiths
  2018-09-22  7:08 ` kbuild test robot
                   ` (2 more replies)
  0 siblings, 3 replies; 5+ messages in thread
From: PierceGriffiths @ 2018-09-21 20:22 UTC (permalink / raw)
  Cc: pierceagriffiths, Ingo Molnar, Peter Zijlstra, linux-kernel

From: Pierce Griffiths <pierceagriffiths@gmail.com>

*Condense sequential if statements into a single if statement when they
share an outcome
*Eliminate a jump instruction by replacing a goto with a return
*Eliminate an unnecessary local variable
*Replace "if(function or boolean expression) return true else return false"
with "return (function or boolean expression);"
*Remove null pointer checks before calls to kfree on the grounds that
kfree(NULL) results in a no-op

Signed-off-by: Pierce Griffiths <pierceagriffiths@gmail.com>
---
Please tell me if I've made any mistakes in the submission process,
this is the first time I've submitted a patch to the Linux kernel.

 kernel/sched/core.c    |  8 ++------
 kernel/sched/cpufreq.c |  5 +----
 kernel/sched/cpupri.c  | 22 ++++++++--------------
 kernel/sched/rt.c      | 31 ++++++++++++-------------------
 4 files changed, 23 insertions(+), 43 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 625bc9897f62..443a1f235cfd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -617,12 +617,8 @@ bool sched_can_stop_tick(struct rq *rq)
 	 * If there are more than one RR tasks, we need the tick to effect the
 	 * actual RR behaviour.
 	 */
-	if (rq->rt.rr_nr_running) {
-		if (rq->rt.rr_nr_running == 1)
-			return true;
-		else
-			return false;
-	}
+	if (rq->rt.rr_nr_running)
+		return rq->rt.rr_nr_running == 1;
 
 	/*
 	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 5e54cbcae673..a8fd4bd68954 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -34,10 +34,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
 			void (*func)(struct update_util_data *data, u64 time,
 				     unsigned int flags))
 {
-	if (WARN_ON(!data || !func))
-		return;
-
-	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
+	if (WARN_ON(!data || !func || per_cpu(cpufreq_update_util_data, cpu)))
 		return;
 
 	data->func = func;
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index daaadf939ccb..152c133e8247 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -29,20 +29,16 @@
 #include "sched.h"
 
 /* Convert between a 140 based task->prio, and our 102 based cpupri */
-static int convert_prio(int prio)
+static int convert_prio(const int prio)
 {
-	int cpupri;
-
 	if (prio == CPUPRI_INVALID)
-		cpupri = CPUPRI_INVALID;
+		return CPUPRI_INVALID;
 	else if (prio == MAX_PRIO)
-		cpupri = CPUPRI_IDLE;
+		return CPUPRI_IDLE;
 	else if (prio >= MAX_RT_PRIO)
-		cpupri = CPUPRI_NORMAL;
+		return CPUPRI_NORMAL;
 	else
-		cpupri = MAX_RT_PRIO - prio + 1;
-
-	return cpupri;
+		return MAX_RT_PRIO - prio + 1;
 }
 
 /**
@@ -95,10 +91,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		smp_rmb();
 
 		/* Need to do the rmb for every iteration */
-		if (skip)
-			continue;
-
-		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
+		if (skip || cpumask_any_and(&p->cpus_allowed, vec->mask)
+				>= nr_cpu_ids)
 			continue;
 
 		if (lowest_mask) {
@@ -222,7 +216,7 @@ int cpupri_init(struct cpupri *cp)
 	return 0;
 
 cleanup:
-	for (i--; i >= 0; i--)
+	while (--i >= 0)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 	return -ENOMEM;
 }
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2e2955a8cf8f..acf1b94669ad 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -142,10 +142,12 @@ void free_rt_sched_group(struct task_group *tg)
 		destroy_rt_bandwidth(&tg->rt_bandwidth);
 
 	for_each_possible_cpu(i) {
-		if (tg->rt_rq)
-			kfree(tg->rt_rq[i]);
-		if (tg->rt_se)
-			kfree(tg->rt_se[i]);
+		/* Don't need to check if tg->rt_rq[i]
+		 * or tg->rt_se[i] are NULL, since kfree(NULL)
+		 * simply performs no operation
+		 */
+		kfree(tg->rt_rq[i]);
+		kfree(tg->rt_se[i]);
 	}
 
 	kfree(tg->rt_rq);
@@ -1015,10 +1017,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
 	BUG_ON(&rq->rt != rt_rq);
 
-	if (rt_rq->rt_queued)
-		return;
-
-	if (rt_rq_throttled(rt_rq))
+	if (rt_rq->rt_queued || rt_rq_throttled(rt_rq))
 		return;
 
 	if (rt_rq->rt_nr_running) {
@@ -1211,10 +1210,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
  */
 static inline bool move_entity(unsigned int flags)
 {
-	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
-		return false;
-
-	return true;
+	return !((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
 }
 
 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
@@ -1393,7 +1389,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 
 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
-		goto out;
+		return cpu;
 
 	rq = cpu_rq(cpu);
 
@@ -1437,7 +1433,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	}
 	rcu_read_unlock();
 
-out:
 	return cpu;
 }
 
@@ -2518,12 +2513,10 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 	/*
 	 * Disallowing the root group RT runtime is BAD, it would disallow the
 	 * kernel creating (and or operating) RT threads.
+	 *
+	 * No period doesn't make any sense.
 	 */
-	if (tg == &root_task_group && rt_runtime == 0)
-		return -EINVAL;
-
-	/* No period doesn't make any sense. */
-	if (rt_period == 0)
+	if ((tg == &root_task_group && !rt_runtime) || !rt_period)
 		return -EINVAL;
 
 	mutex_lock(&rt_constraints_mutex);
-- 
2.19.0


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] scheduler: conditional statement cleanup
  2018-09-21 20:22 [PATCH] scheduler: conditional statement cleanup PierceGriffiths
@ 2018-09-22  7:08 ` kbuild test robot
  2018-09-22 10:26 ` Peter Zijlstra
  2018-09-26  7:46 ` Peter Zijlstra
  2 siblings, 0 replies; 5+ messages in thread
From: kbuild test robot @ 2018-09-22  7:08 UTC (permalink / raw)
  To: PierceGriffiths
  Cc: kbuild-all, pierceagriffiths, Ingo Molnar, Peter Zijlstra, linux-kernel

[-- Attachment #1: Type: text/plain, Size: 2091 bytes --]

Hi Pierce,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on tip/sched/core]
[also build test ERROR on v4.19-rc4 next-20180921]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/PierceGriffiths/scheduler-conditional-statement-cleanup/20180922-144638
config: x86_64-randconfig-x019-201837 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

All errors (new ones prefixed by >>):

   kernel/sched/rt.c: In function 'move_entity':
>> kernel/sched/rt.c:1214:1: error: expected ';' before '}' token
    }
    ^

vim +1214 kernel/sched/rt.c

63489e45 kernel/sched_rt.c Steven Rostedt   2008-01-25  1205  
ff77e468 kernel/sched/rt.c Peter Zijlstra   2016-01-18  1206  /*
ff77e468 kernel/sched/rt.c Peter Zijlstra   2016-01-18  1207   * Change rt_se->run_list location unless SAVE && !MOVE
ff77e468 kernel/sched/rt.c Peter Zijlstra   2016-01-18  1208   *
ff77e468 kernel/sched/rt.c Peter Zijlstra   2016-01-18  1209   * assumes ENQUEUE/DEQUEUE flags match
ff77e468 kernel/sched/rt.c Peter Zijlstra   2016-01-18  1210   */
ff77e468 kernel/sched/rt.c Peter Zijlstra   2016-01-18  1211  static inline bool move_entity(unsigned int flags)
ff77e468 kernel/sched/rt.c Peter Zijlstra   2016-01-18  1212  {
4f659495 kernel/sched/rt.c Pierce Griffiths 2018-09-21  1213  	return !((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
ff77e468 kernel/sched/rt.c Peter Zijlstra   2016-01-18 @1214  }
ff77e468 kernel/sched/rt.c Peter Zijlstra   2016-01-18  1215  

:::::: The code at line 1214 was first introduced by commit
:::::: ff77e468535987b3d21b7bd4da15608ea3ce7d0b sched/rt: Fix PI handling vs. sched_setscheduler()

:::::: TO: Peter Zijlstra <peterz@infradead.org>
:::::: CC: Ingo Molnar <mingo@kernel.org>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 24418 bytes --]

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] scheduler: conditional statement cleanup
  2018-09-21 20:22 [PATCH] scheduler: conditional statement cleanup PierceGriffiths
  2018-09-22  7:08 ` kbuild test robot
@ 2018-09-22 10:26 ` Peter Zijlstra
  2018-09-25 20:07   ` Pierce Griffiths
  2018-09-26  7:46 ` Peter Zijlstra
  2 siblings, 1 reply; 5+ messages in thread
From: Peter Zijlstra @ 2018-09-22 10:26 UTC (permalink / raw)
  To: PierceGriffiths; +Cc: Ingo Molnar, linux-kernel

On Fri, Sep 21, 2018 at 03:22:03PM -0500, PierceGriffiths wrote:
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 625bc9897f62..443a1f235cfd 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -617,12 +617,8 @@ bool sched_can_stop_tick(struct rq *rq)
>  	 * If there are more than one RR tasks, we need the tick to effect the
>  	 * actual RR behaviour.
>  	 */
> -	if (rq->rt.rr_nr_running) {
> -		if (rq->rt.rr_nr_running == 1)
> -			return true;
> -		else
> -			return false;
> -	}
> +	if (rq->rt.rr_nr_running)
> +		return rq->rt.rr_nr_running == 1;
>  
>  	/*
>  	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no

That one is OK I suppose.

> diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
> index 5e54cbcae673..a8fd4bd68954 100644
> --- a/kernel/sched/cpufreq.c
> +++ b/kernel/sched/cpufreq.c
> @@ -34,10 +34,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
>  			void (*func)(struct update_util_data *data, u64 time,
>  				     unsigned int flags))
>  {
> -	if (WARN_ON(!data || !func))
> -		return;
> -
> -	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
> +	if (WARN_ON(!data || !func || per_cpu(cpufreq_update_util_data, cpu)))
>  		return;
>  
>  	data->func = func;

But I'm not a fan of this one. It mixes a different class of function
and the WARN condition gets too complicated. It's easier to have separate
warns.

> diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
> index daaadf939ccb..152c133e8247 100644
> --- a/kernel/sched/cpupri.c
> +++ b/kernel/sched/cpupri.c
> @@ -29,20 +29,16 @@
>  #include "sched.h"
>  
>  /* Convert between a 140 based task->prio, and our 102 based cpupri */
> -static int convert_prio(int prio)
> +static int convert_prio(const int prio)
>  {
> -	int cpupri;
> -
>  	if (prio == CPUPRI_INVALID)
> -		cpupri = CPUPRI_INVALID;
> +		return CPUPRI_INVALID;
>  	else if (prio == MAX_PRIO)
> -		cpupri = CPUPRI_IDLE;
> +		return CPUPRI_IDLE;
>  	else if (prio >= MAX_RT_PRIO)
> -		cpupri = CPUPRI_NORMAL;
> +		return CPUPRI_NORMAL;
>  	else
> -		cpupri = MAX_RT_PRIO - prio + 1;
> -
> -	return cpupri;
> +		return MAX_RT_PRIO - prio + 1;

The code looks even better if you leave out the last else.

>  }
>  
>  /**
> @@ -95,10 +91,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
>  		smp_rmb();
>  
>  		/* Need to do the rmb for every iteration */
> -		if (skip)
> -			continue;
> -
> -		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
> +		if (skip || cpumask_any_and(&p->cpus_allowed, vec->mask)
> +				>= nr_cpu_ids)
>  			continue;
>  
>  		if (lowest_mask) {

That just makes the code ugly for no reason.

> @@ -222,7 +216,7 @@ int cpupri_init(struct cpupri *cp)
>  	return 0;
>  
>  cleanup:
> -	for (i--; i >= 0; i--)
> +	while (--i >= 0)
>  		free_cpumask_var(cp->pri_to_cpu[i].mask);
>  	return -ENOMEM;
>  }
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 2e2955a8cf8f..acf1b94669ad 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -142,10 +142,12 @@ void free_rt_sched_group(struct task_group *tg)
>  		destroy_rt_bandwidth(&tg->rt_bandwidth);
>  
>  	for_each_possible_cpu(i) {
> -		if (tg->rt_rq)
> -			kfree(tg->rt_rq[i]);
> -		if (tg->rt_se)
> -			kfree(tg->rt_se[i]);
> +		/* Don't need to check if tg->rt_rq[i]
> +		 * or tg->rt_se[i] are NULL, since kfree(NULL)
> +		 * simply performs no operation
> +		 */

That's an invalid comment style.

> +		kfree(tg->rt_rq[i]);
> +		kfree(tg->rt_se[i]);
>  	}
>  
>  	kfree(tg->rt_rq);
> @@ -1015,10 +1017,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
>  
>  	BUG_ON(&rq->rt != rt_rq);
>  
> -	if (rt_rq->rt_queued)
> -		return;
> -
> -	if (rt_rq_throttled(rt_rq))
> +	if (rt_rq->rt_queued || rt_rq_throttled(rt_rq))
>  		return;
>  
>  	if (rt_rq->rt_nr_running) {

The compiler can do this transformation and the old code was simpler.

> @@ -1211,10 +1210,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
>   */
>  static inline bool move_entity(unsigned int flags)
>  {
> -	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
> -		return false;
> -
> -	return true;
> +	return !((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
>  }

Again, I find the new code harder to read.

>  
> @@ -2518,12 +2513,10 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
>  	/*
>  	 * Disallowing the root group RT runtime is BAD, it would disallow the
>  	 * kernel creating (and or operating) RT threads.
> +	 *
> +	 * No period doesn't make any sense.
>  	 */
> -	if (tg == &root_task_group && rt_runtime == 0)
> -		return -EINVAL;
> -
> -	/* No period doesn't make any sense. */
> -	if (rt_period == 0)
> +	if ((tg == &root_task_group && !rt_runtime) || !rt_period)
>  		return -EINVAL;
>  
>  	mutex_lock(&rt_constraints_mutex);

Again, far harder to read.

In short, while all the transformations are 'correct' the end result is
horrible. Please don't do this.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] scheduler: conditional statement cleanup
  2018-09-22 10:26 ` Peter Zijlstra
@ 2018-09-25 20:07   ` Pierce Griffiths
  0 siblings, 0 replies; 5+ messages in thread
From: Pierce Griffiths @ 2018-09-25 20:07 UTC (permalink / raw)
  To: Peter Zijlstra; +Cc: Ingo Molnar, linux-kernel

Peter,
Is there anything in this patch that you'd consider salvageable, or
would it be better to just throw the whole thing out? In either case, I
appreciate your honesty regarding this patch's (lack of) quality, and
apologize for what is most likely a waste of your time.

On Sat, Sep 22, 2018 at 12:26:40PM +0200, Peter Zijlstra wrote:
> On Fri, Sep 21, 2018 at 03:22:03PM -0500, PierceGriffiths wrote:
> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index 625bc9897f62..443a1f235cfd 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -617,12 +617,8 @@ bool sched_can_stop_tick(struct rq *rq)
> >  	 * If there are more than one RR tasks, we need the tick to effect the
> >  	 * actual RR behaviour.
> >  	 */
> > -	if (rq->rt.rr_nr_running) {
> > -		if (rq->rt.rr_nr_running == 1)
> > -			return true;
> > -		else
> > -			return false;
> > -	}
> > +	if (rq->rt.rr_nr_running)
> > +		return rq->rt.rr_nr_running == 1;
> >  
> >  	/*
> >  	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
> 
> That one is OK I suppose.
> 
> > diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
> > index 5e54cbcae673..a8fd4bd68954 100644
> > --- a/kernel/sched/cpufreq.c
> > +++ b/kernel/sched/cpufreq.c
> > @@ -34,10 +34,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
> >  			void (*func)(struct update_util_data *data, u64 time,
> >  				     unsigned int flags))
> >  {
> > -	if (WARN_ON(!data || !func))
> > -		return;
> > -
> > -	if (WARN_ON(per_cpu(cpufreq_update_util_data, cpu)))
> > +	if (WARN_ON(!data || !func || per_cpu(cpufreq_update_util_data, cpu)))
> >  		return;
> >  
> >  	data->func = func;
> 
> But I'm not a fan of this one. It mixes a different class of function
> and the WARN condition gets too complicated. Its easier to have separate
> warns.
> 
> > diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
> > index daaadf939ccb..152c133e8247 100644
> > --- a/kernel/sched/cpupri.c
> > +++ b/kernel/sched/cpupri.c
> > @@ -29,20 +29,16 @@
> >  #include "sched.h"
> >  
> >  /* Convert between a 140 based task->prio, and our 102 based cpupri */
> > -static int convert_prio(int prio)
> > +static int convert_prio(const int prio)
> >  {
> > -	int cpupri;
> > -
> >  	if (prio == CPUPRI_INVALID)
> > -		cpupri = CPUPRI_INVALID;
> > +		return CPUPRI_INVALID;
> >  	else if (prio == MAX_PRIO)
> > -		cpupri = CPUPRI_IDLE;
> > +		return CPUPRI_IDLE;
> >  	else if (prio >= MAX_RT_PRIO)
> > -		cpupri = CPUPRI_NORMAL;
> > +		return CPUPRI_NORMAL;
> >  	else
> > -		cpupri = MAX_RT_PRIO - prio + 1;
> > -
> > -	return cpupri;
> > +		return MAX_RT_PRIO - prio + 1;
> 
> The code looks even better if you leave out the last else.
> 
> >  }
> >  
> >  /**
> > @@ -95,10 +91,8 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
> >  		smp_rmb();
> >  
> >  		/* Need to do the rmb for every iteration */
> > -		if (skip)
> > -			continue;
> > -
> > -		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
> > +		if (skip || cpumask_any_and(&p->cpus_allowed, vec->mask)
> > +				>= nr_cpu_ids)
> >  			continue;
> >  
> >  		if (lowest_mask) {
> 
> That just makes the code ugly for no reason.
> 
> > @@ -222,7 +216,7 @@ int cpupri_init(struct cpupri *cp)
> >  	return 0;
> >  
> >  cleanup:
> > -	for (i--; i >= 0; i--)
> > +	while (--i >= 0)
> >  		free_cpumask_var(cp->pri_to_cpu[i].mask);
> >  	return -ENOMEM;
> >  }
> > diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> > index 2e2955a8cf8f..acf1b94669ad 100644
> > --- a/kernel/sched/rt.c
> > +++ b/kernel/sched/rt.c
> > @@ -142,10 +142,12 @@ void free_rt_sched_group(struct task_group *tg)
> >  		destroy_rt_bandwidth(&tg->rt_bandwidth);
> >  
> >  	for_each_possible_cpu(i) {
> > -		if (tg->rt_rq)
> > -			kfree(tg->rt_rq[i]);
> > -		if (tg->rt_se)
> > -			kfree(tg->rt_se[i]);
> > +		/* Don't need to check if tg->rt_rq[i]
> > +		 * or tg->rt_se[i] are NULL, since kfree(NULL)
> > +		 * simply performs no operation
> > +		 */
> 
> That's an invalid comment style.
> 
> > +		kfree(tg->rt_rq[i]);
> > +		kfree(tg->rt_se[i]);
> >  	}
> >  
> >  	kfree(tg->rt_rq);
> > @@ -1015,10 +1017,7 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
> >  
> >  	BUG_ON(&rq->rt != rt_rq);
> >  
> > -	if (rt_rq->rt_queued)
> > -		return;
> > -
> > -	if (rt_rq_throttled(rt_rq))
> > +	if (rt_rq->rt_queued || rt_rq_throttled(rt_rq))
> >  		return;
> >  
> >  	if (rt_rq->rt_nr_running) {
> 
> The compiler can do this transformation and the old code was simpler.
> 
> > @@ -1211,10 +1210,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
> >   */
> >  static inline bool move_entity(unsigned int flags)
> >  {
> > -	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
> > -		return false;
> > -
> > -	return true;
> > +	return !((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
> >  }
> 
> Again, I find the new code harder to read.
> 
> >  
> > @@ -2518,12 +2513,10 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
> >  	/*
> >  	 * Disallowing the root group RT runtime is BAD, it would disallow the
> >  	 * kernel creating (and or operating) RT threads.
> > +	 *
> > +	 * No period doesn't make any sense.
> >  	 */
> > -	if (tg == &root_task_group && rt_runtime == 0)
> > -		return -EINVAL;
> > -
> > -	/* No period doesn't make any sense. */
> > -	if (rt_period == 0)
> > +	if ((tg == &root_task_group && !rt_runtime) || !rt_period)
> >  		return -EINVAL;
> >  
> >  	mutex_lock(&rt_constraints_mutex);
> 
> Again, far harder to read.
> 
> In short, while all the transformations are 'correct' the end result is
> horrible. Please don't do this.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] scheduler: conditional statement cleanup
  2018-09-21 20:22 [PATCH] scheduler: conditional statement cleanup PierceGriffiths
  2018-09-22  7:08 ` kbuild test robot
  2018-09-22 10:26 ` Peter Zijlstra
@ 2018-09-26  7:46 ` Peter Zijlstra
  2 siblings, 0 replies; 5+ messages in thread
From: Peter Zijlstra @ 2018-09-26  7:46 UTC (permalink / raw)
  To: PierceGriffiths; +Cc: Ingo Molnar, linux-kernel

On Fri, Sep 21, 2018 at 03:22:03PM -0500, PierceGriffiths wrote:
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 625bc9897f62..443a1f235cfd 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -617,12 +617,8 @@ bool sched_can_stop_tick(struct rq *rq)
>  	 * If there are more than one RR tasks, we need the tick to effect the
>  	 * actual RR behaviour.
>  	 */
> -	if (rq->rt.rr_nr_running) {
> -		if (rq->rt.rr_nr_running == 1)
> -			return true;
> -		else
> -			return false;
> -	}
> +	if (rq->rt.rr_nr_running)
> +		return rq->rt.rr_nr_running == 1;
>  
>  	/*
>  	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no

> diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
> index daaadf939ccb..152c133e8247 100644
> --- a/kernel/sched/cpupri.c
> +++ b/kernel/sched/cpupri.c
> @@ -29,20 +29,16 @@
>  #include "sched.h"
>  
>  /* Convert between a 140 based task->prio, and our 102 based cpupri */
> -static int convert_prio(int prio)
> +static int convert_prio(const int prio)
>  {
> -	int cpupri;
> -
>  	if (prio == CPUPRI_INVALID)
> -		cpupri = CPUPRI_INVALID;
> +		return CPUPRI_INVALID;
>  	else if (prio == MAX_PRIO)
> -		cpupri = CPUPRI_IDLE;
> +		return CPUPRI_IDLE;
>  	else if (prio >= MAX_RT_PRIO)
> -		cpupri = CPUPRI_NORMAL;
> +		return CPUPRI_NORMAL;
>  	else
> -		cpupri = MAX_RT_PRIO - prio + 1;
> -
> -	return cpupri;
> +		return MAX_RT_PRIO - prio + 1;

Code improves if you leave out the last else.

>  }
>  
>  /**

> @@ -222,7 +216,7 @@ int cpupri_init(struct cpupri *cp)
>  	return 0;
>  
>  cleanup:
> -	for (i--; i >= 0; i--)
> +	while (--i >= 0)
>  		free_cpumask_var(cp->pri_to_cpu[i].mask);
>  	return -ENOMEM;
>  }

> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index 2e2955a8cf8f..acf1b94669ad 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -142,10 +142,12 @@ void free_rt_sched_group(struct task_group *tg)
>  		destroy_rt_bandwidth(&tg->rt_bandwidth);
>  
>  	for_each_possible_cpu(i) {
> -		if (tg->rt_rq)
> -			kfree(tg->rt_rq[i]);
> -		if (tg->rt_se)
> -			kfree(tg->rt_se[i]);
> +		/* Don't need to check if tg->rt_rq[i]
> +		 * or tg->rt_se[i] are NULL, since kfree(NULL)
> +		 * simply performs no operation
> +		 */

Don't bother with the comment tho (but if you do, know this is the wrong
comment style).

> +		kfree(tg->rt_rq[i]);
> +		kfree(tg->rt_se[i]);
>  	}
>  
>  	kfree(tg->rt_rq);

> @@ -1393,7 +1389,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
>  
>  	/* For anything but wake ups, just return the task_cpu */
>  	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
> -		goto out;
> +		return cpu;
>  
>  	rq = cpu_rq(cpu);
>  
> @@ -1437,7 +1433,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
>  	}
>  	rcu_read_unlock();
>  
> -out:
>  	return cpu;
>  }
>  

These changes are OK with minor edits, the rest just makes the code
harder to read.

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2018-09-26  7:46 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-09-21 20:22 [PATCH] scheduler: conditional statement cleanup PierceGriffiths
2018-09-22  7:08 ` kbuild test robot
2018-09-22 10:26 ` Peter Zijlstra
2018-09-25 20:07   ` Pierce Griffiths
2018-09-26  7:46 ` Peter Zijlstra

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).