[v3,05/10] sched/fair: Hoist idle_stamp up from idle_balance

Message ID: 1541767840-93588-6-git-send-email-steven.sistare@oracle.com
State: New
Series: steal tasks to improve CPU utilization

Commit Message

Steven Sistare Nov. 9, 2018, 12:50 p.m. UTC
Move the update of idle_stamp from idle_balance to the call site in
pick_next_task_fair, to prepare for a future patch that adds work to
pick_next_task_fair; that work must be included in the idle_stamp
interval. No functional change.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
 kernel/sched/fair.c | 23 ++++++++++++++---------
 1 file changed, 14 insertions(+), 9 deletions(-)
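
In outline, the hoisted accounting in pick_next_task_fair() becomes the
following (a condensed sketch of the hunk in the patch below, not the
full function):

idle:
	update_misfit_status(NULL, rq);

	/* Stamp before idle_balance() so its runtime counts as idle time. */
	IF_SMP(rq->idle_stamp = rq_clock(rq);)

	new_tasks = idle_balance(rq, rf);

	/* A nonzero return means this CPU will not idle; clear the stamp. */
	if (new_tasks)
		IF_SMP(rq->idle_stamp = 0;)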

Comments

Valentin Schneider Nov. 9, 2018, 7:07 p.m. UTC | #1
Hi Steve,

On 09/11/2018 12:50, Steve Sistare wrote:
> Move the update of idle_stamp from idle_balance to the call site in
> pick_next_task_fair, to prepare for a future patch that adds work to
> pick_next_task_fair; that work must be included in the idle_stamp
> interval. No functional change.
> 
> Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
> ---
>  kernel/sched/fair.c | 23 ++++++++++++++---------
>  1 file changed, 14 insertions(+), 9 deletions(-)
> 
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 9031d39..da368ed 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -3725,6 +3725,8 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
>  	rq->misfit_task_load = task_h_load(p);
>  }
>  
> +#define IF_SMP(statement)	statement
> +

I'm not too hot on those IF_SMP() macros. Since you're not introducing
any other user for them, what about an inline function for setting
rq->idle_stamp? When it's mapped to an empty body (!CONFIG_SMP), GCC is
smart enough to eliminate the rq_clock() call that computes its
argument:

----->8-----

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c11adf3..34d9864 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3725,7 +3725,10 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
        rq->misfit_task_load = task_h_load(p);
 }
 
-#define IF_SMP(statement)      statement
+static inline void set_rq_idle_stamp(struct rq *rq, u64 value)
+{
+       rq->idle_stamp = value;
+}
 
 static void overload_clear(struct rq *rq)
 {
@@ -3772,7 +3775,7 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
        return 0;
 }
 
-#define IF_SMP(statement)      /* empty */
+static inline void set_rq_idle_stamp(struct rq *rq, u64 value) {}
 
 static inline void overload_clear(struct rq *rq) {}
 static inline void overload_set(struct rq *rq) {}
@@ -6773,12 +6776,12 @@ done: __maybe_unused;
         * We must set idle_stamp _before_ calling idle_balance(), such that we
         * measure the duration of idle_balance() as idle time.
         */
-       IF_SMP(rq->idle_stamp = rq_clock(rq);)
+       set_rq_idle_stamp(rq, rq_clock(rq));
 
        new_tasks = idle_balance(rq, rf);
 
        if (new_tasks)
-               IF_SMP(rq->idle_stamp = 0;)
+               set_rq_idle_stamp(rq, 0);
 
        /*
         * Because idle_balance() releases (and re-acquires) rq->lock, it is
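
For reference, the reason the suggestion costs nothing on !CONFIG_SMP:
once the empty static inline is expanded, its argument expression is
dead, so a side-effect-free call such as rq_clock() is optimized away.
A minimal standalone sketch of the pattern (u64 and struct rq here are
simplified stand-ins, not the kernel definitions):

typedef unsigned long long u64;
struct rq { u64 idle_stamp; };

#ifdef CONFIG_SMP
static inline void set_rq_idle_stamp(struct rq *rq, u64 value)
{
	rq->idle_stamp = value;		/* real store on SMP */
}
#else
/* Empty body: after inlining, the call and its argument are dead code. */
static inline void set_rq_idle_stamp(struct rq *rq, u64 value) {}
#endif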

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9031d39..da368ed 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3725,6 +3725,8 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 	rq->misfit_task_load = task_h_load(p);
 }
 
+#define IF_SMP(statement)	statement
+
 static void overload_clear(struct rq *rq)
 {
 	struct sparsemask *overload_cpus;
@@ -3770,6 +3772,8 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
 	return 0;
 }
 
+#define IF_SMP(statement)	/* empty */
+
 static inline void overload_clear(struct rq *rq) {}
 static inline void overload_set(struct rq *rq) {}
 
@@ -6764,8 +6768,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 
 idle:
 	update_misfit_status(NULL, rq);
+
+	/*
+	 * We must set idle_stamp _before_ calling idle_balance(), such that we
+	 * measure the duration of idle_balance() as idle time.
+	 */
+	IF_SMP(rq->idle_stamp = rq_clock(rq);)
+
 	new_tasks = idle_balance(rq, rf);
 
+	if (new_tasks)
+		IF_SMP(rq->idle_stamp = 0;)
+
 	/*
 	 * Because idle_balance() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
@@ -9611,12 +9625,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	u64 curr_cost = 0;
 
 	/*
-	 * We must set idle_stamp _before_ calling idle_balance(), such that we
-	 * measure the duration of idle_balance() as idle time.
-	 */
-	this_rq->idle_stamp = rq_clock(this_rq);
-
-	/*
 	 * Do not pull tasks towards !active CPUs...
 	 */
 	if (!cpu_active(this_cpu))
@@ -9707,9 +9715,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
 		pulled_task = -1;
 
-	if (pulled_task)
-		this_rq->idle_stamp = 0;
-
 	rq_repin_lock(this_rq, rf);
 
 	return pulled_task;