@@ -101,6 +101,7 @@ static inline void play_dead(void)
}
#endif
+void idle_load_update(void);
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
@@ -140,6 +141,7 @@ void cpu_idle(void)
stop_critical_timings();
pm_idle();
start_critical_timings();
+ idle_load_update();
trace_power_end(smp_processor_id());
@@ -1819,7 +1819,6 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
}
#endif
-static void calc_load_account_idle(struct rq *this_rq);
static void update_sysctl(void);
static int get_update_sysctl_factor(void);
static void update_cpu_load(struct rq *this_rq);
@@ -2959,11 +2958,12 @@ static unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun);
-static long calc_load_fold_active(struct rq *this_rq)
+static long calc_load_fold(struct rq *this_rq, int idle)
{
- long nr_active, delta = 0;
+ long nr_active = 0, delta = 0;
- nr_active = this_rq->nr_running;
+ if (!idle)
+ nr_active = this_rq->nr_running;
nr_active += (long) this_rq->nr_uninterruptible;
if (nr_active != this_rq->calc_load_active) {
@@ -2974,46 +2974,6 @@ static long calc_load_fold_active(struct rq *this_rq)
return delta;
}
-#ifdef CONFIG_NO_HZ
-/*
- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
- *
- * When making the ILB scale, we should try to pull this in as well.
- */
-static atomic_long_t calc_load_tasks_idle;
-
-static void calc_load_account_idle(struct rq *this_rq)
-{
- long delta;
-
- delta = calc_load_fold_active(this_rq);
- if (delta)
- atomic_long_add(delta, &calc_load_tasks_idle);
-}
-
-static long calc_load_fold_idle(void)
-{
- long delta = 0;
-
- /*
- * Its got a race, we don't care...
- */
- if (atomic_long_read(&calc_load_tasks_idle))
- delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
-
- return delta;
-}
-#else
-static void calc_load_account_idle(struct rq *this_rq)
-{
-}
-
-static inline long calc_load_fold_idle(void)
-{
- return 0;
-}
-#endif
-
/**
* get_avenrun - get the load average array
* @loads: pointer to dest load array
@@ -3043,7 +3003,7 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
*/
void calc_global_load(void)
{
- unsigned long upd = calc_load_update + 10;
+ unsigned long upd = calc_load_update + LOAD_FREQ / 2;
long active;
if (time_before(jiffies, upd))
@@ -3063,21 +3023,30 @@ void calc_global_load(void)
* Called from update_cpu_load() to periodically update this CPU's
* active count.
*/
-static void calc_load_account_active(struct rq *this_rq)
+static void calc_load_account(struct rq *this_rq, int idle)
{
long delta;
if (time_before(jiffies, this_rq->calc_load_update))
return;
- delta = calc_load_fold_active(this_rq);
- delta += calc_load_fold_idle();
+ delta = calc_load_fold(this_rq, idle);
if (delta)
atomic_long_add(delta, &calc_load_tasks);
this_rq->calc_load_update += LOAD_FREQ;
}
+void idle_load_update(void)
+{
+ struct rq *rq = this_rq();
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ calc_load_account(rq, 1);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
/*
* The exact cpuload at various idx values, calculated at every tick would be
* load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
@@ -3194,7 +3163,7 @@ static void update_cpu_load_active(struct rq *this_rq)
{
update_cpu_load(this_rq);
- calc_load_account_active(this_rq);
+ calc_load_account(this_rq, 0);
}
#ifdef CONFIG_SMP
@@ -23,7 +23,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
schedstat_inc(rq, sched_goidle);
- calc_load_account_idle(rq);
return rq->idle;
}
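
Taken together, the hunks above fold the old NO_HZ-specific idle accounting (calc_load_account_idle / calc_load_fold_idle) into a single calc_load_fold(rq, idle) helper: the tick path folds nr_running + nr_uninterruptible, the idle path folds only nr_uninterruptible, and whichever path crosses calc_load_update first consumes that LOAD_FREQ window. Below is a rough standalone sketch of that behaviour, not the kernel code itself: rq, jiffies, time_before() and the atomic calc_load_tasks are replaced by plain variables, and HZ is assumed to be 100.

/* Simplified model of the folding introduced by this patch (illustration only). */
#include <stdio.h>

#define LOAD_FREQ (5 * 100 + 1)         /* 5 sec + 1 tick, assuming HZ=100 */

struct rq {
        long nr_running;                /* runnable tasks on this cpu */
        long nr_uninterruptible;        /* tasks in uninterruptible sleep */
        long calc_load_active;          /* contribution already folded in */
        unsigned long calc_load_update; /* next fold deadline, in jiffies */
};

static long calc_load_tasks;            /* global sum; atomic_long_t in the kernel */
static unsigned long jiffies;

/* An idle cpu contributes only its uninterruptible tasks. */
static long calc_load_fold(struct rq *rq, int idle)
{
        long nr_active = 0, delta = 0;

        if (!idle)
                nr_active = rq->nr_running;
        nr_active += rq->nr_uninterruptible;

        if (nr_active != rq->calc_load_active) {
                delta = nr_active - rq->calc_load_active;
                rq->calc_load_active = nr_active;
        }
        return delta;
}

/* One fold per LOAD_FREQ window, from either the tick or the idle path. */
static void calc_load_account(struct rq *rq, int idle)
{
        if (jiffies < rq->calc_load_update)     /* time_before() in the kernel */
                return;
        calc_load_tasks += calc_load_fold(rq, idle);
        rq->calc_load_update += LOAD_FREQ;
}

int main(void)
{
        struct rq rq = { .nr_running = 2, .nr_uninterruptible = 1 };

        jiffies = LOAD_FREQ;
        calc_load_account(&rq, 0);      /* busy tick: folds 2 + 1 = 3 */
        printf("after busy fold: %ld\n", calc_load_tasks);

        rq.nr_running = 0;
        jiffies += LOAD_FREQ;
        calc_load_account(&rq, 1);      /* idle path: folds only 1, delta -2 */
        printf("after idle fold: %ld\n", calc_load_tasks);
        return 0;
}

The idle_load_update() call added to cpu_idle() plays the role of the second calc_load_account(&rq, 1) call here, so an idle CPU still retires its LOAD_FREQ window from the idle loop rather than waiting for a busy tick.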
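
The "exact cpuload at various idx values" comment left as context above quotes the per-index decay rule, load = (2^idx - 1)/2^idx * load + 1/2^idx * cur_load. As a quick numerical illustration (floating point here; the kernel does this with shifts and integer arithmetic), larger idx values track a change in cur_load progressively more slowly:

#include <stdio.h>

int main(void)
{
        double cur_load = 100.0;        /* new sampled load */
        int idx;

        for (idx = 0; idx <= 4; idx++) {
                int denom = 1 << idx;   /* 2^idx */
                double load = 0.0;      /* previous decayed value */
                int tick;

                /* load = (2^idx - 1)/2^idx * load + 1/2^idx * cur_load */
                for (tick = 0; tick < 10; tick++)
                        load = (double)(denom - 1) / denom * load + cur_load / denom;

                printf("idx=%d: load after 10 ticks = %6.2f\n", idx, load);
        }
        return 0;
}

With cur_load jumping from 0 to 100, idx=0 reaches 100 on the first tick while idx=4 is still below 50 after ten ticks, which is what lets the higher indexes serve as longer-term averages.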