* [PATCH] fair/util_est: Separate util_est_dequeue() for cfs_rq_util_change
@ 2020-12-09 10:44 Xuewen Yan
  2020-12-11 11:30 ` Dietmar Eggemann
  2020-12-15 17:59 ` Dietmar Eggemann
  0 siblings, 2 replies; 7+ messages in thread
From: Xuewen Yan @ 2020-12-09 10:44 UTC (permalink / raw)
  To: patrick.bellasi, vincent.guittot, peterz
  Cc: mingo, juri.lelli, dietmar.eggemann, rostedt, bsegall, mgorman,
	bristot, linux-kernel, Xuewen.Yan, xuewyan, Xuewen Yan

When a task is dequeued, its util is updated and cfs_rq_util_change()
re-evaluates the rq's util. If cfs_rq->avg.util_est.enqueued is bigger
than cfs_rq->avg.util_avg, the estimated value is used; but because the
task's contribution has not yet been subtracted from
cfs_rq->avg.util_est.enqueued at that point, the cfs_rq util is
overestimated, and as a result cfs_rq_util_change() may raise the
frequency unreasonably.
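
For reference, this is how cpu_util() combines the two signals in this
era of kernel/sched/fair.c (a simplified sketch, details trimmed); the
max() is why a stale util_est.enqueued inflates the result:

  static inline unsigned long cpu_util(int cpu)
  {
  	struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
  	unsigned int util = READ_ONCE(cfs_rq->avg.util_avg);

  	/* With UTIL_EST, the (possibly stale) estimate can dominate */
  	if (sched_feat(UTIL_EST))
  		util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));

  	return min_t(unsigned long, util, capacity_orig_of(cpu));
  }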

Fix this by splitting util_est_dequeue() into util_est_dequeue() and
util_est_update(), and subtracting _task_util_est(p) from the cfs_rq
before the util is updated.
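
With the patch applied, the dequeue path is ordered roughly as follows
(a sketch of the call chain, not literal code):

  dequeue_task_fair()
    util_est_dequeue()          /* subtract _task_util_est(p) first */
    for_each_sched_entity(se)
      dequeue_entity()
        update_load_avg()
          cfs_rq_util_change()  /* now sees the reduced util_est */
    ...
  dequeue_throttle:
    util_est_update()           /* update the task's util_est EWMA last */
    hrtick_update()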

Signed-off-by: Xuewen Yan <xuewen.yan@unisoc.com>
---
 kernel/sched/fair.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ae7ceba..20ecfd5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3946,11 +3946,9 @@ static inline bool within_margin(int value, int margin)
 }
 
 static void
-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
+util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p)
 {
-	long last_ewma_diff;
 	struct util_est ue;
-	int cpu;
 
 	if (!sched_feat(UTIL_EST))
 		return;
@@ -3961,6 +3959,17 @@ static inline bool within_margin(int value, int margin)
 	WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
 	trace_sched_util_est_cfs_tp(cfs_rq);
+}
+
+static void
+util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
+{
+	long last_ewma_diff;
+	struct util_est ue;
+	int cpu;
+
+	if (!sched_feat(UTIL_EST))
+		return;
 
 	/*
 	 * Skip update of task's estimated utilization when the task has not
@@ -4085,7 +4094,10 @@ static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
 util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
 
 static inline void
-util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
+util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
+
+static inline void
+util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p,
 		 bool task_sleep) {}
 static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 
@@ -5589,6 +5601,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int idle_h_nr_running = task_has_idle_policy(p);
 	bool was_sched_idle = sched_idle_rq(rq);
 
+	util_est_dequeue(&rq->cfs, p);
+
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
@@ -5639,7 +5653,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		rq->next_balance = jiffies;
 
 dequeue_throttle:
-	util_est_dequeue(&rq->cfs, p, task_sleep);
+	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
 }
 
-- 
1.9.1


