[Intel-gfx] [PATCH 1/2] drm/i915/gt: Try to smooth RPS spikes
From: Chris Wilson @ 2020-04-14 16:14 UTC
  To: intel-gfx; +Cc: Chris Wilson

By the time we respond to the RPS interrupt [inside a worker], the GPU
may be running a different workload. As we look to make the evaluation
intervals shorter, these spikes are more likely to occur. Let's try to
smooth over the spikes in the workload by comparing the EI interrupt
[up/down events] with the most recently completed EI; if both say up,
then increase the clocks, and if they disagree, stay the same. In
principle, this means it now takes 2 up EIs to increase into the next
bin, and similarly 2 down EIs to decrease. However, if the worker runs
fast enough, the previous EI in the registers will be the same one that
triggered the interrupt, so responsiveness remains unaffected. [Under
the current scheme, where EIs are on the order of 10ms, it is likely
that this is true and we compare the interrupt with the EI that caused
it.]

As usual, Valleyview just likes to be different; since there we are
manually evaluating the threshold, we cannot sample the previous EI
registers.
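
For illustration only, the debounce amounts to something like the
sketch below. This is a standalone example with made-up names
(ei_verdict, confirm), not the i915 helpers; the real patch reads the
GEN6_RP_PREV_UP/GEN6_RP_UP_THRESHOLD registers rather than a cached
verdict:

/*
 * Minimal sketch of the two-sample smoothing, assuming each completed
 * evaluation interval can be reduced to a boolean up/down verdict.
 * Illustrative only; none of these names exist in i915.
 */
#include <stdbool.h>
#include <stdio.h>

enum ei_verdict { EI_DOWN = 0, EI_UP = 1 };

/* Act on the interrupt only if the last completed EI agrees with it. */
static bool confirm(enum ei_verdict irq, enum ei_verdict last_completed_ei)
{
	return irq == last_completed_ei;
}

int main(void)
{
	/* A spike: the interrupt says up, the completed EI says down -> hold. */
	printf("spike -> change clocks? %d\n", confirm(EI_UP, EI_DOWN));
	/* Sustained load: two consecutive up verdicts -> raise the clocks. */
	printf("sustained -> change clocks? %d\n", confirm(EI_UP, EI_UP));
	return 0;
}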

References: https://gitlab.freedesktop.org/drm/intel/-/issues/1698
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Andi Shyti <andi.shyti@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_rps.c | 59 ++++++++++++++++++++++++-----
 1 file changed, 50 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 86110458e2a7..367132092bed 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -1416,6 +1416,11 @@ static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
 	ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT);
 }
 
+static bool vlv_manual_ei(u32 pm_iir)
+{
+	return pm_iir & GEN6_PM_RP_UP_EI_EXPIRED;
+}
+
 static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
 {
 	struct intel_uncore *uncore = rps_to_uncore(rps);
@@ -1423,7 +1428,7 @@ static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
 	struct intel_rps_ei now;
 	u32 events = 0;
 
-	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
+	if (!vlv_manual_ei(pm_iir))
 		return 0;
 
 	vlv_c0_read(uncore, &now);
@@ -1456,6 +1461,37 @@ static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
 	return events;
 }
 
+static bool __confirm_ei(struct intel_rps *rps,
+			 i915_reg_t ei_sample,
+			 i915_reg_t ei_threshold)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+	u32 threshold, sample;
+
+	sample = intel_uncore_read(uncore, ei_sample);
+	threshold = intel_uncore_read(uncore, ei_threshold);
+
+	sample &= GEN6_CURBSYTAVG_MASK;
+
+	return sample > threshold;
+}
+
+static bool confirm_up(struct intel_rps *rps, u32 pm_iir)
+{
+	if (vlv_manual_ei(pm_iir))
+		return true;
+
+	return __confirm_ei(rps, GEN6_RP_PREV_UP, GEN6_RP_UP_THRESHOLD);
+}
+
+static bool confirm_down(struct intel_rps *rps, u32 pm_iir)
+{
+	if (vlv_manual_ei(pm_iir))
+		return true;
+
+	return !__confirm_ei(rps, GEN6_RP_PREV_UP, GEN6_RP_UP_THRESHOLD);
+}
+
 static void rps_work(struct work_struct *work)
 {
 	struct intel_rps *rps = container_of(work, typeof(*rps), work);
@@ -1484,10 +1520,11 @@ static void rps_work(struct work_struct *work)
 	max = rps->max_freq_softlimit;
 	if (client_boost)
 		max = rps->max_freq;
-	if (client_boost && new_freq < rps->boost_freq) {
+	if (client_boost && new_freq <= rps->boost_freq) {
 		new_freq = rps->boost_freq;
 		adj = 0;
-	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD &&
+		   confirm_up(rps, pm_iir)) {
 		if (adj > 0)
 			adj *= 2;
 		else /* CHV needs even encode values */
@@ -1497,13 +1534,15 @@ static void rps_work(struct work_struct *work)
 			adj = 0;
 	} else if (client_boost) {
 		adj = 0;
-	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT &&
+		   confirm_down(rps, pm_iir)) {
 		if (rps->cur_freq > rps->efficient_freq)
 			new_freq = rps->efficient_freq;
 		else if (rps->cur_freq > rps->min_freq_softlimit)
 			new_freq = rps->min_freq_softlimit;
 		adj = 0;
-	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD &&
+		   confirm_down(rps, pm_iir)) {
 		if (adj < 0)
 			adj *= 2;
 		else /* CHV needs even encode values */
@@ -1511,8 +1550,8 @@ static void rps_work(struct work_struct *work)
 
 		if (new_freq <= rps->min_freq_softlimit)
 			adj = 0;
-	} else { /* unknown event */
-		adj = 0;
+	} else { /* unknown event, or unwanted */
+		goto unlock;
 	}
 
 	rps->last_adj = adj;
@@ -1529,8 +1568,9 @@ static void rps_work(struct work_struct *work)
 	    (adj > 0 && rps->power.mode == LOW_POWER))
 		rps->last_adj = 0;
 
-	/* sysfs frequency interfaces may have snuck in while servicing the
-	 * interrupt
+	/*
+	 * sysfs frequency limits may have snuck in while
+	 * servicing the interrupt
 	 */
 	new_freq += adj;
 	new_freq = clamp_t(int, new_freq, min, max);
@@ -1540,6 +1580,7 @@ static void rps_work(struct work_struct *work)
 		rps->last_adj = 0;
 	}
 
+unlock:
 	mutex_unlock(&rps->lock);
 
 out:
-- 
2.20.1
