* [PATCH v2 1/2] drm/i915: Track the number of times we have woken the GPU up
@ 2018-01-24 11:03 Chris Wilson
2018-01-24 11:03 ` [PATCH v2 2/2] drm/i915: Shrink the GEM kmem_caches upon idling Chris Wilson
From: Chris Wilson @ 2018-01-24 11:03 UTC
To: intel-gfx
By counting the number of times we have woken the GPU up, we have a very simple
means of defining an epoch, which will come in handy if we want to
perform deferred tasks at the end of an epoch (i.e. while we are going
to sleep) without imposing on the next activity cycle.
v2: No reason to specify precise number of bits here.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/i915_debugfs.c | 7 ++++---
drivers/gpu/drm/i915/i915_drv.h | 5 +++++
drivers/gpu/drm/i915/i915_gem_request.c | 1 +
3 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 80dc679c0f01..65b175ac19b0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2717,7 +2717,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
- seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
+ seq_printf(m, "GPU idle: %s (epoch %d)\n",
+ yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@@ -3150,8 +3151,8 @@ static int i915_engine_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- seq_printf(m, "GT awake? %s\n",
- yesno(dev_priv->gt.awake));
+ seq_printf(m, "GT awake? %s (epoch %d)\n",
+ yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
seq_printf(m, "CS timestamp frequency: %u kHz\n",
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8333692dac5a..eed2e69dc002 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2312,6 +2312,11 @@ struct drm_i915_private {
*/
bool awake;
+ /**
+ * The number of times we have woken the GT up from idle.
+ */
+ unsigned int epoch;
+
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index a0f451b4a4e8..f0fab070a3a0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -274,6 +274,7 @@ static void mark_busy(struct drm_i915_private *i915)
intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
i915->gt.awake = true;
+ i915->gt.epoch++;
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
--
2.15.1
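[The pattern the commit message describes is worth spelling out: snapshot gt.epoch when queuing a deferred task, then compare the snapshot when the task finally runs; any interleaved wakeup advances the counter and invalidates the snapshot. A minimal userspace sketch of that idea follows; gt_state, mark_busy and deferred_shrink are illustrative stand-ins, not driver symbols.]

#include <stdbool.h>
#include <stdio.h>

/* Models i915's gt.awake/gt.epoch pair: epoch advances on each wakeup. */
struct gt_state {
	bool awake;
	unsigned int epoch;
};

static void mark_busy(struct gt_state *gt)
{
	if (gt->awake)
		return;
	gt->awake = true;
	gt->epoch++;		/* a new activity cycle begins */
}

static void mark_idle(struct gt_state *gt)
{
	gt->awake = false;
}

/* The deferred task only acts if no wakeup happened since it was queued. */
static void deferred_shrink(const struct gt_state *gt, unsigned int epoch)
{
	if (epoch == gt->epoch)
		printf("still idle in epoch %u: safe to run\n", epoch);
	else
		printf("epoch moved %u -> %u: skip\n", epoch, gt->epoch);
}

int main(void)
{
	struct gt_state gt = { false, 0 };
	unsigned int snap;

	mark_busy(&gt);
	mark_idle(&gt);
	snap = gt.epoch;		/* snapshot taken while going idle */

	deferred_shrink(&gt, snap);	/* runs: nothing woke us up */
	mark_busy(&gt);			/* the GPU wakes again */
	deferred_shrink(&gt, snap);	/* skipped: epoch advanced */
	return 0;
}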
* [PATCH v2 2/2] drm/i915: Shrink the GEM kmem_caches upon idling
2018-01-24 11:03 [PATCH v2 1/2] drm/i915: Track the number of times we have woken the GPU up Chris Wilson
@ 2018-01-24 11:03 ` Chris Wilson
2018-01-24 11:10 ` Chris Wilson
2018-01-24 11:27 ` Tvrtko Ursulin
2018-01-24 11:40 ` ✓ Fi.CI.BAT: success for series starting with [v2,1/2] drm/i915: Track the number of times we have woken the GPU up Patchwork
2018-01-24 14:30 ` ✓ Fi.CI.IGT: " Patchwork
From: Chris Wilson @ 2018-01-24 11:03 UTC
To: intel-gfx
When we finally decide the gpu is idle, that is a good time to shrink
our kmem_caches.
v3: Defer until an rcu grace period after we idle.
v4: Think about epoch wraparound and how likely that is.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem.c | 78 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 78 insertions(+)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7f0684ccc724..60b34bb98ee3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3334,6 +3334,65 @@ i915_gem_retire_work_handler(struct work_struct *work)
}
}
+static void shrink_caches(struct drm_i915_private *i915)
+{
+ /*
+ * kmem_cache_shrink() discards empty slabs and reorders partially
+ * filled slabs to prioritise allocating from the mostly full slabs,
+ * with the aim of reducing fragmentation.
+ */
+ kmem_cache_shrink(i915->priorities);
+ kmem_cache_shrink(i915->dependencies);
+ kmem_cache_shrink(i915->requests);
+ kmem_cache_shrink(i915->luts);
+ kmem_cache_shrink(i915->vmas);
+ kmem_cache_shrink(i915->objects);
+}
+
+struct sleep_rcu_work {
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
+ struct drm_i915_private *i915;
+ unsigned int epoch;
+};
+
+static inline bool
+same_epoch(struct drm_i915_private *i915, unsigned int epoch)
+{
+ /*
+ * There is a small chance that the epoch wrapped since we started
+ * sleeping. If we assume that epoch is at least a u32, then it will
+ * take at least 2^32 * 100ms for it to wrap, or about 13.6 years.
+ */
+ return epoch == READ_ONCE(i915->gt.epoch);
+}
+
+static void __sleep_work(struct work_struct *work)
+{
+ struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
+ struct drm_i915_private *i915 = s->i915;
+ unsigned int epoch = s->epoch;
+
+ kfree(s);
+ if (same_epoch(i915, epoch))
+ shrink_caches(i915);
+}
+
+static void __sleep_rcu(struct rcu_head *rcu)
+{
+ struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
+ struct drm_i915_private *i915 = s->i915;
+
+ if (same_epoch(i915, s->epoch)) {
+ INIT_WORK(&s->work, __sleep_work);
+ queue_work(i915->wq, &s->work);
+ } else {
+ kfree(s);
+ }
+}
+
static inline bool
new_requests_since_last_retire(const struct drm_i915_private *i915)
{
@@ -3346,6 +3405,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), gt.idle_work.work);
+ unsigned int epoch = 0;
bool rearm_hangcheck;
ktime_t end;
@@ -3406,6 +3466,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
GEM_BUG_ON(!dev_priv->gt.awake);
dev_priv->gt.awake = false;
rearm_hangcheck = false;
+ epoch = dev_priv->gt.epoch;
if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_idle(dev_priv);
@@ -3421,6 +3482,23 @@ i915_gem_idle_work_handler(struct work_struct *work)
GEM_BUG_ON(!dev_priv->gt.awake);
i915_queue_hangcheck(dev_priv);
}
+
+ /*
+ * When we are idle, it is an opportune time to reap our caches.
+ * However, we have many objects that utilise RCU and the ordered
+ * i915->wq that this work is executing on. To try and flush any
+ * pending frees now we are idle, we first wait for an RCU grace
+ * period, and then queue a task (that will run last on the wq) to
+ * shrink and re-optimize the caches.
+ */
+ if (same_epoch(dev_priv, epoch)) {
+ struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (s) {
+ s->i915 = dev_priv;
+ s->epoch = epoch;
+ call_rcu(&s->rcu, __sleep_rcu);
+ }
+ }
}
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
--
2.15.1
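[The v4 wraparound note can be sanity-checked with a throwaway userspace program; nothing below is driver code. A u32 epoch that advances at most once per 100ms wake/sleep cycle needs 2^32 * 100ms to revisit a stale value.]

#include <stdio.h>

int main(void)
{
	const double cycle_s = 0.100;		/* minimum wake/sleep cycle */
	const double wraps = 4294967296.0;	/* 2^32 epoch increments */
	const double year_s = 365.25 * 24 * 3600;

	/* Prints roughly 13.6 years of back-to-back cycling, far longer
	 * than any plausible uptime, so a stale match is not a worry. */
	printf("wrap horizon: %.1f years\n", cycle_s * wraps / year_s);
	return 0;
}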
* Re: [PATCH v2 2/2] drm/i915: Shrink the GEM kmem_caches upon idling
2018-01-24 11:03 ` [PATCH v2 2/2] drm/i915: Shrink the GEM kmem_caches upon idling Chris Wilson
@ 2018-01-24 11:10 ` Chris Wilson
2018-01-24 11:27 ` Tvrtko Ursulin
From: Chris Wilson @ 2018-01-24 11:10 UTC
To: intel-gfx
Quoting Chris Wilson (2018-01-24 11:03:49)
> When we finally decide the gpu is idle, that is a good time to shrink
> our kmem_caches.
>
> v3: Defer until an rcu grace period after we idle.
> v4: Think about epoch wraparound and how likely that is.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com> #v3
-Chris
* Re: [PATCH v2 2/2] drm/i915: Shrink the GEM kmem_caches upon idling
2018-01-24 11:03 ` [PATCH v2 2/2] drm/i915: Shrink the GEM kmem_caches upon idling Chris Wilson
2018-01-24 11:10 ` Chris Wilson
@ 2018-01-24 11:27 ` Tvrtko Ursulin
From: Tvrtko Ursulin @ 2018-01-24 11:27 UTC
To: Chris Wilson, intel-gfx
On 24/01/2018 11:03, Chris Wilson wrote:
> When we finally decide the gpu is idle, that is a good time to shrink
> our kmem_caches.
>
> v3: Defer until an rcu grace period after we idle.
> v4: Think about epoch wraparound and how likely that is.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> ---
> drivers/gpu/drm/i915/i915_gem.c | 78 +++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 78 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 7f0684ccc724..60b34bb98ee3 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -3334,6 +3334,65 @@ i915_gem_retire_work_handler(struct work_struct *work)
> }
> }
>
> +static void shrink_caches(struct drm_i915_private *i915)
> +{
> + /*
> + * kmem_cache_shrink() discards empty slabs and reorders partially
> + * filled slabs to prioritise allocating from the mostly full slabs,
> + * with the aim of reducing fragmentation.
> + */
> + kmem_cache_shrink(i915->priorities);
> + kmem_cache_shrink(i915->dependencies);
> + kmem_cache_shrink(i915->requests);
> + kmem_cache_shrink(i915->luts);
> + kmem_cache_shrink(i915->vmas);
> + kmem_cache_shrink(i915->objects);
> +}
> +
> +struct sleep_rcu_work {
> + union {
> + struct rcu_head rcu;
> + struct work_struct work;
> + };
> + struct drm_i915_private *i915;
> + unsigned int epoch;
> +};
> +
> +static inline bool
> +same_epoch(struct drm_i915_private *i915, unsigned int epoch)
> +{
> + /*
> + * There is a small chance that the epoch wrapped since we started
> + * sleeping. If we assume that epoch is at least a u32, then it will
> + * take at least 2^32 * 100ms for it to wrap, or about 13.6 years.
> + */
> + return epoch == READ_ONCE(i915->gt.epoch);
> +}
> +
> +static void __sleep_work(struct work_struct *work)
> +{
> + struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
> + struct drm_i915_private *i915 = s->i915;
> + unsigned int epoch = s->epoch;
> +
> + kfree(s);
> + if (same_epoch(i915, epoch))
> + shrink_caches(i915);
> +}
> +
> +static void __sleep_rcu(struct rcu_head *rcu)
> +{
> + struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
> + struct drm_i915_private *i915 = s->i915;
> +
> + if (same_epoch(i915, s->epoch)) {
> + INIT_WORK(&s->work, __sleep_work);
> + queue_work(i915->wq, &s->work);
> + } else {
> + kfree(s);
> + }
> +}
> +
> static inline bool
> new_requests_since_last_retire(const struct drm_i915_private *i915)
> {
> @@ -3346,6 +3405,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
> {
> struct drm_i915_private *dev_priv =
> container_of(work, typeof(*dev_priv), gt.idle_work.work);
> + unsigned int epoch = 0;
> bool rearm_hangcheck;
> ktime_t end;
>
> @@ -3406,6 +3466,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
> GEM_BUG_ON(!dev_priv->gt.awake);
> dev_priv->gt.awake = false;
> rearm_hangcheck = false;
> + epoch = dev_priv->gt.epoch;
>
> if (INTEL_GEN(dev_priv) >= 6)
> gen6_rps_idle(dev_priv);
> @@ -3421,6 +3482,23 @@ i915_gem_idle_work_handler(struct work_struct *work)
> GEM_BUG_ON(!dev_priv->gt.awake);
> i915_queue_hangcheck(dev_priv);
> }
> +
> + /*
> + * When we are idle, it is an opportune time to reap our caches.
> + * However, we have many objects that utilise RCU and the ordered
> + * i915->wq that this work is executing on. To try and flush any
> + * pending frees now we are idle, we first wait for an RCU grace
> + * period, and then queue a task (that will run last on the wq) to
> + * shrink and re-optimize the caches.
> + */
> + if (same_epoch(dev_priv, epoch)) {
> + struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
> + if (s) {
> + s->i915 = dev_priv;
> + s->epoch = epoch;
> + call_rcu(&s->rcu, __sleep_rcu);
> + }
> + }
> }
>
> void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Regards,
Tvrtko
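[As background on what the reviewed call actually buys us: kmem_cache_shrink() releases a cache's empty slabs back to the page allocator and re-sorts the partial ones. A hypothetical standalone module sketch follows; blob_cache, the object size and the burst of 64 are invented for illustration, and none of this is i915 code.]

#include <linux/module.h>
#include <linux/slab.h>

struct blob {
	char payload[256];
};

static struct kmem_cache *blob_cache;

static int __init shrink_demo_init(void)
{
	void *objs[64];
	int i;

	blob_cache = kmem_cache_create("blob", sizeof(struct blob),
				       0, 0, NULL);
	if (!blob_cache)
		return -ENOMEM;

	/* Allocate a burst, then free it all: the cache is left holding
	 * empty slabs that nothing will reuse until the next burst. */
	for (i = 0; i < 64; i++)
		objs[i] = kmem_cache_alloc(blob_cache, GFP_KERNEL);
	for (i = 0; i < 64; i++)
		if (objs[i])
			kmem_cache_free(blob_cache, objs[i]);

	/* Hand the empty slabs back; partially filled slabs are ordered
	 * so future allocations pack into the fullest ones first. */
	kmem_cache_shrink(blob_cache);
	return 0;
}

static void __exit shrink_demo_exit(void)
{
	kmem_cache_destroy(blob_cache);
}

module_init(shrink_demo_init);
module_exit(shrink_demo_exit);
MODULE_LICENSE("GPL");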
* ✓ Fi.CI.BAT: success for series starting with [v2,1/2] drm/i915: Track the number of times we have woken the GPU up
2018-01-24 11:03 [PATCH v2 1/2] drm/i915: Track the number of times we have woken the GPU up Chris Wilson
2018-01-24 11:03 ` [PATCH v2 2/2] drm/i915: Shrink the GEM kmem_caches upon idling Chris Wilson
@ 2018-01-24 11:40 ` Patchwork
2018-01-24 14:30 ` ✓ Fi.CI.IGT: " Patchwork
From: Patchwork @ 2018-01-24 11:40 UTC
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [v2,1/2] drm/i915: Track the number of times we have woken the GPU up
URL : https://patchwork.freedesktop.org/series/37026/
State : success
== Summary ==
Series 37026v1 series starting with [v2,1/2] drm/i915: Track the number of times we have woken the GPU up
https://patchwork.freedesktop.org/api/1.0/series/37026/revisions/1/mbox/
Test kms_pipe_crc_basic:
Subgroup suspend-read-crc-pipe-b:
incomplete -> PASS (fi-snb-2520m) fdo#103713
fdo#103713 https://bugs.freedesktop.org/show_bug.cgi?id=103713
fi-bdw-5557u total:288 pass:267 dwarn:0 dfail:0 fail:0 skip:21 time:421s
fi-bdw-gvtdvm total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:430s
fi-blb-e6850 total:288 pass:223 dwarn:1 dfail:0 fail:0 skip:64 time:372s
fi-bsw-n3050 total:288 pass:242 dwarn:0 dfail:0 fail:0 skip:46 time:496s
fi-bwr-2160 total:288 pass:183 dwarn:0 dfail:0 fail:0 skip:105 time:285s
fi-bxt-dsi total:288 pass:258 dwarn:0 dfail:0 fail:0 skip:30 time:484s
fi-bxt-j4205 total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:490s
fi-byt-j1900 total:288 pass:253 dwarn:0 dfail:0 fail:0 skip:35 time:476s
fi-elk-e7500 total:224 pass:168 dwarn:9 dfail:1 fail:0 skip:45
fi-gdg-551 total:288 pass:179 dwarn:0 dfail:0 fail:1 skip:108 time:282s
fi-glk-1 total:288 pass:260 dwarn:0 dfail:0 fail:0 skip:28 time:518s
fi-hsw-4770 total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:398s
fi-hsw-4770r total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:404s
fi-ilk-650 total:288 pass:228 dwarn:0 dfail:0 fail:0 skip:60 time:413s
fi-ivb-3520m total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:453s
fi-ivb-3770 total:288 pass:255 dwarn:0 dfail:0 fail:0 skip:33 time:416s
fi-kbl-7500u total:288 pass:263 dwarn:1 dfail:0 fail:0 skip:24 time:456s
fi-kbl-7560u total:288 pass:269 dwarn:0 dfail:0 fail:0 skip:19 time:500s
fi-kbl-7567u total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:454s
fi-kbl-r total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:504s
fi-pnv-d510 total:288 pass:222 dwarn:1 dfail:0 fail:0 skip:65 time:588s
fi-skl-6260u total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:432s
fi-skl-6600u total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:509s
fi-skl-6700hq total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:531s
fi-skl-6700k2 total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:488s
fi-skl-6770hq total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:485s
fi-skl-guc total:288 pass:260 dwarn:0 dfail:0 fail:0 skip:28 time:414s
fi-skl-gvtdvm total:288 pass:265 dwarn:0 dfail:0 fail:0 skip:23 time:431s
fi-snb-2520m total:288 pass:248 dwarn:0 dfail:0 fail:0 skip:40 time:513s
fi-snb-2600 total:288 pass:248 dwarn:0 dfail:0 fail:0 skip:40 time:400s
Blacklisted hosts:
fi-cfl-s2 total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:566s
fi-glk-dsi total:288 pass:258 dwarn:0 dfail:0 fail:0 skip:30 time:468s
fi-byt-n2820 failed to collect. IGT log at Patchwork_7766/fi-byt-n2820/igt.log
ef2011c6380601730d0083e77c386aaa2d8c6673 drm-tip: 2018y-01m-24d-09h-45m-08s UTC integration manifest
591cc1c83a13 drm/i915: Shrink the GEM kmem_caches upon idling
ac442515a418 drm/i915: Track the number of times we have woken the GPU up
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_7766/issues.html
* ✓ Fi.CI.IGT: success for series starting with [v2,1/2] drm/i915: Track the number of times we have woken the GPU up
2018-01-24 11:03 [PATCH v2 1/2] drm/i915: Track the number of times we have woken the GPU up Chris Wilson
2018-01-24 11:03 ` [PATCH v2 2/2] drm/i915: Shrink the GEM kmem_caches upon idling Chris Wilson
2018-01-24 11:40 ` ✓ Fi.CI.BAT: success for series starting with [v2,1/2] drm/i915: Track the number of times we have woken the GPU up Patchwork
@ 2018-01-24 14:30 ` Patchwork
From: Patchwork @ 2018-01-24 14:30 UTC
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [v2,1/2] drm/i915: Track the number of times we have woken the GPU up
URL : https://patchwork.freedesktop.org/series/37026/
State : success
== Summary ==
Test kms_flip:
Subgroup 2x-plain-flip-fb-recreate:
pass -> FAIL (shard-hsw) fdo#100368
Subgroup 2x-flip-vs-expired-vblank:
pass -> FAIL (shard-hsw) fdo#102887
Test perf:
Subgroup oa-exponents:
pass -> FAIL (shard-apl) fdo#102254
Test kms_frontbuffer_tracking:
Subgroup fbc-1p-offscren-pri-shrfb-draw-render:
pass -> FAIL (shard-snb) fdo#101623 +1
Test gem_exec_suspend:
Subgroup basic-s3:
skip -> PASS (shard-snb) fdo#103880
fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
fdo#102887 https://bugs.freedesktop.org/show_bug.cgi?id=102887
fdo#102254 https://bugs.freedesktop.org/show_bug.cgi?id=102254
fdo#101623 https://bugs.freedesktop.org/show_bug.cgi?id=101623
fdo#103880 https://bugs.freedesktop.org/show_bug.cgi?id=103880
shard-apl total:2753 pass:1715 dwarn:1 dfail:0 fail:24 skip:1013 time:13995s
shard-hsw total:2753 pass:1724 dwarn:1 dfail:0 fail:12 skip:1015 time:15356s
shard-snb total:2753 pass:1317 dwarn:1 dfail:0 fail:12 skip:1423 time:7936s
Blacklisted hosts:
shard-kbl total:2753 pass:1835 dwarn:2 dfail:0 fail:24 skip:892 time:11093s
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_7766/shards.html