* [Intel-gfx] [PATCH 1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock
@ 2020-03-11 9:26 Chris Wilson
2020-03-11 9:26 ` [Intel-gfx] [PATCH 2/3] drm/i915/execlists: Track active elements during dequeue Chris Wilson
` (3 more replies)
0 siblings, 4 replies; 7+ messages in thread
From: Chris Wilson @ 2020-03-11 9:26 UTC (permalink / raw)
To: intel-gfx
Avoid angering kcsan by serialising the read of the pm_events with the
write in rps_disable_interrupts.
[ 6268.713419] BUG: KCSAN: data-race in intel_rps_park [i915] / rps_work [i915]
[ 6268.713437]
[ 6268.713449] write to 0xffff8881eda8efac of 4 bytes by task 1127 on cpu 3:
[ 6268.713680] intel_rps_park+0x136/0x260 [i915]
[ 6268.713905] __gt_park+0x61/0xa0 [i915]
[ 6268.714128] ____intel_wakeref_put_last+0x42/0x90 [i915]
[ 6268.714352] __intel_wakeref_put_work+0xd3/0xf0 [i915]
[ 6268.714369] process_one_work+0x3b1/0x690
[ 6268.714384] worker_thread+0x80/0x670
[ 6268.714398] kthread+0x19a/0x1e0
[ 6268.714412] ret_from_fork+0x1f/0x30
[ 6268.714423]
[ 6268.714435] read to 0xffff8881eda8efac of 4 bytes by task 950 on cpu 2:
[ 6268.714664] rps_work+0xc2/0x680 [i915]
[ 6268.714679] process_one_work+0x3b1/0x690
[ 6268.714693] worker_thread+0x80/0x670
[ 6268.714707] kthread+0x19a/0x1e0
[ 6268.714720] ret_from_fork+0x1f/0x30
v2: Mark all reads and writes of rps->pm_events.
The flow of enabling/disabling rps is strongly ordered, so the writes and
interrupt generation are also strongly ordered -- just this may not be
visible to the compiler, so provide annotations.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gt/intel_rps.c | 29 +++++++++++++++++------------
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 1b1cda2438d1..87f9638d2cbf 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -57,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask &= rps->pm_events;
+ mask &= READ_ONCE(rps->pm_events);
return rps_pm_sanitize_mask(rps, ~mask);
}
@@ -70,17 +70,19 @@ static void rps_reset_ei(struct intel_rps *rps)
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
rps_reset_ei(rps);
if (IS_VALLEYVIEW(gt->i915))
/* WaGsvRC0ResidencyMethod:vlv */
- rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ events = GEN6_PM_RP_UP_EI_EXPIRED;
else
- rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
+ events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+ WRITE_ONCE(rps->pm_events, events);
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
@@ -117,8 +119,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- rps->pm_events = 0;
-
+ WRITE_ONCE(rps->pm_events, 0);
set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(&gt->irq_lock);
@@ -1459,12 +1460,12 @@ static void rps_work(struct work_struct *work)
u32 pm_iir = 0;
spin_lock_irq(&gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir);
+ pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(&gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
- if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ if (!pm_iir && !client_boost)
goto out;
mutex_lock(&rps->lock);
@@ -1560,11 +1561,15 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
- if (pm_iir & rps->pm_events) {
+ events = pm_iir & READ_ONCE(rps->pm_events);
+ if (events) {
spin_lock(&gt->irq_lock);
- gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
- rps->pm_iir |= pm_iir & rps->pm_events;
+
+ gen6_gt_pm_mask_irq(gt, events);
+ rps->pm_iir |= events;
+
schedule_work(&rps->work);
spin_unlock(&gt->irq_lock);
}
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [Intel-gfx] [PATCH 2/3] drm/i915/execlists: Track active elements during dequeue
2020-03-11 9:26 [Intel-gfx] [PATCH 1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock Chris Wilson
@ 2020-03-11 9:26 ` Chris Wilson
2020-03-11 11:17 ` Mika Kuoppala
2020-03-11 9:26 ` [Intel-gfx] [PATCH 3/3] drm/i915/gem: Mark up the racy read of the mmap_singleton Chris Wilson
` (2 subsequent siblings)
3 siblings, 1 reply; 7+ messages in thread
From: Chris Wilson @ 2020-03-11 9:26 UTC (permalink / raw)
To: intel-gfx
Record the initial active element we use when building the next ELSP
submission, so that we can compare against it later to see if there's
no change.
Fixes: 44d0a9c05bc0 ("drm/i915/execlists: Skip redundant resubmission")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gt/intel_lrc.c | 32 +++++++++++------------------
1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index ee378a089dd5..1c68b4f4e33d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1678,17 +1678,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_unlock(&old->breadcrumbs.irq_lock);
}
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
- struct i915_request * const *last = READ_ONCE(execlists->active);
-
- while (*last && i915_request_completed(*last))
- last++;
-
- return *last;
-}
-
#define for_each_waiter(p__, rq__) \
list_for_each_entry_lockless(p__, \
&(rq__)->sched.waiters_list, \
@@ -1827,11 +1816,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
- struct i915_request *rq;
-
- rq = last_active(&engine->execlists);
if (!rq)
return 0;
@@ -1842,13 +1829,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
return READ_ONCE(engine->props.preempt_timeout_ms);
}
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
if (!intel_engine_has_preempt_reset(engine))
return;
set_timer_ms(&engine->execlists.preempt,
- active_preempt_timeout(engine));
+ active_preempt_timeout(engine, rq));
}
static inline void clear_ports(struct i915_request **ports, int count)
@@ -1861,6 +1849,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request * const *active;
struct i915_request *last;
struct rb_node *rb;
bool submit = false;
@@ -1915,7 +1904,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* i.e. we will retrigger preemption following the ack in case
* of trouble.
*/
- last = last_active(execlists);
+ active = READ_ONCE(execlists->active);
+ while ((last = *active) && i915_request_completed(last))
+ active++;
+
if (last) {
if (need_preempt(engine, last, rb)) {
ENGINE_TRACE(engine,
@@ -2201,7 +2193,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* Skip if we ended up with exactly the same set of requests,
* e.g. trying to timeslice a pair of ordered contexts
*/
- if (!memcmp(execlists->active, execlists->pending,
+ if (!memcmp(active, execlists->pending,
(port - execlists->pending + 1) * sizeof(*port))) {
do
execlists_schedule_out(fetch_and_zero(port));
@@ -2212,7 +2204,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
clear_ports(port + 1, last_port - port);
execlists_submit_ports(engine);
- set_preempt_timeout(engine);
+ set_preempt_timeout(engine, *active);
} else {
skip_submit:
ring_set_paused(engine, 0);
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [Intel-gfx] [PATCH 3/3] drm/i915/gem: Mark up the racy read of the mmap_singleton
2020-03-11 9:26 [Intel-gfx] [PATCH 1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock Chris Wilson
2020-03-11 9:26 ` [Intel-gfx] [PATCH 2/3] drm/i915/execlists: Track active elements during dequeue Chris Wilson
@ 2020-03-11 9:26 ` Chris Wilson
2020-03-11 12:02 ` Mika Kuoppala
2020-03-11 13:49 ` [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock Patchwork
2020-03-12 5:20 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
3 siblings, 1 reply; 7+ messages in thread
From: Chris Wilson @ 2020-03-11 9:26 UTC (permalink / raw)
To: intel-gfx
[11057.642683] BUG: KCSAN: data-race in i915_gem_mmap [i915] / singleton_release [i915]
[11057.642717]
[11057.642740] write (marked) to 0xffff8881f24471a0 of 8 bytes by task 44668 on cpu 2:
[11057.643162] singleton_release+0x38/0x60 [i915]
[11057.643192] __fput+0x160/0x3c0
[11057.643217] ____fput+0x16/0x20
[11057.643241] task_work_run+0xba/0x100
[11057.643263] exit_to_usermode_loop+0xe4/0xf0
[11057.643286] do_syscall_64+0x27e/0x2c0
[11057.643314] entry_SYSCALL_64_after_hwframe+0x44/0xa9
[11057.643339]
[11057.643359] read to 0xffff8881f24471a0 of 8 bytes by task 44667 on cpu 3:
[11057.643774] i915_gem_mmap+0x295/0x670 [i915]
[11057.643802] mmap_region+0x62b/0xac0
[11057.643825] do_mmap+0x414/0x6b0
[11057.643848] vm_mmap_pgoff+0xa9/0xf0
[11057.643875] ksys_mmap_pgoff+0x1ac/0x2f0
[11057.643900] do_syscall_64+0x6e/0x2c0
[11057.643924] entry_SYSCALL_64_after_hwframe+0x44/0xa9
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index e8cccc131c40..b39c24dae64e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -775,7 +775,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
struct file *file;
rcu_read_lock();
- file = i915->gem.mmap_singleton;
+ file = READ_ONCE(i915->gem.mmap_singleton);
if (file && !get_file_rcu(file))
file = NULL;
rcu_read_unlock();
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [Intel-gfx] [PATCH 2/3] drm/i915/execlists: Track active elements during dequeue
2020-03-11 9:26 ` [Intel-gfx] [PATCH 2/3] drm/i915/execlists: Track active elements during dequeue Chris Wilson
@ 2020-03-11 11:17 ` Mika Kuoppala
0 siblings, 0 replies; 7+ messages in thread
From: Mika Kuoppala @ 2020-03-11 11:17 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
Chris Wilson <chris@chris-wilson.co.uk> writes:
> Record the initial active element we use when building the next ELSP
> submission, so that we can compare against it latter to see if there's
> no change.
>
> Fixes: 44d0a9c05bc0 ("drm/i915/execlists: Skip redundant resubmission")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
> drivers/gpu/drm/i915/gt/intel_lrc.c | 32 +++++++++++------------------
> 1 file changed, 12 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index ee378a089dd5..1c68b4f4e33d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1678,17 +1678,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
> spin_unlock(&old->breadcrumbs.irq_lock);
> }
>
> -static struct i915_request *
> -last_active(const struct intel_engine_execlists *execlists)
> -{
> - struct i915_request * const *last = READ_ONCE(execlists->active);
> -
> - while (*last && i915_request_completed(*last))
> - last++;
> -
> - return *last;
> -}
> -
> #define for_each_waiter(p__, rq__) \
> list_for_each_entry_lockless(p__, \
> &(rq__)->sched.waiters_list, \
> @@ -1827,11 +1816,9 @@ static void record_preemption(struct intel_engine_execlists *execlists)
> (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
> }
>
> -static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
> +static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
> + const struct i915_request *rq)
> {
> - struct i915_request *rq;
> -
> - rq = last_active(&engine->execlists);
> if (!rq)
> return 0;
>
> @@ -1842,13 +1829,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
> return READ_ONCE(engine->props.preempt_timeout_ms);
> }
>
> -static void set_preempt_timeout(struct intel_engine_cs *engine)
> +static void set_preempt_timeout(struct intel_engine_cs *engine,
> + const struct i915_request *rq)
> {
> if (!intel_engine_has_preempt_reset(engine))
> return;
>
> set_timer_ms(&engine->execlists.preempt,
> - active_preempt_timeout(engine));
> + active_preempt_timeout(engine, rq));
> }
>
> static inline void clear_ports(struct i915_request **ports, int count)
> @@ -1861,6 +1849,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> struct intel_engine_execlists * const execlists = &engine->execlists;
> struct i915_request **port = execlists->pending;
> struct i915_request ** const last_port = port + execlists->port_mask;
> + struct i915_request * const *active;
> struct i915_request *last;
> struct rb_node *rb;
> bool submit = false;
> @@ -1915,7 +1904,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> * i.e. we will retrigger preemption following the ack in case
> * of trouble.
> */
> - last = last_active(execlists);
> + active = READ_ONCE(execlists->active);
> + while ((last = *active) && i915_request_completed(last))
> + active++;
> +
> if (last) {
> if (need_preempt(engine, last, rb)) {
> ENGINE_TRACE(engine,
> @@ -2201,7 +2193,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> * Skip if we ended up with exactly the same set of requests,
> * e.g. trying to timeslice a pair of ordered contexts
> */
> - if (!memcmp(execlists->active, execlists->pending,
> + if (!memcmp(active, execlists->pending,
> (port - execlists->pending + 1) * sizeof(*port))) {
> do
> execlists_schedule_out(fetch_and_zero(port));
> @@ -2212,7 +2204,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> clear_ports(port + 1, last_port - port);
>
> execlists_submit_ports(engine);
> - set_preempt_timeout(engine);
> + set_preempt_timeout(engine, *active);
> } else {
> skip_submit:
> ring_set_paused(engine, 0);
> --
> 2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [Intel-gfx] [PATCH 3/3] drm/i915/gem: Mark up the racy read of the mmap_singleton
2020-03-11 9:26 ` [Intel-gfx] [PATCH 3/3] drm/i915/gem: Mark up the racy read of the mmap_singleton Chris Wilson
@ 2020-03-11 12:02 ` Mika Kuoppala
0 siblings, 0 replies; 7+ messages in thread
From: Mika Kuoppala @ 2020-03-11 12:02 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
Chris Wilson <chris@chris-wilson.co.uk> writes:
> [11057.642683] BUG: KCSAN: data-race in i915_gem_mmap [i915] / singleton_release [i915]
> [11057.642717]
> [11057.642740] write (marked) to 0xffff8881f24471a0 of 8 bytes by task 44668 on cpu 2:
> [11057.643162] singleton_release+0x38/0x60 [i915]
> [11057.643192] __fput+0x160/0x3c0
> [11057.643217] ____fput+0x16/0x20
> [11057.643241] task_work_run+0xba/0x100
> [11057.643263] exit_to_usermode_loop+0xe4/0xf0
> [11057.643286] do_syscall_64+0x27e/0x2c0
> [11057.643314] entry_SYSCALL_64_after_hwframe+0x44/0xa9
> [11057.643339]
> [11057.643359] read to 0xffff8881f24471a0 of 8 bytes by task 44667 on cpu 3:
> [11057.643774] i915_gem_mmap+0x295/0x670 [i915]
> [11057.643802] mmap_region+0x62b/0xac0
> [11057.643825] do_mmap+0x414/0x6b0
> [11057.643848] vm_mmap_pgoff+0xa9/0xf0
> [11057.643875] ksys_mmap_pgoff+0x1ac/0x2f0
> [11057.643900] do_syscall_64+0x6e/0x2c0
> [11057.643924] entry_SYSCALL_64_after_hwframe+0x44/0xa9
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
> drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index e8cccc131c40..b39c24dae64e 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -775,7 +775,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
> struct file *file;
>
> rcu_read_lock();
> - file = i915->gem.mmap_singleton;
> + file = READ_ONCE(i915->gem.mmap_singleton);
> if (file && !get_file_rcu(file))
> file = NULL;
> rcu_read_unlock();
> --
> 2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock
2020-03-11 9:26 [Intel-gfx] [PATCH 1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock Chris Wilson
2020-03-11 9:26 ` [Intel-gfx] [PATCH 2/3] drm/i915/execlists: Track active elements during dequeue Chris Wilson
2020-03-11 9:26 ` [Intel-gfx] [PATCH 3/3] drm/i915/gem: Mark up the racy read of the mmap_singleton Chris Wilson
@ 2020-03-11 13:49 ` Patchwork
2020-03-12 5:20 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
3 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2020-03-11 13:49 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock
URL : https://patchwork.freedesktop.org/series/74574/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_8117 -> Patchwork_16924
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/index.html
Known issues
------------
Here are the changes found in Patchwork_16924 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@i915_selftest@live@dmabuf:
- fi-ivb-3770: [PASS][1] -> [DMESG-WARN][2] ([i915#1405])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/fi-ivb-3770/igt@i915_selftest@live@dmabuf.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/fi-ivb-3770/igt@i915_selftest@live@dmabuf.html
* igt@i915_selftest@live@hangcheck:
- fi-ivb-3770: [PASS][3] -> [INCOMPLETE][4] ([i915#1405])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/fi-ivb-3770/igt@i915_selftest@live@hangcheck.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/fi-ivb-3770/igt@i915_selftest@live@hangcheck.html
#### Possible fixes ####
* igt@i915_selftest@live@execlists:
- fi-apl-guc: [INCOMPLETE][5] ([fdo#103927]) -> [PASS][6]
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/fi-apl-guc/igt@i915_selftest@live@execlists.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/fi-apl-guc/igt@i915_selftest@live@execlists.html
* igt@i915_selftest@live@gem_contexts:
- fi-cml-s: [DMESG-FAIL][7] ([i915#877]) -> [PASS][8]
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/fi-cml-s/igt@i915_selftest@live@gem_contexts.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/fi-cml-s/igt@i915_selftest@live@gem_contexts.html
* igt@i915_selftest@live@hangcheck:
- fi-apl-guc: [DMESG-WARN][9] -> [PASS][10]
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/fi-apl-guc/igt@i915_selftest@live@hangcheck.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/fi-apl-guc/igt@i915_selftest@live@hangcheck.html
* igt@kms_chamelium@hdmi-hpd-fast:
- fi-kbl-7500u: [FAIL][11] ([fdo#111407]) -> [PASS][12]
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
[fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
[fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
[i915#1405]: https://gitlab.freedesktop.org/drm/intel/issues/1405
[i915#877]: https://gitlab.freedesktop.org/drm/intel/issues/877
Participating hosts (49 -> 41)
------------------------------
Additional (2): fi-kbl-soraka fi-kbl-7560u
Missing (10): fi-ilk-m540 fi-hsw-4200u fi-skl-6770hq fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-kbl-x1275 fi-gdg-551 fi-byt-clapper fi-kbl-r
Build changes
-------------
* CI: CI-20190529 -> None
* Linux: CI_DRM_8117 -> Patchwork_16924
CI-20190529: 20190529
CI_DRM_8117: 39a97a79462bf47caf47d8e56e1027dcedb92bb9 @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_5505: 8973d811f3fdfb4ace4aabab2095ce0309881648 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
Patchwork_16924: 08f76cf4130125fc2367ab64007ed0318e3f4fa6 @ git://anongit.freedesktop.org/gfx-ci/linux
== Linux commits ==
08f76cf41301 drm/i915/gem: Mark up the racy read of the mmap_singleton
98c0234b5f8b drm/i915/execlists: Track active elements during dequeue
11373aa4af2b drm/i915/gt: Pull checking rps->pm_events under the irq_lock
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
* [Intel-gfx] ✗ Fi.CI.IGT: failure for series starting with [1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock
2020-03-11 9:26 [Intel-gfx] [PATCH 1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock Chris Wilson
` (2 preceding siblings ...)
2020-03-11 13:49 ` [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock Patchwork
@ 2020-03-12 5:20 ` Patchwork
3 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2020-03-12 5:20 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock
URL : https://patchwork.freedesktop.org/series/74574/
State : failure
== Summary ==
CI Bug Log - changes from CI_DRM_8117_full -> Patchwork_16924_full
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with Patchwork_16924_full absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in Patchwork_16924_full, please notify your bug team to allow them
to document this new failure mode, which will reduce false positives in CI.
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in Patchwork_16924_full:
### IGT changes ###
#### Possible regressions ####
* igt@kms_cursor_crc@pipe-a-cursor-suspend:
- shard-kbl: [PASS][1] -> [INCOMPLETE][2]
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-kbl7/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-kbl3/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
Known issues
------------
Here are the changes found in Patchwork_16924_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_busy@busy-vcs1:
- shard-iclb: [PASS][3] -> [SKIP][4] ([fdo#112080]) +15 similar issues
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb4/igt@gem_busy@busy-vcs1.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb6/igt@gem_busy@busy-vcs1.html
* igt@gem_exec_balancer@smoke:
- shard-iclb: [PASS][5] -> [SKIP][6] ([fdo#110854])
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb4/igt@gem_exec_balancer@smoke.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb6/igt@gem_exec_balancer@smoke.html
* igt@gem_exec_schedule@preempt-other-chain-bsd:
- shard-iclb: [PASS][7] -> [SKIP][8] ([fdo#112146]) +2 similar issues
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb8/igt@gem_exec_schedule@preempt-other-chain-bsd.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb2/igt@gem_exec_schedule@preempt-other-chain-bsd.html
* igt@gem_exec_schedule@promotion-bsd1:
- shard-iclb: [PASS][9] -> [SKIP][10] ([fdo#109276]) +19 similar issues
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb4/igt@gem_exec_schedule@promotion-bsd1.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb6/igt@gem_exec_schedule@promotion-bsd1.html
* igt@gem_linear_blits@normal:
- shard-apl: [PASS][11] -> [TIMEOUT][12] ([i915#1322])
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-apl1/igt@gem_linear_blits@normal.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-apl3/igt@gem_linear_blits@normal.html
* igt@gen9_exec_parse@allowed-single:
- shard-skl: [PASS][13] -> [INCOMPLETE][14] ([i915#716])
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl2/igt@gen9_exec_parse@allowed-single.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl10/igt@gen9_exec_parse@allowed-single.html
* igt@i915_pm_rps@waitboost:
- shard-tglb: [PASS][15] -> [FAIL][16] ([i915#413])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-tglb5/igt@i915_pm_rps@waitboost.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-tglb2/igt@i915_pm_rps@waitboost.html
* igt@i915_suspend@fence-restore-tiled2untiled:
- shard-apl: [PASS][17] -> [DMESG-WARN][18] ([i915#180]) +6 similar issues
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-apl7/igt@i915_suspend@fence-restore-tiled2untiled.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-apl2/igt@i915_suspend@fence-restore-tiled2untiled.html
- shard-skl: [PASS][19] -> [INCOMPLETE][20] ([i915#69])
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl3/igt@i915_suspend@fence-restore-tiled2untiled.html
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl10/igt@i915_suspend@fence-restore-tiled2untiled.html
* igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-ytiled:
- shard-skl: [PASS][21] -> [FAIL][22] ([i915#52] / [i915#54])
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl1/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-ytiled.html
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl1/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-ytiled.html
* igt@kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-gtt:
- shard-skl: [PASS][23] -> [FAIL][24] ([i915#49])
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl1/igt@kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-gtt.html
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl1/igt@kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-gtt.html
* igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
- shard-kbl: [PASS][25] -> [DMESG-WARN][26] ([i915#180]) +2 similar issues
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-kbl7/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-kbl2/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
* igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min:
- shard-skl: [PASS][27] -> [FAIL][28] ([fdo#108145])
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl4/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl3/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html
* igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
- shard-skl: [PASS][29] -> [FAIL][30] ([fdo#108145] / [i915#265])
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl1/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl3/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
* igt@kms_psr@psr2_basic:
- shard-iclb: [PASS][31] -> [SKIP][32] ([fdo#109441])
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb2/igt@kms_psr@psr2_basic.html
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb1/igt@kms_psr@psr2_basic.html
#### Possible fixes ####
* igt@gem_ctx_persistence@close-replace-race:
- shard-iclb: [INCOMPLETE][33] ([i915#1402]) -> [PASS][34]
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb6/igt@gem_ctx_persistence@close-replace-race.html
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb5/igt@gem_ctx_persistence@close-replace-race.html
* igt@gem_ctx_shared@exec-single-timeline-bsd:
- shard-iclb: [SKIP][35] ([fdo#110841]) -> [PASS][36]
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb1/igt@gem_ctx_shared@exec-single-timeline-bsd.html
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb3/igt@gem_ctx_shared@exec-single-timeline-bsd.html
* igt@gem_exec_schedule@implicit-read-write-bsd1:
- shard-iclb: [SKIP][37] ([fdo#109276] / [i915#677]) -> [PASS][38] +1 similar issue
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb7/igt@gem_exec_schedule@implicit-read-write-bsd1.html
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb4/igt@gem_exec_schedule@implicit-read-write-bsd1.html
* igt@gem_exec_schedule@pi-distinct-iova-bsd:
- shard-iclb: [SKIP][39] ([i915#677]) -> [PASS][40] +4 similar issues
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb1/igt@gem_exec_schedule@pi-distinct-iova-bsd.html
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb3/igt@gem_exec_schedule@pi-distinct-iova-bsd.html
* igt@gem_exec_schedule@preemptive-hang-bsd:
- shard-iclb: [SKIP][41] ([fdo#112146]) -> [PASS][42] +7 similar issues
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd.html
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb3/igt@gem_exec_schedule@preemptive-hang-bsd.html
* igt@gem_ppgtt@flink-and-close-vma-leak:
- shard-glk: [FAIL][43] ([i915#644]) -> [PASS][44]
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-glk6/igt@gem_ppgtt@flink-and-close-vma-leak.html
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-glk1/igt@gem_ppgtt@flink-and-close-vma-leak.html
- shard-iclb: [FAIL][45] ([i915#644]) -> [PASS][46]
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb1/igt@gem_ppgtt@flink-and-close-vma-leak.html
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb7/igt@gem_ppgtt@flink-and-close-vma-leak.html
* igt@i915_selftest@live@execlists:
- shard-kbl: [INCOMPLETE][47] ([fdo#112259]) -> [PASS][48]
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-kbl4/igt@i915_selftest@live@execlists.html
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-kbl7/igt@i915_selftest@live@execlists.html
* igt@kms_dp_dsc@basic-dsc-enable-edp:
- shard-iclb: [SKIP][49] ([fdo#109349]) -> [PASS][50]
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb8/igt@kms_dp_dsc@basic-dsc-enable-edp.html
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html
* igt@kms_flip@flip-vs-suspend-interruptible:
- shard-skl: [INCOMPLETE][51] ([i915#221]) -> [PASS][52]
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl7/igt@kms_flip@flip-vs-suspend-interruptible.html
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl6/igt@kms_flip@flip-vs-suspend-interruptible.html
- shard-apl: [DMESG-WARN][53] ([i915#180]) -> [PASS][54]
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-apl6/igt@kms_flip@flip-vs-suspend-interruptible.html
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-apl4/igt@kms_flip@flip-vs-suspend-interruptible.html
* igt@kms_flip@plain-flip-ts-check-interruptible:
- shard-skl: [FAIL][55] ([i915#34]) -> [PASS][56]
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl2/igt@kms_flip@plain-flip-ts-check-interruptible.html
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl7/igt@kms_flip@plain-flip-ts-check-interruptible.html
* igt@kms_hdr@bpc-switch-dpms:
- shard-skl: [FAIL][57] ([i915#1188]) -> [PASS][58] +1 similar issue
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl5/igt@kms_hdr@bpc-switch-dpms.html
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl1/igt@kms_hdr@bpc-switch-dpms.html
* igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes:
- shard-kbl: [DMESG-WARN][59] ([i915#180]) -> [PASS][60] +1 similar issue
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-kbl2/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-kbl4/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
- shard-snb: [DMESG-WARN][61] ([i915#42]) -> [PASS][62]
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-snb6/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-snb5/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
* igt@kms_plane_alpha_blend@pipe-a-coverage-7efc:
- shard-skl: [FAIL][63] ([fdo#108145]) -> [PASS][64]
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl3/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl4/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
* igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
- shard-skl: [FAIL][65] ([fdo#108145] / [i915#265]) -> [PASS][66]
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl7/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl2/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
* igt@kms_psr@psr2_primary_render:
- shard-iclb: [SKIP][67] ([fdo#109441]) -> [PASS][68]
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb8/igt@kms_psr@psr2_primary_render.html
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb2/igt@kms_psr@psr2_primary_render.html
* igt@kms_setmode@basic:
- shard-hsw: [FAIL][69] ([i915#31]) -> [PASS][70]
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-hsw6/igt@kms_setmode@basic.html
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-hsw4/igt@kms_setmode@basic.html
* igt@perf_pmu@busy-accuracy-2-vcs1:
- shard-iclb: [SKIP][71] ([fdo#112080]) -> [PASS][72] +11 similar issues
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb6/igt@perf_pmu@busy-accuracy-2-vcs1.html
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb1/igt@perf_pmu@busy-accuracy-2-vcs1.html
* igt@prime_vgem@fence-wait-bsd2:
- shard-iclb: [SKIP][73] ([fdo#109276]) -> [PASS][74] +14 similar issues
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-iclb6/igt@prime_vgem@fence-wait-bsd2.html
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-iclb1/igt@prime_vgem@fence-wait-bsd2.html
#### Warnings ####
* igt@gem_ctx_persistence@close-replace-race:
- shard-kbl: [INCOMPLETE][75] ([i915#1402]) -> [TIMEOUT][76] ([i915#1340])
[75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-kbl6/igt@gem_ctx_persistence@close-replace-race.html
[76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-kbl2/igt@gem_ctx_persistence@close-replace-race.html
* igt@i915_pm_rpm@debugfs-forcewake-user:
- shard-snb: [SKIP][77] ([fdo#109271]) -> [INCOMPLETE][78] ([i915#82])
[77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-snb6/igt@i915_pm_rpm@debugfs-forcewake-user.html
[78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-snb2/igt@i915_pm_rpm@debugfs-forcewake-user.html
* igt@kms_cursor_crc@pipe-b-cursor-suspend:
- shard-kbl: [INCOMPLETE][79] -> [DMESG-WARN][80] ([i915#180])
[79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-kbl3/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
[80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-kbl1/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
* igt@runner@aborted:
- shard-kbl: ([FAIL][81], [FAIL][82], [FAIL][83]) ([i915#1389] / [i915#1402] / [i915#92]) -> [FAIL][84] ([i915#92])
[81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-kbl6/igt@runner@aborted.html
[82]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-kbl4/igt@runner@aborted.html
[83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-kbl6/igt@runner@aborted.html
[84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-kbl3/igt@runner@aborted.html
- shard-skl: [FAIL][85] ([i915#1402] / [i915#69]) -> [FAIL][86] ([i915#69])
[85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8117/shard-skl9/igt@runner@aborted.html
[86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/shard-skl10/igt@runner@aborted.html
[fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
[fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
[fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
[fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
[fdo#110854]: https://bugs.freedesktop.org/show_bug.cgi?id=110854
[fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
[fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146
[fdo#112259]: https://bugs.freedesktop.org/show_bug.cgi?id=112259
[i915#1188]: https://gitlab.freedesktop.org/drm/intel/issues/1188
[i915#1322]: https://gitlab.freedesktop.org/drm/intel/issues/1322
[i915#1340]: https://gitlab.freedesktop.org/drm/intel/issues/1340
[i915#1389]: https://gitlab.freedesktop.org/drm/intel/issues/1389
[i915#1402]: https://gitlab.freedesktop.org/drm/intel/issues/1402
[i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
[i915#221]: https://gitlab.freedesktop.org/drm/intel/issues/221
[i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
[i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
[i915#34]: https://gitlab.freedesktop.org/drm/intel/issues/34
[i915#413]: https://gitlab.freedesktop.org/drm/intel/issues/413
[i915#42]: https://gitlab.freedesktop.org/drm/intel/issues/42
[i915#49]: https://gitlab.freedesktop.org/drm/intel/issues/49
[i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
[i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
[i915#644]: https://gitlab.freedesktop.org/drm/intel/issues/644
[i915#677]: https://gitlab.freedesktop.org/drm/intel/issues/677
[i915#69]: https://gitlab.freedesktop.org/drm/intel/issues/69
[i915#716]: https://gitlab.freedesktop.org/drm/intel/issues/716
[i915#82]: https://gitlab.freedesktop.org/drm/intel/issues/82
[i915#92]: https://gitlab.freedesktop.org/drm/intel/issues/92
Participating hosts (10 -> 10)
------------------------------
No changes in participating hosts
Build changes
-------------
* CI: CI-20190529 -> None
* Linux: CI_DRM_8117 -> Patchwork_16924
CI-20190529: 20190529
CI_DRM_8117: 39a97a79462bf47caf47d8e56e1027dcedb92bb9 @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_5505: 8973d811f3fdfb4ace4aabab2095ce0309881648 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
Patchwork_16924: 08f76cf4130125fc2367ab64007ed0318e3f4fa6 @ git://anongit.freedesktop.org/gfx-ci/linux
piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16924/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2020-03-12 5:20 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-03-11 9:26 [Intel-gfx] [PATCH 1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock Chris Wilson
2020-03-11 9:26 ` [Intel-gfx] [PATCH 2/3] drm/i915/execlists: Track active elements during dequeue Chris Wilson
2020-03-11 11:17 ` Mika Kuoppala
2020-03-11 9:26 ` [Intel-gfx] [PATCH 3/3] drm/i915/gem: Mark up the racy read of the mmap_singleton Chris Wilson
2020-03-11 12:02 ` Mika Kuoppala
2020-03-11 13:49 ` [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/3] drm/i915/gt: Pull checking rps->pm_events under the irq_lock Patchwork
2020-03-12 5:20 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).