* [PATCH] drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush
@ 2019-11-20 13:41 ` Chris Wilson
  0 siblings, 0 replies; 24+ messages in thread
From: Chris Wilson @ 2019-11-20 13:41 UTC (permalink / raw)
  To: intel-gfx

Since we use barriers, we need only explicitly flush those barriers to
ensure that we can reclaim the available ggtt for ourselves. The barrier
flush was implicit inside intel_gt_wait_for_idle() -- except that, because
we use i915_gem_evict from inside an active timeline during execbuf, we
could easily end up waiting upon ourselves.
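
As a rough illustration only (call sites simplified, not part of the
patch), the self-wait being removed looks like:

	i915_gem_do_execbuffer()          /* our timeline is now active */
	  eb_relocate() -> i915_vma_pin() /* needs GGTT space */
	    i915_gem_evict_something()
	      ggtt_flush()
	        intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT)
	          /* waits for every active timeline to retire, including
	           * the one we hold open but have not yet submitted --
	           * i.e. we wait upon ourselves */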

Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
Fixes: a46bfdc83fee ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
Testcase: igt/gem_exec_reloc/basic-range
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_evict.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 7e62c310290f..78ca56c06a3c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -28,7 +28,7 @@
 
 #include <drm/i915_drm.h>
 
-#include "gem/i915_gem_context.h"
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_gt_requests.h"
 
 #include "i915_drv.h"
@@ -38,8 +38,11 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
 	bool fail_if_busy:1;
 } igt_evict_ctl;)
 
-static int ggtt_flush(struct intel_gt *gt)
+static void ggtt_flush(struct intel_gt *gt)
 {
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
 	/*
 	 * Not everything in the GGTT is tracked via vma (otherwise we
 	 * could evict as required with minimal stalling) so we are forced
@@ -47,7 +50,11 @@ static int ggtt_flush(struct intel_gt *gt)
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+	intel_gt_retire_requests(gt);
+	for_each_engine(engine, gt, id)
+		intel_engine_flush_barriers(engine);
+
+	cond_resched();
 }
 
 static bool
@@ -197,11 +204,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
 		return -EBUSY;
 
-	ret = ggtt_flush(vm->gt);
-	if (ret)
-		return ret;
-
-	cond_resched();
+	ggtt_flush(vm->gt);
 
 	flags |= PIN_NONBLOCK;
 	goto search_again;
@@ -371,11 +374,8 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
 	 * pin themselves inside the global GTT and performing the
 	 * switch otherwise is ineffective.
 	 */
-	if (i915_is_ggtt(vm)) {
-		ret = ggtt_flush(vm->gt);
-		if (ret)
-			return ret;
-	}
+	if (i915_is_ggtt(vm))
+		ggtt_flush(vm->gt);
 
 	INIT_LIST_HEAD(&eviction_list);
 	list_for_each_entry(vma, &vm->bound_list, vm_link) {
-- 
2.24.0

* Re: [PATCH] drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush
@ 2019-11-20 15:58   ` Tvrtko Ursulin
  0 siblings, 0 replies; 24+ messages in thread
From: Tvrtko Ursulin @ 2019-11-20 15:58 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 20/11/2019 13:41, Chris Wilson wrote:
> Since we use barriers, we need only explicitly flush those barriers to
> ensure that we can reclaim the available ggtt for ourselves. The barrier
> flush was implicit inside intel_gt_wait_for_idle() -- except that, because
> we use i915_gem_evict from inside an active timeline during execbuf, we
> could easily end up waiting upon ourselves.
> 
> Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> Fixes: a46bfdc83fee ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> Testcase: igt/gem_exec_reloc/basic-range

Bugzilla: ?

This test gets permanently stuck on some platforms?

Regards,

Tvrtko

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_gem_evict.c | 26 +++++++++++++-------------
>   1 file changed, 13 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 7e62c310290f..78ca56c06a3c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -28,7 +28,7 @@
>   
>   #include <drm/i915_drm.h>
>   
> -#include "gem/i915_gem_context.h"
> +#include "gt/intel_engine_heartbeat.h"
>   #include "gt/intel_gt_requests.h"
>   
>   #include "i915_drv.h"
> @@ -38,8 +38,11 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
>   	bool fail_if_busy:1;
>   } igt_evict_ctl;)
>   
> -static int ggtt_flush(struct intel_gt *gt)
> +static void ggtt_flush(struct intel_gt *gt)
>   {
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +
>   	/*
>   	 * Not everything in the GGTT is tracked via vma (otherwise we
>   	 * could evict as required with minimal stalling) so we are forced
> @@ -47,7 +50,11 @@ static int ggtt_flush(struct intel_gt *gt)
>   	 * the hopes that we can then remove contexts and the like only
>   	 * bound by their active reference.
>   	 */
> -	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
> +	intel_gt_retire_requests(gt);
> +	for_each_engine(engine, gt, id)
> +		intel_engine_flush_barriers(engine);
> +
> +	cond_resched();
>   }
>   
>   static bool
> @@ -197,11 +204,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
>   	if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
>   		return -EBUSY;
>   
> -	ret = ggtt_flush(vm->gt);
> -	if (ret)
> -		return ret;
> -
> -	cond_resched();
> +	ggtt_flush(vm->gt);
>   
>   	flags |= PIN_NONBLOCK;
>   	goto search_again;
> @@ -371,11 +374,8 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
>   	 * pin themselves inside the global GTT and performing the
>   	 * switch otherwise is ineffective.
>   	 */
> -	if (i915_is_ggtt(vm)) {
> -		ret = ggtt_flush(vm->gt);
> -		if (ret)
> -			return ret;
> -	}
> +	if (i915_is_ggtt(vm))
> +		ggtt_flush(vm->gt);
>   
>   	INIT_LIST_HEAD(&eviction_list);
>   	list_for_each_entry(vma, &vm->bound_list, vm_link) {
> 

* Re: [PATCH] drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush
@ 2019-11-20 16:02     ` Chris Wilson
  0 siblings, 0 replies; 24+ messages in thread
From: Chris Wilson @ 2019-11-20 16:02 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-11-20 15:58:49)
> 
> On 20/11/2019 13:41, Chris Wilson wrote:
> > Since we use barriers, we need only explicitly flush those barriers to
> > ensure that we can reclaim the available ggtt for ourselves. The barrier
> > flush was implicit inside intel_gt_wait_for_idle() -- except that, because
> > we use i915_gem_evict from inside an active timeline during execbuf, we
> > could easily end up waiting upon ourselves.
> > 
> > Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> > Fixes: a46bfdc83fee ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> > Testcase: igt/gem_exec_reloc/basic-range
> 
> Bugzilla: ?

It's been in CI since before the w/e (the test itself is much, much
older), I guess it hasn't been vetted yet as no bug has been filed.
 
> This test gets permanently stuck on some platforms?

All !full-ppgtt platforms.
-Chris

* Re: [PATCH] drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush
@ 2019-11-20 16:14       ` Tvrtko Ursulin
  0 siblings, 0 replies; 24+ messages in thread
From: Tvrtko Ursulin @ 2019-11-20 16:14 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 20/11/2019 16:02, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-11-20 15:58:49)
>>
>> On 20/11/2019 13:41, Chris Wilson wrote:
>>> Since we use barriers, we need only explicitly flush those barriers to
>>> ensure that we can reclaim the available ggtt for ourselves. The barrier
>>> flush was implicit inside intel_gt_wait_for_idle() -- except that, because
>>> we use i915_gem_evict from inside an active timeline during execbuf, we
>>> could easily end up waiting upon ourselves.
>>>
>>> Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
>>> Fixes: a46bfdc83fee ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
>>> Testcase: igt/gem_exec_reloc/basic-range
>>
>> Bugzilla: ?
> 
> It's been in CI since before the w/e (the test itself is much, much
> older), I guess it hasn't been vetted yet as no bug has been filed.
>   
>> This test gets permanently stuck on some platforms?
> 
> All !full-ppgtt platforms.

How will it cope with actual ggtt pressure? The wait-for-idle was
presumably there for a reason, and now it will only retire what's already
done and send an idle pulse down the engines.

Regards,

Tvrtko
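
A rough side-by-side of the two behaviours in question (drawn from the
hunks in the patch above; illustrative only):

	/* old ggtt_flush(): */
	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
	/* blocks until every outstanding request has retired, so even
	 * busy contexts eventually drop their GGTT pins before we rescan */

	/* new ggtt_flush(): */
	intel_gt_retire_requests(gt);
	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);
	/* only reaps what has already completed and queues the idle
	 * barriers; anything still busy keeps its GGTT pin until a
	 * later pass */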



* Re: [PATCH] drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush
@ 2019-11-20 16:28         ` Chris Wilson
  0 siblings, 0 replies; 24+ messages in thread
From: Chris Wilson @ 2019-11-20 16:28 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-11-20 16:14:40)
> 
> On 20/11/2019 16:02, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2019-11-20 15:58:49)
> >>
> >> On 20/11/2019 13:41, Chris Wilson wrote:
> >>> Since we use barriers, we need only explicitly flush those barriers to
> >>> ensure that we can reclaim the available ggtt for ourselves. The barrier
> >>> flush was implicit inside intel_gt_wait_for_idle() -- except that, because
> >>> we use i915_gem_evict from inside an active timeline during execbuf, we
> >>> could easily end up waiting upon ourselves.
> >>>
> >>> Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> >>> Fixes: a46bfdc83fee ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> >>> Testcase: igt/gem_exec_reloc/basic-range
> >>
> >> Bugzilla: ?
> > 
> > It's been in CI since before the w/e (the test itself is much, much
> > older), I guess it hasn't been vetted yet as no bug has been filed.
> >   
> >> This test gets permanently stuck on some platforms?
> > 
> > All !full-ppgtt platforms.
> 
> How will it cope with actual ggtt pressure? The wait-for-idle was
> presumably there for a reason, and now it will only retire what's already
> done and send an idle pulse down the engines.

Same as it did previously... I've vacillated between using a flush and a
wait. Originally, it was meant to just be a flush as we would wait on
individual objects. But now context pinning requires waiting on
barriers. Hmm, actually that would be a simple way of obtaining the
previous behaviour when required.
-Chris
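
A minimal sketch of that idea (wait on the pulse request carrying the
barriers), assuming intel_engine_flush_barriers() can hand the request
back to the caller and with error handling simplified -- this is
essentially what v2 below implements:

	for_each_engine(engine, gt, id) {
		struct i915_request *barrier;

		/* the pulse runs the idle barriers; waiting on it lets any
		 * context whose last reference is a barrier be unpinned
		 * before we rescan for eviction candidates */
		if (intel_engine_flush_barriers(engine, &barrier))
			continue;
		if (!barrier)
			continue;

		i915_request_wait(barrier, I915_WAIT_INTERRUPTIBLE,
				  MAX_SCHEDULE_TIMEOUT);
		i915_request_put(barrier);
	}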

* [PATCH] drm/i915/gem: Reduce ggtt_flush() from a wait-for-idle to a mere barrier flush
@ 2019-11-20 16:42   ` Chris Wilson
  0 siblings, 0 replies; 24+ messages in thread
From: Chris Wilson @ 2019-11-20 16:42 UTC (permalink / raw)
  To: intel-gfx

Since we use barriers, we need only explicitly flush those barriers to
ensure that we can reclaim the available ggtt for ourselves. The barrier
flush was implicit inside intel_gt_wait_for_idle() -- except that, because
we use i915_gem_evict from inside an active timeline during execbuf, we
could easily end up waiting upon ourselves.

v2: Wait on the barriers to ensure that any context unpinning that can
be done, will be done.

Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
Fixes: a46bfdc83fee ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
Testcase: igt/gem_exec_reloc/basic-range
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 .../gpu/drm/i915/gt/intel_engine_heartbeat.c  |  8 +++-
 .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  4 +-
 drivers/gpu/drm/i915/gt/selftest_context.c    | 38 ++++---------------
 .../drm/i915/gt/selftest_engine_heartbeat.c   |  7 +++-
 drivers/gpu/drm/i915/i915_gem_evict.c         | 26 ++++++++++++-
 5 files changed, 48 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index c91fd4e4af29..0173720af05a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -212,10 +212,14 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 	return err;
 }
 
-int intel_engine_flush_barriers(struct intel_engine_cs *engine)
+int intel_engine_flush_barriers(struct intel_engine_cs *engine,
+				struct i915_request **out)
 {
 	struct i915_request *rq;
 
+	if (out)
+		*out = NULL;
+
 	if (llist_empty(&engine->barrier_tasks))
 		return 0;
 
@@ -224,6 +228,8 @@ int intel_engine_flush_barriers(struct intel_engine_cs *engine)
 		return PTR_ERR(rq);
 
 	idle_pulse(engine, rq);
+	if (out)
+		*out = i915_request_get(rq);
 	i915_request_add(rq);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
index a7b8c0f9e005..17e973d86f5c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -7,6 +7,7 @@
 #ifndef INTEL_ENGINE_HEARTBEAT_H
 #define INTEL_ENGINE_HEARTBEAT_H
 
+struct i915_request;
 struct intel_engine_cs;
 
 void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
@@ -18,6 +19,7 @@ void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
 void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
 
 int intel_engine_pulse(struct intel_engine_cs *engine);
-int intel_engine_flush_barriers(struct intel_engine_cs *engine);
+int intel_engine_flush_barriers(struct intel_engine_cs *engine,
+				struct i915_request **barrier);
 
 #endif /* INTEL_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 3586af636304..0c0f130802fb 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -41,33 +41,6 @@ static int request_sync(struct i915_request *rq)
 	return err;
 }
 
-static int context_sync(struct intel_context *ce)
-{
-	struct intel_timeline *tl = ce->timeline;
-	int err = 0;
-
-	mutex_lock(&tl->mutex);
-	do {
-		struct dma_fence *fence;
-		long timeout;
-
-		fence = i915_active_fence_get(&tl->last_request);
-		if (!fence)
-			break;
-
-		timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
-		if (timeout < 0)
-			err = timeout;
-		else
-			i915_request_retire_upto(to_request(fence));
-
-		dma_fence_put(fence);
-	} while (!err);
-	mutex_unlock(&tl->mutex);
-
-	return err;
-}
-
 static int __live_context_size(struct intel_engine_cs *engine,
 			       struct i915_gem_context *fixme)
 {
@@ -202,6 +175,7 @@ static int __live_active_context(struct intel_engine_cs *engine,
 				 struct i915_gem_context *fixme)
 {
 	unsigned long saved_heartbeat;
+	struct i915_request *barrier;
 	struct intel_context *ce;
 	int pass;
 	int err;
@@ -269,17 +243,21 @@ static int __live_active_context(struct intel_engine_cs *engine,
 	}
 
 	/* Now make sure our idle-barriers are flushed */
-	err = intel_engine_flush_barriers(engine);
+	err = intel_engine_flush_barriers(engine, &barrier);
 	if (err)
 		goto err;
 
-	err = context_sync(engine->kernel_context);
-	if (err)
+	if (i915_request_wait(barrier, 0, HZ / 5) < 0) {
+		i915_request_put(barrier);
+		err = -ETIME;
 		goto err;
+	}
+	i915_request_put(barrier);
 
 	if (!i915_active_is_idle(&ce->active)) {
 		pr_err("context is still active!");
 		err = -EINVAL;
+		goto err;
 	}
 
 	if (intel_engine_pm_is_awake(engine)) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index f665a0e23c61..0bd9afc20ef3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -115,6 +115,11 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
 	return err;
 }
 
+static int wrap_engine_flush_barriers(struct intel_engine_cs *engine)
+{
+	return intel_engine_flush_barriers(engine, NULL);
+}
+
 static int live_idle_flush(void *arg)
 {
 	struct intel_gt *gt = arg;
@@ -126,7 +131,7 @@ static int live_idle_flush(void *arg)
 
 	for_each_engine(engine, gt, id) {
 		intel_engine_pm_get(engine);
-		err = __live_idle_pulse(engine, intel_engine_flush_barriers);
+		err = __live_idle_pulse(engine, wrap_engine_flush_barriers);
 		intel_engine_pm_put(engine);
 		if (err)
 			break;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 7e62c310290f..91daf87f491e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -28,7 +28,7 @@
 
 #include <drm/i915_drm.h>
 
-#include "gem/i915_gem_context.h"
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_gt_requests.h"
 
 #include "i915_drv.h"
@@ -40,6 +40,9 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
 
 static int ggtt_flush(struct intel_gt *gt)
 {
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
 	/*
 	 * Not everything in the GGTT is tracked via vma (otherwise we
 	 * could evict as required with minimal stalling) so we are forced
@@ -47,7 +50,26 @@ static int ggtt_flush(struct intel_gt *gt)
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+	intel_gt_retire_requests(gt);
+	for_each_engine(engine, gt, id) {
+		struct i915_request *barrier;
+		long err;
+
+		/* A barrier will unpin anything that is ready to be unpinned */
+		err = intel_engine_flush_barriers(engine, &barrier);
+		if (err)
+			return err;
+
+		err = i915_request_wait(barrier,
+					I915_WAIT_INTERRUPTIBLE,
+					MAX_SCHEDULE_TIMEOUT);
+		i915_request_put(barrier);
+		if (err)
+			return err;
+	}
+	intel_gt_retire_requests(gt);
+
+	return 0;
 }
 
 static bool
-- 
2.24.0


* Re: [PATCH] drm/i915/gem: Reduce ggtt_flush() from a wait-for-idle to a mere barrier flush
@ 2019-11-20 16:49     ` Chris Wilson
  0 siblings, 0 replies; 24+ messages in thread
From: Chris Wilson @ 2019-11-20 16:49 UTC (permalink / raw)
  To: intel-gfx

Quoting Chris Wilson (2019-11-20 16:42:46)
> Since we use barriers, we need only explicitly flush those barriers to
> ensure that we can reclaim the available ggtt for ourselves. The barrier
> flush was implicit inside intel_gt_wait_for_idle() -- except that, because
> we use i915_gem_evict from inside an active timeline during execbuf, we
> could easily end up waiting upon ourselves.
> 
> v2: Wait on the barriers to ensure that any context unpinning that can
> be done, will be done.
> 
> Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> Fixes: a46bfdc83fee ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> Testcase: igt/gem_exec_reloc/basic-range
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>  .../gpu/drm/i915/gt/intel_engine_heartbeat.c  |  8 +++-
>  .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  4 +-
>  drivers/gpu/drm/i915/gt/selftest_context.c    | 38 ++++---------------
>  .../drm/i915/gt/selftest_engine_heartbeat.c   |  7 +++-
>  drivers/gpu/drm/i915/i915_gem_evict.c         | 26 ++++++++++++-
>  5 files changed, 48 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> index c91fd4e4af29..0173720af05a 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> @@ -212,10 +212,14 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
>         return err;
>  }
>  
> -int intel_engine_flush_barriers(struct intel_engine_cs *engine)
> +int intel_engine_flush_barriers(struct intel_engine_cs *engine,
> +                               struct i915_request **out)
>  {
>         struct i915_request *rq;
>  
> +       if (out)
> +               *out = NULL;
> +
>         if (llist_empty(&engine->barrier_tasks))
>                 return 0;
>  
> @@ -224,6 +228,8 @@ int intel_engine_flush_barriers(struct intel_engine_cs *engine)
>                 return PTR_ERR(rq);
>  
>         idle_pulse(engine, rq);
> +       if (out)
> +               *out = i915_request_get(rq);
>         i915_request_add(rq);
>  
>         return 0;
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> index a7b8c0f9e005..17e973d86f5c 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> @@ -7,6 +7,7 @@
>  #ifndef INTEL_ENGINE_HEARTBEAT_H
>  #define INTEL_ENGINE_HEARTBEAT_H
>  
> +struct i915_request;
>  struct intel_engine_cs;
>  
>  void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
> @@ -18,6 +19,7 @@ void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
>  void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
>  
>  int intel_engine_pulse(struct intel_engine_cs *engine);
> -int intel_engine_flush_barriers(struct intel_engine_cs *engine);
> +int intel_engine_flush_barriers(struct intel_engine_cs *engine,
> +                               struct i915_request **barrier);
>  
>  #endif /* INTEL_ENGINE_HEARTBEAT_H */
> diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
> index 3586af636304..0c0f130802fb 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_context.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_context.c
> @@ -41,33 +41,6 @@ static int request_sync(struct i915_request *rq)
>         return err;
>  }
>  
> -static int context_sync(struct intel_context *ce)
> -{
> -       struct intel_timeline *tl = ce->timeline;
> -       int err = 0;
> -
> -       mutex_lock(&tl->mutex);
> -       do {
> -               struct dma_fence *fence;
> -               long timeout;
> -
> -               fence = i915_active_fence_get(&tl->last_request);
> -               if (!fence)
> -                       break;
> -
> -               timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
> -               if (timeout < 0)
> -                       err = timeout;
> -               else
> -                       i915_request_retire_upto(to_request(fence));
> -
> -               dma_fence_put(fence);
> -       } while (!err);
> -       mutex_unlock(&tl->mutex);
> -
> -       return err;
> -}
> -
>  static int __live_context_size(struct intel_engine_cs *engine,
>                                struct i915_gem_context *fixme)
>  {
> @@ -202,6 +175,7 @@ static int __live_active_context(struct intel_engine_cs *engine,
>                                  struct i915_gem_context *fixme)
>  {
>         unsigned long saved_heartbeat;
> +       struct i915_request *barrier;
>         struct intel_context *ce;
>         int pass;
>         int err;
> @@ -269,17 +243,21 @@ static int __live_active_context(struct intel_engine_cs *engine,
>         }
>  
>         /* Now make sure our idle-barriers are flushed */
> -       err = intel_engine_flush_barriers(engine);
> +       err = intel_engine_flush_barriers(engine, &barrier);
>         if (err)
>                 goto err;
>  
> -       err = context_sync(engine->kernel_context);
> -       if (err)
> +       if (i915_request_wait(barrier, 0, HZ / 5) < 0) {
> +               i915_request_put(barrier);
> +               err = -ETIME;
>                 goto err;
> +       }
> +       i915_request_put(barrier);
>  
>         if (!i915_active_is_idle(&ce->active)) {
>                 pr_err("context is still active!");
>                 err = -EINVAL;
> +               goto err;
>         }
>  
>         if (intel_engine_pm_is_awake(engine)) {
> diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
> index f665a0e23c61..0bd9afc20ef3 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
> @@ -115,6 +115,11 @@ static int __live_idle_pulse(struct intel_engine_cs *engine,
>         return err;
>  }
>  
> +static int wrap_engine_flush_barriers(struct intel_engine_cs *engine)
> +{
> +       return intel_engine_flush_barriers(engine, NULL);
> +}
> +
>  static int live_idle_flush(void *arg)
>  {
>         struct intel_gt *gt = arg;
> @@ -126,7 +131,7 @@ static int live_idle_flush(void *arg)
>  
>         for_each_engine(engine, gt, id) {
>                 intel_engine_pm_get(engine);
> -               err = __live_idle_pulse(engine, intel_engine_flush_barriers);
> +               err = __live_idle_pulse(engine, wrap_engine_flush_barriers);
>                 intel_engine_pm_put(engine);
>                 if (err)
>                         break;
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 7e62c310290f..91daf87f491e 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -28,7 +28,7 @@
>  
>  #include <drm/i915_drm.h>
>  
> -#include "gem/i915_gem_context.h"
> +#include "gt/intel_engine_heartbeat.h"
>  #include "gt/intel_gt_requests.h"
>  
>  #include "i915_drv.h"
> @@ -40,6 +40,9 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
>  
>  static int ggtt_flush(struct intel_gt *gt)
>  {
> +       struct intel_engine_cs *engine;
> +       enum intel_engine_id id;
> +
>         /*
>          * Not everything in the GGTT is tracked via vma (otherwise we
>          * could evict as required with minimal stalling) so we are forced
> @@ -47,7 +50,26 @@ static int ggtt_flush(struct intel_gt *gt)
>          * the hopes that we can then remove contexts and the like only
>          * bound by their active reference.
>          */
> -       return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
> +       intel_gt_retire_requests(gt);
> +       for_each_engine(engine, gt, id) {
> +               struct i915_request *barrier;
> +               long err;
> +
> +               /* A barrier will unpin anything that is ready to be unpinned */
> +               err = intel_engine_flush_barriers(engine, &barrier);
> +               if (err)
> +                       return err;
> +
> +               err = i915_request_wait(barrier,
> +                                       I915_WAIT_INTERRUPTIBLE,
> +                                       MAX_SCHEDULE_TIMEOUT);
> +               i915_request_put(barrier);
> +               if (err)
> +                       return err;

It's still weaker than it was before; I can keep papering over it. :|
The long term plan is pipelined evictions. Oh boy.
-Chris

* Re: [PATCH] drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush
@ 2019-11-20 19:05   ` Chris Wilson
  0 siblings, 0 replies; 24+ messages in thread
From: Chris Wilson @ 2019-11-20 19:05 UTC (permalink / raw)
  To: intel-gfx

Quoting Chris Wilson (2019-11-20 13:41:13)
> Since we use barriers, we need only explicitly flush those barriers to
> ensure that we can reclaim the available ggtt for ourselves. The barrier
> flush was implicit inside the intel_gt_wait_for_idle() -- except because
> we use i915_gem_evict from inside an active timeline during execbuf, we
> could easily end up waiting upon ourselves.
> 
> Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> Fixes: a46bfdc83fee ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
> Testcase: igt/gem_exec_reloc/basic-range
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

I think we might as well just revert 7936a22dd466 and take another look
at how to repeat the waits; I'm optimistic that with

commit 1683d24c1470fb47716bd3ccd4e06547eb0ce0ed
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Tue Nov 19 16:25:58 2019 +0000

    drm/i915/gt: Move new timelines to the end of active_list

the problem (e.g. igt/live_late_gt_pm) has mostly evaporated.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 24+ messages in thread

* [PATCH] Revert "drm/i915/gt: Wait for new requests in intel_gt_retire_requests()"
@ 2019-11-20 19:20   ` Chris Wilson
  0 siblings, 0 replies; 24+ messages in thread
From: Chris Wilson @ 2019-11-20 19:20 UTC (permalink / raw)
  To: intel-gfx

From inside an active timeline in the execbuf ioctl, we may try to
reclaim some space in the GGTT. We need GGTT space for all objects on
!full-ppgtt platforms, and for context images everywhere. However, to
free up space in the GGTT we may need to remove some pinned objects
(e.g. context images) that require flushing the idle barriers to remove.
For this we use the big hammer of intel_gt_wait_for_idle().

However, commit 7936a22dd466 ("drm/i915/gt: Wait for new requests in
intel_gt_retire_requests()") will continue spinning on the wait if a
timeline is active but lacks requests, as is the case during execbuf
reservation. Spinning forever is quite time consuming, so revert that
commit and start again.

In practice, the effect commit 7936a22dd466 was trying to achieve is
accomplished by commit 1683d24c1470 ("drm/i915/gt: Move new timelines
to the end of active_list"), so there is no immediate rush to replace
the looping.

Testcase: igt/gem_exec_reloc/basic-range
Fixes: 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")
References: 1683d24c1470 ("drm/i915/gt: Move new timelines to the end of active_list")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_gt_requests.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 4dc3cbeb1b36..f02f781b8492 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -33,6 +33,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl, *tn;
+	unsigned long active_count = 0;
 	bool interruptible;
 	LIST_HEAD(free);
 
@@ -44,8 +45,10 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 
 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-		if (!mutex_trylock(&tl->mutex))
+		if (!mutex_trylock(&tl->mutex)) {
+			active_count++; /* report busy to caller, try again? */
 			continue;
+		}
 
 		intel_timeline_get(tl);
 		GEM_BUG_ON(!atomic_read(&tl->active_count));
@@ -72,6 +75,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 		list_safe_reset_next(tl, tn, link);
 		if (atomic_dec_and_test(&tl->active_count))
 			list_del(&tl->link);
+		else
+			active_count += !!rcu_access_pointer(tl->last_request.fence);
 
 		mutex_unlock(&tl->mutex);
 
@@ -86,7 +91,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	return list_empty(&timelines->active_list) ? 0 : timeout;
+	return active_count ? timeout : 0;
 }
 
 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
-- 
2.24.0
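
For illustration, here is a self-contained userspace toy model of the loop
behaviour this patch changes (not driver code -- only the before/after
return-value policy of intel_gt_retire_requests_timeout() is mirrored; the
toy_timeline struct and the retire_old/retire_new/wait_for_idle helpers are
made up for the demo). A timeline kept active by a pinned context but with no
request left to retire makes the old policy report "busy" on every pass, so a
caller looping until idle never terminates; the new policy reports idle as
soon as nothing remains to retire or wait for.

/*
 * Toy model only -- not i915 source.  The "timelines" are plain flags; the
 * interesting part is the termination condition of each retire pass.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_timeline {
	bool on_active_list;	/* kept alive, e.g. by a pinned context */
	bool has_last_request;	/* still has work to retire */
};

/* Pre-revert policy: busy while *any* timeline sits on the active list. */
static long retire_old(struct toy_timeline *tl, int n, long timeout)
{
	for (int i = 0; i < n; i++)
		if (tl[i].on_active_list)
			return timeout;	/* "try again", even with nothing to retire */
	return 0;
}

/* Post-revert policy: busy only while a timeline still carries a request. */
static long retire_new(struct toy_timeline *tl, int n, long timeout)
{
	long active_count = 0;

	for (int i = 0; i < n; i++) {
		if (tl[i].has_last_request) {
			tl[i].has_last_request = false;	/* retire it */
			active_count++;			/* more may arrive, report busy */
		}
	}
	return active_count ? timeout : 0;
}

/* Caller loop in the spirit of intel_gt_wait_for_idle(): retire until idle. */
static int wait_for_idle(long (*retire)(struct toy_timeline *, int, long),
			 struct toy_timeline *tl, int n)
{
	int passes = 0;

	while (retire(tl, n, 1) > 0) {
		if (++passes > 1000)	/* stand-in for "spins forever" */
			return -1;
	}
	return passes;
}

int main(void)
{
	/* The execbuf case: one timeline active (context pinned), no requests. */
	struct toy_timeline tl = { .on_active_list = true, .has_last_request = false };

	printf("old policy: %d\n", wait_for_idle(retire_old, &tl, 1));	/* -1: livelock */
	printf("new policy: %d\n", wait_for_idle(retire_new, &tl, 1));	/* 0: idle at once */
	return 0;
}

In the actual hunk above, trylock failures are also counted (the "report busy
to caller, try again?" case), so a genuinely contended timeline still reports
busy; only the active-but-request-less case stops the caller from spinning.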

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush (rev3)
@ 2019-11-21  0:37   ` Patchwork
  0 siblings, 0 replies; 24+ messages in thread
From: Patchwork @ 2019-11-21  0:37 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush (rev3)
URL   : https://patchwork.freedesktop.org/series/69752/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
e0572642aeca Revert "drm/i915/gt: Wait for new requests in intel_gt_retire_requests()"
-:20: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit 7936a22dd466 ("drm/i915/gt: Wait for new requests in intel_gt_retire_requests()")'
#20: 
In practice, the effect commit 7936a22dd466 was trying to achieve is

-:27: WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#27: 
References: 1683d24c1470 ("drm/i915/gt: Move new timelines to the end of active_list")

-:27: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit 1683d24c1470 ("drm/i915/gt: Move new timelines to the end of active_list")'
#27: 
References: 1683d24c1470 ("drm/i915/gt: Move new timelines to the end of active_list")

total: 2 errors, 1 warnings, 0 checks, 34 lines checked

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 24+ messages in thread

* ✓ Fi.CI.BAT: success for drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush (rev3)
@ 2019-11-21  0:59   ` Patchwork
  0 siblings, 0 replies; 24+ messages in thread
From: Patchwork @ 2019-11-21  0:59 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush (rev3)
URL   : https://patchwork.freedesktop.org/series/69752/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7393 -> Patchwork_15357
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/index.html

Known issues
------------

  Here are the changes found in Patchwork_15357 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@kms_busy@basic-flip-pipe-a:
    - fi-icl-u2:          [PASS][1] -> [INCOMPLETE][2] ([fdo#107713])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/fi-icl-u2/igt@kms_busy@basic-flip-pipe-a.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/fi-icl-u2/igt@kms_busy@basic-flip-pipe-a.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][3] -> [FAIL][4] ([fdo#111045] / [fdo#111096])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  
#### Possible fixes ####

  * igt@i915_module_load@reload-with-fault-injection:
    - {fi-kbl-7560u}:     [INCOMPLETE][5] ([fdo#112298]) -> [PASS][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/fi-kbl-7560u/igt@i915_module_load@reload-with-fault-injection.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/fi-kbl-7560u/igt@i915_module_load@reload-with-fault-injection.html

  * igt@i915_pm_rpm@module-reload:
    - fi-skl-lmem:        [DMESG-WARN][7] ([fdo#112261]) -> [PASS][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/fi-skl-lmem/igt@i915_pm_rpm@module-reload.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/fi-skl-lmem/igt@i915_pm_rpm@module-reload.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-icl-u3:          [FAIL][9] ([fdo#103167]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/fi-icl-u3/igt@kms_frontbuffer_tracking@basic.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/fi-icl-u3/igt@kms_frontbuffer_tracking@basic.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#111045]: https://bugs.freedesktop.org/show_bug.cgi?id=111045
  [fdo#111096]: https://bugs.freedesktop.org/show_bug.cgi?id=111096
  [fdo#112261]: https://bugs.freedesktop.org/show_bug.cgi?id=112261
  [fdo#112298]: https://bugs.freedesktop.org/show_bug.cgi?id=112298


Participating hosts (50 -> 45)
------------------------------

  Missing    (5): fi-hsw-4200u fi-bsw-cyan fi-ctg-p8600 fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7393 -> Patchwork_15357

  CI-20190529: 20190529
  CI_DRM_7393: 0e204eb18baca0cd97950bf936fffdbbce1fd337 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5299: 65fed6a79adea14f7bef6d55530da47d7731d370 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_15357: e0572642aecae15d5993af8fda95338ded4de47c @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

e0572642aeca Revert "drm/i915/gt: Wait for new requests in intel_gt_retire_requests()"

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 24+ messages in thread

* ✗ Fi.CI.IGT: failure for drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush (rev3)
@ 2019-11-22  0:16   ` Patchwork
  0 siblings, 0 replies; 24+ messages in thread
From: Patchwork @ 2019-11-22  0:16 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush (rev3)
URL   : https://patchwork.freedesktop.org/series/69752/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7393_full -> Patchwork_15357_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_15357_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_15357_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_15357_full:

### IGT changes ###

#### Possible regressions ####

  * igt@kms_cursor_crc@pipe-a-cursor-64x64-random:
    - shard-tglb:         [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb9/igt@kms_cursor_crc@pipe-a-cursor-64x64-random.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb6/igt@kms_cursor_crc@pipe-a-cursor-64x64-random.html

  
Known issues
------------

  Here are the changes found in Patchwork_15357_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_busy@busy-vcs1:
    - shard-iclb:         [PASS][3] -> [SKIP][4] ([fdo#112080]) +14 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb1/igt@gem_busy@busy-vcs1.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb5/igt@gem_busy@busy-vcs1.html

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-skl:          [PASS][5] -> [INCOMPLETE][6] ([fdo#104108])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-skl9/igt@gem_ctx_isolation@rcs0-s3.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-skl5/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_ctx_isolation@vcs1-clean:
    - shard-iclb:         [PASS][7] -> [SKIP][8] ([fdo#109276] / [fdo#112080]) +2 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb1/igt@gem_ctx_isolation@vcs1-clean.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb5/igt@gem_ctx_isolation@vcs1-clean.html

  * igt@gem_ctx_isolation@vcs1-s3:
    - shard-tglb:         [PASS][9] -> [INCOMPLETE][10] ([fdo#111832]) +1 similar issue
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb9/igt@gem_ctx_isolation@vcs1-s3.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb4/igt@gem_ctx_isolation@vcs1-s3.html

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [PASS][11] -> [SKIP][12] ([fdo#110841])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb8/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb4/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_ctx_shared@q-smoketest-all:
    - shard-tglb:         [PASS][13] -> [INCOMPLETE][14] ([fdo#111735])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb7/igt@gem_ctx_shared@q-smoketest-all.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb4/igt@gem_ctx_shared@q-smoketest-all.html

  * igt@gem_eio@in-flight-suspend:
    - shard-tglb:         [PASS][15] -> [INCOMPLETE][16] ([fdo#111832] / [fdo#111850] / [fdo#112081])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb1/igt@gem_eio@in-flight-suspend.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb1/igt@gem_eio@in-flight-suspend.html

  * igt@gem_eio@kms:
    - shard-tglb:         [PASS][17] -> [INCOMPLETE][18] ([fdo#111887])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb5/igt@gem_eio@kms.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb5/igt@gem_eio@kms.html

  * igt@gem_exec_balancer@smoke:
    - shard-iclb:         [PASS][19] -> [SKIP][20] ([fdo#110854])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb1/igt@gem_exec_balancer@smoke.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb5/igt@gem_exec_balancer@smoke.html

  * igt@gem_exec_create@madvise:
    - shard-tglb:         [PASS][21] -> [INCOMPLETE][22] ([fdo#111747])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb1/igt@gem_exec_create@madvise.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb6/igt@gem_exec_create@madvise.html

  * igt@gem_exec_schedule@in-order-bsd:
    - shard-iclb:         [PASS][23] -> [SKIP][24] ([fdo#112146]) +5 similar issues
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb6/igt@gem_exec_schedule@in-order-bsd.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb2/igt@gem_exec_schedule@in-order-bsd.html

  * igt@gem_userptr_blits@dmabuf-sync:
    - shard-snb:          [PASS][25] -> [DMESG-WARN][26] ([fdo#111870])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-snb4/igt@gem_userptr_blits@dmabuf-sync.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-snb2/igt@gem_userptr_blits@dmabuf-sync.html

  * igt@gem_userptr_blits@map-fixed-invalidate-busy-gup:
    - shard-hsw:          [PASS][27] -> [DMESG-WARN][28] ([fdo#111870])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-hsw6/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-hsw4/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html

  * igt@i915_pm_dc@dc6-psr:
    - shard-iclb:         [PASS][29] -> [FAIL][30] ([fdo#111830 ])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb1/igt@i915_pm_dc@dc6-psr.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb6/igt@i915_pm_dc@dc6-psr.html

  * igt@i915_selftest@live_perf:
    - shard-hsw:          [PASS][31] -> [INCOMPLETE][32] ([fdo#103540])
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-hsw1/igt@i915_selftest@live_perf.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-hsw7/igt@i915_selftest@live_perf.html

  * igt@kms_cursor_crc@pipe-c-cursor-suspend:
    - shard-apl:          [PASS][33] -> [DMESG-WARN][34] ([fdo#108566]) +1 similar issue
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-apl2/igt@kms_cursor_crc@pipe-c-cursor-suspend.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-apl1/igt@kms_cursor_crc@pipe-c-cursor-suspend.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-pwrite:
    - shard-iclb:         [PASS][35] -> [FAIL][36] ([fdo#103167]) +3 similar issues
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-pwrite.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb2/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-pwrite.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-wc:
    - shard-tglb:         [PASS][37] -> [FAIL][38] ([fdo#103167]) +4 similar issues
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb5/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-wc.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb3/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-shrfb-draw-mmap-wc.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-rte:
    - shard-iclb:         [PASS][39] -> [FAIL][40] ([fdo#103167] / [fdo#110378])
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb5/igt@kms_frontbuffer_tracking@fbcpsr-1p-rte.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb2/igt@kms_frontbuffer_tracking@fbcpsr-1p-rte.html

  * igt@kms_frontbuffer_tracking@fbcpsr-suspend:
    - shard-iclb:         [PASS][41] -> [INCOMPLETE][42] ([fdo#106978] / [fdo#107713])
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb5/igt@kms_frontbuffer_tracking@fbcpsr-suspend.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb3/igt@kms_frontbuffer_tracking@fbcpsr-suspend.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - shard-tglb:         [PASS][43] -> [INCOMPLETE][44] ([fdo#111832] / [fdo#111850]) +3 similar issues
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb3/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb3/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html

  * igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
    - shard-skl:          [PASS][45] -> [FAIL][46] ([fdo#108145] / [fdo#110403])
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-skl10/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-skl2/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_render:
    - shard-iclb:         [PASS][47] -> [SKIP][48] ([fdo#109441]) +2 similar issues
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb2/igt@kms_psr@psr2_cursor_render.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb6/igt@kms_psr@psr2_cursor_render.html

  * igt@kms_vblank@pipe-a-ts-continuation-suspend:
    - shard-kbl:          [PASS][49] -> [DMESG-WARN][50] ([fdo#108566]) +3 similar issues
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html

  * igt@prime_busy@hang-bsd2:
    - shard-iclb:         [PASS][51] -> [SKIP][52] ([fdo#109276]) +17 similar issues
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb1/igt@prime_busy@hang-bsd2.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb6/igt@prime_busy@hang-bsd2.html

  
#### Possible fixes ####

  * igt@gem_busy@close-race:
    - shard-tglb:         [INCOMPLETE][53] ([fdo#111747]) -> [PASS][54]
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb6/igt@gem_busy@close-race.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb5/igt@gem_busy@close-race.html

  * igt@gem_ctx_isolation@bcs0-s3:
    - shard-apl:          [DMESG-WARN][55] ([fdo#108566]) -> [PASS][56]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-apl4/igt@gem_ctx_isolation@bcs0-s3.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-apl7/igt@gem_ctx_isolation@bcs0-s3.html

  * igt@gem_ctx_persistence@vcs1-queued:
    - shard-iclb:         [SKIP][57] ([fdo#109276] / [fdo#112080]) -> [PASS][58] +3 similar issues
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb6/igt@gem_ctx_persistence@vcs1-queued.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb2/igt@gem_ctx_persistence@vcs1-queued.html

  * igt@gem_ctx_persistence@vecs0-mixed-process:
    - shard-skl:          [FAIL][59] ([fdo#112194]) -> [PASS][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-skl1/igt@gem_ctx_persistence@vecs0-mixed-process.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-skl9/igt@gem_ctx_persistence@vecs0-mixed-process.html

  * igt@gem_exec_reloc@basic-gtt-cpu-active:
    - shard-skl:          [DMESG-WARN][61] ([fdo#106107]) -> [PASS][62]
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-skl7/igt@gem_exec_reloc@basic-gtt-cpu-active.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-skl10/igt@gem_exec_reloc@basic-gtt-cpu-active.html

  * igt@gem_exec_reloc@basic-range:
    - shard-hsw:          [TIMEOUT][63] ([fdo#112271]) -> [PASS][64] +1 similar issue
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-hsw6/igt@gem_exec_reloc@basic-range.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-hsw6/igt@gem_exec_reloc@basic-range.html

  * igt@gem_exec_reloc@basic-range-active:
    - shard-snb:          [TIMEOUT][65] ([fdo#112271]) -> [PASS][66] +1 similar issue
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-snb6/igt@gem_exec_reloc@basic-range-active.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-snb5/igt@gem_exec_reloc@basic-range-active.html

  * igt@gem_exec_schedule@in-order-bsd2:
    - shard-iclb:         [SKIP][67] ([fdo#109276]) -> [PASS][68] +24 similar issues
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb6/igt@gem_exec_schedule@in-order-bsd2.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb2/igt@gem_exec_schedule@in-order-bsd2.html

  * igt@gem_exec_schedule@preempt-other-chain-bsd:
    - shard-iclb:         [SKIP][69] ([fdo#112146]) -> [PASS][70] +1 similar issue
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb2/igt@gem_exec_schedule@preempt-other-chain-bsd.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb6/igt@gem_exec_schedule@preempt-other-chain-bsd.html

  * igt@gem_userptr_blits@sync-unmap-cycles:
    - shard-snb:          [DMESG-WARN][71] ([fdo#111870]) -> [PASS][72] +1 similar issue
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-snb1/igt@gem_userptr_blits@sync-unmap-cycles.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-snb6/igt@gem_userptr_blits@sync-unmap-cycles.html
    - shard-hsw:          [DMESG-WARN][73] ([fdo#111870]) -> [PASS][74] +1 similar issue
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-hsw4/igt@gem_userptr_blits@sync-unmap-cycles.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-hsw7/igt@gem_userptr_blits@sync-unmap-cycles.html

  * igt@i915_pm_rpm@system-suspend:
    - shard-hsw:          [INCOMPLETE][75] ([fdo#103540] / [fdo#107807]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-hsw2/igt@i915_pm_rpm@system-suspend.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-hsw6/igt@i915_pm_rpm@system-suspend.html

  * igt@i915_suspend@sysfs-reader:
    - shard-kbl:          [DMESG-WARN][77] ([fdo#108566]) -> [PASS][78]
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-kbl7/igt@i915_suspend@sysfs-reader.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-kbl6/igt@i915_suspend@sysfs-reader.html

  * igt@kms_big_fb@y-tiled-32bpp-rotate-0:
    - shard-skl:          [INCOMPLETE][79] ([fdo#104108] / [fdo#112347]) -> [PASS][80]
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-skl9/igt@kms_big_fb@y-tiled-32bpp-rotate-0.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-skl1/igt@kms_big_fb@y-tiled-32bpp-rotate-0.html

  * igt@kms_draw_crc@draw-method-xrgb2101010-pwrite-untiled:
    - shard-skl:          [FAIL][81] ([fdo#103184] / [fdo#103232] / [fdo#108472]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-skl5/igt@kms_draw_crc@draw-method-xrgb2101010-pwrite-untiled.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-skl10/igt@kms_draw_crc@draw-method-xrgb2101010-pwrite-untiled.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt:
    - shard-tglb:         [FAIL][83] ([fdo#103167]) -> [PASS][84] +5 similar issues
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb5/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb3/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt.html

  * igt@kms_frontbuffer_tracking@fbc-1p-rte:
    - shard-iclb:         [FAIL][85] ([fdo#103167] / [fdo#110378]) -> [PASS][86]
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-1p-rte.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-rte.html

  * igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite:
    - shard-iclb:         [FAIL][87] ([fdo#103167]) -> [PASS][88] +3 similar issues
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html

  * igt@kms_psr2_su@page_flip:
    - shard-iclb:         [SKIP][89] ([fdo#109642] / [fdo#111068]) -> [PASS][90]
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb3/igt@kms_psr2_su@page_flip.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb2/igt@kms_psr2_su@page_flip.html

  * igt@kms_psr@no_drrs:
    - shard-iclb:         [FAIL][91] ([fdo#108341]) -> [PASS][92]
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb1/igt@kms_psr@no_drrs.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb5/igt@kms_psr@no_drrs.html

  * igt@kms_psr@psr2_primary_blt:
    - shard-iclb:         [SKIP][93] ([fdo#109441]) -> [PASS][94]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb6/igt@kms_psr@psr2_primary_blt.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb2/igt@kms_psr@psr2_primary_blt.html

  * igt@kms_vblank@pipe-c-ts-continuation-suspend:
    - shard-tglb:         [INCOMPLETE][95] ([fdo#111832] / [fdo#111850]) -> [PASS][96] +2 similar issues
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb5/igt@kms_vblank@pipe-c-ts-continuation-suspend.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb8/igt@kms_vblank@pipe-c-ts-continuation-suspend.html

  * igt@kms_vblank@pipe-d-ts-continuation-suspend:
    - shard-tglb:         [INCOMPLETE][97] ([fdo#111850]) -> [PASS][98]
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb8/igt@kms_vblank@pipe-d-ts-continuation-suspend.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb5/igt@kms_vblank@pipe-d-ts-continuation-suspend.html

  * igt@perf@gen8-unprivileged-single-ctx-counters:
    - shard-skl:          [INCOMPLETE][99] ([fdo#111747]) -> [PASS][100]
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-skl8/igt@perf@gen8-unprivileged-single-ctx-counters.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-skl4/igt@perf@gen8-unprivileged-single-ctx-counters.html

  * igt@perf_pmu@init-busy-vcs1:
    - shard-iclb:         [SKIP][101] ([fdo#112080]) -> [PASS][102] +12 similar issues
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb6/igt@perf_pmu@init-busy-vcs1.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb4/igt@perf_pmu@init-busy-vcs1.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv-switch:
    - shard-iclb:         [FAIL][103] ([fdo#111329]) -> [SKIP][104] ([fdo#109276] / [fdo#112080])
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb1/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb5/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html

  * igt@gem_exec_schedule@deep-bsd1:
    - shard-tglb:         [FAIL][105] ([fdo#111646]) -> [INCOMPLETE][106] ([fdo#111671])
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb5/igt@gem_exec_schedule@deep-bsd1.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 24+ messages in thread


  * igt@kms_psr@psr2_primary_blt:
    - shard-iclb:         [SKIP][93] ([fdo#109441]) -> [PASS][94]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb6/igt@kms_psr@psr2_primary_blt.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb2/igt@kms_psr@psr2_primary_blt.html

  * igt@kms_vblank@pipe-c-ts-continuation-suspend:
    - shard-tglb:         [INCOMPLETE][95] ([fdo#111832] / [fdo#111850]) -> [PASS][96] +2 similar issues
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb5/igt@kms_vblank@pipe-c-ts-continuation-suspend.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb8/igt@kms_vblank@pipe-c-ts-continuation-suspend.html

  * igt@kms_vblank@pipe-d-ts-continuation-suspend:
    - shard-tglb:         [INCOMPLETE][97] ([fdo#111850]) -> [PASS][98]
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb8/igt@kms_vblank@pipe-d-ts-continuation-suspend.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb5/igt@kms_vblank@pipe-d-ts-continuation-suspend.html

  * igt@perf@gen8-unprivileged-single-ctx-counters:
    - shard-skl:          [INCOMPLETE][99] ([fdo#111747]) -> [PASS][100]
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-skl8/igt@perf@gen8-unprivileged-single-ctx-counters.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-skl4/igt@perf@gen8-unprivileged-single-ctx-counters.html

  * igt@perf_pmu@init-busy-vcs1:
    - shard-iclb:         [SKIP][101] ([fdo#112080]) -> [PASS][102] +12 similar issues
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb6/igt@perf_pmu@init-busy-vcs1.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb4/igt@perf_pmu@init-busy-vcs1.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv-switch:
    - shard-iclb:         [FAIL][103] ([fdo#111329]) -> [SKIP][104] ([fdo#109276] / [fdo#112080])
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-iclb1/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-iclb5/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html

  * igt@gem_exec_schedule@deep-bsd1:
    - shard-tglb:         [FAIL][105] ([fdo#111646]) -> [INCOMPLETE][106] ([fdo#111671])
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7393/shard-tglb5/igt@gem_exec_schedule@deep-bsd1.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/shard-tglb

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_15357/index.html

Thread overview: 24+ messages
2019-11-20 13:41 [PATCH] drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush Chris Wilson
2019-11-20 13:41 ` [Intel-gfx] " Chris Wilson
2019-11-20 15:58 ` Tvrtko Ursulin
2019-11-20 15:58   ` [Intel-gfx] " Tvrtko Ursulin
2019-11-20 16:02   ` Chris Wilson
2019-11-20 16:02     ` [Intel-gfx] " Chris Wilson
2019-11-20 16:14     ` Tvrtko Ursulin
2019-11-20 16:14       ` [Intel-gfx] " Tvrtko Ursulin
2019-11-20 16:28       ` Chris Wilson
2019-11-20 16:28         ` [Intel-gfx] " Chris Wilson
2019-11-20 16:42 ` [PATCH] drm/i915/gem: Reduce ggtt_flush() " Chris Wilson
2019-11-20 16:42   ` [Intel-gfx] " Chris Wilson
2019-11-20 16:49   ` Chris Wilson
2019-11-20 16:49     ` [Intel-gfx] " Chris Wilson
2019-11-20 19:05 ` [PATCH] drm/i915/gem: Reduce flush_ggtt() " Chris Wilson
2019-11-20 19:05   ` [Intel-gfx] " Chris Wilson
2019-11-20 19:20 ` [PATCH] Revert "drm/i915/gt: Wait for new requests in intel_gt_retire_requests()" Chris Wilson
2019-11-20 19:20   ` [Intel-gfx] " Chris Wilson
2019-11-21  0:37 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/gem: Reduce flush_ggtt() from a wait-for-idle to a mere barrier flush (rev3) Patchwork
2019-11-21  0:37   ` [Intel-gfx] " Patchwork
2019-11-21  0:59 ` ✓ Fi.CI.BAT: success " Patchwork
2019-11-21  0:59   ` [Intel-gfx] " Patchwork
2019-11-22  0:16 ` ✗ Fi.CI.IGT: failure " Patchwork
2019-11-22  0:16   ` [Intel-gfx] " Patchwork
