* [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
@ 2017-09-25 12:49 Chris Wilson
2017-09-25 12:49 ` [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function Chris Wilson
` (6 more replies)
0 siblings, 7 replies; 15+ messages in thread
From: Chris Wilson @ 2017-09-25 12:49 UTC (permalink / raw)
To: intel-gfx
Just rearrange the code slightly to trim the number of iterations
required.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/intel_lrc.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3623403a4f2d..2c07f3c08bd3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -571,14 +571,17 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
execlists_submit_ports(engine);
}
-static void execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+static void
+execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(execlists->port); i++)
- i915_gem_request_put(port_request(&execlists->port[i]));
+ struct execlist_port *port = execlists->port;
+ unsigned int num_ports = ARRAY_SIZE(execlists->port);
- memset(execlists->port, 0, sizeof(execlists->port));
+ while (num_ports-- && port_isset(port)) {
+ i915_gem_request_put(port_request(port));
+ memset(port, 0, sizeof(*port));
+ port++;
+ }
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
@@ -625,7 +628,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
execlists->queue = RB_ROOT;
execlists->first = NULL;
- GEM_BUG_ON(port_isset(&execlists->port[0]));
+ GEM_BUG_ON(port_isset(execlists->port));
/*
* The port is checked prior to scheduling a tasklet, but
--
2.14.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function
2017-09-25 12:49 [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Chris Wilson
@ 2017-09-25 12:49 ` Chris Wilson
2017-09-25 12:55 ` Mika Kuoppala
2017-09-25 14:54 ` Mika Kuoppala
2017-09-25 12:49 ` [PATCH 3/3] drm/i915/execlists: Cache the last priolist lookup Chris Wilson
` (5 subsequent siblings)
6 siblings, 2 replies; 15+ messages in thread
From: Chris Wilson @ 2017-09-25 12:49 UTC (permalink / raw)
To: intel-gfx
In the future, we will want to unwind requests following a preemption
point. This requires the same steps as for unwinding upon a reset, so
extract the existing code to a separate function for later use.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/intel_lrc.c | 53 +++++++++++++++++++++++++---------------
1 file changed, 33 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2c07f3c08bd3..c84831c7ea4a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -348,6 +348,37 @@ lookup_priolist(struct intel_engine_cs *engine,
return ptr_pack_bits(p, first, 1);
}
+static void unwind_wa_tail(struct drm_i915_gem_request *rq)
+{
+ rq->tail = intel_ring_wrap(rq->ring,
+ rq->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
+ assert_ring_tail_valid(rq->ring, rq->tail);
+}
+
+static void unwind_incomplete_requests(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *rq, *rn;
+
+ lockdep_assert_held(&engine->timeline->lock);
+ list_for_each_entry_safe_reverse(rq, rn,
+ &engine->timeline->requests,
+ link) {
+ struct i915_priolist *p;
+
+ if (i915_gem_request_completed(rq))
+ return;
+
+ __i915_gem_request_unsubmit(rq);
+ unwind_wa_tail(rq);
+
+ p = lookup_priolist(engine,
+ &rq->priotree,
+ rq->priotree.priority);
+ list_add(&rq->priotree.link,
+ &ptr_mask_bits(p, 1)->requests);
+ }
+}
+
static inline void
execlists_context_status_change(struct drm_i915_gem_request *rq,
unsigned long status)
@@ -1378,7 +1409,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct drm_i915_gem_request *rq, *rn;
struct intel_context *ce;
unsigned long flags;
@@ -1396,21 +1426,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
execlist_cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
- list_for_each_entry_safe_reverse(rq, rn,
- &engine->timeline->requests, link) {
- struct i915_priolist *p;
-
- if (i915_gem_request_completed(rq))
- break;
-
- __i915_gem_request_unsubmit(rq);
-
- p = lookup_priolist(engine,
- &rq->priotree,
- rq->priotree.priority);
- list_add(&rq->priotree.link,
- &ptr_mask_bits(p, 1)->requests);
- }
+ unwind_incomplete_requests(engine);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -1447,10 +1463,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
intel_ring_update_space(request->ring);
/* Reset WaIdleLiteRestore:bdw,skl as well */
- request->tail =
- intel_ring_wrap(request->ring,
- request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
- assert_ring_tail_valid(request->ring, request->tail);
+ unwind_wa_tail(request);
}
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
--
2.14.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH 3/3] drm/i915/execlists: Cache the last priolist lookup
2017-09-25 12:49 [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Chris Wilson
2017-09-25 12:49 ` [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function Chris Wilson
@ 2017-09-25 12:49 ` Chris Wilson
2017-09-25 12:54 ` Chris Wilson
2017-09-25 13:59 ` [PATCH v2] " Chris Wilson
2017-09-25 13:00 ` [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Mika Kuoppala
` (4 subsequent siblings)
6 siblings, 2 replies; 15+ messages in thread
From: Chris Wilson @ 2017-09-25 12:49 UTC (permalink / raw)
To: intel-gfx
From: Michał Winiarski <michal.winiarski@intel.com>
Avoid the repeated rbtree lookup for each request as we unwind them by
tracking the last priolist.
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/intel_lrc.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c84831c7ea4a..7802b1bd2b5c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -357,13 +357,14 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
static void unwind_incomplete_requests(struct intel_engine_cs *engine)
{
+ struct i915_priolist *p = &engine->execlists.default_priolist;
+ int last_prio = I915_PRIORITY_NORMAL;
struct drm_i915_gem_request *rq, *rn;
lockdep_assert_held(&engine->timeline->lock);
list_for_each_entry_safe_reverse(rq, rn,
&engine->timeline->requests,
link) {
- struct i915_priolist *p;
if (i915_gem_request_completed(rq))
return;
@@ -371,11 +372,16 @@ static void unwind_incomplete_requests(struct intel_engine_cs *engine)
__i915_gem_request_unsubmit(rq);
unwind_wa_tail(rq);
- p = lookup_priolist(engine,
- &rq->priotree,
- rq->priotree.priority);
- list_add(&rq->priotree.link,
- &ptr_mask_bits(p, 1)->requests);
+ if (rq->priotree.priority != last_prio) {
+ p = lookup_priolist(engine,
+ &rq->priotree,
+ rq->priotree.priority);
+ p = ptr_mask_bits(p, 1);
+
+ last_prio = rq->priotree.priority;
+ }
+
+ list_add(&rq->priotree.link, &p->requests);
}
}
--
2.14.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCH 3/3] drm/i915/execlists: Cache the last priolist lookup
2017-09-25 12:49 ` [PATCH 3/3] drm/i915/execlists: Cache the last priolist lookup Chris Wilson
@ 2017-09-25 12:54 ` Chris Wilson
2017-09-25 13:59 ` [PATCH v2] " Chris Wilson
1 sibling, 0 replies; 15+ messages in thread
From: Chris Wilson @ 2017-09-25 12:54 UTC (permalink / raw)
To: intel-gfx
Quoting Chris Wilson (2017-09-25 13:49:29)
> From: Michał Winiarski <michal.winiarski@intel.com>
>
> Avoid the repeated rbtree lookup for each request as we unwind them by
> tracking the last priolist.
>
> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/intel_lrc.c | 18 ++++++++++++------
> 1 file changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index c84831c7ea4a..7802b1bd2b5c 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -357,13 +357,14 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
>
> static void unwind_incomplete_requests(struct intel_engine_cs *engine)
> {
> + struct i915_priolist *p = &engine->execlists.default_priolist;
> + int last_prio = I915_PRIORITY_NORMAL;
Bah, my suggestion. And it doesn't work unless default_priolist is
already in the rbtree.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function
2017-09-25 12:49 ` [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function Chris Wilson
@ 2017-09-25 12:55 ` Mika Kuoppala
2017-09-25 13:18 ` Chris Wilson
2017-09-25 14:54 ` Mika Kuoppala
1 sibling, 1 reply; 15+ messages in thread
From: Mika Kuoppala @ 2017-09-25 12:55 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
Chris Wilson <chris@chris-wilson.co.uk> writes:
> In the future, we will want to unwind requests following a preemption
> point. This requires the same steps as for unwinding upon a reset, so
> extract the existing code to a separate function for later use.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/intel_lrc.c | 53 +++++++++++++++++++++++++---------------
> 1 file changed, 33 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 2c07f3c08bd3..c84831c7ea4a 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -348,6 +348,37 @@ lookup_priolist(struct intel_engine_cs *engine,
> return ptr_pack_bits(p, first, 1);
> }
>
> +static void unwind_wa_tail(struct drm_i915_gem_request *rq)
> +{
> + rq->tail = intel_ring_wrap(rq->ring,
> + rq->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
> + assert_ring_tail_valid(rq->ring, rq->tail);
> +}
> +
> +static void unwind_incomplete_requests(struct intel_engine_cs *engine)
> +{
> + struct drm_i915_gem_request *rq, *rn;
> +
> + lockdep_assert_held(&engine->timeline->lock);
> + list_for_each_entry_safe_reverse(rq, rn,
> + &engine->timeline->requests,
> + link) {
> + struct i915_priolist *p;
> +
> + if (i915_gem_request_completed(rq))
> + return;
> +
> + __i915_gem_request_unsubmit(rq);
> + unwind_wa_tail(rq);
This here caught my attention. Why do you want
to do this for all requests now?
-Mika
> +
> + p = lookup_priolist(engine,
> + &rq->priotree,
> + rq->priotree.priority);
> + list_add(&rq->priotree.link,
> + &ptr_mask_bits(p, 1)->requests);
> + }
> +}
> +
> static inline void
> execlists_context_status_change(struct drm_i915_gem_request *rq,
> unsigned long status)
> @@ -1378,7 +1409,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
> struct drm_i915_gem_request *request)
> {
> struct intel_engine_execlists * const execlists = &engine->execlists;
> - struct drm_i915_gem_request *rq, *rn;
> struct intel_context *ce;
> unsigned long flags;
>
> @@ -1396,21 +1426,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
> execlist_cancel_port_requests(execlists);
>
> /* Push back any incomplete requests for replay after the reset. */
> - list_for_each_entry_safe_reverse(rq, rn,
> - &engine->timeline->requests, link) {
> - struct i915_priolist *p;
> -
> - if (i915_gem_request_completed(rq))
> - break;
> -
> - __i915_gem_request_unsubmit(rq);
> -
> - p = lookup_priolist(engine,
> - &rq->priotree,
> - rq->priotree.priority);
> - list_add(&rq->priotree.link,
> - &ptr_mask_bits(p, 1)->requests);
> - }
> + unwind_incomplete_requests(engine);
>
> spin_unlock_irqrestore(&engine->timeline->lock, flags);
>
> @@ -1447,10 +1463,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
> intel_ring_update_space(request->ring);
>
> /* Reset WaIdleLiteRestore:bdw,skl as well */
> - request->tail =
> - intel_ring_wrap(request->ring,
> - request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
> - assert_ring_tail_valid(request->ring, request->tail);
> + unwind_wa_tail(request);
> }
>
> static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
> --
> 2.14.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
2017-09-25 12:49 [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Chris Wilson
2017-09-25 12:49 ` [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function Chris Wilson
2017-09-25 12:49 ` [PATCH 3/3] drm/i915/execlists: Cache the last priolist lookup Chris Wilson
@ 2017-09-25 13:00 ` Mika Kuoppala
2017-09-25 13:08 ` Chris Wilson
2017-09-25 19:38 ` Chris Wilson
2017-09-25 13:36 ` ✗ Fi.CI.BAT: failure for series starting with [1/3] " Patchwork
` (3 subsequent siblings)
6 siblings, 2 replies; 15+ messages in thread
From: Mika Kuoppala @ 2017-09-25 13:00 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
Chris Wilson <chris@chris-wilson.co.uk> writes:
> Just rearrange the code slightly to trim the number of iterations
> required.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/intel_lrc.c | 17 ++++++++++-------
> 1 file changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 3623403a4f2d..2c07f3c08bd3 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -571,14 +571,17 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> execlists_submit_ports(engine);
> }
>
> -static void execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
> +static void
> +execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
> {
> - unsigned int i;
> -
> - for (i = 0; i < ARRAY_SIZE(execlists->port); i++)
> - i915_gem_request_put(port_request(&execlists->port[i]));
> + struct execlist_port *port = execlists->port;
> + unsigned int num_ports = ARRAY_SIZE(execlists->port);
>
> - memset(execlists->port, 0, sizeof(execlists->port));
> + while (num_ports-- && port_isset(port)) {
> + i915_gem_request_put(port_request(port));
> + memset(port, 0, sizeof(*port));
> + port++;
> + }
I thought this to be more about the reset and thus memsetting
everything would give us a clean slate. However now it matches
more of what the function is about.
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> }
>
> static void execlists_cancel_requests(struct intel_engine_cs *engine)
> @@ -625,7 +628,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
>
> execlists->queue = RB_ROOT;
> execlists->first = NULL;
> - GEM_BUG_ON(port_isset(&execlists->port[0]));
> + GEM_BUG_ON(port_isset(execlists->port));
>
> /*
> * The port is checked prior to scheduling a tasklet, but
> --
> 2.14.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
2017-09-25 13:00 ` [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Mika Kuoppala
@ 2017-09-25 13:08 ` Chris Wilson
2017-09-25 19:38 ` Chris Wilson
1 sibling, 0 replies; 15+ messages in thread
From: Chris Wilson @ 2017-09-25 13:08 UTC (permalink / raw)
To: Mika Kuoppala, intel-gfx
Quoting Mika Kuoppala (2017-09-25 14:00:17)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
>
> > Just rearrange the code slightly to trim the number of iterations
> > required.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> > drivers/gpu/drm/i915/intel_lrc.c | 17 ++++++++++-------
> > 1 file changed, 10 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > index 3623403a4f2d..2c07f3c08bd3 100644
> > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > @@ -571,14 +571,17 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> > execlists_submit_ports(engine);
> > }
> >
> > -static void execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
> > +static void
> > +execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
> > {
> > - unsigned int i;
> > -
> > - for (i = 0; i < ARRAY_SIZE(execlists->port); i++)
> > - i915_gem_request_put(port_request(&execlists->port[i]));
> > + struct execlist_port *port = execlists->port;
> > + unsigned int num_ports = ARRAY_SIZE(execlists->port);
> >
> > - memset(execlists->port, 0, sizeof(execlists->port));
> > + while (num_ports-- && port_isset(port)) {
> > + i915_gem_request_put(port_request(port));
> > + memset(port, 0, sizeof(*port));
> > + port++;
> > + }
>
> I thought this to be more about the reset and thus memsetting
> everything would give us a clean slate. However now it matches
> more of what the function is about.
Right. It started off life as being memset everything on reset, but I
want to use it more actively, so wanted to trim it to only work over the
set ports.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function
2017-09-25 12:55 ` Mika Kuoppala
@ 2017-09-25 13:18 ` Chris Wilson
0 siblings, 0 replies; 15+ messages in thread
From: Chris Wilson @ 2017-09-25 13:18 UTC (permalink / raw)
To: Mika Kuoppala, intel-gfx
Quoting Mika Kuoppala (2017-09-25 13:55:26)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
>
> > In the future, we will want to unwind requests following a preemption
> > point. This requires the same steps as for unwinding upon a reset, so
> > extract the existing code to a separate function for later use.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> > drivers/gpu/drm/i915/intel_lrc.c | 53 +++++++++++++++++++++++++---------------
> > 1 file changed, 33 insertions(+), 20 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > index 2c07f3c08bd3..c84831c7ea4a 100644
> > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > @@ -348,6 +348,37 @@ lookup_priolist(struct intel_engine_cs *engine,
> > return ptr_pack_bits(p, first, 1);
> > }
> >
> > +static void unwind_wa_tail(struct drm_i915_gem_request *rq)
> > +{
> > + rq->tail = intel_ring_wrap(rq->ring,
> > + rq->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
> > + assert_ring_tail_valid(rq->ring, rq->tail);
> > +}
> > +
> > +static void unwind_incomplete_requests(struct intel_engine_cs *engine)
> > +{
> > + struct drm_i915_gem_request *rq, *rn;
> > +
> > + lockdep_assert_held(&engine->timeline->lock);
> > + list_for_each_entry_safe_reverse(rq, rn,
> > + &engine->timeline->requests,
> > + link) {
> > + struct i915_priolist *p;
> > +
> > + if (i915_gem_request_completed(rq))
> > + return;
> > +
> > + __i915_gem_request_unsubmit(rq);
> > + unwind_wa_tail(rq);
>
> This here raised my attention. Why do you want
> to do this for all requests now?
It is a safety catch for preemption tasks. The worry being that if we
unwind a rq that was previously lite-restored, then on resubmission we
may not coalesce the requests again until perhaps a later interrupt.
It should never happen in practice. It requires a couple of reorderings
to happen at just the right moment, without triggering preemption...
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* ✗ Fi.CI.BAT: failure for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
2017-09-25 12:49 [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Chris Wilson
` (2 preceding siblings ...)
2017-09-25 13:00 ` [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Mika Kuoppala
@ 2017-09-25 13:36 ` Patchwork
2017-09-25 15:04 ` ✓ Fi.CI.BAT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2) Patchwork
` (2 subsequent siblings)
6 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2017-09-25 13:36 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
URL : https://patchwork.freedesktop.org/series/30838/
State : failure
== Summary ==
Series 30838v1 series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
https://patchwork.freedesktop.org/api/1.0/series/30838/revisions/1/mbox/
Test gem_exec_suspend:
Subgroup basic-s3:
pass -> DMESG-FAIL (fi-kbl-7560u)
Subgroup basic-s4-devices:
pass -> DMESG-FAIL (fi-kbl-7560u)
Test gem_flink_basic:
Subgroup bad-flink:
pass -> DMESG-WARN (fi-kbl-7560u)
Subgroup bad-open:
pass -> DMESG-WARN (fi-kbl-7560u)
Subgroup basic:
pass -> DMESG-WARN (fi-kbl-7560u)
Subgroup double-flink:
pass -> DMESG-WARN (fi-kbl-7560u)
Subgroup flink-lifetime:
pass -> DMESG-WARN (fi-kbl-7560u)
Test gem_linear_blits:
Subgroup basic:
pass -> INCOMPLETE (fi-kbl-7560u)
Test kms_frontbuffer_tracking:
Subgroup basic:
pass -> DMESG-WARN (fi-bdw-5557u) fdo#102473
Test pm_rpm:
Subgroup basic-rte:
pass -> DMESG-WARN (fi-skl-6770hq)
Test drv_module_reload:
Subgroup basic-no-display:
pass -> DMESG-WARN (fi-glk-1) fdo#102777 +1
fdo#102473 https://bugs.freedesktop.org/show_bug.cgi?id=102473
fdo#102777 https://bugs.freedesktop.org/show_bug.cgi?id=102777
fi-bdw-5557u total:289 pass:267 dwarn:1 dfail:0 fail:0 skip:21 time:443s
fi-bdw-gvtdvm total:289 pass:265 dwarn:0 dfail:0 fail:0 skip:24 time:468s
fi-blb-e6850 total:289 pass:224 dwarn:1 dfail:0 fail:0 skip:64 time:419s
fi-bsw-n3050 total:289 pass:243 dwarn:0 dfail:0 fail:0 skip:46 time:519s
fi-bwr-2160 total:289 pass:184 dwarn:0 dfail:0 fail:0 skip:105 time:276s
fi-bxt-j4205 total:289 pass:260 dwarn:0 dfail:0 fail:0 skip:29 time:501s
fi-byt-j1900 total:289 pass:254 dwarn:1 dfail:0 fail:0 skip:34 time:498s
fi-byt-n2820 total:289 pass:250 dwarn:1 dfail:0 fail:0 skip:38 time:491s
fi-cfl-s total:289 pass:223 dwarn:34 dfail:0 fail:0 skip:32 time:539s
fi-cnl-y total:289 pass:257 dwarn:0 dfail:0 fail:5 skip:27 time:648s
fi-elk-e7500 total:289 pass:230 dwarn:0 dfail:0 fail:0 skip:59 time:417s
fi-glk-1 total:289 pass:259 dwarn:1 dfail:0 fail:0 skip:29 time:567s
fi-hsw-4770 total:289 pass:263 dwarn:0 dfail:0 fail:0 skip:26 time:421s
fi-hsw-4770r total:289 pass:263 dwarn:0 dfail:0 fail:0 skip:26 time:404s
fi-ilk-650 total:289 pass:229 dwarn:0 dfail:0 fail:0 skip:60 time:432s
fi-ivb-3520m total:289 pass:261 dwarn:0 dfail:0 fail:0 skip:28 time:492s
fi-ivb-3770 total:289 pass:261 dwarn:0 dfail:0 fail:0 skip:28 time:461s
fi-kbl-7500u total:289 pass:263 dwarn:1 dfail:0 fail:1 skip:24 time:462s
fi-kbl-7560u total:125 pass:105 dwarn:5 dfail:2 fail:0 skip:12
fi-kbl-r total:289 pass:262 dwarn:0 dfail:0 fail:0 skip:27 time:585s
fi-pnv-d510 total:289 pass:223 dwarn:1 dfail:0 fail:0 skip:65 time:542s
fi-skl-6260u total:289 pass:269 dwarn:0 dfail:0 fail:0 skip:20 time:450s
fi-skl-6700k total:289 pass:265 dwarn:0 dfail:0 fail:0 skip:24 time:749s
fi-skl-6770hq total:289 pass:268 dwarn:1 dfail:0 fail:0 skip:20 time:486s
fi-skl-gvtdvm total:289 pass:266 dwarn:0 dfail:0 fail:0 skip:23 time:472s
fi-snb-2520m total:289 pass:251 dwarn:0 dfail:0 fail:0 skip:38 time:571s
fi-snb-2600 total:289 pass:250 dwarn:0 dfail:0 fail:0 skip:39 time:425s
70924899354bf209acae0e32b6836389d5ee0446 drm-tip: 2017y-09m-25d-12h-37m-13s UTC integration manifest
ea52ee2d12ec drm/i915/execlists: Cache the last priolist lookup
313ccea2cdc8 drm/i915/execlists: Move request unwinding to a separate function
8e43175c1014 drm/i915/execlists: Microoptimise execlists_cancel_port_request()
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_5804/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH v2] drm/i915/execlists: Cache the last priolist lookup
2017-09-25 12:49 ` [PATCH 3/3] drm/i915/execlists: Cache the last priolist lookup Chris Wilson
2017-09-25 12:54 ` Chris Wilson
@ 2017-09-25 13:59 ` Chris Wilson
1 sibling, 0 replies; 15+ messages in thread
From: Chris Wilson @ 2017-09-25 13:59 UTC (permalink / raw)
To: intel-gfx
From: Michał Winiarski <michal.winiarski@intel.com>
Avoid the repeated rbtree lookup for each request as we unwind them by
tracking the last priolist.
v2: Fix up my unhelpful suggestion of using default_priolist.
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/intel_lrc.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5021c565bfd9..1db5ab131c9e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -358,25 +358,31 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
static void unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *rq, *rn;
+ struct i915_priolist *uninitialized_var(p);
+ int last_prio = INT_MAX;
lockdep_assert_held(&engine->timeline->lock);
list_for_each_entry_safe_reverse(rq, rn,
&engine->timeline->requests,
link) {
- struct i915_priolist *p;
-
if (i915_gem_request_completed(rq))
return;
__i915_gem_request_unsubmit(rq);
unwind_wa_tail(rq);
- p = lookup_priolist(engine,
- &rq->priotree,
- rq->priotree.priority);
- list_add(&rq->priotree.link,
- &ptr_mask_bits(p, 1)->requests);
+ GEM_BUG_ON(rq->priotree.priority == INT_MAX);
+ if (rq->priotree.priority != last_prio) {
+ p = lookup_priolist(engine,
+ &rq->priotree,
+ rq->priotree.priority);
+ p = ptr_mask_bits(p, 1);
+
+ last_prio = rq->priotree.priority;
+ }
+
+ list_add(&rq->priotree.link, &p->requests);
}
}
--
2.14.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function
2017-09-25 12:49 ` [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function Chris Wilson
2017-09-25 12:55 ` Mika Kuoppala
@ 2017-09-25 14:54 ` Mika Kuoppala
1 sibling, 0 replies; 15+ messages in thread
From: Mika Kuoppala @ 2017-09-25 14:54 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
Chris Wilson <chris@chris-wilson.co.uk> writes:
> In the future, we will want to unwind requests following a preemption
> point. This requires the same steps as for unwinding upon a reset, so
> extract the existing code to a separate function for later use.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
> drivers/gpu/drm/i915/intel_lrc.c | 53 +++++++++++++++++++++++++---------------
> 1 file changed, 33 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 2c07f3c08bd3..c84831c7ea4a 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -348,6 +348,37 @@ lookup_priolist(struct intel_engine_cs *engine,
> return ptr_pack_bits(p, first, 1);
> }
>
> +static void unwind_wa_tail(struct drm_i915_gem_request *rq)
> +{
> + rq->tail = intel_ring_wrap(rq->ring,
> + rq->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
> + assert_ring_tail_valid(rq->ring, rq->tail);
> +}
> +
> +static void unwind_incomplete_requests(struct intel_engine_cs *engine)
> +{
> + struct drm_i915_gem_request *rq, *rn;
> +
> + lockdep_assert_held(&engine->timeline->lock);
> + list_for_each_entry_safe_reverse(rq, rn,
> + &engine->timeline->requests,
> + link) {
> + struct i915_priolist *p;
> +
> + if (i915_gem_request_completed(rq))
> + return;
> +
> + __i915_gem_request_unsubmit(rq);
> + unwind_wa_tail(rq);
> +
> + p = lookup_priolist(engine,
> + &rq->priotree,
> + rq->priotree.priority);
> + list_add(&rq->priotree.link,
> + &ptr_mask_bits(p, 1)->requests);
> + }
> +}
> +
> static inline void
> execlists_context_status_change(struct drm_i915_gem_request *rq,
> unsigned long status)
> @@ -1378,7 +1409,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
> struct drm_i915_gem_request *request)
> {
> struct intel_engine_execlists * const execlists = &engine->execlists;
> - struct drm_i915_gem_request *rq, *rn;
> struct intel_context *ce;
> unsigned long flags;
>
> @@ -1396,21 +1426,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
> execlist_cancel_port_requests(execlists);
>
> /* Push back any incomplete requests for replay after the reset. */
> - list_for_each_entry_safe_reverse(rq, rn,
> - &engine->timeline->requests, link) {
> - struct i915_priolist *p;
> -
> - if (i915_gem_request_completed(rq))
> - break;
> -
> - __i915_gem_request_unsubmit(rq);
> -
> - p = lookup_priolist(engine,
> - &rq->priotree,
> - rq->priotree.priority);
> - list_add(&rq->priotree.link,
> - &ptr_mask_bits(p, 1)->requests);
> - }
> + unwind_incomplete_requests(engine);
>
> spin_unlock_irqrestore(&engine->timeline->lock, flags);
>
> @@ -1447,10 +1463,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
> intel_ring_update_space(request->ring);
>
> /* Reset WaIdleLiteRestore:bdw,skl as well */
> - request->tail =
> - intel_ring_wrap(request->ring,
> - request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
> - assert_ring_tail_valid(request->ring, request->tail);
> + unwind_wa_tail(request);
> }
>
> static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
> --
> 2.14.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* ✓ Fi.CI.BAT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2)
2017-09-25 12:49 [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Chris Wilson
` (3 preceding siblings ...)
2017-09-25 13:36 ` ✗ Fi.CI.BAT: failure for series starting with [1/3] " Patchwork
@ 2017-09-25 15:04 ` Patchwork
2017-09-25 18:01 ` ✓ Fi.CI.IGT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Patchwork
2017-09-25 20:34 ` ✓ Fi.CI.IGT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2) Patchwork
6 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2017-09-25 15:04 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2)
URL : https://patchwork.freedesktop.org/series/30838/
State : success
== Summary ==
Series 30838v2 series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
https://patchwork.freedesktop.org/api/1.0/series/30838/revisions/2/mbox/
Test chamelium:
Subgroup dp-crc-fast:
pass -> FAIL (fi-kbl-7500u) fdo#102514
Test kms_pipe_crc_basic:
Subgroup suspend-read-crc-pipe-b:
dmesg-warn -> PASS (fi-skl-6770hq)
Test drv_module_reload:
Subgroup basic-reload:
pass -> DMESG-WARN (fi-glk-1) fdo#102777
fdo#102514 https://bugs.freedesktop.org/show_bug.cgi?id=102514
fdo#102777 https://bugs.freedesktop.org/show_bug.cgi?id=102777
fi-bdw-5557u total:289 pass:268 dwarn:0 dfail:0 fail:0 skip:21 time:440s
fi-bdw-gvtdvm total:289 pass:265 dwarn:0 dfail:0 fail:0 skip:24 time:467s
fi-blb-e6850 total:289 pass:224 dwarn:1 dfail:0 fail:0 skip:64 time:421s
fi-bsw-n3050 total:289 pass:243 dwarn:0 dfail:0 fail:0 skip:46 time:518s
fi-bwr-2160 total:289 pass:184 dwarn:0 dfail:0 fail:0 skip:105 time:276s
fi-bxt-j4205 total:289 pass:260 dwarn:0 dfail:0 fail:0 skip:29 time:511s
fi-byt-j1900 total:289 pass:254 dwarn:1 dfail:0 fail:0 skip:34 time:490s
fi-byt-n2820 total:289 pass:250 dwarn:1 dfail:0 fail:0 skip:38 time:492s
fi-cfl-s total:289 pass:222 dwarn:35 dfail:0 fail:0 skip:32 time:545s
fi-cnl-y total:289 pass:256 dwarn:0 dfail:0 fail:6 skip:27 time:648s
fi-elk-e7500 total:289 pass:230 dwarn:0 dfail:0 fail:0 skip:59 time:416s
fi-glk-1 total:289 pass:259 dwarn:1 dfail:0 fail:0 skip:29 time:570s
fi-hsw-4770 total:289 pass:263 dwarn:0 dfail:0 fail:0 skip:26 time:423s
fi-hsw-4770r total:289 pass:263 dwarn:0 dfail:0 fail:0 skip:26 time:409s
fi-ilk-650 total:289 pass:229 dwarn:0 dfail:0 fail:0 skip:60 time:435s
fi-ivb-3520m total:289 pass:261 dwarn:0 dfail:0 fail:0 skip:28 time:493s
fi-ivb-3770 total:289 pass:261 dwarn:0 dfail:0 fail:0 skip:28 time:461s
fi-kbl-7500u total:289 pass:263 dwarn:1 dfail:0 fail:1 skip:24 time:465s
fi-kbl-7560u total:289 pass:270 dwarn:0 dfail:0 fail:0 skip:19 time:577s
fi-kbl-r total:289 pass:262 dwarn:0 dfail:0 fail:0 skip:27 time:588s
fi-skl-6260u total:289 pass:269 dwarn:0 dfail:0 fail:0 skip:20 time:454s
fi-skl-6770hq total:289 pass:269 dwarn:0 dfail:0 fail:0 skip:20 time:485s
fi-skl-gvtdvm total:289 pass:266 dwarn:0 dfail:0 fail:0 skip:23 time:476s
fi-snb-2520m total:289 pass:251 dwarn:0 dfail:0 fail:0 skip:38 time:562s
fi-snb-2600 total:289 pass:250 dwarn:0 dfail:0 fail:0 skip:39 time:415s
fi-pnv-d510 failed to connect after reboot
0b65077382608db179cb5afafecf4a0edd35c2fe drm-tip: 2017y-09m-25d-13h-55m-18s UTC integration manifest
c50c9fea5e0b drm/i915/execlists: Move request unwinding to a separate function
e20abe308cef drm/i915/execlists: Microoptimise execlists_cancel_port_request()
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_5806/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* ✓ Fi.CI.IGT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
2017-09-25 12:49 [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Chris Wilson
` (4 preceding siblings ...)
2017-09-25 15:04 ` ✓ Fi.CI.BAT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2) Patchwork
@ 2017-09-25 18:01 ` Patchwork
2017-09-25 20:34 ` ✓ Fi.CI.IGT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2) Patchwork
6 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2017-09-25 18:01 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
URL : https://patchwork.freedesktop.org/series/30838/
State : success
== Summary ==
Test perf:
Subgroup polling:
fail -> PASS (shard-hsw) fdo#102252
Test kms_setmode:
Subgroup basic:
fail -> PASS (shard-hsw) fdo#99912
fdo#102252 https://bugs.freedesktop.org/show_bug.cgi?id=102252
fdo#99912 https://bugs.freedesktop.org/show_bug.cgi?id=99912
shard-hsw total:2429 pass:1325 dwarn:5 dfail:0 fail:16 skip:1083 time:9871s
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_5804/shards.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request()
2017-09-25 13:00 ` [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Mika Kuoppala
2017-09-25 13:08 ` Chris Wilson
@ 2017-09-25 19:38 ` Chris Wilson
1 sibling, 0 replies; 15+ messages in thread
From: Chris Wilson @ 2017-09-25 19:38 UTC (permalink / raw)
To: Mika Kuoppala, intel-gfx
Quoting Mika Kuoppala (2017-09-25 14:00:17)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
>
> > Just rearrange the code slightly to trim the number of iterations
> > required.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> > drivers/gpu/drm/i915/intel_lrc.c | 17 ++++++++++-------
> > 1 file changed, 10 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > index 3623403a4f2d..2c07f3c08bd3 100644
> > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > @@ -571,14 +571,17 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> > execlists_submit_ports(engine);
> > }
> >
> > -static void execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
> > +static void
> > +execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
> > {
> > - unsigned int i;
> > -
> > - for (i = 0; i < ARRAY_SIZE(execlists->port); i++)
> > - i915_gem_request_put(port_request(&execlists->port[i]));
> > + struct execlist_port *port = execlists->port;
> > + unsigned int num_ports = ARRAY_SIZE(execlists->port);
> >
> > - memset(execlists->port, 0, sizeof(execlists->port));
> > + while (num_ports-- && port_isset(port)) {
> > + i915_gem_request_put(port_request(port));
> > + memset(port, 0, sizeof(*port));
> > + port++;
> > + }
>
> I thought this to be more about the reset and thus memsetting
> everything would give us a clean plate. However now it matches
> more of what the function is about.
>
> Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Pushed this one just to keep whittling down the number of patches.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
* ✓ Fi.CI.IGT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2)
2017-09-25 12:49 [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Chris Wilson
` (5 preceding siblings ...)
2017-09-25 18:01 ` ✓ Fi.CI.IGT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Patchwork
@ 2017-09-25 20:34 ` Patchwork
6 siblings, 0 replies; 15+ messages in thread
From: Patchwork @ 2017-09-25 20:34 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2)
URL : https://patchwork.freedesktop.org/series/30838/
State : success
== Summary ==
Test perf:
Subgroup polling:
pass -> FAIL (shard-hsw) fdo#102252
fdo#102252 https://bugs.freedesktop.org/show_bug.cgi?id=102252
shard-hsw total:2429 pass:1324 dwarn:5 dfail:0 fail:17 skip:1083 time:9851s
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_5806/shards.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 15+ messages in thread
end of thread, other threads:[~2017-09-25 20:34 UTC | newest]
Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-25 12:49 [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Chris Wilson
2017-09-25 12:49 ` [PATCH 2/3] drm/i915/execlists: Move request unwinding to a separate function Chris Wilson
2017-09-25 12:55 ` Mika Kuoppala
2017-09-25 13:18 ` Chris Wilson
2017-09-25 14:54 ` Mika Kuoppala
2017-09-25 12:49 ` [PATCH 3/3] drm/i915/execlists: Cache the last priolist lookup Chris Wilson
2017-09-25 12:54 ` Chris Wilson
2017-09-25 13:59 ` [PATCH v2] " Chris Wilson
2017-09-25 13:00 ` [PATCH 1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Mika Kuoppala
2017-09-25 13:08 ` Chris Wilson
2017-09-25 19:38 ` Chris Wilson
2017-09-25 13:36 ` ✗ Fi.CI.BAT: failure for series starting with [1/3] " Patchwork
2017-09-25 15:04 ` ✓ Fi.CI.BAT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2) Patchwork
2017-09-25 18:01 ` ✓ Fi.CI.IGT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() Patchwork
2017-09-25 20:34 ` ✓ Fi.CI.IGT: success for series starting with [1/3] drm/i915/execlists: Microoptimise execlists_cancel_port_request() (rev2) Patchwork
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.