* [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors
@ 2017-10-19 14:39 Mika Kuoppala
2017-10-19 14:39 ` [PATCH 2/2] drm/i915: Move execlists port head instead of memmoving array Mika Kuoppala
` (4 more replies)
0 siblings, 5 replies; 11+ messages in thread
From: Mika Kuoppala @ 2017-10-19 14:39 UTC (permalink / raw)
To: intel-gfx
From: Mika Kuoppala <mika.kuoppala@intel.com>
Instead of trusting that first available port is at index 0,
use an accessor to hide this. This is preparation for
following patches where the head can be at an arbitrary location
in the port array.
v2: improved commit message, elsp_ready readability (Chris)
v3: s/execlist_port_index/execlist_port (Chris)
v4: rebase to new naming
v5: fix port_next indexing
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
---
drivers/gpu/drm/i915/i915_gpu_error.c | 6 ++-
drivers/gpu/drm/i915/i915_guc_submission.c | 53 +++++++++++++++----------
drivers/gpu/drm/i915/intel_engine_cs.c | 2 +-
drivers/gpu/drm/i915/intel_lrc.c | 63 ++++++++++++++++++------------
drivers/gpu/drm/i915/intel_ringbuffer.h | 38 ++++++++++++++++++
5 files changed, 114 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 653fb69e7ecb..6d0bdb03b3f0 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1333,11 +1333,13 @@ static void engine_record_requests(struct intel_engine_cs *engine,
static void error_record_engine_execlists(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
- const struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
+ struct drm_i915_gem_request *rq;
+
+ rq = port_request(execlists_port(execlists, n));
if (!rq)
break;
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index a2e8114b739d..5222004db039 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -496,17 +496,19 @@ static void i915_guc_submit(struct intel_engine_cs *engine)
struct intel_guc *guc = &dev_priv->guc;
struct i915_guc_client *client = guc->execbuf_client;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
const unsigned int engine_id = engine->id;
unsigned int n;
for (n = 0; n < execlists_num_ports(execlists); n++) {
+ struct execlist_port *port;
struct drm_i915_gem_request *rq;
unsigned int count;
- rq = port_unpack(&port[n], &count);
+ port = execlists_port(execlists, n);
+ rq = port_unpack(port, &count);
+
if (rq && count == 0) {
- port_set(&port[n], port_pack(rq, ++count));
+ port_set(port, port_pack(rq, ++count));
if (i915_vma_is_map_and_fenceable(rq->ring->vma))
POSTING_READ_FW(GUC_STATUS);
@@ -561,15 +563,20 @@ static void port_assign(struct execlist_port *port,
static void i915_guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
+ struct execlist_port *port;
struct drm_i915_gem_request *last = NULL;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
bool submit = false;
struct rb_node *rb;
- if (port_isset(port))
- port++;
+ port = execlists_port_head(execlists);
+
+ /*
+ * We don't coalesce into last submitted port with guc.
+ * Find first free port, this is safe as we dont dequeue without
+ * atleast last port free.
+ */
+ while (port_isset(port))
+ port = execlists_port_next(execlists, port);
spin_lock_irq(&engine->timeline->lock);
rb = execlists->first;
@@ -580,7 +587,7 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
if (last && rq->ctx != last->ctx) {
- if (port == last_port) {
+ if (port == execlists_port_tail(execlists)) {
__list_del_many(&p->requests,
&rq->priotree.link);
goto done;
@@ -588,7 +595,8 @@ static void i915_guc_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
- port++;
+
+ port = execlists_port_next(execlists, port);
}
INIT_LIST_HEAD(&rq->priotree.link);
@@ -619,22 +627,27 @@ static void i915_guc_irq_handler(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
- struct drm_i915_gem_request *rq;
- rq = port_request(&port[0]);
- while (rq && i915_gem_request_completed(rq)) {
+ do {
+ struct execlist_port *port;
+ struct drm_i915_gem_request *rq;
+
+ port = execlists_port_head(execlists);
+ rq = port_request(port);
+
+ if (!rq)
+ break;
+
+ if (!i915_gem_request_completed(rq))
+ break;
+
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
execlists_port_complete(execlists, port);
+ } while (1);
- rq = port_request(&port[0]);
- }
-
- if (!port_isset(last_port))
+ if (!port_isset(execlists_port_tail(execlists)))
i915_guc_dequeue(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a47a9c6bea52..8ba62d4c010e 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1549,7 +1549,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return false;
/* Both ports drained, no more ELSP submission? */
- if (port_request(&engine->execlists.port[0]))
+ if (port_request(execlists_port_head(&engine->execlists)))
return false;
/* ELSP is empty, but there are ready requests? */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7f45dd7dc3e5..2945aadc4b7e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -437,24 +437,26 @@ static inline void elsp_write(u64 desc, u32 __iomem *elsp)
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
- struct execlist_port *port = engine->execlists.port;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
u32 __iomem *elsp =
engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
- for (n = execlists_num_ports(&engine->execlists); n--; ) {
+ for (n = execlists_num_ports(execlists); n--; ) {
+ struct execlist_port *port;
struct drm_i915_gem_request *rq;
unsigned int count;
u64 desc;
- rq = port_unpack(&port[n], &count);
+ port = execlists_port(execlists, n);
+ rq = port_unpack(port, &count);
if (rq) {
GEM_BUG_ON(count > !n);
if (!count++)
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- port_set(&port[n], port_pack(rq, count));
+ port_set(port, port_pack(rq, count));
desc = execlists_update_context(rq);
- GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
+ GEM_DEBUG_EXEC(port->context_id = upper_32_bits(desc));
} else {
GEM_BUG_ON(!n);
desc = 0;
@@ -523,10 +525,8 @@ static bool can_preempt(struct intel_engine_cs *engine)
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
- struct drm_i915_gem_request *last = port_request(port);
+ struct execlist_port *port;
+ struct drm_i915_gem_request *last;
struct rb_node *rb;
bool submit = false;
@@ -557,6 +557,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (!rb)
goto unlock;
+ port = execlists_port_head(execlists);
+ last = port_request(port);
+
if (last) {
/*
* Don't resubmit or switch until all outstanding
@@ -564,7 +567,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* know the next preemption status we see corresponds
* to this ELSP update.
*/
- if (port_count(&port[0]) > 1)
+ if (port_count(port) > 1)
goto unlock;
if (can_preempt(engine) &&
@@ -598,7 +601,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* the driver is unable to keep up the supply of new
* work).
*/
- if (port_count(&port[1]))
+ if (port_count(execlists_port_next(execlists, port)))
goto unlock;
/* WaIdleLiteRestore:bdw,skl
@@ -634,7 +637,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* combine this request with the last, then we
* are done.
*/
- if (port == last_port) {
+ if (port == execlists_port_tail(execlists)) {
__list_del_many(&p->requests,
&rq->priotree.link);
goto done;
@@ -658,7 +661,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (submit)
port_assign(port, last);
- port++;
+
+ port = execlists_port_next(execlists, port);
GEM_BUG_ON(port_isset(port));
}
@@ -688,19 +692,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
static void
-execlist_cancel_port_requests(struct intel_engine_execlists *execlists)
+execlists_cancel_port_requests(struct intel_engine_execlists *execlists)
{
- struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
- while (num_ports-- && port_isset(port)) {
- struct drm_i915_gem_request *rq = port_request(port);
+ while (num_ports--) {
+ struct execlist_port *port;
+ struct drm_i915_gem_request *rq;
+
+ port = execlists_port_head(execlists);
+ if (!port_isset(port))
+ break;
+
+ rq = port_request(port);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
i915_gem_request_put(rq);
- memset(port, 0, sizeof(*port));
- port++;
+ execlists_port_complete(execlists, port);
}
}
@@ -714,7 +723,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->timeline->lock, flags);
/* Cancel the requests on the HW and clear the ELSP tracker. */
- execlist_cancel_port_requests(execlists);
+ execlists_cancel_port_requests(execlists);
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline->requests, link) {
@@ -769,7 +778,6 @@ static void intel_lrc_irq_handler(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port * const port = execlists->port;
struct drm_i915_private *dev_priv = engine->i915;
/* We can skip acquiring intel_runtime_pm_get() here as it was taken
@@ -788,6 +796,8 @@ static void intel_lrc_irq_handler(unsigned long data)
* new request (outside of the context-switch interrupt).
*/
while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+ struct execlist_port *port;
+
/* The HWSP contains a (cacheable) mirror of the CSB */
const u32 *buf =
&engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
@@ -855,7 +865,7 @@ static void intel_lrc_irq_handler(unsigned long data)
if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
buf[2*head + 1] == PREEMPT_ID) {
- execlist_cancel_port_requests(execlists);
+ execlists_cancel_port_requests(execlists);
spin_lock_irq(&engine->timeline->lock);
unwind_incomplete_requests(engine);
@@ -870,6 +880,8 @@ static void intel_lrc_irq_handler(unsigned long data)
execlists->preempt)
continue;
+ port = execlists_port_head(execlists);
+
/* Check the context/desc id for this event matches */
GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
@@ -890,7 +902,7 @@ static void intel_lrc_irq_handler(unsigned long data)
}
/* After the final element, the hw should be idle */
- GEM_BUG_ON(port_count(port) == 0 &&
+ GEM_BUG_ON(port_count(execlists_port_head(execlists)) == 0 &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
}
@@ -921,6 +933,7 @@ static void insert_request(struct intel_engine_cs *engine,
static void execlists_submit_request(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
@@ -928,7 +941,7 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
insert_request(engine, &request->priotree, request->priotree.priority);
- GEM_BUG_ON(!engine->execlists.first);
+ GEM_BUG_ON(!execlists->first);
GEM_BUG_ON(list_empty(&request->priotree.link));
spin_unlock_irqrestore(&engine->timeline->lock, flags);
@@ -1520,7 +1533,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* guessing the missed context-switch events by looking at what
* requests were completed.
*/
- execlist_cancel_port_requests(execlists);
+ execlists_cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
unwind_incomplete_requests(engine);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 17186f067408..cfec73400d0f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -251,6 +251,11 @@ struct intel_engine_execlists {
unsigned int port_mask;
/**
+ * @port_head: first used execlist port
+ */
+ unsigned int port_head;
+
+ /**
* @queue: queue of requests, in priority lists
*/
struct rb_root queue;
@@ -531,6 +536,39 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
return execlists->port_mask + 1;
}
+#define __port_add(start, n, mask) (((start) + (n)) & (mask))
+#define port_head_add(e, n) __port_add((e)->port_head, n, (e)->port_mask)
+
+/* Index starting from port_head */
+static inline struct execlist_port *
+execlists_port(struct intel_engine_execlists * const execlists,
+ const unsigned int n)
+{
+ return &execlists->port[port_head_add(execlists, n)];
+}
+
+static inline struct execlist_port *
+execlists_port_head(struct intel_engine_execlists * const execlists)
+{
+ return execlists_port(execlists, 0);
+}
+
+static inline struct execlist_port *
+execlists_port_tail(struct intel_engine_execlists * const execlists)
+{
+ return execlists_port(execlists, -1);
+}
+
+static inline struct execlist_port *
+execlists_port_next(struct intel_engine_execlists * const execlists,
+ const struct execlist_port * const port)
+{
+ const unsigned int n = __port_add(port_index(port, execlists),
+ 1,
+ execlists->port_mask);
+ return &execlists->port[n];
+}
+
static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
struct execlist_port * const port)
--
2.11.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH 2/2] drm/i915: Move execlists port head instead of memmoving array
2017-10-19 14:39 [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Mika Kuoppala
@ 2017-10-19 14:39 ` Mika Kuoppala
2017-10-19 15:54 ` Chris Wilson
2017-10-19 14:48 ` [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Chris Wilson
` (3 subsequent siblings)
4 siblings, 1 reply; 11+ messages in thread
From: Mika Kuoppala @ 2017-10-19 14:39 UTC (permalink / raw)
To: intel-gfx
From: Mika Kuoppala <mika.kuoppala@intel.com>
As all our accesses to the execlist ports are through the head and tail
helpers, we can now move the head instead of memmoving the array.
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
---
drivers/gpu/drm/i915/intel_ringbuffer.h | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index cfec73400d0f..27f5c42ae7c8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -573,12 +573,13 @@ static inline void
execlists_port_complete(struct intel_engine_execlists * const execlists,
struct execlist_port * const port)
{
- const unsigned int m = execlists->port_mask;
+ GEM_BUG_ON(port_index(port, execlists) != execlists->port_head);
+ GEM_BUG_ON(!port_isset(port));
- GEM_BUG_ON(port_index(port, execlists) != 0);
+ port->request_count = NULL;
+ GEM_DEBUG_DECL(port->context_id = 0);
- memmove(port, port + 1, m * sizeof(struct execlist_port));
- memset(port + m, 0, sizeof(struct execlist_port));
+ execlists->port_head = port_head_add(execlists, 1);
}
static inline unsigned int
--
2.11.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH 2/2] drm/i915: Move execlists port head instead of memmoving array
2017-10-19 14:39 ` [PATCH 2/2] drm/i915: Move execlists port head instead of memmoving array Mika Kuoppala
@ 2017-10-19 15:54 ` Chris Wilson
0 siblings, 0 replies; 11+ messages in thread
From: Chris Wilson @ 2017-10-19 15:54 UTC (permalink / raw)
To: Mika Kuoppala, intel-gfx; +Cc: Mika
Quoting Mika Kuoppala (2017-10-19 15:39:42)
> From: Mika Kuoppala <mika.kuoppala@intel.com>
>
> As all our access to execlist ports are through head and tail
> helpers, we can now move the head instead of memmoving the array.
>
> Cc: Michał Winiarski <michal.winiarski@intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
> ---
> drivers/gpu/drm/i915/intel_ringbuffer.h | 9 +++++----
> 1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index cfec73400d0f..27f5c42ae7c8 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -573,12 +573,13 @@ static inline void
> execlists_port_complete(struct intel_engine_execlists * const execlists,
> struct execlist_port * const port)
> {
> - const unsigned int m = execlists->port_mask;
> + GEM_BUG_ON(port_index(port, execlists) != execlists->port_head);
> + GEM_BUG_ON(!port_isset(port));
>
> - GEM_BUG_ON(port_index(port, execlists) != 0);
> + port->request_count = NULL;
> + GEM_DEBUG_DECL(port->context_id = 0);
memset(port, 0, sizeof(*port));
In my ring, we no longer needed the memset, but the tradeoff is tracking
the last_port.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors
2017-10-19 14:39 [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Mika Kuoppala
2017-10-19 14:39 ` [PATCH 2/2] drm/i915: Move execlists port head instead of memmoving array Mika Kuoppala
@ 2017-10-19 14:48 ` Chris Wilson
2017-10-20 12:00 ` Mika Kuoppala
2017-10-19 14:50 ` Chris Wilson
` (2 subsequent siblings)
4 siblings, 1 reply; 11+ messages in thread
From: Chris Wilson @ 2017-10-19 14:48 UTC (permalink / raw)
To: Mika Kuoppala, intel-gfx; +Cc: Mika
Quoting Mika Kuoppala (2017-10-19 15:39:41)
> while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
> + struct execlist_port *port;
> +
> /* The HWSP contains a (cacheable) mirror of the CSB */
> const u32 *buf =
> &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
> @@ -855,7 +865,7 @@ static void intel_lrc_irq_handler(unsigned long data)
>
> if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
> buf[2*head + 1] == PREEMPT_ID) {
> - execlist_cancel_port_requests(execlists);
> + execlists_cancel_port_requests(execlists);
>
> spin_lock_irq(&engine->timeline->lock);
> unwind_incomplete_requests(engine);
> @@ -870,6 +880,8 @@ static void intel_lrc_irq_handler(unsigned long data)
> execlists->preempt)
> continue;
>
> + port = execlists_port_head(execlists);
> +
> /* Check the context/desc id for this event matches */
> GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
>
> @@ -890,7 +902,7 @@ static void intel_lrc_irq_handler(unsigned long data)
> }
>
> /* After the final element, the hw should be idle */
> - GEM_BUG_ON(port_count(port) == 0 &&
> + GEM_BUG_ON(port_count(execlists_port_head(execlists)) == 0 &&
> !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
> }
Can you try reworking this such that port is kept local without having
to go back to the struct on every loop? And then compare code
generation.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors
2017-10-19 14:48 ` [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Chris Wilson
@ 2017-10-20 12:00 ` Mika Kuoppala
0 siblings, 0 replies; 11+ messages in thread
From: Mika Kuoppala @ 2017-10-20 12:00 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
Chris Wilson <chris@chris-wilson.co.uk> writes:
> Quoting Mika Kuoppala (2017-10-19 15:39:41)
>> while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
>> + struct execlist_port *port;
>> +
>> /* The HWSP contains a (cacheable) mirror of the CSB */
>> const u32 *buf =
>> &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
>> @@ -855,7 +865,7 @@ static void intel_lrc_irq_handler(unsigned long data)
>>
>> if (status & GEN8_CTX_STATUS_ACTIVE_IDLE &&
>> buf[2*head + 1] == PREEMPT_ID) {
>> - execlist_cancel_port_requests(execlists);
>> + execlists_cancel_port_requests(execlists);
>>
>> spin_lock_irq(&engine->timeline->lock);
>> unwind_incomplete_requests(engine);
>> @@ -870,6 +880,8 @@ static void intel_lrc_irq_handler(unsigned long data)
>> execlists->preempt)
>> continue;
>>
>> + port = execlists_port_head(execlists);
>> +
>> /* Check the context/desc id for this event matches */
>> GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
>>
>> @@ -890,7 +902,7 @@ static void intel_lrc_irq_handler(unsigned long data)
>> }
>>
>> /* After the final element, the hw should be idle */
>> - GEM_BUG_ON(port_count(port) == 0 &&
>> + GEM_BUG_ON(port_count(execlists_port_head(execlists)) == 0 &&
>> !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
>> }
>
> Can you try reworking this such that port is kept local without having
> to go back to the struct on every loop? And then compare code
> generation.
The new one is with last_port introduced and set outside the loop.
add/remove: 0/0 grow/shrink: 1/1 up/down: 10/-7 (3)
function old new delta
intel_lrc_irq_handler 1847 1857 +10
i915_guc_irq_handler 1327 1320 -7
Without looking at the assembler, I would go with last_port
as a net win.
-Mika
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors
2017-10-19 14:39 [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Mika Kuoppala
2017-10-19 14:39 ` [PATCH 2/2] drm/i915: Move execlists port head instead of memmoving array Mika Kuoppala
2017-10-19 14:48 ` [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Chris Wilson
@ 2017-10-19 14:50 ` Chris Wilson
2017-10-19 14:59 ` ✗ Fi.CI.BAT: warning for series starting with [1/2] " Patchwork
2017-10-20 10:34 ` [PATCH 1/2] " Joonas Lahtinen
4 siblings, 0 replies; 11+ messages in thread
From: Chris Wilson @ 2017-10-19 14:50 UTC (permalink / raw)
To: Mika Kuoppala, intel-gfx; +Cc: Mika
Quoting Mika Kuoppala (2017-10-19 15:39:41)
> +#define __port_add(start, n, mask) (((start) + (n)) & (mask))
> +#define port_head_add(e, n) __port_add((e)->port_head, n, (e)->port_mask)
> +
> +/* Index starting from port_head */
> +static inline struct execlist_port *
> +execlists_port(struct intel_engine_execlists * const execlists,
> + const unsigned int n)
> +{
> + return &execlists->port[port_head_add(execlists, n)];
nth_port cf nth_page?
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 11+ messages in thread
* ✗ Fi.CI.BAT: warning for series starting with [1/2] drm/i915: Introduce execlist_port_* accessors
2017-10-19 14:39 [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Mika Kuoppala
` (2 preceding siblings ...)
2017-10-19 14:50 ` Chris Wilson
@ 2017-10-19 14:59 ` Patchwork
2017-10-20 10:34 ` [PATCH 1/2] " Joonas Lahtinen
4 siblings, 0 replies; 11+ messages in thread
From: Patchwork @ 2017-10-19 14:59 UTC (permalink / raw)
To: Mika Kuoppala; +Cc: intel-gfx
== Series Details ==
Series: series starting with [1/2] drm/i915: Introduce execlist_port_* accessors
URL : https://patchwork.freedesktop.org/series/32300/
State : warning
== Summary ==
Series 32300v1 series starting with [1/2] drm/i915: Introduce execlist_port_* accessors
https://patchwork.freedesktop.org/api/1.0/series/32300/revisions/1/mbox/
Test drv_module_reload:
Subgroup basic-reload-inject:
pass -> DMESG-WARN (fi-skl-6770hq)
fi-bdw-5557u total:289 pass:268 dwarn:0 dfail:0 fail:0 skip:21 time:442s
fi-blb-e6850 total:289 pass:223 dwarn:1 dfail:0 fail:0 skip:65 time:369s
fi-bsw-n3050 total:289 pass:243 dwarn:0 dfail:0 fail:0 skip:46 time:514s
fi-bwr-2160 total:289 pass:183 dwarn:0 dfail:0 fail:0 skip:106 time:262s
fi-bxt-dsi total:289 pass:259 dwarn:0 dfail:0 fail:0 skip:30 time:494s
fi-bxt-j4205 total:289 pass:260 dwarn:0 dfail:0 fail:0 skip:29 time:501s
fi-byt-j1900 total:289 pass:253 dwarn:1 dfail:0 fail:0 skip:35 time:488s
fi-byt-n2820 total:289 pass:249 dwarn:1 dfail:0 fail:0 skip:39 time:479s
fi-cfl-s total:289 pass:253 dwarn:4 dfail:0 fail:0 skip:32 time:554s
fi-elk-e7500 total:289 pass:229 dwarn:0 dfail:0 fail:0 skip:60 time:421s
fi-gdg-551 total:289 pass:178 dwarn:1 dfail:0 fail:1 skip:109 time:248s
fi-glk-1 total:289 pass:261 dwarn:0 dfail:0 fail:0 skip:28 time:579s
fi-hsw-4770 total:289 pass:262 dwarn:0 dfail:0 fail:0 skip:27 time:453s
fi-hsw-4770r total:289 pass:262 dwarn:0 dfail:0 fail:0 skip:27 time:427s
fi-ilk-650 total:289 pass:228 dwarn:0 dfail:0 fail:0 skip:61 time:433s
fi-ivb-3520m total:289 pass:260 dwarn:0 dfail:0 fail:0 skip:29 time:491s
fi-ivb-3770 total:289 pass:260 dwarn:0 dfail:0 fail:0 skip:29 time:462s
fi-kbl-7500u total:289 pass:264 dwarn:1 dfail:0 fail:0 skip:24 time:482s
fi-kbl-7560u total:289 pass:270 dwarn:0 dfail:0 fail:0 skip:19 time:575s
fi-kbl-7567u total:289 pass:269 dwarn:0 dfail:0 fail:0 skip:20 time:474s
fi-kbl-r total:289 pass:262 dwarn:0 dfail:0 fail:0 skip:27 time:588s
fi-pnv-d510 total:289 pass:222 dwarn:1 dfail:0 fail:0 skip:66 time:546s
fi-skl-6260u total:289 pass:269 dwarn:0 dfail:0 fail:0 skip:20 time:448s
fi-skl-6700hq total:289 pass:263 dwarn:0 dfail:0 fail:0 skip:26 time:645s
fi-skl-6700k total:289 pass:265 dwarn:0 dfail:0 fail:0 skip:24 time:516s
fi-skl-6770hq total:289 pass:268 dwarn:1 dfail:0 fail:0 skip:20 time:506s
fi-skl-gvtdvm total:289 pass:266 dwarn:0 dfail:0 fail:0 skip:23 time:460s
fi-snb-2520m total:289 pass:250 dwarn:0 dfail:0 fail:0 skip:39 time:556s
fi-snb-2600 total:289 pass:249 dwarn:0 dfail:0 fail:0 skip:40 time:420s
fi-bdw-gvtdvm failed to connect after reboot
93f001963c9915c52b5ad84500075d231e008ced drm-tip: 2017y-10m-19d-10h-52m-17s UTC integration manifest
7fb186a28a20 drm/i915: Move execlists port head instead of memmoving array
6b361fdad897 drm/i915: Introduce execlist_port_* accessors
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_6104/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors
2017-10-19 14:39 [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Mika Kuoppala
` (3 preceding siblings ...)
2017-10-19 14:59 ` ✗ Fi.CI.BAT: warning for series starting with [1/2] " Patchwork
@ 2017-10-20 10:34 ` Joonas Lahtinen
2017-10-20 11:12 ` Mika Kuoppala
4 siblings, 1 reply; 11+ messages in thread
From: Joonas Lahtinen @ 2017-10-20 10:34 UTC (permalink / raw)
To: Mika Kuoppala, intel-gfx
On Thu, 2017-10-19 at 17:39 +0300, Mika Kuoppala wrote:
> From: Mika Kuoppala <mika.kuoppala@intel.com>
>
> Instead of trusting that first available port is at index 0,
> use accessor to hide this. This is a preparation for a
> following patches where head can be at arbitrary location
> in the port array.
>
> v2: improved commit message, elsp_ready readability (Chris)
> v3: s/execlist_port_index/execlist_port (Chris)
> v4: rebase to new naming
> v5: fix port_next indexing
>
> Cc: Michał Winiarski <michal.winiarski@intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
<SNIP>
> @@ -561,15 +563,20 @@ static void port_assign(struct execlist_port *port,
> static void i915_guc_dequeue(struct intel_engine_cs *engine)
> {
> struct intel_engine_execlists * const execlists = &engine->execlists;
> - struct execlist_port *port = execlists->port;
> + struct execlist_port *port;
> struct drm_i915_gem_request *last = NULL;
> - const struct execlist_port * const last_port =
> - &execlists->port[execlists->port_mask];
> bool submit = false;
> struct rb_node *rb;
>
> - if (port_isset(port))
> - port++;
> + port = execlists_port_head(execlists);
> +
> + /*
> + * We don't coalesce into last submitted port with guc.
> + * Find first free port, this is safe as we dont dequeue without
> + * atleast last port free.
"at least" + "the"
<SNIP>
> @@ -557,6 +557,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> if (!rb)
> goto unlock;
>
> + port = execlists_port_head(execlists);
> + last = port_request(port);
> +
> if (last) {
> /*
> * Don't resubmit or switch until all outstanding
> @@ -564,7 +567,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> * know the next preemption status we see corresponds
> * to this ELSP update.
> */
> - if (port_count(&port[0]) > 1)
> + if (port_count(port) > 1)
> goto unlock;
>
> if (can_preempt(engine) &&
> @@ -598,7 +601,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> * the driver is unable to keep up the supply of new
> * work).
> */
> - if (port_count(&port[1]))
> + if (port_count(execlists_port_next(execlists, port)))
> goto unlock;
>
> /* WaIdleLiteRestore:bdw,skl
> @@ -634,7 +637,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> * combine this request with the last, then we
> * are done.
> */
> - if (port == last_port) {
> + if (port == execlists_port_tail(execlists)) {
> __list_del_many(&p->requests,
> &rq->priotree.link);
Nothing to fix related to this patch, but I was sure the next hunk was
going to escape my screen :) Maybe we need to cut the indents a bit.
> @@ -890,7 +902,7 @@ static void intel_lrc_irq_handler(unsigned long data)
> }
>
> /* After the final element, the hw should be idle */
> - GEM_BUG_ON(port_count(port) == 0 &&
> + GEM_BUG_ON(port_count(execlists_port_head(execlists)) == 0 &&
> !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
Why did you stop trusting port variable here?
Other than that, looks good to me.
Regards, Joonas
--
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors
2017-10-20 10:34 ` [PATCH 1/2] " Joonas Lahtinen
@ 2017-10-20 11:12 ` Mika Kuoppala
2017-10-20 11:26 ` Chris Wilson
2017-10-20 12:53 ` Mika Kuoppala
0 siblings, 2 replies; 11+ messages in thread
From: Mika Kuoppala @ 2017-10-20 11:12 UTC (permalink / raw)
To: Joonas Lahtinen, intel-gfx
Joonas Lahtinen <joonas.lahtinen@linux.intel.com> writes:
> On Thu, 2017-10-19 at 17:39 +0300, Mika Kuoppala wrote:
>> From: Mika Kuoppala <mika.kuoppala@intel.com>
>>
>> Instead of trusting that first available port is at index 0,
>> use accessor to hide this. This is a preparation for a
>> following patches where head can be at arbitrary location
>> in the port array.
>>
>> v2: improved commit message, elsp_ready readability (Chris)
>> v3: s/execlist_port_index/execlist_port (Chris)
>> v4: rebase to new naming
>> v5: fix port_next indexing
>>
>> Cc: Michał Winiarski <michal.winiarski@intel.com>
>> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
>> Cc: Chris Wilson <chris@chris-wilson.co.uk>
>> Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
>
> <SNIP>
>
>> @@ -561,15 +563,20 @@ static void port_assign(struct execlist_port *port,
>> static void i915_guc_dequeue(struct intel_engine_cs *engine)
>> {
>> struct intel_engine_execlists * const execlists = &engine->execlists;
>> - struct execlist_port *port = execlists->port;
>> + struct execlist_port *port;
>> struct drm_i915_gem_request *last = NULL;
>> - const struct execlist_port * const last_port =
>> - &execlists->port[execlists->port_mask];
>> bool submit = false;
>> struct rb_node *rb;
>>
>> - if (port_isset(port))
>> - port++;
>> + port = execlists_port_head(execlists);
>> +
>> + /*
>> + * We don't coalesce into last submitted port with guc.
>> + * Find first free port, this is safe as we dont dequeue without
>> + * atleast last port free.
>
> "at least" + "the"
>
> <SNIP>
>
>> @@ -557,6 +557,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>> if (!rb)
>> goto unlock;
>>
>> + port = execlists_port_head(execlists);
>> + last = port_request(port);
>> +
>> if (last) {
>> /*
>> * Don't resubmit or switch until all outstanding
>> @@ -564,7 +567,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>> * know the next preemption status we see corresponds
>> * to this ELSP update.
>> */
>> - if (port_count(&port[0]) > 1)
>> + if (port_count(port) > 1)
>> goto unlock;
>>
>> if (can_preempt(engine) &&
>> @@ -598,7 +601,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>> * the driver is unable to keep up the supply of new
>> * work).
>> */
>> - if (port_count(&port[1]))
>> + if (port_count(execlists_port_next(execlists, port)))
>> goto unlock;
>>
>> /* WaIdleLiteRestore:bdw,skl
>> @@ -634,7 +637,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>> * combine this request with the last, then we
>> * are done.
>> */
>> - if (port == last_port) {
>> + if (port == execlists_port_tail(execlists)) {
>> __list_del_many(&p->requests,
>> &rq->priotree.link);
>
> Nothing to fix related to this patch, but I was sure next hunk was
> going to escape my screen :) Maybe we need to cut the indents a bit.
>
I have noticed the same. But I didn't feel like attacking this loop
until everything is in place and working.
>> @@ -890,7 +902,7 @@ static void intel_lrc_irq_handler(unsigned long data)
>> }
>>
>> /* After the final element, the hw should be idle */
>> - GEM_BUG_ON(port_count(port) == 0 &&
>> + GEM_BUG_ON(port_count(execlists_port_head(execlists)) == 0 &&
>> !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
>
> Why did you stop trusting port variable here?
>
We did assign it before the loop previously. Now we do it inside the loop.
Also I thought I did the reader a favour (and the bug triager,
as GEM_BUG_ON might soon show the condition in dmesg)
by noting that it is always the first port count we assert
for idleness.
-Mika
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors
2017-10-20 11:12 ` Mika Kuoppala
@ 2017-10-20 11:26 ` Chris Wilson
2017-10-20 12:53 ` Mika Kuoppala
1 sibling, 0 replies; 11+ messages in thread
From: Chris Wilson @ 2017-10-20 11:26 UTC (permalink / raw)
To: Mika Kuoppala, Joonas Lahtinen, intel-gfx
Quoting Mika Kuoppala (2017-10-20 12:12:02)
> Joonas Lahtinen <joonas.lahtinen@linux.intel.com> writes:
> >> @@ -890,7 +902,7 @@ static void intel_lrc_irq_handler(unsigned long data)
> >> }
> >>
> >> /* After the final element, the hw should be idle */
> >> - GEM_BUG_ON(port_count(port) == 0 &&
> >> + GEM_BUG_ON(port_count(execlists_port_head(execlists)) == 0 &&
> >> !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
> >
> > Why did you stop trusting port variable here?
> >
>
> We did assing it pre loop before. Now we do it inside the loop. Also
> I thought I made a favour for reader (and for the bug triager
> as GEM_BUG_ON might soon show condition in dmesg)
> to note that it is always the first port count we assert
> for idleness.
I would favour assert(port == el_port_head(el)); where appropriate.
The intent is that we comment upon the rationale of the GEM_BUG_ON() so
that we have an aide-memoire as to where to begin the hunt. For
triaging, we just need to be able to recognise the same BUG_ON as you
only need to dupe to the first one to catch up with the bug analysis.
We should err on making GEM_BUG_ON() fit the code better than a
potential bug report. We should be reading the code far more often...
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors
2017-10-20 11:12 ` Mika Kuoppala
2017-10-20 11:26 ` Chris Wilson
@ 2017-10-20 12:53 ` Mika Kuoppala
1 sibling, 0 replies; 11+ messages in thread
From: Mika Kuoppala @ 2017-10-20 12:53 UTC (permalink / raw)
To: Joonas Lahtinen, intel-gfx
Mika Kuoppala <mika.kuoppala@linux.intel.com> writes:
> Joonas Lahtinen <joonas.lahtinen@linux.intel.com> writes:
>
>> On Thu, 2017-10-19 at 17:39 +0300, Mika Kuoppala wrote:
>>> From: Mika Kuoppala <mika.kuoppala@intel.com>
>>>
>>> Instead of trusting that first available port is at index 0,
>>> use accessor to hide this. This is a preparation for a
>>> following patches where head can be at arbitrary location
>>> in the port array.
>>>
>>> v2: improved commit message, elsp_ready readability (Chris)
>>> v3: s/execlist_port_index/execlist_port (Chris)
>>> v4: rebase to new naming
>>> v5: fix port_next indexing
>>>
>>> Cc: Michał Winiarski <michal.winiarski@intel.com>
>>> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
>>> Cc: Chris Wilson <chris@chris-wilson.co.uk>
>>> Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
>>
>> <SNIP>
>>
>>> @@ -561,15 +563,20 @@ static void port_assign(struct execlist_port *port,
>>> static void i915_guc_dequeue(struct intel_engine_cs *engine)
>>> {
>>> struct intel_engine_execlists * const execlists = &engine->execlists;
>>> - struct execlist_port *port = execlists->port;
>>> + struct execlist_port *port;
>>> struct drm_i915_gem_request *last = NULL;
>>> - const struct execlist_port * const last_port =
>>> - &execlists->port[execlists->port_mask];
>>> bool submit = false;
>>> struct rb_node *rb;
>>>
>>> - if (port_isset(port))
>>> - port++;
>>> + port = execlists_port_head(execlists);
>>> +
>>> + /*
>>> + * We don't coalesce into last submitted port with guc.
>>> + * Find first free port, this is safe as we dont dequeue without
>>> + * atleast last port free.
>>
>> "at least" + "the"
>>
>> <SNIP>
>>
>>> @@ -557,6 +557,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>>> if (!rb)
>>> goto unlock;
>>>
>>> + port = execlists_port_head(execlists);
>>> + last = port_request(port);
>>> +
>>> if (last) {
>>> /*
>>> * Don't resubmit or switch until all outstanding
>>> @@ -564,7 +567,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>>> * know the next preemption status we see corresponds
>>> * to this ELSP update.
>>> */
>>> - if (port_count(&port[0]) > 1)
>>> + if (port_count(port) > 1)
>>> goto unlock;
>>>
>>> if (can_preempt(engine) &&
>>> @@ -598,7 +601,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>>> * the driver is unable to keep up the supply of new
>>> * work).
>>> */
>>> - if (port_count(&port[1]))
>>> + if (port_count(execlists_port_next(execlists, port)))
>>> goto unlock;
>>>
>>> /* WaIdleLiteRestore:bdw,skl
>>> @@ -634,7 +637,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>>> * combine this request with the last, then we
>>> * are done.
>>> */
>>> - if (port == last_port) {
>>> + if (port == execlists_port_tail(execlists)) {
>>> __list_del_many(&p->requests,
>>> &rq->priotree.link);
>>
>> Nothing to fix related to this patch, but I was sure next hunk was
>> going to escape my screen :) Maybe we need to cut the indents a bit.
>>
>
> I have noticed the same. But I didn't feel like attacking this loop
> until everything is in place and working.
>
>>> @@ -890,7 +902,7 @@ static void intel_lrc_irq_handler(unsigned long data)
>>> }
>>>
>>> /* After the final element, the hw should be idle */
>>> - GEM_BUG_ON(port_count(port) == 0 &&
>>> + GEM_BUG_ON(port_count(execlists_port_head(execlists)) == 0 &&
>>> !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
>>
>> Why did you stop trusting port variable here?
>>
>
> We did assign it before the loop previously. Now we do it inside the loop.
> Also I thought I did the reader a favour (and the bug triager,
> as GEM_BUG_ON might soon show the condition in dmesg)
> by noting that it is always the first port count we assert
> for idleness.
My apologies. Now, rereading this, it is indeed the last port
count we need to check for hw idleness.
-Mika
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 11+ messages in thread
end of thread, other threads:[~2017-10-20 12:54 UTC | newest]
Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-10-19 14:39 [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Mika Kuoppala
2017-10-19 14:39 ` [PATCH 2/2] drm/i915: Move execlists port head instead of memmoving array Mika Kuoppala
2017-10-19 15:54 ` Chris Wilson
2017-10-19 14:48 ` [PATCH 1/2] drm/i915: Introduce execlist_port_* accessors Chris Wilson
2017-10-20 12:00 ` Mika Kuoppala
2017-10-19 14:50 ` Chris Wilson
2017-10-19 14:59 ` ✗ Fi.CI.BAT: warning for series starting with [1/2] " Patchwork
2017-10-20 10:34 ` [PATCH 1/2] " Joonas Lahtinen
2017-10-20 11:12 ` Mika Kuoppala
2017-10-20 11:26 ` Chris Wilson
2017-10-20 12:53 ` Mika Kuoppala
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.