* [PATCH] drm/i915/guc: Decouple GuC engine id from ring id
@ 2016-01-22 23:06 yu.dai
2016-01-23 8:14 ` ✓ Fi.CI.BAT: success for " Patchwork
` (4 more replies)
0 siblings, 5 replies; 9+ messages in thread
From: yu.dai @ 2016-01-22 23:06 UTC (permalink / raw)
To: intel-gfx; +Cc: daniel.vetter
From: Alex Dai <yu.dai@intel.com>
Previously GuC uses ring id as engine id because of same definition.
But this is not true since this commit:
commit de1add360522c876c25ef2bbbbab1c94bdb509ab
Author: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Fri Jan 15 15:12:50 2016 +0000
drm/i915: Decouple execbuf uAPI from internal implementation
Added GuC engine id into GuC interface to decouple it from ring id used
by driver.
Signed-off-by: Alex Dai <yu.dai@intel.com>
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index c5db235..9a4e01e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2446,7 +2446,6 @@ static void i915_guc_client_info(struct seq_file *m,
struct drm_i915_private *dev_priv,
struct i915_guc_client *client)
{
- struct intel_engine_cs *ring;
uint64_t tot = 0;
uint32_t i;
@@ -2461,10 +2460,9 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
seq_printf(m, "\tLast submission result: %d\n", client->retcode);
- for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "\tSubmissions: %llu %s\n",
- client->submissions[i],
- ring->name);
+ for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+ seq_printf(m, "\tSubmissions: %llu, engine %d\n",
+ client->submissions[i], i);
tot += client->submissions[i];
}
seq_printf(m, "\tTotal: %llu\n", tot);
@@ -2477,7 +2475,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_guc guc;
struct i915_guc_client client = {};
- struct intel_engine_cs *ring;
enum intel_ring_id i;
u64 total = 0;
@@ -2501,9 +2498,9 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
seq_printf(m, "\nGuC submissions:\n");
- for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
- ring->name, guc.submissions[i],
+ for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+ seq_printf(m, "\tengine %d: %10llu, last seqno 0x%08x %9d\n",
+ i, guc.submissions[i],
guc.last_seqno[i], guc.last_seqno[i]);
total += guc.submissions[i];
}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 51ae5c1..601e2c8 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -365,6 +365,14 @@ static void guc_init_proc_desc(struct intel_guc *guc,
kunmap_atomic(base);
}
+static const enum intel_ring_id guc_engine_map[GUC_MAX_ENGINES_NUM] = {
+ [GUC_RENDER_ENGINE] = RCS,
+ [GUC_VIDEO_ENGINE] = VCS,
+ [GUC_BLITTER_ENGINE] = BCS,
+ [GUC_VIDEOENHANCE_ENGINE] = VECS,
+ [GUC_VIDEO_ENGINE2] = VCS2
+};
+
/*
* Initialise/clear the context descriptor shared with the GuC firmware.
*
@@ -388,9 +396,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
struct guc_execlist_context *lrc = &desc.lrc[i];
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
+ enum intel_ring_id ring_id = guc_engine_map[i];
+ struct intel_ringbuffer *ringbuf = ctx->engine[ring_id].ringbuf;
struct intel_engine_cs *ring;
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
@@ -402,7 +411,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
- obj = ctx->engine[i].state;
+ obj = ctx->engine[ring_id].state;
if (!obj)
break; /* XXX: continue? */
@@ -414,7 +423,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
- (ring->id << GUC_ELC_ENGINE_OFFSET);
+ (i << GUC_ELC_ENGINE_OFFSET);
obj = ringbuf->obj;
@@ -423,7 +432,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << ring->id);
+ desc.engines_used |= (1 << i);
}
WARN_ON(desc.engines_used == 0);
@@ -510,7 +519,6 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct drm_i915_gem_request *rq)
{
- enum intel_ring_id ring_id = rq->ring->id;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off, space;
@@ -544,7 +552,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
wqi->header = WQ_TYPE_INORDER |
(wq_len << WQ_LEN_SHIFT) |
- (ring_id << WQ_TARGET_SHIFT) |
+ (rq->ring->guc_engine_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
/* The GuC wants only the low-order word of the context descriptor */
@@ -594,7 +602,7 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
struct intel_guc *guc = client->guc;
- enum intel_ring_id ring_id = rq->ring->id;
+ unsigned int engine_id = rq->ring->guc_engine_id;
int q_ret, b_ret;
/* Need this because of the deferred pin ctx and ring */
@@ -605,7 +613,7 @@ int i915_guc_submit(struct i915_guc_client *client,
if (q_ret == 0)
b_ret = guc_ring_doorbell(client);
- client->submissions[ring_id] += 1;
+ client->submissions[engine_id] += 1;
if (q_ret) {
client->q_fail += 1;
client->retcode = q_ret;
@@ -615,8 +623,8 @@ int i915_guc_submit(struct i915_guc_client *client,
} else {
client->retcode = 0;
}
- guc->submissions[ring_id] += 1;
- guc->last_seqno[ring_id] = rq->seqno;
+ guc->submissions[engine_id] += 1;
+ guc->last_seqno[engine_id] = rq->seqno;
return q_ret;
}
@@ -848,7 +856,7 @@ static void init_guc_policies(struct guc_policies *policies)
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
policy->execution_quantum = 1000000;
@@ -900,7 +908,8 @@ static void guc_create_ads(struct intel_guc *guc)
ads->golden_context_lrca = ring->status_page.gfx_addr;
for_each_ring(ring, dev_priv, i)
- ads->eng_state_size[i] = intel_lr_context_size(ring);
+ ads->eng_state_size[ring->guc_engine_id] =
+ intel_lr_context_size(ring);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
@@ -912,12 +921,12 @@ static void guc_create_ads(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for (i = 0; i < I915_NUM_RINGS; i++) {
- reg_state->mmio_white_list[i].mmio_start =
+ for_each_ring(ring, dev_priv, i) {
+ reg_state->mmio_white_list[ring->guc_engine_id].mmio_start =
dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
- reg_state->mmio_white_list[i].count = 0;
+ reg_state->mmio_white_list[ring->guc_engine_id].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 045b149..73002e9 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -46,7 +46,7 @@ struct i915_guc_client {
uint32_t wq_head;
/* GuC submission statistics & status */
- uint64_t submissions[I915_NUM_RINGS];
+ uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
@@ -106,8 +106,8 @@ struct intel_guc {
uint32_t action_fail; /* Total number of failures */
int32_t action_err; /* Last error code */
- uint64_t submissions[I915_NUM_RINGS];
- uint32_t last_seqno[I915_NUM_RINGS];
+ uint64_t submissions[GUC_MAX_ENGINES_NUM];
+ uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
};
/* intel_guc_loader.c */
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 1856a47..2de57ff 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -44,6 +44,13 @@
#define GUC_MAX_GPU_CONTEXTS 1024
#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
+#define GUC_RENDER_ENGINE 0
+#define GUC_VIDEO_ENGINE 1
+#define GUC_BLITTER_ENGINE 2
+#define GUC_VIDEOENHANCE_ENGINE 3
+#define GUC_VIDEO_ENGINE2 4
+#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+
/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
@@ -285,7 +292,7 @@ struct guc_context_desc {
u64 db_trigger_phy;
u16 db_id;
- struct guc_execlist_context lrc[I915_NUM_RINGS];
+ struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
u8 attribute;
@@ -344,7 +351,7 @@ struct guc_policy {
} __packed;
struct guc_policies {
- struct guc_policy policy[GUC_CTX_PRIORITY_NUM][I915_NUM_RINGS];
+ struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
@@ -388,14 +395,14 @@ struct guc_mmio_regset {
struct guc_mmio_reg_state {
struct guc_mmio_regset global_reg;
- struct guc_mmio_regset engine_reg[I915_NUM_RINGS];
+ struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
/* MMIO registers that are set as non privileged */
struct __packed {
u32 mmio_start;
u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
u32 count;
- } mmio_white_list[I915_NUM_RINGS];
+ } mmio_white_list[GUC_MAX_ENGINES_NUM];
} __packed;
/* GuC Additional Data Struct */
@@ -406,7 +413,7 @@ struct guc_ads {
u32 golden_context_lrca;
u32 scheduler_policies;
u32 reserved0[3];
- u32 eng_state_size[I915_NUM_RINGS];
+ u32 eng_state_size[GUC_MAX_ENGINES_NUM];
u32 reserved2[4];
} __packed;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 73d4347..65a19e8 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2086,6 +2086,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->name = "render ring";
ring->id = RCS;
ring->exec_id = I915_EXEC_RENDER;
+ ring->guc_engine_id = GUC_RENDER_ENGINE;
ring->mmio_base = RENDER_RING_BASE;
logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
@@ -2137,6 +2138,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
ring->name = "bsd ring";
ring->id = VCS;
ring->exec_id = I915_EXEC_BSD;
+ ring->guc_engine_id = GUC_VIDEO_ENGINE;
ring->mmio_base = GEN6_BSD_RING_BASE;
logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
@@ -2153,6 +2155,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
ring->name = "bsd2 ring";
ring->id = VCS2;
ring->exec_id = I915_EXEC_BSD;
+ ring->guc_engine_id = GUC_VIDEO_ENGINE2;
ring->mmio_base = GEN8_BSD2_RING_BASE;
logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
@@ -2169,6 +2172,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
ring->name = "blitter ring";
ring->id = BCS;
ring->exec_id = I915_EXEC_BLT;
+ ring->guc_engine_id = GUC_BLITTER_ENGINE;
ring->mmio_base = BLT_RING_BASE;
logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
@@ -2185,6 +2189,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
ring->name = "video enhancement ring";
ring->id = VECS;
ring->exec_id = I915_EXEC_VEBOX;
+ ring->guc_engine_id = GUC_VIDEOENHANCE_ENGINE;
ring->mmio_base = VEBOX_RING_BASE;
logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b12f2aa..b69eadb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -158,6 +158,7 @@ struct intel_engine_cs {
#define I915_NUM_RINGS 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
+ unsigned int guc_engine_id;
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
--
2.5.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 9+ messages in thread
* ✓ Fi.CI.BAT: success for drm/i915/guc: Decouple GuC engine id from ring id
2016-01-22 23:06 [PATCH] drm/i915/guc: Decouple GuC engine id from ring id yu.dai
@ 2016-01-23 8:14 ` Patchwork
2016-01-23 18:25 ` [PATCH] " Chris Wilson
` (3 subsequent siblings)
4 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2016-01-23 8:14 UTC (permalink / raw)
To: yu.dai; +Cc: intel-gfx
== Summary ==
Built on 8fe9e785ae04fa7c37f7935cff12d62e38054b60 drm-intel-nightly: 2016y-01m-21d-11h-02m-42s UTC integration manifest
bdw-nuci7 total:140 pass:131 dwarn:0 dfail:0 fail:0 skip:9
bdw-ultra total:143 pass:137 dwarn:0 dfail:0 fail:0 skip:6
bsw-nuc-2 total:143 pass:119 dwarn:0 dfail:0 fail:0 skip:24
byt-nuc total:143 pass:128 dwarn:0 dfail:0 fail:0 skip:15
hsw-brixbox total:143 pass:136 dwarn:0 dfail:0 fail:0 skip:7
hsw-gt2 total:143 pass:139 dwarn:0 dfail:0 fail:0 skip:4
ilk-hp8440p total:143 pass:104 dwarn:1 dfail:0 fail:0 skip:38
ivb-t430s total:143 pass:137 dwarn:0 dfail:0 fail:0 skip:6
skl-i5k-2 total:143 pass:134 dwarn:1 dfail:0 fail:0 skip:8
snb-dellxps total:143 pass:129 dwarn:0 dfail:0 fail:0 skip:14
snb-x220t total:143 pass:129 dwarn:0 dfail:0 fail:1 skip:13
Results at /archive/results/CI_IGT_test/Patchwork_1255/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] drm/i915/guc: Decouple GuC engine id from ring id
2016-01-22 23:06 [PATCH] drm/i915/guc: Decouple GuC engine id from ring id yu.dai
2016-01-23 8:14 ` ✓ Fi.CI.BAT: success for " Patchwork
@ 2016-01-23 18:25 ` Chris Wilson
2016-01-23 18:51 ` Yu Dai
2016-01-23 19:58 ` [PATCH v2] " yu.dai
` (2 subsequent siblings)
4 siblings, 1 reply; 9+ messages in thread
From: Chris Wilson @ 2016-01-23 18:25 UTC (permalink / raw)
To: yu.dai; +Cc: daniel.vetter, intel-gfx
On Fri, Jan 22, 2016 at 03:06:28PM -0800, yu.dai@intel.com wrote:
> From: Alex Dai <yu.dai@intel.com>
>
> Previously GuC uses ring id as engine id because of same definition.
> But this is not true since this commit:
>
> commit de1add360522c876c25ef2bbbbab1c94bdb509ab
> Author: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Date: Fri Jan 15 15:12:50 2016 +0000
>
> drm/i915: Decouple execbuf uAPI from internal implementation
>
> Added GuC engine id into GuC interface to decouple it from ring id used
> by driver.
>
> Signed-off-by: Alex Dai <yu.dai@intel.com>
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index c5db235..9a4e01e 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -2446,7 +2446,6 @@ static void i915_guc_client_info(struct seq_file *m,
> struct drm_i915_private *dev_priv,
> struct i915_guc_client *client)
> {
> - struct intel_engine_cs *ring;
> uint64_t tot = 0;
> uint32_t i;
>
> @@ -2461,10 +2460,9 @@ static void i915_guc_client_info(struct seq_file *m,
> seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
> seq_printf(m, "\tLast submission result: %d\n", client->retcode);
>
> - for_each_ring(ring, dev_priv, i) {
> - seq_printf(m, "\tSubmissions: %llu %s\n",
> - client->submissions[i],
> - ring->name);
> + for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
> + seq_printf(m, "\tSubmissions: %llu, engine %d\n",
> + client->submissions[i], i);
> tot += client->submissions[i];
> }
> seq_printf(m, "\tTotal: %llu\n", tot);
> @@ -2477,7 +2475,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
> struct drm_i915_private *dev_priv = dev->dev_private;
> struct intel_guc guc;
> struct i915_guc_client client = {};
> - struct intel_engine_cs *ring;
> enum intel_ring_id i;
> u64 total = 0;
>
> @@ -2501,9 +2498,9 @@ static int i915_guc_info(struct seq_file *m, void *data)
> seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
>
> seq_printf(m, "\nGuC submissions:\n");
> - for_each_ring(ring, dev_priv, i) {
> - seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
> - ring->name, guc.submissions[i],
> + for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
> + seq_printf(m, "\tengine %d: %10llu, last seqno 0x%08x %9d\n",
> + i, guc.submissions[i],
> guc.last_seqno[i], guc.last_seqno[i]);
> total += guc.submissions[i];
For debugfs, would it not be more convenient to use the ring->name and
show the corresponding guc engine id?
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 51ae5c1..601e2c8 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -365,6 +365,14 @@ static void guc_init_proc_desc(struct intel_guc *guc,
> kunmap_atomic(base);
> }
>
> +static const enum intel_ring_id guc_engine_map[GUC_MAX_ENGINES_NUM] = {
> + [GUC_RENDER_ENGINE] = RCS,
> + [GUC_VIDEO_ENGINE] = VCS,
> + [GUC_BLITTER_ENGINE] = BCS,
> + [GUC_VIDEOENHANCE_ENGINE] = VECS,
> + [GUC_VIDEO_ENGINE2] = VCS2
> +};
> +
> /*
> * Initialise/clear the context descriptor shared with the GuC firmware.
> *
> @@ -388,9 +396,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
> desc.priority = client->priority;
> desc.db_id = client->doorbell_id;
>
> - for (i = 0; i < I915_NUM_RINGS; i++) {
> + for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
> struct guc_execlist_context *lrc = &desc.lrc[i];
Again, would it not be more consistent to iterate over engines
(for_each_ring) and use struct guc_execlist_context *lrc =
&desc.lrc[ring->guc_id] ?
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index b12f2aa..b69eadb 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -158,6 +158,7 @@ struct intel_engine_cs {
> #define I915_NUM_RINGS 5
> #define _VCS(n) (VCS + (n))
> unsigned int exec_id;
> + unsigned int guc_engine_id;
Is the tautology useful? It is an engine, so intel_engine_cs->guc_id
would mean the correspondence map between our engines and the guc's.
-Chris
--
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH] drm/i915/guc: Decouple GuC engine id from ring id
2016-01-23 18:25 ` [PATCH] " Chris Wilson
@ 2016-01-23 18:51 ` Yu Dai
0 siblings, 0 replies; 9+ messages in thread
From: Yu Dai @ 2016-01-23 18:51 UTC (permalink / raw)
To: Chris Wilson, intel-gfx, tvrtko.ursulin, daniel.vetter,
david.s.gordon, chris.harris
On 01/23/2016 10:25 AM, Chris Wilson wrote:
> On Fri, Jan 22, 2016 at 03:06:28PM -0800, yu.dai@intel.com wrote:
> > From: Alex Dai <yu.dai@intel.com>
> >
> > Previously GuC uses ring id as engine id because of same definition.
> > But this is not true since this commit:
> >
> > commit de1add360522c876c25ef2bbbbab1c94bdb509ab
> > Author: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > Date: Fri Jan 15 15:12:50 2016 +0000
> >
> > drm/i915: Decouple execbuf uAPI from internal implementation
> >
> > Added GuC engine id into GuC interface to decouple it from ring id used
> > by driver.
> >
> > Signed-off-by: Alex Dai <yu.dai@intel.com>
> >
> > diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> > index c5db235..9a4e01e 100644
> > --- a/drivers/gpu/drm/i915/i915_debugfs.c
> > +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> > @@ -2446,7 +2446,6 @@ static void i915_guc_client_info(struct seq_file *m,
> > struct drm_i915_private *dev_priv,
> > struct i915_guc_client *client)
> > {
> > - struct intel_engine_cs *ring;
> > uint64_t tot = 0;
> > uint32_t i;
> >
> > @@ -2461,10 +2460,9 @@ static void i915_guc_client_info(struct seq_file *m,
> > seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
> > seq_printf(m, "\tLast submission result: %d\n", client->retcode);
> >
> > - for_each_ring(ring, dev_priv, i) {
> > - seq_printf(m, "\tSubmissions: %llu %s\n",
> > - client->submissions[i],
> > - ring->name);
> > + for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
> > + seq_printf(m, "\tSubmissions: %llu, engine %d\n",
> > + client->submissions[i], i);
> > tot += client->submissions[i];
> > }
> > seq_printf(m, "\tTotal: %llu\n", tot);
> > @@ -2477,7 +2475,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
> > struct drm_i915_private *dev_priv = dev->dev_private;
> > struct intel_guc guc;
> > struct i915_guc_client client = {};
> > - struct intel_engine_cs *ring;
> > enum intel_ring_id i;
> > u64 total = 0;
> >
> > @@ -2501,9 +2498,9 @@ static int i915_guc_info(struct seq_file *m, void *data)
> > seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
> >
> > seq_printf(m, "\nGuC submissions:\n");
> > - for_each_ring(ring, dev_priv, i) {
> > - seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
> > - ring->name, guc.submissions[i],
> > + for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
> > + seq_printf(m, "\tengine %d: %10llu, last seqno 0x%08x %9d\n",
> > + i, guc.submissions[i],
> > guc.last_seqno[i], guc.last_seqno[i]);
> > total += guc.submissions[i];
>
> For debugfs, would it not be more convenient to use the ring->name and
> show the corresponding guc engine id?
>
> > diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> > index 51ae5c1..601e2c8 100644
> > --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> > +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> > @@ -365,6 +365,14 @@ static void guc_init_proc_desc(struct intel_guc *guc,
> > kunmap_atomic(base);
> > }
> >
> > +static const enum intel_ring_id guc_engine_map[GUC_MAX_ENGINES_NUM] = {
> > + [GUC_RENDER_ENGINE] = RCS,
> > + [GUC_VIDEO_ENGINE] = VCS,
> > + [GUC_BLITTER_ENGINE] = BCS,
> > + [GUC_VIDEOENHANCE_ENGINE] = VECS,
> > + [GUC_VIDEO_ENGINE2] = VCS2
> > +};
> > +
> > /*
> > * Initialise/clear the context descriptor shared with the GuC firmware.
> > *
> > @@ -388,9 +396,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
> > desc.priority = client->priority;
> > desc.db_id = client->doorbell_id;
> >
> > - for (i = 0; i < I915_NUM_RINGS; i++) {
> > + for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
> > struct guc_execlist_context *lrc = &desc.lrc[i];
>
> Again, would it not be more consistent to iterate over engines
> (for_each_ring) and use struct guc_execlist_context *lrc =
> &desc.lrc[ring->guc_id] ?
>
> > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > index b12f2aa..b69eadb 100644
> > --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > @@ -158,6 +158,7 @@ struct intel_engine_cs {
> > #define I915_NUM_RINGS 5
> > #define _VCS(n) (VCS + (n))
> > unsigned int exec_id;
> > + unsigned int guc_engine_id;
>
> Is the tautology useful? It is an engine, so intel_engine_cs->guc_id
> would mean the correspondance map between our engines and the guc's.
>
Yes, guc_id is a good name. And I agree with you, using for_each_ring
where possible will be more consistent with other driver code. Thanks for
your review and comments.
Alex
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH v2] drm/i915/guc: Decouple GuC engine id from ring id
2016-01-22 23:06 [PATCH] drm/i915/guc: Decouple GuC engine id from ring id yu.dai
2016-01-23 8:14 ` ✓ Fi.CI.BAT: success for " Patchwork
2016-01-23 18:25 ` [PATCH] " Chris Wilson
@ 2016-01-23 19:58 ` yu.dai
2016-01-25 10:32 ` Tvrtko Ursulin
2016-01-24 7:43 ` ✗ Fi.CI.BAT: warning for drm/i915/guc: Decouple GuC engine id from ring id (rev2) Patchwork
2016-01-25 12:11 ` ✓ Fi.CI.BAT: success " Patchwork
4 siblings, 1 reply; 9+ messages in thread
From: yu.dai @ 2016-01-23 19:58 UTC (permalink / raw)
To: intel-gfx; +Cc: daniel.vetter
From: Alex Dai <yu.dai@intel.com>
Previously GuC uses ring id as engine id because of same definition.
But this is not true since this commit:
commit de1add360522c876c25ef2bbbbab1c94bdb509ab
Author: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Fri Jan 15 15:12:50 2016 +0000
drm/i915: Decouple execbuf uAPI from internal implementation
Added GuC engine id into GuC interface to decouple it from ring id used
by driver.
v2: Keep ring name print out in debugfs; using for_each_ring() where
possible to keep driver consistent. (Chris W.)
Signed-off-by: Alex Dai <yu.dai@intel.com>
---
drivers/gpu/drm/i915/i915_debugfs.c | 12 +++++-----
drivers/gpu/drm/i915/i915_guc_submission.c | 38 ++++++++++++++----------------
drivers/gpu/drm/i915/intel_guc.h | 6 ++---
drivers/gpu/drm/i915/intel_guc_fwif.h | 17 +++++++++----
drivers/gpu/drm/i915/intel_lrc.c | 5 ++++
drivers/gpu/drm/i915/intel_ringbuffer.h | 1 +
6 files changed, 45 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index c5db235..cea1844 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2463,9 +2463,9 @@ static void i915_guc_client_info(struct seq_file *m,
for_each_ring(ring, dev_priv, i) {
seq_printf(m, "\tSubmissions: %llu %s\n",
- client->submissions[i],
+ client->submissions[ring->guc_id],
ring->name);
- tot += client->submissions[i];
+ tot += client->submissions[ring->guc_id];
}
seq_printf(m, "\tTotal: %llu\n", tot);
}
@@ -2502,10 +2502,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC submissions:\n");
for_each_ring(ring, dev_priv, i) {
- seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
- ring->name, guc.submissions[i],
- guc.last_seqno[i], guc.last_seqno[i]);
- total += guc.submissions[i];
+ seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
+ ring->name, guc.submissions[ring->guc_id],
+ guc.last_seqno[ring->guc_id]);
+ total += guc.submissions[ring->guc_id];
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 51ae5c1..b4da20f 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -376,6 +376,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_engine_cs *ring;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
@@ -388,10 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
desc.priority = client->priority;
desc.db_id = client->doorbell_id;
- for (i = 0; i < I915_NUM_RINGS; i++) {
- struct guc_execlist_context *lrc = &desc.lrc[i];
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
- struct intel_engine_cs *ring;
+ for_each_ring(ring, dev_priv, i) {
+ struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
struct drm_i915_gem_object *obj;
uint64_t ctx_desc;
@@ -406,7 +406,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
if (!obj)
break; /* XXX: continue? */
- ring = ringbuf->ring;
ctx_desc = intel_lr_context_descriptor(ctx, ring);
lrc->context_desc = (u32)ctx_desc;
@@ -414,16 +413,16 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
- (ring->id << GUC_ELC_ENGINE_OFFSET);
+ (ring->guc_id << GUC_ELC_ENGINE_OFFSET);
- obj = ringbuf->obj;
+ obj = ctx->engine[i].ringbuf->obj;
lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << ring->id);
+ desc.engines_used |= (1 << ring->guc_id);
}
WARN_ON(desc.engines_used == 0);
@@ -510,7 +509,6 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct drm_i915_gem_request *rq)
{
- enum intel_ring_id ring_id = rq->ring->id;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off, space;
@@ -544,7 +542,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
wqi->header = WQ_TYPE_INORDER |
(wq_len << WQ_LEN_SHIFT) |
- (ring_id << WQ_TARGET_SHIFT) |
+ (rq->ring->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
/* The GuC wants only the low-order word of the context descriptor */
@@ -594,7 +592,7 @@ int i915_guc_submit(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
struct intel_guc *guc = client->guc;
- enum intel_ring_id ring_id = rq->ring->id;
+ unsigned int engine_id = rq->ring->guc_id;
int q_ret, b_ret;
/* Need this because of the deferred pin ctx and ring */
@@ -605,7 +603,7 @@ int i915_guc_submit(struct i915_guc_client *client,
if (q_ret == 0)
b_ret = guc_ring_doorbell(client);
- client->submissions[ring_id] += 1;
+ client->submissions[engine_id] += 1;
if (q_ret) {
client->q_fail += 1;
client->retcode = q_ret;
@@ -615,8 +613,8 @@ int i915_guc_submit(struct i915_guc_client *client,
} else {
client->retcode = 0;
}
- guc->submissions[ring_id] += 1;
- guc->last_seqno[ring_id] = rq->seqno;
+ guc->submissions[engine_id] += 1;
+ guc->last_seqno[engine_id] = rq->seqno;
return q_ret;
}
@@ -848,7 +846,7 @@ static void init_guc_policies(struct guc_policies *policies)
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
- for (i = 0; i < I915_NUM_RINGS; i++) {
+ for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
policy->execution_quantum = 1000000;
@@ -900,7 +898,7 @@ static void guc_create_ads(struct intel_guc *guc)
ads->golden_context_lrca = ring->status_page.gfx_addr;
for_each_ring(ring, dev_priv, i)
- ads->eng_state_size[i] = intel_lr_context_size(ring);
+ ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
/* GuC scheduling policies */
policies = (void *)ads + sizeof(struct guc_ads);
@@ -912,12 +910,12 @@ static void guc_create_ads(struct intel_guc *guc)
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
- for (i = 0; i < I915_NUM_RINGS; i++) {
- reg_state->mmio_white_list[i].mmio_start =
- dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START;
+ for_each_ring(ring, dev_priv, i) {
+ reg_state->mmio_white_list[ring->guc_id].mmio_start =
+ ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
- reg_state->mmio_white_list[i].count = 0;
+ reg_state->mmio_white_list[ring->guc_id].count = 0;
}
ads->reg_state_addr = ads->scheduler_policies +
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 045b149..73002e9 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -46,7 +46,7 @@ struct i915_guc_client {
uint32_t wq_head;
/* GuC submission statistics & status */
- uint64_t submissions[I915_NUM_RINGS];
+ uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
@@ -106,8 +106,8 @@ struct intel_guc {
uint32_t action_fail; /* Total number of failures */
int32_t action_err; /* Last error code */
- uint64_t submissions[I915_NUM_RINGS];
- uint32_t last_seqno[I915_NUM_RINGS];
+ uint64_t submissions[GUC_MAX_ENGINES_NUM];
+ uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
};
/* intel_guc_loader.c */
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 1856a47..2de57ff 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -44,6 +44,13 @@
#define GUC_MAX_GPU_CONTEXTS 1024
#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
+#define GUC_RENDER_ENGINE 0
+#define GUC_VIDEO_ENGINE 1
+#define GUC_BLITTER_ENGINE 2
+#define GUC_VIDEOENHANCE_ENGINE 3
+#define GUC_VIDEO_ENGINE2 4
+#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+
/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
@@ -285,7 +292,7 @@ struct guc_context_desc {
u64 db_trigger_phy;
u16 db_id;
- struct guc_execlist_context lrc[I915_NUM_RINGS];
+ struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
u8 attribute;
@@ -344,7 +351,7 @@ struct guc_policy {
} __packed;
struct guc_policies {
- struct guc_policy policy[GUC_CTX_PRIORITY_NUM][I915_NUM_RINGS];
+ struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
@@ -388,14 +395,14 @@ struct guc_mmio_regset {
struct guc_mmio_reg_state {
struct guc_mmio_regset global_reg;
- struct guc_mmio_regset engine_reg[I915_NUM_RINGS];
+ struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
/* MMIO registers that are set as non privileged */
struct __packed {
u32 mmio_start;
u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
u32 count;
- } mmio_white_list[I915_NUM_RINGS];
+ } mmio_white_list[GUC_MAX_ENGINES_NUM];
} __packed;
/* GuC Additional Data Struct */
@@ -406,7 +413,7 @@ struct guc_ads {
u32 golden_context_lrca;
u32 scheduler_policies;
u32 reserved0[3];
- u32 eng_state_size[I915_NUM_RINGS];
+ u32 eng_state_size[GUC_MAX_ENGINES_NUM];
u32 reserved2[4];
} __packed;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 73d4347..efb91ad 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2086,6 +2086,7 @@ static int logical_render_ring_init(struct drm_device *dev)
ring->name = "render ring";
ring->id = RCS;
ring->exec_id = I915_EXEC_RENDER;
+ ring->guc_id = GUC_RENDER_ENGINE;
ring->mmio_base = RENDER_RING_BASE;
logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
@@ -2137,6 +2138,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
ring->name = "bsd ring";
ring->id = VCS;
ring->exec_id = I915_EXEC_BSD;
+ ring->guc_id = GUC_VIDEO_ENGINE;
ring->mmio_base = GEN6_BSD_RING_BASE;
logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
@@ -2153,6 +2155,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
ring->name = "bsd2 ring";
ring->id = VCS2;
ring->exec_id = I915_EXEC_BSD;
+ ring->guc_id = GUC_VIDEO_ENGINE2;
ring->mmio_base = GEN8_BSD2_RING_BASE;
logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
@@ -2169,6 +2172,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
ring->name = "blitter ring";
ring->id = BCS;
ring->exec_id = I915_EXEC_BLT;
+ ring->guc_id = GUC_BLITTER_ENGINE;
ring->mmio_base = BLT_RING_BASE;
logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
@@ -2185,6 +2189,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
ring->name = "video enhancement ring";
ring->id = VECS;
ring->exec_id = I915_EXEC_VEBOX;
+ ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
ring->mmio_base = VEBOX_RING_BASE;
logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b12f2aa..566b0ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -158,6 +158,7 @@ struct intel_engine_cs {
#define I915_NUM_RINGS 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
+ unsigned int guc_id;
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
--
2.5.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 9+ messages in thread
* ✗ Fi.CI.BAT: warning for drm/i915/guc: Decouple GuC engine id from ring id (rev2)
2016-01-22 23:06 [PATCH] drm/i915/guc: Decouple GuC engine id from ring id yu.dai
` (2 preceding siblings ...)
2016-01-23 19:58 ` [PATCH v2] " yu.dai
@ 2016-01-24 7:43 ` Patchwork
2016-01-25 11:03 ` Tvrtko Ursulin
2016-01-25 12:11 ` ✓ Fi.CI.BAT: success " Patchwork
4 siblings, 1 reply; 9+ messages in thread
From: Patchwork @ 2016-01-24 7:43 UTC (permalink / raw)
To: yu.dai; +Cc: intel-gfx
== Summary ==
Built on 8fe9e785ae04fa7c37f7935cff12d62e38054b60 drm-intel-nightly: 2016y-01m-21d-11h-02m-42s UTC integration manifest
Test kms_flip:
Subgroup basic-flip-vs-modeset:
pass -> DMESG-WARN (ilk-hp8440p)
bdw-nuci7 total:140 pass:131 dwarn:0 dfail:0 fail:0 skip:9
bdw-ultra total:143 pass:137 dwarn:0 dfail:0 fail:0 skip:6
bsw-nuc-2 total:143 pass:119 dwarn:0 dfail:0 fail:0 skip:24
byt-nuc total:143 pass:128 dwarn:0 dfail:0 fail:0 skip:15
hsw-brixbox total:143 pass:136 dwarn:0 dfail:0 fail:0 skip:7
hsw-gt2 total:143 pass:139 dwarn:0 dfail:0 fail:0 skip:4
ilk-hp8440p total:143 pass:103 dwarn:2 dfail:0 fail:0 skip:38
ivb-t430s total:143 pass:137 dwarn:0 dfail:0 fail:0 skip:6
skl-i5k-2 total:143 pass:134 dwarn:1 dfail:0 fail:0 skip:8
snb-dellxps total:143 pass:129 dwarn:0 dfail:0 fail:0 skip:14
snb-x220t total:143 pass:129 dwarn:0 dfail:0 fail:1 skip:13
Results at /archive/results/CI_IGT_test/Patchwork_1258/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH v2] drm/i915/guc: Decouple GuC engine id from ring id
2016-01-23 19:58 ` [PATCH v2] " yu.dai
@ 2016-01-25 10:32 ` Tvrtko Ursulin
0 siblings, 0 replies; 9+ messages in thread
From: Tvrtko Ursulin @ 2016-01-25 10:32 UTC (permalink / raw)
To: yu.dai, intel-gfx; +Cc: daniel.vetter
On 23/01/16 19:58, yu.dai@intel.com wrote:
> From: Alex Dai <yu.dai@intel.com>
>
> Previously GuC uses ring id as engine id because of same definition.
> But this is not true since this commit:
>
> commit de1add360522c876c25ef2bbbbab1c94bdb509ab
> Author: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Date: Fri Jan 15 15:12:50 2016 +0000
>
> drm/i915: Decouple execbuf uAPI from internal implementation
>
> Added GuC engine id into GuC interface to decouple it from ring id used
> by driver.
>
> v2: Keep ring name print out in debugfs; using for_each_ring() where
> possible to keep driver consistent. (Chris W.)
>
> Signed-off-by: Alex Dai <yu.dai@intel.com>
> ---
> drivers/gpu/drm/i915/i915_debugfs.c | 12 +++++-----
> drivers/gpu/drm/i915/i915_guc_submission.c | 38 ++++++++++++++----------------
> drivers/gpu/drm/i915/intel_guc.h | 6 ++---
> drivers/gpu/drm/i915/intel_guc_fwif.h | 17 +++++++++----
> drivers/gpu/drm/i915/intel_lrc.c | 5 ++++
> drivers/gpu/drm/i915/intel_ringbuffer.h | 1 +
> 6 files changed, 45 insertions(+), 34 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index c5db235..cea1844 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -2463,9 +2463,9 @@ static void i915_guc_client_info(struct seq_file *m,
>
> for_each_ring(ring, dev_priv, i) {
> seq_printf(m, "\tSubmissions: %llu %s\n",
> - client->submissions[i],
> + client->submissions[ring->guc_id],
> ring->name);
> - tot += client->submissions[i];
> + tot += client->submissions[ring->guc_id];
> }
> seq_printf(m, "\tTotal: %llu\n", tot);
> }
> @@ -2502,10 +2502,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
>
> seq_printf(m, "\nGuC submissions:\n");
> for_each_ring(ring, dev_priv, i) {
> - seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
> - ring->name, guc.submissions[i],
> - guc.last_seqno[i], guc.last_seqno[i]);
> - total += guc.submissions[i];
> + seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
> + ring->name, guc.submissions[ring->guc_id],
> + guc.last_seqno[ring->guc_id]);
> + total += guc.submissions[ring->guc_id];
> }
> seq_printf(m, "\t%s: %llu\n", "Total", total);
>
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 51ae5c1..b4da20f 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -376,6 +376,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
> static void guc_init_ctx_desc(struct intel_guc *guc,
> struct i915_guc_client *client)
> {
> + struct drm_i915_private *dev_priv = guc_to_i915(guc);
> + struct intel_engine_cs *ring;
> struct intel_context *ctx = client->owner;
> struct guc_context_desc desc;
> struct sg_table *sg;
> @@ -388,10 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
> desc.priority = client->priority;
> desc.db_id = client->doorbell_id;
>
> - for (i = 0; i < I915_NUM_RINGS; i++) {
> - struct guc_execlist_context *lrc = &desc.lrc[i];
> - struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
> - struct intel_engine_cs *ring;
> + for_each_ring(ring, dev_priv, i) {
> + struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
> struct drm_i915_gem_object *obj;
> uint64_t ctx_desc;
>
> @@ -406,7 +406,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
> if (!obj)
> break; /* XXX: continue? */
>
> - ring = ringbuf->ring;
> ctx_desc = intel_lr_context_descriptor(ctx, ring);
> lrc->context_desc = (u32)ctx_desc;
>
> @@ -414,16 +413,16 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
> lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
> LRC_STATE_PN * PAGE_SIZE;
> lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
> - (ring->id << GUC_ELC_ENGINE_OFFSET);
> + (ring->guc_id << GUC_ELC_ENGINE_OFFSET);
>
> - obj = ringbuf->obj;
> + obj = ctx->engine[i].ringbuf->obj;
>
> lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
> lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
> lrc->ring_next_free_location = lrc->ring_begin;
> lrc->ring_current_tail_pointer_value = 0;
>
> - desc.engines_used |= (1 << ring->id);
> + desc.engines_used |= (1 << ring->guc_id);
> }
>
> WARN_ON(desc.engines_used == 0);
> @@ -510,7 +509,6 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
> static int guc_add_workqueue_item(struct i915_guc_client *gc,
> struct drm_i915_gem_request *rq)
> {
> - enum intel_ring_id ring_id = rq->ring->id;
> struct guc_wq_item *wqi;
> void *base;
> u32 tail, wq_len, wq_off, space;
> @@ -544,7 +542,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
> wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
> wqi->header = WQ_TYPE_INORDER |
> (wq_len << WQ_LEN_SHIFT) |
> - (ring_id << WQ_TARGET_SHIFT) |
> + (rq->ring->guc_id << WQ_TARGET_SHIFT) |
> WQ_NO_WCFLUSH_WAIT;
>
> /* The GuC wants only the low-order word of the context descriptor */
> @@ -594,7 +592,7 @@ int i915_guc_submit(struct i915_guc_client *client,
> struct drm_i915_gem_request *rq)
> {
> struct intel_guc *guc = client->guc;
> - enum intel_ring_id ring_id = rq->ring->id;
> + unsigned int engine_id = rq->ring->guc_id;
> int q_ret, b_ret;
>
> /* Need this because of the deferred pin ctx and ring */
> @@ -605,7 +603,7 @@ int i915_guc_submit(struct i915_guc_client *client,
> if (q_ret == 0)
> b_ret = guc_ring_doorbell(client);
>
> - client->submissions[ring_id] += 1;
> + client->submissions[engine_id] += 1;
> if (q_ret) {
> client->q_fail += 1;
> client->retcode = q_ret;
> @@ -615,8 +613,8 @@ int i915_guc_submit(struct i915_guc_client *client,
> } else {
> client->retcode = 0;
> }
> - guc->submissions[ring_id] += 1;
> - guc->last_seqno[ring_id] = rq->seqno;
> + guc->submissions[engine_id] += 1;
> + guc->last_seqno[engine_id] = rq->seqno;
>
> return q_ret;
> }
> @@ -848,7 +846,7 @@ static void init_guc_policies(struct guc_policies *policies)
> policies->max_num_work_items = POLICY_MAX_NUM_WI;
>
> for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
> - for (i = 0; i < I915_NUM_RINGS; i++) {
> + for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
> policy = &policies->policy[p][i];
>
> policy->execution_quantum = 1000000;
> @@ -900,7 +898,7 @@ static void guc_create_ads(struct intel_guc *guc)
> ads->golden_context_lrca = ring->status_page.gfx_addr;
>
> for_each_ring(ring, dev_priv, i)
> - ads->eng_state_size[i] = intel_lr_context_size(ring);
> + ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
>
> /* GuC scheduling policies */
> policies = (void *)ads + sizeof(struct guc_ads);
> @@ -912,12 +910,12 @@ static void guc_create_ads(struct intel_guc *guc)
> /* MMIO reg state */
> reg_state = (void *)policies + sizeof(struct guc_policies);
>
> - for (i = 0; i < I915_NUM_RINGS; i++) {
> - reg_state->mmio_white_list[i].mmio_start =
> - dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START;
> + for_each_ring(ring, dev_priv, i) {
> + reg_state->mmio_white_list[ring->guc_id].mmio_start =
> + ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
>
> /* Nothing to be saved or restored for now. */
> - reg_state->mmio_white_list[i].count = 0;
> + reg_state->mmio_white_list[ring->guc_id].count = 0;
> }
>
> ads->reg_state_addr = ads->scheduler_policies +
> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> index 045b149..73002e9 100644
> --- a/drivers/gpu/drm/i915/intel_guc.h
> +++ b/drivers/gpu/drm/i915/intel_guc.h
> @@ -46,7 +46,7 @@ struct i915_guc_client {
> uint32_t wq_head;
>
> /* GuC submission statistics & status */
> - uint64_t submissions[I915_NUM_RINGS];
> + uint64_t submissions[GUC_MAX_ENGINES_NUM];
> uint32_t q_fail;
> uint32_t b_fail;
> int retcode;
> @@ -106,8 +106,8 @@ struct intel_guc {
> uint32_t action_fail; /* Total number of failures */
> int32_t action_err; /* Last error code */
>
> - uint64_t submissions[I915_NUM_RINGS];
> - uint32_t last_seqno[I915_NUM_RINGS];
> + uint64_t submissions[GUC_MAX_ENGINES_NUM];
> + uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
> };
>
> /* intel_guc_loader.c */
> diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
> index 1856a47..2de57ff 100644
> --- a/drivers/gpu/drm/i915/intel_guc_fwif.h
> +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
> @@ -44,6 +44,13 @@
> #define GUC_MAX_GPU_CONTEXTS 1024
> #define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
>
> +#define GUC_RENDER_ENGINE 0
> +#define GUC_VIDEO_ENGINE 1
> +#define GUC_BLITTER_ENGINE 2
> +#define GUC_VIDEOENHANCE_ENGINE 3
> +#define GUC_VIDEO_ENGINE2 4
> +#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
> +
> /* Work queue item header definitions */
> #define WQ_STATUS_ACTIVE 1
> #define WQ_STATUS_SUSPENDED 2
> @@ -285,7 +292,7 @@ struct guc_context_desc {
> u64 db_trigger_phy;
> u16 db_id;
>
> - struct guc_execlist_context lrc[I915_NUM_RINGS];
> + struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
>
> u8 attribute;
>
> @@ -344,7 +351,7 @@ struct guc_policy {
> } __packed;
>
> struct guc_policies {
> - struct guc_policy policy[GUC_CTX_PRIORITY_NUM][I915_NUM_RINGS];
> + struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
>
> /* In micro seconds. How much time to allow before DPC processing is
> * called back via interrupt (to prevent DPC queue drain starving).
> @@ -388,14 +395,14 @@ struct guc_mmio_regset {
>
> struct guc_mmio_reg_state {
> struct guc_mmio_regset global_reg;
> - struct guc_mmio_regset engine_reg[I915_NUM_RINGS];
> + struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
>
> /* MMIO registers that are set as non privileged */
> struct __packed {
> u32 mmio_start;
> u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
> u32 count;
> - } mmio_white_list[I915_NUM_RINGS];
> + } mmio_white_list[GUC_MAX_ENGINES_NUM];
> } __packed;
>
> /* GuC Additional Data Struct */
> @@ -406,7 +413,7 @@ struct guc_ads {
> u32 golden_context_lrca;
> u32 scheduler_policies;
> u32 reserved0[3];
> - u32 eng_state_size[I915_NUM_RINGS];
> + u32 eng_state_size[GUC_MAX_ENGINES_NUM];
> u32 reserved2[4];
> } __packed;
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 73d4347..efb91ad 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -2086,6 +2086,7 @@ static int logical_render_ring_init(struct drm_device *dev)
> ring->name = "render ring";
> ring->id = RCS;
> ring->exec_id = I915_EXEC_RENDER;
> + ring->guc_id = GUC_RENDER_ENGINE;
> ring->mmio_base = RENDER_RING_BASE;
>
> logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
> @@ -2137,6 +2138,7 @@ static int logical_bsd_ring_init(struct drm_device *dev)
> ring->name = "bsd ring";
> ring->id = VCS;
> ring->exec_id = I915_EXEC_BSD;
> + ring->guc_id = GUC_VIDEO_ENGINE;
> ring->mmio_base = GEN6_BSD_RING_BASE;
>
> logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
> @@ -2153,6 +2155,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
> ring->name = "bsd2 ring";
> ring->id = VCS2;
> ring->exec_id = I915_EXEC_BSD;
> + ring->guc_id = GUC_VIDEO_ENGINE2;
> ring->mmio_base = GEN8_BSD2_RING_BASE;
>
> logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
> @@ -2169,6 +2172,7 @@ static int logical_blt_ring_init(struct drm_device *dev)
> ring->name = "blitter ring";
> ring->id = BCS;
> ring->exec_id = I915_EXEC_BLT;
> + ring->guc_id = GUC_BLITTER_ENGINE;
> ring->mmio_base = BLT_RING_BASE;
>
> logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
> @@ -2185,6 +2189,7 @@ static int logical_vebox_ring_init(struct drm_device *dev)
> ring->name = "video enhancement ring";
> ring->id = VECS;
> ring->exec_id = I915_EXEC_VEBOX;
> + ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
> ring->mmio_base = VEBOX_RING_BASE;
>
> logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index b12f2aa..566b0ae 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -158,6 +158,7 @@ struct intel_engine_cs {
> #define I915_NUM_RINGS 5
> #define _VCS(n) (VCS + (n))
> unsigned int exec_id;
> + unsigned int guc_id;
> u32 mmio_base;
> struct drm_device *dev;
> struct intel_ringbuffer *buffer;
>
Looks OK to me, with the caveat that I don't know much about GuC. But it
is obviously needed to fix the fallout from my decoupling. And it sounds
good to explicitly document this GuC dependency, so:
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: ✗ Fi.CI.BAT: warning for drm/i915/guc: Decouple GuC engine id from ring id (rev2)
2016-01-24 7:43 ` ✗ Fi.CI.BAT: warning for drm/i915/guc: Decouple GuC engine id from ring id (rev2) Patchwork
@ 2016-01-25 11:03 ` Tvrtko Ursulin
0 siblings, 0 replies; 9+ messages in thread
From: Tvrtko Ursulin @ 2016-01-25 11:03 UTC (permalink / raw)
To: Patchwork, yu.dai; +Cc: intel-gfx
On 24/01/16 07:43, Patchwork wrote:
> == Summary ==
>
> Built on 8fe9e785ae04fa7c37f7935cff12d62e38054b60 drm-intel-nightly: 2016y-01m-21d-11h-02m-42s UTC integration manifest
>
> Test kms_flip:
> Subgroup basic-flip-vs-modeset:
> pass -> DMESG-WARN (ilk-hp8440p)
Known unrelated issue: https://bugs.freedesktop.org/show_bug.cgi?id=93787
>
> bdw-nuci7 total:140 pass:131 dwarn:0 dfail:0 fail:0 skip:9
> bdw-ultra total:143 pass:137 dwarn:0 dfail:0 fail:0 skip:6
> bsw-nuc-2 total:143 pass:119 dwarn:0 dfail:0 fail:0 skip:24
> byt-nuc total:143 pass:128 dwarn:0 dfail:0 fail:0 skip:15
> hsw-brixbox total:143 pass:136 dwarn:0 dfail:0 fail:0 skip:7
> hsw-gt2 total:143 pass:139 dwarn:0 dfail:0 fail:0 skip:4
> ilk-hp8440p total:143 pass:103 dwarn:2 dfail:0 fail:0 skip:38
> ivb-t430s total:143 pass:137 dwarn:0 dfail:0 fail:0 skip:6
> skl-i5k-2 total:143 pass:134 dwarn:1 dfail:0 fail:0 skip:8
> snb-dellxps total:143 pass:129 dwarn:0 dfail:0 fail:0 skip:14
> snb-x220t total:143 pass:129 dwarn:0 dfail:0 fail:1 skip:13
Patch merged, thanks!
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
* ✓ Fi.CI.BAT: success for drm/i915/guc: Decouple GuC engine id from ring id (rev2)
2016-01-22 23:06 [PATCH] drm/i915/guc: Decouple GuC engine id from ring id yu.dai
` (3 preceding siblings ...)
2016-01-24 7:43 ` ✗ Fi.CI.BAT: warning for drm/i915/guc: Decouple GuC engine id from ring id (rev2) Patchwork
@ 2016-01-25 12:11 ` Patchwork
4 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2016-01-25 12:11 UTC (permalink / raw)
To: yu.dai; +Cc: intel-gfx
== Summary ==
Built on 8fe9e785ae04fa7c37f7935cff12d62e38054b60 drm-intel-nightly: 2016y-01m-21d-11h-02m-42s UTC integration manifest
Test kms_flip:
Subgroup basic-flip-vs-modeset:
pass -> DMESG-WARN (ilk-hp8440p) UNSTABLE
bdw-nuci7 total:140 pass:131 dwarn:0 dfail:0 fail:0 skip:9
bdw-ultra total:143 pass:137 dwarn:0 dfail:0 fail:0 skip:6
bsw-nuc-2 total:143 pass:119 dwarn:0 dfail:0 fail:0 skip:24
byt-nuc total:143 pass:128 dwarn:0 dfail:0 fail:0 skip:15
hsw-brixbox total:143 pass:136 dwarn:0 dfail:0 fail:0 skip:7
hsw-gt2 total:143 pass:139 dwarn:0 dfail:0 fail:0 skip:4
ilk-hp8440p total:143 pass:103 dwarn:2 dfail:0 fail:0 skip:38
ivb-t430s total:143 pass:137 dwarn:0 dfail:0 fail:0 skip:6
skl-i5k-2 total:143 pass:134 dwarn:1 dfail:0 fail:0 skip:8
snb-dellxps total:143 pass:129 dwarn:0 dfail:0 fail:0 skip:14
snb-x220t total:143 pass:129 dwarn:0 dfail:0 fail:1 skip:13
Results at /archive/results/CI_IGT_test/Patchwork_1258/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 9+ messages in thread
end of thread, other threads:[~2016-01-25 12:11 UTC | newest]
Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-01-22 23:06 [PATCH] drm/i915/guc: Decouple GuC engine id from ring id yu.dai
2016-01-23 8:14 ` ✓ Fi.CI.BAT: success for " Patchwork
2016-01-23 18:25 ` [PATCH] " Chris Wilson
2016-01-23 18:51 ` Yu Dai
2016-01-23 19:58 ` [PATCH v2] " yu.dai
2016-01-25 10:32 ` Tvrtko Ursulin
2016-01-24 7:43 ` ✗ Fi.CI.BAT: warning for drm/i915/guc: Decouple GuC engine id from ring id (rev2) Patchwork
2016-01-25 11:03 ` Tvrtko Ursulin
2016-01-25 12:11 ` ✓ Fi.CI.BAT: success " Patchwork
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.