From: Jesse Barnes <jbarnes@virtuousgeek.org>
To: John Harrison <John.C.Harrison@Intel.com>,
	Intel-GFX@Lists.FreeDesktop.Org
Subject: Re: [PATCH v5 24/35] drm/i915: Added trace points to scheduler
Date: Fri, 26 Feb 2016 09:12:42 -0800	[thread overview]
Message-ID: <56D0878A.707@virtuousgeek.org> (raw)
In-Reply-To: <56D07572.9090807@Intel.com>

On 02/26/2016 07:55 AM, John Harrison wrote:
> On 23/02/2016 20:42, Jesse Barnes wrote:
>> On 02/18/2016 06:27 AM, John.C.Harrison@Intel.com wrote:
>>> From: John Harrison <John.C.Harrison@Intel.com>
>>>
>>> Added trace points to the scheduler to track all the various events,
>>> node state transitions and other interesting things that occur.
>>>
>>> v2: Updated for new request completion tracking implementation.
>>>
>>> v3: Updated for changes to node kill code.
>>>
>>> v4: Wrapped some long lines to keep the style checker happy.
>>>
>>> For: VIZ-1587
>>> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
>>> ---
>>>   drivers/gpu/drm/i915/i915_gem_execbuffer.c |   2 +
>>>   drivers/gpu/drm/i915/i915_scheduler.c      |  26 ++++
>>>   drivers/gpu/drm/i915/i915_trace.h          | 196 +++++++++++++++++++++++++++++
>>>   drivers/gpu/drm/i915/intel_lrc.c           |   2 +
>>>   4 files changed, 226 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>>> index b9ad0fd..d4de8c7 100644
>>> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>>> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
>>> @@ -1272,6 +1272,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>>>         i915_gem_execbuffer_move_to_active(vmas, params->request);
>>>   +    trace_i915_gem_ring_queue(ring, params);
>>> +
>>>       qe = container_of(params, typeof(*qe), params);
>>>       ret = i915_scheduler_queue_execbuffer(qe);
>>>       if (ret)
>>> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
>>> index 47d7de4..e56ce08 100644
>>> --- a/drivers/gpu/drm/i915/i915_scheduler.c
>>> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
>>> @@ -88,6 +88,8 @@ static void i915_scheduler_node_requeue(struct i915_scheduler_queue_entry *node)
>>>       /* Seqno will be reassigned on relaunch */
>>>       node->params.request->seqno = 0;
>>>       node->status = i915_sqs_queued;
>>> +    trace_i915_scheduler_unfly(node->params.ring, node);
>>> +    trace_i915_scheduler_node_state_change(node->params.ring, node);
>>>   }
>>>     /*
>>> @@ -99,7 +101,11 @@ static void i915_scheduler_node_kill(struct i915_scheduler_queue_entry *node)
>>>       WARN_ON(!node);
>>>       WARN_ON(I915_SQS_IS_COMPLETE(node));
>>>   +    if (I915_SQS_IS_FLYING(node))
>>> +        trace_i915_scheduler_unfly(node->params.ring, node);
>>> +
>>>       node->status = i915_sqs_dead;
>>> +    trace_i915_scheduler_node_state_change(node->params.ring, node);
>>>   }
>>>     /* Mark a node as in flight on the hardware. */
>>> @@ -124,6 +130,9 @@ static int i915_scheduler_node_fly(struct i915_scheduler_queue_entry *node)
>>>         node->status = i915_sqs_flying;
>>>   +    trace_i915_scheduler_fly(ring, node);
>>> +    trace_i915_scheduler_node_state_change(ring, node);
>>> +
>>>       if (!(scheduler->flags[ring->id] & i915_sf_interrupts_enabled)) {
>>>           bool success = true;
>>>   @@ -280,6 +289,8 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
>>>           INIT_LIST_HEAD(&best->link);
>>>           best->status  = i915_sqs_popped;
>>>   +        trace_i915_scheduler_node_state_change(ring, best);
>>> +
>>>           ret = 0;
>>>       } else {
>>>           /* Can only get here if:
>>> @@ -297,6 +308,8 @@ static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
>>>           }
>>>       }
>>>   +    trace_i915_scheduler_pop_from_queue(ring, best);
>>> +
>>>       *pop_node = best;
>>>       return ret;
>>>   }
>>> @@ -506,6 +519,8 @@ static int i915_scheduler_queue_execbuffer_bypass(struct i915_scheduler_queue_en
>>>       struct i915_scheduler *scheduler = dev_priv->scheduler;
>>>       int ret;
>>>   +    trace_i915_scheduler_queue(qe->params.ring, qe);
>>> +
>>>       intel_ring_reserved_space_cancel(qe->params.request->ringbuf);
>>>         scheduler->flags[qe->params.ring->id] |= i915_sf_submitting;
>>> @@ -628,6 +643,9 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
>>>       not_flying = i915_scheduler_count_flying(scheduler, ring) <
>>>                            scheduler->min_flying;
>>>   +    trace_i915_scheduler_queue(ring, node);
>>> +    trace_i915_scheduler_node_state_change(ring, node);
>>> +
>>>       spin_unlock_irq(&scheduler->lock);
>>>         if (not_flying)
>>> @@ -657,6 +675,8 @@ bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
>>>       struct i915_scheduler_queue_entry *node = req->scheduler_qe;
>>>       unsigned long flags;
>>>   +    trace_i915_scheduler_landing(req);
>>> +
>>>       if (!node)
>>>           return false;
>>>   @@ -670,6 +690,8 @@ bool i915_scheduler_notify_request(struct drm_i915_gem_request *req)
>>>       else
>>>           node->status = i915_sqs_complete;
>>>   +    trace_i915_scheduler_node_state_change(req->ring, node);
>>> +
>>>       spin_unlock_irqrestore(&scheduler->lock, flags);
>>>         return true;
>>> @@ -877,6 +899,8 @@ static bool i915_scheduler_remove(struct i915_scheduler *scheduler,
>>>       /* Launch more packets now? */
>>>       do_submit = (queued > 0) && (flying < scheduler->min_flying);
>>>   +    trace_i915_scheduler_remove(ring, min_seqno, do_submit);
>>> +
>>>       spin_unlock_irq(&scheduler->lock);
>>>         return do_submit;
>>> @@ -912,6 +936,8 @@ static void i915_scheduler_process_work(struct intel_engine_cs *ring)
>>>           node = list_first_entry(&remove, typeof(*node), link);
>>>           list_del(&node->link);
>>>   +        trace_i915_scheduler_destroy(ring, node);
>>> +
>>>           /* Free up all the DRM references */
>>>           i915_scheduler_clean_node(node);
>>>   diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
>>> index 455c215..c3c4e58 100644
>>> --- a/drivers/gpu/drm/i915/i915_trace.h
>>> +++ b/drivers/gpu/drm/i915/i915_trace.h
>>> @@ -9,6 +9,7 @@
>>>   #include "i915_drv.h"
>>>   #include "intel_drv.h"
>>>   #include "intel_ringbuffer.h"
>>> +#include "i915_scheduler.h"
>>>     #undef TRACE_SYSTEM
>>>   #define TRACE_SYSTEM i915
>>> @@ -826,6 +827,201 @@ TRACE_EVENT(switch_mm,
>>>             __entry->dev, __entry->ring, __entry->to, __entry->vm)
>>>   );
>>>   +TRACE_EVENT(i915_scheduler_queue,
>>> +        TP_PROTO(struct intel_engine_cs *ring,
>>> +             struct i915_scheduler_queue_entry *node),
>>> +        TP_ARGS(ring, node),
>>> +
>>> +        TP_STRUCT__entry(
>>> +                 __field(u32, ring)
>>> +                 __field(u32, uniq)
>>> +                 __field(u32, seqno)
>>> +                 ),
>>> +
>>> +        TP_fast_assign(
>>> +               __entry->ring  = ring->id;
>>> +               __entry->uniq  = node ? node->params.request->uniq  : 0;
>>> +               __entry->seqno = node ? node->params.request->seqno : 0;
>>> +               ),
>>> +
>>> +        TP_printk("ring=%d, uniq=%d, seqno=%d",
>>> +              __entry->ring, __entry->uniq, __entry->seqno)
>>> +);
>>> +
>>> +TRACE_EVENT(i915_scheduler_fly,
>>> +        TP_PROTO(struct intel_engine_cs *ring,
>>> +             struct i915_scheduler_queue_entry *node),
>>> +        TP_ARGS(ring, node),
>>> +
>>> +        TP_STRUCT__entry(
>>> +                 __field(u32, ring)
>>> +                 __field(u32, uniq)
>>> +                 __field(u32, seqno)
>>> +                 ),
>>> +
>>> +        TP_fast_assign(
>>> +               __entry->ring  = ring->id;
>>> +               __entry->uniq  = node ? node->params.request->uniq  : 0;
>>> +               __entry->seqno = node ? node->params.request->seqno : 0;
>>> +               ),
>>> +
>>> +        TP_printk("ring=%d, uniq=%d, seqno=%d",
>>> +              __entry->ring, __entry->uniq, __entry->seqno)
>>> +);
>>> +
>>> +TRACE_EVENT(i915_scheduler_unfly,
>>> +        TP_PROTO(struct intel_engine_cs *ring,
>>> +             struct i915_scheduler_queue_entry *node),
>>> +        TP_ARGS(ring, node),
>>> +
>>> +        TP_STRUCT__entry(
>>> +                 __field(u32, ring)
>>> +                 __field(u32, uniq)
>>> +                 __field(u32, seqno)
>>> +                 ),
>>> +
>>> +        TP_fast_assign(
>>> +               __entry->ring  = ring->id;
>>> +               __entry->uniq  = node ? node->params.request->uniq  : 0;
>>> +               __entry->seqno = node ? node->params.request->seqno : 0;
>>> +               ),
>>> +
>>> +        TP_printk("ring=%d, uniq=%d, seqno=%d",
>>> +              __entry->ring, __entry->uniq, __entry->seqno)
>>> +);
>>> +
>>> +TRACE_EVENT(i915_scheduler_landing,
>>> +        TP_PROTO(struct drm_i915_gem_request *req),
>>> +        TP_ARGS(req),
>>> +
>>> +        TP_STRUCT__entry(
>>> +                 __field(u32, ring)
>>> +                 __field(u32, uniq)
>>> +                 __field(u32, seqno)
>>> +                 __field(u32, status)
>>> +                 ),
>>> +
>>> +        TP_fast_assign(
>>> +               __entry->ring   = req->ring->id;
>>> +               __entry->uniq   = req->uniq;
>>> +               __entry->seqno  = req->seqno;
>>> +               __entry->status = req->scheduler_qe ?
>>> +                        req->scheduler_qe->status : ~0U;
>>> +               ),
>>> +
>>> +        TP_printk("ring=%d, uniq=%d, seqno=%d, status=%d",
>>> +              __entry->ring, __entry->uniq, __entry->seqno,
>>> +              __entry->status)
>>> +);
>>> +
>>> +TRACE_EVENT(i915_scheduler_remove,
>>> +        TP_PROTO(struct intel_engine_cs *ring,
>>> +             u32 min_seqno, bool do_submit),
>>> +        TP_ARGS(ring, min_seqno, do_submit),
>>> +
>>> +        TP_STRUCT__entry(
>>> +                 __field(u32, ring)
>>> +                 __field(u32, min_seqno)
>>> +                 __field(bool, do_submit)
>>> +                 ),
>>> +
>>> +        TP_fast_assign(
>>> +               __entry->ring      = ring->id;
>>> +               __entry->min_seqno = min_seqno;
>>> +               __entry->do_submit = do_submit;
>>> +               ),
>>> +
>>> +        TP_printk("ring=%d, min_seqno = %d, do_submit=%d",
>>> +              __entry->ring, __entry->min_seqno, __entry->do_submit)
>>> +);
>>> +
>>> +TRACE_EVENT(i915_scheduler_destroy,
>>> +        TP_PROTO(struct intel_engine_cs *ring,
>>> +             struct i915_scheduler_queue_entry *node),
>>> +        TP_ARGS(ring, node),
>>> +
>>> +        TP_STRUCT__entry(
>>> +                 __field(u32, ring)
>>> +                 __field(u32, uniq)
>>> +                 __field(u32, seqno)
>>> +                 ),
>>> +
>>> +        TP_fast_assign(
>>> +               __entry->ring  = ring->id;
>>> +               __entry->uniq  = node ? node->params.request->uniq  : 0;
>>> +               __entry->seqno = node ? node->params.request->seqno : 0;
>>> +               ),
>>> +
>>> +        TP_printk("ring=%d, uniq=%d, seqno=%d",
>>> +              __entry->ring, __entry->uniq, __entry->seqno)
>>> +);
>>> +
>>> +TRACE_EVENT(i915_scheduler_pop_from_queue,
>>> +        TP_PROTO(struct intel_engine_cs *ring,
>>> +             struct i915_scheduler_queue_entry *node),
>>> +        TP_ARGS(ring, node),
>>> +
>>> +        TP_STRUCT__entry(
>>> +                 __field(u32, ring)
>>> +                 __field(u32, uniq)
>>> +                 __field(u32, seqno)
>>> +                 ),
>>> +
>>> +        TP_fast_assign(
>>> +               __entry->ring  = ring->id;
>>> +               __entry->uniq  = node ? node->params.request->uniq  : 0;
>>> +               __entry->seqno = node ? node->params.request->seqno : 0;
>>> +               ),
>>> +
>>> +        TP_printk("ring=%d, uniq=%d, seqno=%d",
>>> +              __entry->ring, __entry->uniq, __entry->seqno)
>>> +);
>>> +
>>> +TRACE_EVENT(i915_scheduler_node_state_change,
>>> +        TP_PROTO(struct intel_engine_cs *ring,
>>> +             struct i915_scheduler_queue_entry *node),
>>> +        TP_ARGS(ring, node),
>>> +
>>> +        TP_STRUCT__entry(
>>> +                 __field(u32, ring)
>>> +                 __field(u32, uniq)
>>> +                 __field(u32, seqno)
>>> +                 __field(u32, status)
>>> +                 ),
>>> +
>>> +        TP_fast_assign(
>>> +               __entry->ring   = ring->id;
>>> +               __entry->uniq   = node ? node->params.request->uniq  : 0;
>>> +               __entry->seqno  = node->params.request->seqno;
>>> +               __entry->status = node->status;
>>> +               ),
>>> +
>>> +        TP_printk("ring=%d, uniq=%d, seqno=%d, status=%d",
>>> +              __entry->ring, __entry->uniq, __entry->seqno,
>>> +              __entry->status)
>>> +);
>>> +
>>> +TRACE_EVENT(i915_gem_ring_queue,
>>> +        TP_PROTO(struct intel_engine_cs *ring,
>>> +             struct i915_execbuffer_params *params),
>>> +        TP_ARGS(ring, params),
>>> +
>>> +        TP_STRUCT__entry(
>>> +                 __field(u32, ring)
>>> +                 __field(u32, uniq)
>>> +                 __field(u32, seqno)
>>> +                 ),
>>> +
>>> +        TP_fast_assign(
>>> +               __entry->ring  = ring->id;
>>> +               __entry->uniq  = params->request->uniq;
>>> +               __entry->seqno = params->request->seqno;
>>> +               ),
>>> +
>>> +        TP_printk("ring=%d, uniq=%d, seqno=%d", __entry->ring,
>>> +              __entry->uniq, __entry->seqno)
>>> +);
>>> +
>>>   #endif /* _I915_TRACE_H_ */
>>>     /* This part must be outside protection */
>>> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
>>> index 9c7a79a..2b9f49c 100644
>>> --- a/drivers/gpu/drm/i915/intel_lrc.c
>>> +++ b/drivers/gpu/drm/i915/intel_lrc.c
>>> @@ -954,6 +954,8 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>>>         i915_gem_execbuffer_move_to_active(vmas, params->request);
>>>   +    trace_i915_gem_ring_queue(ring, params);
>>> +
>>>       qe = container_of(params, typeof(*qe), params);
>>>       ret = i915_scheduler_queue_execbuffer(qe);
>>>       if (ret)
>>>
>> Oops, forgot to ask if there are any tools for igt that make use of these, e.g. some kind of scheduler top util that looks for bubbles or starved batches or something.
> Nothing at the moment. They have been used to debug issues by looking through trace logs, and the validation team has tests that hook into the various trace points to verify the internal operation of the scheduler and other parts of the i915 driver.

Nice.  I wasn't asking because it's a blocker or anything, just curious.
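
Just to illustrate the kind of thing I had in mind (purely a sketch, not
something I'm asking for): a tiny "scheduler top" could sit on these
tracepoints from userspace. The snippet below is untested; it only assumes
the event names and the ring=/uniq=/seqno= fields from the TP_printk
strings in this patch plus the usual tracefs layout, and the queued/flying
counting is a rough heuristic, not the scheduler's real bookkeeping.

#!/usr/bin/env python3
# Untested sketch: enable the new i915 scheduler tracepoints and keep a
# rough per-ring count of queued vs. flying batches from trace_pipe.
# Assumes the standard tracefs layout and the event names from this patch.
import re
from collections import defaultdict

TRACEFS = "/sys/kernel/debug/tracing"
EVENTS = ("i915_scheduler_queue", "i915_scheduler_fly",
          "i915_scheduler_unfly", "i915_scheduler_landing")

def enable_events():
    for ev in EVENTS:
        with open("%s/events/i915/%s/enable" % (TRACEFS, ev), "w") as f:
            f.write("1")

def watch():
    queued = defaultdict(int)   # per-ring count of queued nodes
    flying = defaultdict(int)   # per-ring count of flying nodes
    ev_re = re.compile(r"(i915_scheduler_\w+): ring=(\d+)")
    with open("%s/trace_pipe" % TRACEFS) as pipe:
        for line in pipe:
            m = ev_re.search(line)
            if not m:
                continue
            ev, ring = m.group(1), int(m.group(2))
            if ev == "i915_scheduler_queue":
                queued[ring] += 1
            elif ev == "i915_scheduler_fly":
                queued[ring] = max(queued[ring] - 1, 0)
                flying[ring] += 1
            elif ev in ("i915_scheduler_unfly", "i915_scheduler_landing"):
                flying[ring] = max(flying[ring] - 1, 0)
            # A ring that keeps a non-zero queue while flying stays at
            # zero for a while would be the "bubble" case worth flagging.
            print("  ".join("ring%d q=%d f=%d" % (r, queued[r], flying[r])
                            for r in sorted(set(queued) | set(flying))))

if __name__ == "__main__":
    enable_events()
    watch()

Obviously the real thing would want the uniq/seqno values and some timing
to spot starved batches, but that's roughly the shape of it.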

Thanks,
Jesse

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Thread overview: 82+ messages
2016-02-18 14:26 [PATCH v5 00/35] GPU scheduler for i915 driver John.C.Harrison
2016-02-18 14:26 ` [PATCH v5 01/35] drm/i915: Add total count to context status debugfs output John.C.Harrison
2016-02-18 14:26 ` [PATCH v5 02/35] drm/i915: Prelude to splitting i915_gem_do_execbuffer in two John.C.Harrison
2016-02-18 14:26 ` [PATCH v5 03/35] drm/i915: Split i915_dem_do_execbuffer() in half John.C.Harrison
2016-02-18 14:26 ` [PATCH v5 04/35] drm/i915: Cache request pointer in *_submission_final() John.C.Harrison
2016-02-18 14:26 ` [PATCH v5 05/35] drm/i915: Re-instate request->uniq because it is extremely useful John.C.Harrison
2016-02-18 14:26 ` [PATCH v5 06/35] drm/i915: Start of GPU scheduler John.C.Harrison
2016-02-19 13:03   ` Joonas Lahtinen
2016-02-19 17:03     ` John Harrison
2016-02-26  9:13       ` Joonas Lahtinen
2016-02-26 14:18         ` John Harrison
2016-02-18 14:26 ` [PATCH v5 07/35] drm/i915: Prepare retire_requests to handle out-of-order seqnos John.C.Harrison
2016-02-19 19:23   ` Jesse Barnes
2016-02-18 14:26 ` [PATCH v5 08/35] drm/i915: Disable hardware semaphores when GPU scheduler is enabled John.C.Harrison
2016-02-19 19:27   ` Jesse Barnes
2016-02-18 14:26 ` [PATCH v5 09/35] drm/i915: Force MMIO flips when scheduler enabled John.C.Harrison
2016-02-19 19:28   ` Jesse Barnes
2016-02-19 19:53     ` Ville Syrjälä
2016-02-19 20:01       ` Jesse Barnes
2016-02-22  9:41         ` Lankhorst, Maarten
2016-02-22 12:53           ` John Harrison
2016-02-20  9:22     ` Chris Wilson
2016-02-22 20:42       ` Jesse Barnes
2016-02-23 11:16         ` Chris Wilson
2016-02-18 14:26 ` [PATCH v5 10/35] drm/i915: Added scheduler hook when closing DRM file handles John.C.Harrison
2016-03-01  8:59   ` Joonas Lahtinen
2016-03-01 14:52     ` John Harrison
2016-02-18 14:26 ` [PATCH v5 11/35] drm/i915: Added scheduler hook into i915_gem_request_notify() John.C.Harrison
2016-03-01  9:10   ` Joonas Lahtinen
2016-02-18 14:27 ` [PATCH v5 12/35] drm/i915: Added deferred work handler for scheduler John.C.Harrison
2016-03-01  9:16   ` Joonas Lahtinen
2016-03-01 15:12     ` John Harrison
2016-02-18 14:27 ` [PATCH v5 13/35] drm/i915: Redirect execbuffer_final() via scheduler John.C.Harrison
2016-02-19 19:33   ` Jesse Barnes
2016-02-18 14:27 ` [PATCH v5 14/35] drm/i915: Keep the reserved space mechanism happy John.C.Harrison
2016-02-19 19:36   ` Jesse Barnes
2016-02-18 14:27 ` [PATCH v5 15/35] drm/i915: Added tracking/locking of batch buffer objects John.C.Harrison
2016-02-19 19:42   ` Jesse Barnes
2016-02-18 14:27 ` [PATCH v5 16/35] drm/i915: Hook scheduler node clean up into retire requests John.C.Harrison
2016-02-19 19:44   ` Jesse Barnes
2016-02-18 14:27 ` [PATCH v5 17/35] drm/i915: Added scheduler support to __wait_request() calls John.C.Harrison
2016-03-01 10:02   ` Joonas Lahtinen
2016-03-11 11:47     ` John Harrison
2016-02-18 14:27 ` [PATCH v5 18/35] drm/i915: Added scheduler support to page fault handler John.C.Harrison
2016-02-19 19:45   ` Jesse Barnes
2016-02-18 14:27 ` [PATCH v5 19/35] drm/i915: Added scheduler flush calls to ring throttle and idle functions John.C.Harrison
2016-03-07 11:31   ` Joonas Lahtinen
2016-03-11 16:22     ` John Harrison
2016-02-18 14:27 ` [PATCH v5 20/35] drm/i915: Add scheduler hook to GPU reset John.C.Harrison
2016-02-23 20:27   ` Jesse Barnes
2016-02-18 14:27 ` [PATCH v5 21/35] drm/i915: Added a module parameter to allow the scheduler to be disabled John.C.Harrison
2016-02-23 20:29   ` Jesse Barnes
2016-02-18 14:27 ` [PATCH v5 22/35] drm/i915: Support for 'unflushed' ring idle John.C.Harrison
2016-02-23 20:35   ` Jesse Barnes
2016-02-18 14:27 ` [PATCH v5 23/35] drm/i915: Defer seqno allocation until actual hardware submission time John.C.Harrison
2016-03-07 12:16   ` Joonas Lahtinen
2016-02-18 14:27 ` [PATCH v5 24/35] drm/i915: Added trace points to scheduler John.C.Harrison
2016-02-23 20:42   ` Jesse Barnes
2016-02-23 20:42   ` Jesse Barnes
2016-02-26 15:55     ` John Harrison
2016-02-26 17:12       ` Jesse Barnes [this message]
2016-02-18 14:27 ` [PATCH v5 25/35] drm/i915: Added scheduler queue throttling by DRM file handle John.C.Harrison
2016-02-23 21:02   ` Jesse Barnes
2016-03-01 15:52     ` John Harrison
2016-02-18 14:27 ` [PATCH v5 26/35] drm/i915: Added debugfs interface to scheduler tuning parameters John.C.Harrison
2016-02-23 21:06   ` Jesse Barnes
2016-03-11 16:28     ` John Harrison
2016-03-11 17:25       ` Jesse Barnes
2016-02-18 14:27 ` [PATCH v5 27/35] drm/i915: Added debug state dump facilities to scheduler John.C.Harrison
2016-03-07 12:31   ` Joonas Lahtinen
2016-03-11 16:38     ` John Harrison
2016-03-15 10:53       ` Joonas Lahtinen
2016-02-18 14:27 ` [PATCH v5 28/35] drm/i915: Add early exit to execbuff_final() if insufficient ring space John.C.Harrison
2016-02-18 14:27 ` [PATCH v5 29/35] drm/i915: Added scheduler statistic reporting to debugfs John.C.Harrison
2016-02-18 14:27 ` [PATCH v5 30/35] drm/i915: Add scheduler support functions for TDR John.C.Harrison
2016-02-18 14:27 ` [PATCH v5 31/35] drm/i915: Scheduler state dump via debugfs John.C.Harrison
2016-02-18 14:27 ` [PATCH v5 32/35] drm/i915: Enable GPU scheduler by default John.C.Harrison
2016-02-18 14:27 ` [PATCH v5 33/35] drm/i915: Add scheduling priority to per-context parameters John.C.Harrison
2016-02-18 14:27 ` [PATCH v5 34/35] drm/i915: Add support for retro-actively banning batch buffers John.C.Harrison
2016-02-18 14:27 ` [PATCH v5 35/35] drm/i915: Allow scheduler to manage inter-ring object synchronisation John.C.Harrison
2016-02-18 14:27 ` [PATCH 01/20] igt/gem_ctx_param_basic: Updated to support scheduler priority interface John.C.Harrison
2016-02-18 15:30 ` ✗ Fi.CI.BAT: failure for GPU scheduler for i915 driver Patchwork
