From: John Harrison <John.C.Harrison@Intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 14/42] drm/i915: Use a radixtree for random access to the object's backing storage
Date: Tue, 11 Oct 2016 11:15:45 +0100
Message-ID: <018cb0b8-da63-2bbc-c668-34f78397ca85@Intel.com>
In-Reply-To: <27ae781e-e121-8262-b170-2cb83b8f8cb0@linux.intel.com>

On 11/10/2016 10:32, Tvrtko Ursulin wrote:
>
> On 07/10/2016 10:46, Chris Wilson wrote:
>> A while ago we switched from a contiguous array of pages to an sglist,
>> as that was more convenient for mapping to hardware and avoided the
>> requirement for a vmalloc'ed array of pages on every object. However,
>> certain GEM API calls (like pwrite and pread, as well as performing
>> relocations) do desire access to individual struct pages. A quick hack
>> was to introduce a cache of the last access, so that finding the
>> following page was quick - this works so long as the caller desires
>> sequential access. Walking backwards, or having multiple callers, still
>> hits a slow linear search for each page. One solution is to store each
>> successful lookup in a radix tree.
>>
>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> ---
>>   drivers/gpu/drm/i915/i915_drv.h         |  57 ++++--------
>>   drivers/gpu/drm/i915/i915_gem.c         | 149 ++++++++++++++++++++++++++++----
>>   drivers/gpu/drm/i915/i915_gem_stolen.c  |   4 +-
>>   drivers/gpu/drm/i915/i915_gem_userptr.c |   4 +-
>>   4 files changed, 154 insertions(+), 60 deletions(-)
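
Not a comment on the patch itself, but for anyone following the thread who
has not met the kernel's radix tree API before, the pattern being adopted
here is simply an index -> pointer cache. A minimal, made-up sketch
(illustrative only, not the patch code):

    #include <linux/radix-tree.h>
    #include <linux/scatterlist.h>

    /* Cache page-index -> sg-entry lookups; GFP_KERNEL is used for any
     * node allocations made while inserting. */
    static RADIX_TREE(sg_cache, GFP_KERNEL);

    static int remember_sg(unsigned long page_idx, struct scatterlist *sg)
    {
            /* Can fail (e.g. -ENOMEM), so callers need a fallback path. */
            return radix_tree_insert(&sg_cache, page_idx, sg);
    }

    static struct scatterlist *recall_sg(unsigned long page_idx)
    {
            return radix_tree_lookup(&sg_cache, page_idx); /* NULL on a miss */
    }

Lookups return NULL on a miss and inserts can fail under memory pressure,
which is why a cache like this always needs a slow-path fallback as well.
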
>>
>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>> index bad97f1e5265..a96b446d8db4 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.h
>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>> @@ -2278,9 +2278,12 @@ struct drm_i915_gem_object {
>>         struct sg_table *pages;
>>       int pages_pin_count;
>> -    struct get_page {
>> -        struct scatterlist *sg;
>> -        int last;
>> +    struct i915_gem_object_page_iter {
>> +        struct scatterlist *sg_pos;
>> +        unsigned long sg_idx;
>> +
>> +        struct radix_tree_root radix;
>> +        struct mutex lock;
>>       } get_page;
>>       void *mapping;
>>   @@ -3168,45 +3171,21 @@ static inline int __sg_page_count(struct scatterlist *sg)
>>       return sg->length >> PAGE_SHIFT;
>>   }
>>   -struct page *
>> -i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
>> -
>> -static inline dma_addr_t
>> -i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
>> -{
>> -    if (n < obj->get_page.last) {
>> -        obj->get_page.sg = obj->pages->sgl;
>> -        obj->get_page.last = 0;
>> -    }
>> -
>> -    while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
>> -        obj->get_page.last += __sg_page_count(obj->get_page.sg++);
>> -        if (unlikely(sg_is_chain(obj->get_page.sg)))
>> -            obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
>> -    }
>> -
>> -    return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
>> -}
>> -
>> -static inline struct page *
>> -i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
>> -{
>> -    if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
>> -        return NULL;
>> +struct scatterlist *
>> +i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
>> +               unsigned long n, unsigned int *offset);
>>   -    if (n < obj->get_page.last) {
>> -        obj->get_page.sg = obj->pages->sgl;
>> -        obj->get_page.last = 0;
>> -    }
>> +struct page *
>> +i915_gem_object_get_page(struct drm_i915_gem_object *obj,
>> +             unsigned long n);
>>   -    while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
>> -        obj->get_page.last += __sg_page_count(obj->get_page.sg++);
>> -        if (unlikely(sg_is_chain(obj->get_page.sg)))
>> -            obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
>> -    }
>> +struct page *
>> +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
>> +                   unsigned long n);
>>   -    return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
>> -}
>> +dma_addr_t
>> +i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
>> +                unsigned long n);
>>     static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
>>   {
>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>> index ada837e393a7..af7d51f16658 100644
>> --- a/drivers/gpu/drm/i915/i915_gem.c
>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>> @@ -2292,6 +2292,15 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
>>       kfree(obj->pages);
>>   }
>>   +static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
>> +{
>> +    struct radix_tree_iter iter;
>> +    void **slot;
>> +
>> +    radix_tree_for_each_slot(slot, &obj->get_page.radix, &iter, 0)
>> +        radix_tree_delete(&obj->get_page.radix, iter.index);
>> +}
>> +
>>   int
>>   i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>>   {
>> @@ -2324,6 +2333,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>>           obj->mapping = NULL;
>>       }
>>   +    __i915_gem_object_reset_page_iter(obj);
>> +
>>       ops->put_pages(obj);
>>       obj->pages = NULL;
>>   @@ -2488,8 +2499,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>>         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
>>   -    obj->get_page.sg = obj->pages->sgl;
>> -    obj->get_page.last = 0;
>> +    obj->get_page.sg_pos = obj->pages->sgl;
>> +    obj->get_page.sg_idx = 0;
>>         return 0;
>>   }
>> @@ -4242,6 +4253,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>>         obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
>>       obj->madv = I915_MADV_WILLNEED;
>> +    INIT_RADIX_TREE(&obj->get_page.radix, GFP_KERNEL);
>> +    mutex_init(&obj->get_page.lock);
>>         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
>>   }
>> @@ -4904,21 +4917,6 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
>>       }
>>   }
>>   -/* Like i915_gem_object_get_page(), but mark the returned page dirty */
>> -struct page *
>> -i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
>> -{
>> -    struct page *page;
>> -
>> -    /* Only default objects have per-page dirty tracking */
>> -    if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
>> -        return NULL;
>> -
>> -    page = i915_gem_object_get_page(obj, n);
>> -    set_page_dirty(page);
>> -    return page;
>> -}
>> -
>>   /* Allocate a new GEM object and fill it with the supplied data */
>>   struct drm_i915_gem_object *
>>   i915_gem_object_create_from_data(struct drm_device *dev,
>> @@ -4959,3 +4957,120 @@ fail:
>>       i915_gem_object_put(obj);
>>       return ERR_PTR(ret);
>>   }
>> +
>> +static struct scatterlist *
>> +__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
>> +             unsigned long n, unsigned int *offset)
>> +{
>> +    struct scatterlist *sg = obj->pages->sgl;
>> +    int idx = 0;
>> +
>> +    while (idx + __sg_page_count(sg) <= n) {
>> +        idx += __sg_page_count(sg++);
>> +        if (unlikely(sg_is_chain(sg)))
>> +            sg = sg_chain_ptr(sg);
>> +    }
>> +
>> +    *offset = n - idx;
>> +    return sg;
>> +}
>> +
>> +struct scatterlist *
>> +i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
>> +               unsigned long n,
>> +               unsigned int *offset)
>> +{
>> +    struct i915_gem_object_page_iter *iter = &obj->get_page;
>> +    struct scatterlist *sg;
>> +
>> +    GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
>> +    GEM_BUG_ON(obj->pages_pin_count == 0);
>> +
>> +    if (n < READ_ONCE(iter->sg_idx))
>> +        goto lookup;
>> +
>
> Ok, so on lookup of "n" you build the radix tree for all sg entries 
> from zero to n. Therefore for a lookup below the current index, there 
> must be a radix tree entry, correct?
That is my understanding.
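
For example (a sketch of the intent with made-up names, not the actual
patch code): if the object's sg_table has two entries covering pages 0-3
and 4-7, then the first call for page 5 walks both entries and populates
tree indices 0-7, leaving iter->sg_idx at 8. Any later request for a page
below sg_idx should therefore find something in the tree, assuming none of
the radix_tree_insert() calls failed. Per sg entry, the population amounts
to roughly:

    #include <linux/radix-tree.h>
    #include <linux/scatterlist.h>

    /* For an sg entry whose first page has index 'base' and which spans
     * 'count' pages: the first page stores the real scatterlist pointer,
     * the remaining pages store an exceptional entry encoding 'base' so
     * that the real pointer can be found again on lookup. Insert errors
     * are ignored, as in the patch. */
    static void cache_one_sg(struct radix_tree_root *radix,
                             unsigned long base, unsigned int count,
                             struct scatterlist *sg)
    {
            unsigned long exception = RADIX_TREE_EXCEPTIONAL_ENTRY |
                                      (base << RADIX_TREE_EXCEPTIONAL_SHIFT);
            unsigned int i;

            radix_tree_insert(radix, base, sg);
            for (i = 1; i < count; i++)
                    radix_tree_insert(radix, base + i, (void *)exception);
    }

The lookup side then uses radix_tree_exception() to tell the two kinds of
entry apart and shifts the exceptional value back down to recover the base
index.
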

>
>> +    mutex_lock(&iter->lock);
>> +    if (n >= iter->sg_idx &&
>> +        n < iter->sg_idx + __sg_page_count(iter->sg_pos)) {
>> +        sg = iter->sg_pos;
>> +        *offset = n - iter->sg_idx;
>> +        mutex_unlock(&iter->lock);
>> +        return sg;
>> +    }
>> +
>> +    while (iter->sg_idx <= n) {
>> +        unsigned long exception;
>> +        unsigned int count, i;
>> +
>> +        radix_tree_insert(&iter->radix,
>> +                  iter->sg_idx,
>> +                  iter->sg_pos);
>> +
>> +        exception =
>> +            RADIX_TREE_EXCEPTIONAL_ENTRY |
>> +            iter->sg_idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
>> +        count = __sg_page_count(iter->sg_pos);
>> +        for (i = 1; i < count; i++)
>> +            radix_tree_insert(&iter->radix,
>> +                      iter->sg_idx + i,
>> +                      (void *)exception);
>> +
>> +        iter->sg_idx += count;
>> +        iter->sg_pos = __sg_next(iter->sg_pos);
>> +    }
>> +    mutex_unlock(&iter->lock);
>
> Why not avoid falling through the lookup and return the previous sg 
> from here?
I thought I worked through this before and decided there was a good
reason. However, looking at it again a few days later, I can't see an
obvious reason why not. As you note below, this is non-trivial stuff and
would definitely benefit from some explanatory comments.
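
Something along these lines at the top of i915_gem_object_get_sg() would go
a long way, for example (rough wording, and assuming I have read the
fallback path correctly):

    /*
     * obj->get_page caches the results of looking up pages in the
     * object's sg_table. sg_pos/sg_idx record how far the table has been
     * walked so far; every page index below sg_idx already has an entry
     * in the radix tree: the first page of each sg entry stores the
     * scatterlist pointer itself, while the following pages store an
     * exceptional entry encoding the index of that first page. Lookups at
     * or above sg_idx take iter->lock and extend the walk as needed;
     * lookups below it go straight to the radix tree under RCU. If a
     * radix_tree_insert() failed (e.g. -ENOMEM), the lookup can still
     * miss, in which case we fall back to a plain linear walk of the
     * sg_table.
     */
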

>
>> +
>> +lookup:
>> +    rcu_read_lock();
>> +    sg = radix_tree_lookup(&iter->radix, n);
>> +    rcu_read_unlock();
>> +
>> +    if (unlikely(!sg))
>> +        return __i915_gem_object_get_sg(obj, n, offset);
>> +
>
> Considering the first observation from above, when does it then happen 
> that there is no radix tree entry at this point?
>
> Or in other words, maybe some comments should be added to this patch - 
> there are none and it is not that trivial.
>
>> +    *offset = 0;
>> +    if (unlikely(radix_tree_exception(sg))) {
>> +        unsigned long base =
>> +            (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
>> +        sg = radix_tree_lookup(&iter->radix, base);
>> +        *offset = n - base;
>> +    }
>> +    return sg;
>> +}
>> +
>> +struct page *
>> +i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned long n)
>> +{
>> +    struct scatterlist *sg;
>> +    unsigned int offset;
>> +
>> +    GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
>> +
>> +    sg = i915_gem_object_get_sg(obj, n, &offset);
>> +    return nth_page(sg_page(sg), offset);
>> +}
>> +
>> +/* Like i915_gem_object_get_page(), but mark the returned page dirty */
>> +struct page *
>> +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
>> +                   unsigned long n)
>> +{
>> +    struct page *page;
>> +
>> +    page = i915_gem_object_get_page(obj, n);
>> +    if (!obj->dirty)
>> +        set_page_dirty(page);
>> +
>> +    return page;
>> +}
>> +
>> +dma_addr_t
>> +i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
>> +                unsigned long n)
>> +{
>> +    struct scatterlist *sg;
>> +    unsigned int offset;
>> +
>> +    sg = i915_gem_object_get_sg(obj, n, &offset);
>> +    return sg_dma_address(sg) + (offset << PAGE_SHIFT);
>> +}
>> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
>> index 59989e8ee5dc..24bad4e60ef0 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
>> @@ -594,8 +594,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
>>       if (obj->pages == NULL)
>>           goto cleanup;
>>   -    obj->get_page.sg = obj->pages->sgl;
>> -    obj->get_page.last = 0;
>> +    obj->get_page.sg_pos = obj->pages->sgl;
>> +    obj->get_page.sg_idx = 0;
>>         i915_gem_object_pin_pages(obj);
>>       obj->stolen = stolen;
>> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> index 1c891b92ac80..cb95789da76e 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
>> @@ -526,8 +526,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>>               if (ret == 0) {
>>                   list_add_tail(&obj->global_list,
>> &to_i915(dev)->mm.unbound_list);
>> -                obj->get_page.sg = obj->pages->sgl;
>> -                obj->get_page.last = 0;
>> +                obj->get_page.sg_pos = obj->pages->sgl;
>> +                obj->get_page.sg_idx = 0;
>>                   pinned = 0;
>>               }
>>           }
>
> Regards,
>
> Tvrtko

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

Thread overview: 107+ messages
2016-10-07  9:45 Explicit fencing on multiple timelines, again Chris Wilson
2016-10-07  9:45 ` [PATCH 01/42] drm/i915: Allow disabling error capture Chris Wilson
2016-10-07  9:45 ` [PATCH 02/42] drm/i915: Stop the machine whilst capturing the GPU crash dump Chris Wilson
2016-10-07 10:11   ` Joonas Lahtinen
2016-10-07  9:45 ` [PATCH 03/42] drm/i915: Always use the GTT for error capture Chris Wilson
2016-10-07  9:45 ` [PATCH 04/42] drm/i915: Consolidate error object printing Chris Wilson
2016-10-07  9:45 ` [PATCH 05/42] drm/i915: Compress GPU objects in error state Chris Wilson
2016-10-07  9:45 ` [PATCH 06/42] drm/i915: Support asynchronous waits on struct fence from i915_gem_request Chris Wilson
2016-10-07  9:56   ` Joonas Lahtinen
2016-10-07 15:51   ` Tvrtko Ursulin
2016-10-07 16:12     ` Chris Wilson
2016-10-07 16:16       ` Tvrtko Ursulin
2016-10-07 16:37         ` Chris Wilson
2016-10-08  8:23           ` Tvrtko Ursulin
2016-10-08  8:58             ` Chris Wilson
2016-10-07  9:46 ` [PATCH 07/42] drm/i915: Allow i915_sw_fence_await_sw_fence() to allocate Chris Wilson
2016-10-07 16:10   ` Tvrtko Ursulin
2016-10-07 16:22     ` Chris Wilson
2016-10-08  8:21       ` Tvrtko Ursulin
2016-10-07  9:46 ` [PATCH 08/42] drm/i915: Rearrange i915_wait_request() accounting with callers Chris Wilson
2016-10-07  9:58   ` Joonas Lahtinen
2016-10-07  9:46 ` [PATCH 09/42] drm/i915: Remove unused i915_gem_active_wait() in favour of _unlocked() Chris Wilson
2016-10-07  9:46 ` [PATCH 10/42] drm/i915: Defer active reference until required Chris Wilson
2016-10-07 16:35   ` Tvrtko Ursulin
2016-10-07 16:58     ` Chris Wilson
2016-10-08  8:18       ` Tvrtko Ursulin
2016-10-07  9:46 ` [PATCH 11/42] drm/i915: Introduce an internal allocator for disposable private objects Chris Wilson
2016-10-07 10:01   ` Joonas Lahtinen
2016-10-07 16:52   ` Tvrtko Ursulin
2016-10-07 17:08     ` Chris Wilson
2016-10-08  8:12       ` Tvrtko Ursulin
2016-10-08  8:32         ` Chris Wilson
2016-10-08  8:34         ` [PATCH v2] " Chris Wilson
2016-10-10  7:01           ` Joonas Lahtinen
2016-10-10  8:11           ` Tvrtko Ursulin
2016-10-10  8:19             ` Chris Wilson
2016-10-10  8:25               ` Tvrtko Ursulin
2016-10-07  9:46 ` [PATCH 12/42] drm/i915: Reuse the active golden render state batch Chris Wilson
2016-10-07  9:46 ` [PATCH 13/42] drm/i915: Markup GEM API with lockdep asserts Chris Wilson
2016-10-07  9:46 ` [PATCH 14/42] drm/i915: Use a radixtree for random access to the object's backing storage Chris Wilson
2016-10-07 10:12   ` Joonas Lahtinen
2016-10-07 11:05     ` Chris Wilson
2016-10-07 11:33       ` Joonas Lahtinen
2016-10-07 13:36   ` John Harrison
2016-10-11  9:32   ` Tvrtko Ursulin
2016-10-11 10:15     ` John Harrison [this message]
2016-10-07  9:46 ` [PATCH 15/42] drm/i915: Use radixtree to jump start intel_partial_pages() Chris Wilson
2016-10-07 13:46   ` John Harrison
2016-10-07  9:46 ` [PATCH 16/42] drm/i915: Refactor object page API Chris Wilson
2016-10-10 10:54   ` John Harrison
2016-10-11 11:23   ` Tvrtko Ursulin
2016-10-13 11:04   ` Joonas Lahtinen
2016-10-13 11:10     ` Chris Wilson
2016-10-07  9:46 ` [PATCH 17/42] drm/i915: Pass around sg_table to get_pages/put_pages backend Chris Wilson
2016-10-14  9:12   ` Joonas Lahtinen
2016-10-14  9:24     ` Chris Wilson
2016-10-14  9:28   ` Tvrtko Ursulin
2016-10-14  9:43     ` Chris Wilson
2016-10-17 10:52       ` Tvrtko Ursulin
2016-10-17 11:08         ` Chris Wilson
2016-10-07  9:46 ` [PATCH 18/42] drm/i915: Move object backing storage manipulation to its own locking Chris Wilson
2016-10-13 12:46   ` Joonas Lahtinen
2016-10-13 12:56     ` Chris Wilson
2016-10-07  9:46 ` [PATCH 19/42] drm/i915/dmabuf: Acquire the backing storage outside of struct_mutex Chris Wilson
2016-10-13 11:54   ` Joonas Lahtinen
2016-10-07  9:46 ` [PATCH 20/42] drm/i915: Implement pread without struct-mutex Chris Wilson
2016-10-12 12:53   ` Joonas Lahtinen
2016-10-07  9:46 ` [PATCH 21/42] drm/i915: Implement pwrite " Chris Wilson
2016-10-13 11:17   ` Joonas Lahtinen
2016-10-13 11:54     ` Chris Wilson
2016-10-14  7:08       ` Joonas Lahtinen
2016-10-07  9:46 ` [PATCH 22/42] drm/i915: Acquire the backing storage outside of struct_mutex in set-domain Chris Wilson
2016-10-13 11:47   ` Joonas Lahtinen
2016-10-07  9:46 ` [PATCH 23/42] drm/i915: Move object release to a freelist + worker Chris Wilson
2016-10-11  9:52   ` John Harrison
2016-10-07  9:46 ` [PATCH 24/42] drm/i915: Treat a framebuffer reference as an active reference whilst shrinking Chris Wilson
2016-10-11  9:54   ` John Harrison
2016-10-07  9:46 ` [PATCH 25/42] drm/i915: Use lockless object free Chris Wilson
2016-10-11  9:56   ` John Harrison
2016-10-07  9:46 ` [PATCH 26/42] drm/i915: Move GEM activity tracking into a common struct reservation_object Chris Wilson
2016-10-07 10:10   ` Joonas Lahtinen
2016-10-07  9:46 ` [PATCH 27/42] drm: Add reference counting to drm_atomic_state Chris Wilson
2016-10-07  9:46 ` [PATCH 28/42] drm/i915: Restore nonblocking awaits for modesetting Chris Wilson
2016-10-07  9:46 ` [PATCH 29/42] drm/i915: Combine seqno + tracking into a global timeline struct Chris Wilson
2016-10-07  9:46 ` [PATCH 30/42] drm/i915: Queue the idling context switch after all other timelines Chris Wilson
2016-10-07  9:46 ` [PATCH 31/42] drm/i915: Wait first for submission, before waiting for request completion Chris Wilson
2016-10-07  9:46 ` [PATCH 32/42] drm/i915: Introduce a global_seqno for each request Chris Wilson
2016-10-07  9:46 ` [PATCH 33/42] drm/i915: Rename ->emit_request to ->emit_breadcrumb Chris Wilson
2016-10-07  9:46 ` [PATCH 34/42] drm/i915: Record space required for breadcrumb emission Chris Wilson
2016-10-07  9:46 ` [PATCH 35/42] drm/i915: Defer " Chris Wilson
2016-10-07  9:46 ` [PATCH 36/42] drm/i915: Move the global sync optimisation to the timeline Chris Wilson
2016-10-07  9:46 ` [PATCH 37/42] drm/i915: Create a unique name for the context Chris Wilson
2016-10-07  9:46 ` [PATCH 38/42] drm/i915: Reserve space in the global seqno during request allocation Chris Wilson
2016-10-07  9:46 ` [PATCH 39/42] drm/i915: Defer setting of global seqno on request to submission Chris Wilson
2016-10-07 10:25   ` Joonas Lahtinen
2016-10-07 10:27   ` Joonas Lahtinen
2016-10-07 11:03     ` Chris Wilson
2016-10-07 11:10       ` Joonas Lahtinen
2016-10-07  9:46 ` [PATCH 40/42] drm/i915: Enable multiple timelines Chris Wilson
2016-10-07 10:29   ` Joonas Lahtinen
2016-10-07 11:00     ` Chris Wilson
2016-10-07 11:07       ` Joonas Lahtinen
2016-10-07  9:46 ` [PATCH 41/42] drm/i915: Enable userspace to opt-out of implicit fencing Chris Wilson
2016-10-07  9:46 ` [PATCH 42/42] drm/i915: Support explicit fencing for execbuf Chris Wilson
2016-10-07 10:19 ` ✗ Fi.CI.BAT: warning for series starting with [01/42] drm/i915: Allow disabling error capture Patchwork
2016-10-10  7:23 ` Patchwork
2016-10-10 15:31 ` ✗ Fi.CI.BAT: failure for series starting with [01/42] drm/i915: Allow disabling error capture (rev2) Patchwork
