* [Intel-gfx] [RFC 1/2] drm/i915/gem: Convert vm idr to xarray @ 2020-01-18 21:29 Chris Wilson 2020-01-18 21:29 ` [Intel-gfx] [RFC 2/2] drm/i915/gem: Introduce VM_WAIT, a futex-lite operation Chris Wilson ` (4 more replies) 0 siblings, 5 replies; 7+ messages in thread From: Chris Wilson @ 2020-01-18 21:29 UTC (permalink / raw) To: intel-gfx Replace the vm_idr + vm_idr_mutex with an XArray. The XArray data structure is now used to implement IDRs, and provides its own locking. We can simply remove the IDR wrapper and in the process also remove our extra mutex. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 80 ++++++--------------- drivers/gpu/drm/i915/i915_drv.h | 4 +- 2 files changed, 22 insertions(+), 62 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index a2e57e62af30..d2e4e8cbf4d4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -761,12 +761,6 @@ void i915_gem_driver_release__contexts(struct drm_i915_private *i915) flush_work(&i915->gem.contexts.free_work); } -static int vm_idr_cleanup(int id, void *p, void *data) -{ - i915_vm_put(p); - return 0; -} - static int gem_context_register(struct i915_gem_context *ctx, struct drm_i915_file_private *fpriv, u32 *id) @@ -803,9 +797,7 @@ int i915_gem_context_open(struct drm_i915_private *i915, u32 id; xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); - - mutex_init(&file_priv->vm_idr_lock); - idr_init_base(&file_priv->vm_idr, 1); + xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) { @@ -823,9 +815,8 @@ int i915_gem_context_open(struct drm_i915_private *i915, err_ctx: context_close(ctx); err: - idr_destroy(&file_priv->vm_idr); + xa_destroy(&file_priv->vm_xa); xa_destroy(&file_priv->context_xa); - mutex_destroy(&file_priv->vm_idr_lock); 
return err; } @@ -833,6 +824,7 @@ void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_private *i915 = file_priv->dev_priv; + struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long idx; @@ -840,9 +832,9 @@ void i915_gem_context_close(struct drm_file *file) context_close(ctx); xa_destroy(&file_priv->context_xa); - idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); - idr_destroy(&file_priv->vm_idr); - mutex_destroy(&file_priv->vm_idr_lock); + xa_for_each(&file_priv->vm_xa, idx, vm) + i915_vm_put(vm); + xa_destroy(&file_priv->vm_xa); contexts_flush_free(&i915->gem.contexts); } @@ -876,23 +868,13 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, goto err_put; } - err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + err = xa_alloc(&file_priv->vm_xa, &args->vm_id, + &ppgtt->vm, xa_limit_32b, GFP_KERNEL); if (err) goto err_put; - err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL); - if (err < 0) - goto err_unlock; - - GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */ - - mutex_unlock(&file_priv->vm_idr_lock); - - args->vm_id = err; return 0; -err_unlock: - mutex_unlock(&file_priv->vm_idr_lock); err_put: i915_vm_put(&ppgtt->vm); return err; @@ -904,8 +886,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_vm_control *args = data; struct i915_address_space *vm; - int err; - u32 id; if (args->flags) return -EINVAL; @@ -913,17 +893,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, if (args->extensions) return -EINVAL; - id = args->vm_id; - if (!id) - return -ENOENT; - - err = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (err) - return err; - - vm = idr_remove(&file_priv->vm_idr, id); - - mutex_unlock(&file_priv->vm_idr_lock); + vm = xa_erase(&file_priv->vm_xa, args->vm_id); if (!vm) return 
-ENOENT; @@ -1021,35 +991,27 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, struct drm_i915_gem_context_param *args) { struct i915_address_space *vm; - int ret; + int err = -ENODEV; + u32 id; if (!rcu_access_pointer(ctx->vm)) return -ENODEV; rcu_read_lock(); vm = context_get_vm_rcu(ctx); + if (vm) + err = xa_alloc(&file_priv->vm_xa, &id, vm, + xa_limit_32b, GFP_KERNEL); rcu_read_unlock(); + if (!err) { + i915_vm_open(vm); - ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (ret) - goto err_put; - - ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL); - GEM_BUG_ON(!ret); - if (ret < 0) - goto err_unlock; - - i915_vm_open(vm); - - args->size = 0; - args->value = ret; + args->size = 0; + args->value = id; + } - ret = 0; -err_unlock: - mutex_unlock(&file_priv->vm_idr_lock); -err_put: i915_vm_put(vm); - return ret; + return err; } static void set_ppgtt_barrier(void *data) @@ -1151,7 +1113,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, return -ENOENT; rcu_read_lock(); - vm = idr_find(&file_priv->vm_idr, args->value); + vm = xa_load(&file_priv->vm_xa, args->value); if (vm && !kref_get_unless_zero(&vm->ref)) vm = NULL; rcu_read_unlock(); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 077af22b8340..50abf9113b2f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -203,9 +203,7 @@ struct drm_i915_file_private { } mm; struct xarray context_xa; - - struct idr vm_idr; - struct mutex vm_idr_lock; /* guards vm_idr */ + struct xarray vm_xa; unsigned int bsd_engine; -- 2.25.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply related [flat|nested] 7+ messages in thread
* [Intel-gfx] [RFC 2/2] drm/i915/gem: Introduce VM_WAIT, a futex-lite operation 2020-01-18 21:29 [Intel-gfx] [RFC 1/2] drm/i915/gem: Convert vm idr to xarray Chris Wilson @ 2020-01-18 21:29 ` Chris Wilson 2020-01-18 22:17 ` Chris Wilson 2020-01-18 21:41 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray Patchwork ` (3 subsequent siblings) 4 siblings, 1 reply; 7+ messages in thread From: Chris Wilson @ 2020-01-18 21:29 UTC (permalink / raw) To: intel-gfx; +Cc: Kristian H . Kristensen Currently, we only allow waiting on the forward progress of an individual GEM object, or of a GEM execbuf fence. The primary purpose of the fence is to provide a scheduling primitive to order the execution flow of batches (cf VkSemaphore). Userspace instead uses values in memory to implement client fences, and has to mix busywaiting on the value coupled with a dma_fence in case it needs to sleep. It has no intermediate step where it can wait on the memory value itself to change, which is required for scenarios where the dma_fence may incur too much execution latency. The CPU equivalent is a futex-syscall used to setup a waiter/waker based on a memory location. This is used to implement an efficient sleep for pthread_mutex_t, where the fast uncontended path can be handled entirely in userspace. This patch implements a similar idea, where we take a virtual address in the client's ppGTT and install an interrupt handler to wake up the current task when the memory location passes the user supplied filter. It also allows the user to emit their own MI_USER_INTERRUPT within their batches after updating the value on the GPU to have sub-batch precision on the wakeup. Opens: - on attaching the waiter, we enable interrupts on all engines, irrespective of which are active to a VM. 
* we can optimise when to enable interrupts while the VM is active * we can extend the interface for the user to select which engines may wake us - we could return an fd wrapping the comparison operation on the memory address if we want to pass the waiter around different processes or reuse the waiter (with poll() + read() like timerfd). References: b2c97bc78919 ("anv/query: Busy-wait for available query entries") References: https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3279#note_377240 Testcase: igt/gem_vm_wait Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Jason Ekstrand <jason@jlekstrand.net> Cc: Kristian H. Kristensen <hoegsberg@google.com> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/i915_gem_ioctls.h | 2 + drivers/gpu/drm/i915/gem/i915_gem_vm.c | 380 +++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 43 +++ drivers/gpu/drm/i915/gt/intel_engine.h | 5 + drivers/gpu/drm/i915/gt/intel_engine_types.h | 1 + drivers/gpu/drm/i915/i915_drv.c | 1 + include/uapi/drm/i915_drm.h | 36 ++ 8 files changed, 469 insertions(+) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_vm.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 3c88d7d8c764..5e1441cf12d8 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -139,6 +139,7 @@ gem-y += \ gem/i915_gem_throttle.o \ gem/i915_gem_tiling.o \ gem/i915_gem_userptr.o \ + gem/i915_gem_vm.o \ gem/i915_gem_wait.o \ gem/i915_gemfs.o i915-y += \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h index 87d8b27f426d..92f265c84290 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h @@ -48,5 +48,7 @@ int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_wait_ioctl(struct drm_device 
*dev, void *data, struct drm_file *file); +int i915_gem_vm_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_vm.c b/drivers/gpu/drm/i915/gem/i915_gem_vm.c new file mode 100644 index 000000000000..d9a1de3ec4a9 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_vm.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/wait.h> + +#include <drm/drm_file.h> +#include <drm/drm_utils.h> + +#include "i915_drv.h" +#include "i915_gem_ioctls.h" +#include "i915_user_extensions.h" + +struct iova_wake { + struct task_struct *tsk; + void *ptr; + u64 value; + u64 mask; + u16 width; + u16 op; + bool cache_coherent; +}; + +static bool iova_compare(const struct iova_wake *wake) +{ + u64 value = wake->value & wake->mask; + u64 target = 0; + + GEM_BUG_ON(wake->width > sizeof(target)); + + if (!wake->cache_coherent) + drm_clflush_virt_range(wake->ptr, wake->width); + switch (wake->width) { + case 1: memcpy(&target, wake->ptr, 1); break; + case 2: memcpy(&target, wake->ptr, 2); break; + case 4: memcpy(&target, wake->ptr, 4); break; + case 8: memcpy(&target, wake->ptr, 8); break; + } + target &= wake->mask; + + switch (wake->op) { + case I915_VM_WAIT_EQ: + return value == target; + case I915_VM_WAIT_NEQ: + return value != target; + + case I915_VM_WAIT_GT: + return target > value; + case I915_VM_WAIT_GTE: + return target >= value; + + case I915_VM_WAIT_LT: + return target < value; + case I915_VM_WAIT_LTE: + return target <= value; + + case I915_VM_WAIT_AFTER: + switch (wake->width) { + case 1: return (s8)(target - value) > 0; + case 2: return (s16)(target - value) > 0; + case 4: return (s32)(target - value) > 0; + default: return (s64)(target - value) > 0; + } + + case I915_VM_WAIT_BEFORE: + switch (wake->width) { + case 1: return (s8)(target - value) < 0; + case 2: return (s16)(target - value) < 0; + case 4: return (s32)(target - value) < 0; + 
default: return (s64)(target - value) < 0; + } + + default: + return true; + } +} + +static int iova_wake(wait_queue_entry_t *curr, + unsigned int mode, int wake_flags, + void *key) +{ + struct iova_wake *wake = curr->private; + + if (!iova_compare(wake)) + return 0; + + return wake_up_process(wake->tsk); +} + +static int iova_wake_map(struct i915_vma *vma, + struct drm_i915_gem_vm_wait *arg, + struct iova_wake *wake) +{ + struct drm_i915_gem_object *obj = vma->obj; + u64 offset = arg->iova - vma->node.start; + + wake->tsk = current; + wake->value = arg->value; + wake->mask = arg->mask; + wake->op = arg->op; + + if (i915_gem_object_has_struct_page(obj)) { + struct page *page; + + page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + wake->ptr = kmap(page) + offset_in_page(offset); + + wake->cache_coherent = + obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ; + } else { + void *ptr; + + ptr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + wake->ptr = ptr + offset; + wake->cache_coherent = true; + } + + return 0; +} + +static void iova_wake_unmap(struct i915_vma *vma, + struct drm_i915_gem_vm_wait *arg, + struct iova_wake *wake) +{ + struct drm_i915_gem_object *obj = vma->obj; + + if (i915_gem_object_has_struct_page(obj)) { + u64 offset = arg->iova - vma->node.start; + + kunmap(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT)); + } else { + i915_gem_object_unpin_map(obj); + } +} + +static struct i915_vma * +find_vma_for_iova(struct i915_address_space *vm, u64 iova, unsigned int width) +{ + struct drm_mm_node *node; + struct i915_vma *vma; + + if (mutex_lock_interruptible(&vm->mutex)) + return ERR_PTR(-EINVAL); + + node = __drm_mm_interval_first(&vm->mm, iova, iova + width); + if (!node || node->color == I915_COLOR_UNEVICTABLE) { + vma = ERR_PTR(-ENOENT); + goto out_unlock; + } + + if (node->start > iova || iova + width > node->start + node->size) { + vma = ERR_PTR(-ENOENT); + goto out_unlock; + } + + vma = 
container_of(node, typeof(*vma), node); + i915_active_acquire(&vma->active); + +out_unlock: + mutex_unlock(&vm->mutex); + return vma; +} + +struct engine_wait { + struct wait_queue_entry wq_entry; + struct intel_engine_cs *engine; + struct engine_wait *next; +}; + +static int +add_engine_wait(struct engine_wait **head, + struct intel_engine_cs *engine, + struct iova_wake *wake) +{ + struct engine_wait *wait; + + wait = kmalloc(sizeof(*wait), GFP_KERNEL); + if (!wait) + return -ENOMEM; + + wait->engine = engine; + wait->wq_entry.flags = 0; + wait->wq_entry.private = wake; + wait->wq_entry.func = iova_wake; + intel_engine_add_wait(engine, &wait->wq_entry); + + wait->next = *head; + *head = wait; + + return 0; +} + +static int add_gt_wait(struct engine_wait **head, + struct intel_gt *gt, + struct iova_wake *wake) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err; + + for_each_engine(engine, gt, id) { + err = add_engine_wait(head, engine, wake); + if (err) + return err; + } + + return 0; +} + +static void remove_waits(struct engine_wait *wait) +{ + while (wait) { + struct engine_wait *next = wait->next; + + intel_engine_remove_wait(wait->engine, &wait->wq_entry); + kfree(wait); + + wait = next; + } +} + +static inline unsigned long nsecs_to_jiffies_timeout(const u64 n) +{ + /* nsecs_to_jiffies64() does not guard against overflow */ + if (NSEC_PER_SEC % HZ && + div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ) + return MAX_JIFFY_OFFSET; + + return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1); +} + +static unsigned long to_wait_timeout(const struct drm_i915_gem_vm_wait *arg) +{ + if (arg->flags & I915_VM_WAIT_ABSTIME) + return drm_timeout_abs_to_jiffies(arg->timeout); + + if (arg->timeout < 0) + return MAX_SCHEDULE_TIMEOUT; + + if (arg->timeout == 0) + return 0; + + return nsecs_to_jiffies_timeout(arg->timeout); +} + +int i915_gem_vm_wait_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct 
drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_vm_wait *arg = data; + struct engine_wait *wait = NULL; + struct i915_address_space *vm; + struct iova_wake wake; + unsigned long timeout; + struct i915_vma *vma; + ktime_t start; + int err; + + if (arg->flags & ~I915_VM_WAIT_ABSTIME) + return -EINVAL; + + switch (arg->op) { + case I915_VM_WAIT_EQ: + case I915_VM_WAIT_NEQ: + case I915_VM_WAIT_GT: + case I915_VM_WAIT_GTE: + case I915_VM_WAIT_LT: + case I915_VM_WAIT_LTE: + case I915_VM_WAIT_AFTER: + case I915_VM_WAIT_BEFORE: + break; + + default: + return -EINVAL; + } + + wake.width = fls64(arg->mask); + if (!wake.width) + return -EINVAL; + + /* Restrict the iova to be "naturally" aligned */ + wake.width = DIV_ROUND_UP(roundup_pow_of_two(wake.width), 8); + if (!IS_ALIGNED(arg->iova, wake.width)) + return -EINVAL; + + /* Natural alignment also means the iova cannot cross a page boundary */ + GEM_BUG_ON(arg->iova >> PAGE_SHIFT != + (arg->iova + wake.width) >> PAGE_SHIFT); + + rcu_read_lock(); + vm = xa_load(&file_priv->vm_xa, arg->vm_id); + if (vm && !kref_get_unless_zero(&vm->ref)) + vm = NULL; + rcu_read_unlock(); + if (!vm) + return -ENOENT; + + vma = find_vma_for_iova(vm, arg->iova, wake.width); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_vm; + } + + err = iova_wake_map(vma, arg, &wake); + if (err) + goto out_vma; + + err = i915_user_extensions(u64_to_user_ptr(arg->extensions), + NULL, 0, &wake); + if (err) + goto out_wake; + + if (iova_compare(&wake)) + goto out_wake; + + timeout = to_wait_timeout(arg); + if (!timeout) { + err = -ETIME; + goto out_vma; + } + + err = add_gt_wait(&wait, vm->gt, &wake); + if (err) + goto out_wait; + + start = ktime_get(); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + if (iova_compare(&wake)) + break; + + if (signal_pending(wake.tsk)) { + err = -ERESTARTSYS; + break; + } + + if (!timeout) { + err = -ETIME; + break; + } + + timeout = io_schedule_timeout(timeout); + } + 
__set_current_state(TASK_RUNNING); + + if (!(arg->flags & I915_VM_WAIT_ABSTIME) && arg->timeout > 0) { + arg->timeout -= ktime_to_ns(ktime_sub(ktime_get(), start)); + if (arg->timeout < 0) + arg->timeout = 0; + + /* + * Apparently ktime isn't accurate enough and occasionally has a + * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch + * things up to make the test happy. We allow up to 1 jiffy. + * + * This is a regression from the timespec->ktime conversion. + */ + if (err == -ETIME && !nsecs_to_jiffies(arg->timeout)) + arg->timeout = 0; + + /* Asked to wait beyond the jiffie/scheduler precision? */ + if (err == -ETIME && arg->timeout) + err = -EAGAIN; + } + +out_wait: + remove_waits(wait); +out_wake: + iova_wake_unmap(vma, arg, &wake); +out_vma: + i915_active_release(&vma->active); +out_vm: + i915_vm_put(vm); + return err; +} diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 0ba524a414c6..cb6ad8d66917 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -210,6 +210,8 @@ static void signal_irq_work(struct irq_work *work) i915_request_put(rq); } + + wake_up_all(&b->wq); } static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) @@ -254,6 +256,7 @@ void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine) INIT_LIST_HEAD(&b->signalers); init_irq_work(&b->irq_work, signal_irq_work); + init_waitqueue_head(&b->wq); } void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine) @@ -349,6 +352,46 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq) spin_unlock(&b->irq_lock); } +static void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + spin_lock_irq(&b->irq_lock); + if (!b->irq_enabled++) + irq_enable(engine); + GEM_BUG_ON(!b->irq_enabled); /* no overflow! 
*/ + spin_unlock_irq(&b->irq_lock); +} + +static void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + spin_lock_irq(&b->irq_lock); + GEM_BUG_ON(!b->irq_enabled); /* no underflow! */ + if (!--b->irq_enabled) + irq_disable(engine); + spin_unlock_irq(&b->irq_lock); +} + +void intel_engine_add_wait(struct intel_engine_cs *engine, + struct wait_queue_entry *wait) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + intel_engine_pin_breadcrumbs_irq(engine); + add_wait_queue(&b->wq, wait); +} + +void intel_engine_remove_wait(struct intel_engine_cs *engine, + struct wait_queue_entry *wait) +{ + struct intel_breadcrumbs *b = &engine->breadcrumbs; + + remove_wait_queue(&b->wq, wait); + intel_engine_unpin_breadcrumbs_irq(engine); +} + void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, struct drm_printer *p) { diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 5df003061e44..dc00772dcba5 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -210,6 +210,11 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine); void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); +void intel_engine_add_wait(struct intel_engine_cs *engine, + struct wait_queue_entry *wait); +void intel_engine_remove_wait(struct intel_engine_cs *engine, + struct wait_queue_entry *wait); + void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); static inline void diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 77e68c7643de..415b12a6aef0 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -349,6 +349,7 @@ struct intel_engine_cs { struct list_head signalers; struct irq_work irq_work; /* for use 
from inside irq_lock */ + struct wait_queue_head wq; unsigned int irq_enabled; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f7385abdd74b..7ab4039cc1e5 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -2747,6 +2747,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_VM_WAIT, i915_gem_vm_wait_ioctl, DRM_RENDER_ALLOW), }; static struct drm_driver driver = { diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 829c0a48577f..421df6aa4520 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -359,6 +359,7 @@ typedef struct _drm_i915_sarea { #define DRM_I915_QUERY 0x39 #define DRM_I915_GEM_VM_CREATE 0x3a #define DRM_I915_GEM_VM_DESTROY 0x3b +#define DRM_I915_GEM_VM_WAIT 0x3c /* Must be kept compact -- no holes */ #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) @@ -422,6 +423,7 @@ typedef struct _drm_i915_sarea { #define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query) #define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control) #define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control) +#define DRM_IOCTL_I915_GEM_VM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_WAIT, struct drm_i915_gem_vm_wait) /* Allow drivers to submit batchbuffers directly to hardware, relying * on the security mechanisms provided by hardware. 
@@ -1824,6 +1826,40 @@ struct drm_i915_gem_vm_control { __u32 vm_id; }; +/* + * (*IOVA & MASK) OP (VALUE & MASK) + * + * OP: + * - EQ, NEQ + * - GT, GTE + * - LT, LTE + * - BEFORE, AFTER + * + */ +struct drm_i915_gem_vm_wait { + __u64 extensions; + __u64 iova; + __u32 vm_id; + __u16 op; +#define I915_VM_WAIT_EQ 0 +#define I915_VM_WAIT_NEQ 1 +#define I915_VM_WAIT_GT 2 +#define I915_VM_WAIT_GTE 3 +#define I915_VM_WAIT_LT 4 +#define I915_VM_WAIT_LTE 5 +#define I915_VM_WAIT_BEFORE 6 +#define I915_VM_WAIT_AFTER 7 + __u16 flags; +#define I915_VM_WAIT_ABSTIME 0x1 + __u64 value; + __u64 mask; +#define I915_VM_WAIT_U8 0xffu +#define I915_VM_WAIT_U16 0xffffu +#define I915_VM_WAIT_U32 0xfffffffful +#define I915_VM_WAIT_U64 0xffffffffffffffffull + __u64 timeout; +}; + struct drm_i915_reg_read { /* * Register offset. -- 2.25.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [Intel-gfx] [RFC 2/2] drm/i915/gem: Introduce VM_WAIT, a futex-lite operation 2020-01-18 21:29 ` [Intel-gfx] [RFC 2/2] drm/i915/gem: Introduce VM_WAIT, a futex-lite operation Chris Wilson @ 2020-01-18 22:17 ` Chris Wilson 0 siblings, 0 replies; 7+ messages in thread From: Chris Wilson @ 2020-01-18 22:17 UTC (permalink / raw) To: intel-gfx; +Cc: Kristian H . Kristensen Quoting Chris Wilson (2020-01-18 21:29:03) > Currently, we only allow waiting on the forward progress of an individual > GEM object, or of a GEM execbuf fence. The primary purpose of the fence > is to provide a scheduling primitive to order the execution flow of > batches (cf VkSempahore). > > Userspace instead uses values in memory to implement client fences, and > has to mix busywaiting on the value coupled with a dma_fence in case it > needs to sleep. It has no intermediate step where it can wait on the > memory value itself to change, which is required for scenarios where the > dma_fence may incur too much execution latency. > > The CPU equivalent is a futex-syscall used to setup a waiter/waker based > on a memory location. This is used to implement an efficient sleep for > pthread_mutex_t, where the fast uncontended path can be handled entirely > in userspace. > > This patch implements a similar idea, where we take a virtual address in > the client's ppGTT and install an interrupt handler to wake up the > current task when the memory location passes the user supplied filter. > It also allows the user to emit their own MI_USER_INTERRUPT within their > batches after updating the value on the GPU to have sub-batch precision > on the wakeup. > > Opens: > > - on attaching the waiter, we enable interrupts on all engines, > irrespective of which are active to a VM. 
> * we can optimise when to enable interrupts while the VM is active > * we can extend the interface for the user to select which engines may > wake us > > - we could return an fd wrapping the comparison operation on the memory > address if we want to pass the waiter around different processes or > reuse the waiter (with poll() + read() like timerfd). The other thing we could do is wrap up the comparator into a dma_fence so we can use it for scheduling as well. -Chris _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply [flat|nested] 7+ messages in thread
* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray 2020-01-18 21:29 [Intel-gfx] [RFC 1/2] drm/i915/gem: Convert vm idr to xarray Chris Wilson 2020-01-18 21:29 ` [Intel-gfx] [RFC 2/2] drm/i915/gem: Introduce VM_WAIT, a futex-lite operation Chris Wilson @ 2020-01-18 21:41 ` Patchwork 2020-01-18 22:06 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork ` (2 subsequent siblings) 4 siblings, 0 replies; 7+ messages in thread From: Patchwork @ 2020-01-18 21:41 UTC (permalink / raw) To: Chris Wilson; +Cc: intel-gfx == Series Details == Series: series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray URL : https://patchwork.freedesktop.org/series/72240/ State : warning == Summary == $ dim checkpatch origin/drm-tip 1108d38489da drm/i915/gem: Convert vm idr to xarray f9aa20565ce1 drm/i915/gem: Introduce VM_WAIT, a futex-lite operation -:41: WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line) #41: References: b2c97bc78919 ("anv/query: Busy-wait for available query entries") -:75: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating? 
#75: new file mode 100644 -:114: ERROR:TRAILING_STATEMENTS: trailing statements should be on next line #114: FILE: drivers/gpu/drm/i915/gem/i915_gem_vm.c:35: + case 1: memcpy(&target, wake->ptr, 1); break; -:115: ERROR:TRAILING_STATEMENTS: trailing statements should be on next line #115: FILE: drivers/gpu/drm/i915/gem/i915_gem_vm.c:36: + case 2: memcpy(&target, wake->ptr, 2); break; -:116: ERROR:TRAILING_STATEMENTS: trailing statements should be on next line #116: FILE: drivers/gpu/drm/i915/gem/i915_gem_vm.c:37: + case 4: memcpy(&target, wake->ptr, 4); break; -:117: ERROR:TRAILING_STATEMENTS: trailing statements should be on next line #117: FILE: drivers/gpu/drm/i915/gem/i915_gem_vm.c:38: + case 8: memcpy(&target, wake->ptr, 8); break; -:584: WARNING:LONG_LINE: line over 100 characters #584: FILE: include/uapi/drm/i915_drm.h:426: +#define DRM_IOCTL_I915_GEM_VM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_WAIT, struct drm_i915_gem_vm_wait) total: 4 errors, 3 warnings, 0 checks, 534 lines checked _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply [flat|nested] 7+ messages in thread
* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray 2020-01-18 21:29 [Intel-gfx] [RFC 1/2] drm/i915/gem: Convert vm idr to xarray Chris Wilson 2020-01-18 21:29 ` [Intel-gfx] [RFC 2/2] drm/i915/gem: Introduce VM_WAIT, a futex-lite operation Chris Wilson 2020-01-18 21:41 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray Patchwork @ 2020-01-18 22:06 ` Patchwork 2020-01-18 22:07 ` [Intel-gfx] ✗ Fi.CI.BUILD: warning " Patchwork 2020-01-20 23:32 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork 4 siblings, 0 replies; 7+ messages in thread From: Patchwork @ 2020-01-18 22:06 UTC (permalink / raw) To: Chris Wilson; +Cc: intel-gfx == Series Details == Series: series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray URL : https://patchwork.freedesktop.org/series/72240/ State : success == Summary == CI Bug Log - changes from CI_DRM_7772 -> Patchwork_16166 ==================================================== Summary ------- **SUCCESS** No regressions found. 
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/index.html Known issues ------------ Here are the changes found in Patchwork_16166 that come from known issues: ### IGT changes ### #### Issues hit #### * igt@gem_close_race@basic-threads: - fi-byt-j1900: [PASS][1] -> [TIMEOUT][2] ([fdo#112271] / [i915#816]) [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/fi-byt-j1900/igt@gem_close_race@basic-threads.html [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/fi-byt-j1900/igt@gem_close_race@basic-threads.html * igt@i915_module_load@reload-with-fault-injection: - fi-cfl-guc: [PASS][3] -> [INCOMPLETE][4] ([i915#505] / [i915#671]) [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/fi-cfl-guc/igt@i915_module_load@reload-with-fault-injection.html [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/fi-cfl-guc/igt@i915_module_load@reload-with-fault-injection.html * igt@kms_chamelium@hdmi-hpd-fast: - fi-icl-u2: [PASS][5] -> [FAIL][6] ([i915#217]) [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/fi-icl-u2/igt@kms_chamelium@hdmi-hpd-fast.html [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/fi-icl-u2/igt@kms_chamelium@hdmi-hpd-fast.html [fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271 [i915#217]: https://gitlab.freedesktop.org/drm/intel/issues/217 [i915#505]: https://gitlab.freedesktop.org/drm/intel/issues/505 [i915#671]: https://gitlab.freedesktop.org/drm/intel/issues/671 [i915#816]: https://gitlab.freedesktop.org/drm/intel/issues/816 Participating hosts (40 -> 41) ------------------------------ Additional (7): fi-hsw-4770r fi-bdw-5557u fi-hsw-peppy fi-snb-2520m fi-ivb-3770 fi-elk-e7500 fi-snb-2600 Missing (6): fi-bsw-n3050 fi-byt-squawks fi-bsw-cyan fi-byt-clapper fi-bsw-nick fi-skl-6600u Build changes ------------- * CI: CI-20190529 -> None * Linux: CI_DRM_7772 -> Patchwork_16166 CI-20190529: 20190529 CI_DRM_7772: f65c394056d8637ff151fa83d5d1613adc0932d2 @ 
git://anongit.freedesktop.org/gfx-ci/linux IGT_5372: 0d00a27fbbd4d4a77d24499ea9811e07e65eb0ac @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools Patchwork_16166: f9aa20565ce1008169c28802307c9dbd448a556e @ git://anongit.freedesktop.org/gfx-ci/linux == Kernel 32bit build == Warning: Kernel 32bit buildtest failed: https://intel-gfx-ci.01.org/Patchwork_16166/build_32bit.log CALL scripts/checksyscalls.sh CALL scripts/atomic/check-atomics.sh CHK include/generated/compile.h Kernel: arch/x86/boot/bzImage is ready (#1) Building modules, stage 2. MODPOST 122 modules ERROR: "__udivdi3" [drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined! scripts/Makefile.modpost:93: recipe for target '__modpost' failed make[1]: *** [__modpost] Error 1 Makefile:1282: recipe for target 'modules' failed make: *** [modules] Error 2 == Linux commits == f9aa20565ce1 drm/i915/gem: Introduce VM_WAIT, a futex-lite operation 1108d38489da drm/i915/gem: Convert vm idr to xarray == Logs == For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/index.html _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply [flat|nested] 7+ messages in thread
* [Intel-gfx] ✗ Fi.CI.BUILD: warning for series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray 2020-01-18 21:29 [Intel-gfx] [RFC 1/2] drm/i915/gem: Convert vm idr to xarray Chris Wilson ` (2 preceding siblings ...) 2020-01-18 22:06 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork @ 2020-01-18 22:07 ` Patchwork 2020-01-20 23:32 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork 4 siblings, 0 replies; 7+ messages in thread From: Patchwork @ 2020-01-18 22:07 UTC (permalink / raw) To: Chris Wilson; +Cc: intel-gfx == Series Details == Series: series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray URL : https://patchwork.freedesktop.org/series/72240/ State : warning == Summary == CALL scripts/checksyscalls.sh CALL scripts/atomic/check-atomics.sh CHK include/generated/compile.h Kernel: arch/x86/boot/bzImage is ready (#1) Building modules, stage 2. MODPOST 122 modules ERROR: "__udivdi3" [drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined! scripts/Makefile.modpost:93: recipe for target '__modpost' failed make[1]: *** [__modpost] Error 1 Makefile:1282: recipe for target 'modules' failed make: *** [modules] Error 2 == Logs == For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/build_32bit.log _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply [flat|nested] 7+ messages in thread
* [Intel-gfx] ✗ Fi.CI.IGT: failure for series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray 2020-01-18 21:29 [Intel-gfx] [RFC 1/2] drm/i915/gem: Convert vm idr to xarray Chris Wilson ` (3 preceding siblings ...) 2020-01-18 22:07 ` [Intel-gfx] ✗ Fi.CI.BUILD: warning " Patchwork @ 2020-01-20 23:32 ` Patchwork 4 siblings, 0 replies; 7+ messages in thread From: Patchwork @ 2020-01-20 23:32 UTC (permalink / raw) To: Chris Wilson; +Cc: intel-gfx == Series Details == Series: series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray URL : https://patchwork.freedesktop.org/series/72240/ State : failure == Summary == CI Bug Log - changes from CI_DRM_7772_full -> Patchwork_16166_full ==================================================== Summary ------- **FAILURE** Serious unknown changes coming with Patchwork_16166_full absolutely need to be verified manually. If you think the reported changes have nothing to do with the changes introduced in Patchwork_16166_full, please notify your bug team to allow them to document this new failure mode, which will reduce false positives in CI. 
Possible new issues ------------------- Here are the unknown changes that may have been introduced in Patchwork_16166_full: ### IGT changes ### #### Possible regressions #### * igt@runner@aborted: - shard-hsw: NOTRUN -> [FAIL][1] [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-hsw5/igt@runner@aborted.html Known issues ------------ Here are the changes found in Patchwork_16166_full that come from known issues: ### IGT changes ### #### Issues hit #### * igt@gem_ctx_isolation@rcs0-s3: - shard-kbl: [PASS][2] -> [DMESG-WARN][3] ([i915#180]) +4 similar issues [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-kbl6/igt@gem_ctx_isolation@rcs0-s3.html [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl3/igt@gem_ctx_isolation@rcs0-s3.html * igt@gem_ctx_persistence@rcs0-mixed-process: - shard-skl: [PASS][4] -> [FAIL][5] ([i915#679]) [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-skl1/igt@gem_ctx_persistence@rcs0-mixed-process.html [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-skl8/igt@gem_ctx_persistence@rcs0-mixed-process.html * igt@gem_ctx_persistence@vcs1-queued: - shard-iclb: [PASS][6] -> [SKIP][7] ([fdo#109276] / [fdo#112080]) +7 similar issues [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb4/igt@gem_ctx_persistence@vcs1-queued.html [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb8/igt@gem_ctx_persistence@vcs1-queued.html * igt@gem_exec_parallel@bcs0-fds: - shard-hsw: [PASS][8] -> [TIMEOUT][9] ([fdo#112271]) [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-hsw7/igt@gem_exec_parallel@bcs0-fds.html [9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-hsw5/igt@gem_exec_parallel@bcs0-fds.html * igt@gem_exec_parallel@vcs1-fds: - shard-iclb: [PASS][10] -> [SKIP][11] ([fdo#112080]) +10 similar issues [10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb4/igt@gem_exec_parallel@vcs1-fds.html [11]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb5/igt@gem_exec_parallel@vcs1-fds.html * igt@gem_exec_schedule@preempt-other-chain-bsd: - shard-iclb: [PASS][12] -> [SKIP][13] ([fdo#112146]) +3 similar issues [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb5/igt@gem_exec_schedule@preempt-other-chain-bsd.html [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb2/igt@gem_exec_schedule@preempt-other-chain-bsd.html * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive: - shard-apl: [PASS][14] -> [INCOMPLETE][15] ([fdo#103927] / [i915#530]) [14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-apl2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-apl8/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html - shard-kbl: [PASS][16] -> [TIMEOUT][17] ([fdo#112271] / [i915#530]) +1 similar issue [16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-kbl6/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl1/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html * igt@gem_ppgtt@flink-and-close-vma-leak: - shard-glk: [PASS][18] -> [FAIL][19] ([i915#644]) [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-glk9/igt@gem_ppgtt@flink-and-close-vma-leak.html [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-glk5/igt@gem_ppgtt@flink-and-close-vma-leak.html * igt@gen9_exec_parse@allowed-all: - shard-glk: [PASS][20] -> [DMESG-WARN][21] ([i915#716]) [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-glk5/igt@gen9_exec_parse@allowed-all.html [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-glk7/igt@gen9_exec_parse@allowed-all.html - shard-kbl: [PASS][22] -> 
[DMESG-WARN][23] ([i915#716]) [22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-kbl6/igt@gen9_exec_parse@allowed-all.html [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl4/igt@gen9_exec_parse@allowed-all.html * igt@i915_pm_rpm@system-suspend-execbuf: - shard-iclb: [PASS][24] -> [DMESG-WARN][25] ([fdo#111764]) [24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb6/igt@i915_pm_rpm@system-suspend-execbuf.html [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb3/igt@i915_pm_rpm@system-suspend-execbuf.html * igt@i915_pm_rps@reset: - shard-iclb: [PASS][26] -> [FAIL][27] ([i915#413]) [26]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb2/igt@i915_pm_rps@reset.html [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb8/igt@i915_pm_rps@reset.html * igt@i915_selftest@mock_requests: - shard-kbl: [PASS][28] -> [INCOMPLETE][29] ([fdo#103665]) [28]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-kbl4/igt@i915_selftest@mock_requests.html [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl4/igt@i915_selftest@mock_requests.html * igt@kms_color@pipe-a-ctm-red-to-blue: - shard-skl: [PASS][30] -> [DMESG-WARN][31] ([i915#109]) +3 similar issues [30]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-skl4/igt@kms_color@pipe-a-ctm-red-to-blue.html [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-skl2/igt@kms_color@pipe-a-ctm-red-to-blue.html * igt@kms_flip@flip-vs-expired-vblank: - shard-skl: [PASS][32] -> [FAIL][33] ([i915#79]) +1 similar issue [32]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-skl5/igt@kms_flip@flip-vs-expired-vblank.html [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-skl4/igt@kms_flip@flip-vs-expired-vblank.html * igt@kms_flip@flip-vs-suspend-interruptible: - shard-apl: [PASS][34] -> [DMESG-WARN][35] ([i915#180]) +2 similar issues [34]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-apl8/igt@kms_flip@flip-vs-suspend-interruptible.html [35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-apl6/igt@kms_flip@flip-vs-suspend-interruptible.html * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc: - shard-skl: [PASS][36] -> [FAIL][37] ([fdo#108145] / [i915#265]) [36]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-skl10/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html [37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-skl1/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html * igt@kms_psr2_su@page_flip: - shard-iclb: [PASS][38] -> [SKIP][39] ([fdo#109642] / [fdo#111068]) [38]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb2/igt@kms_psr2_su@page_flip.html [39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb8/igt@kms_psr2_su@page_flip.html * igt@kms_psr@no_drrs: - shard-iclb: [PASS][40] -> [FAIL][41] ([i915#173]) [40]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb2/igt@kms_psr@no_drrs.html [41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb1/igt@kms_psr@no_drrs.html * igt@kms_psr@psr2_cursor_render: - shard-iclb: [PASS][42] -> [SKIP][43] ([fdo#109441]) +3 similar issues [42]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb2/igt@kms_psr@psr2_cursor_render.html [43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb4/igt@kms_psr@psr2_cursor_render.html * igt@prime_busy@hang-bsd2: - shard-iclb: [PASS][44] -> [SKIP][45] ([fdo#109276]) +17 similar issues [44]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb1/igt@prime_busy@hang-bsd2.html [45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb3/igt@prime_busy@hang-bsd2.html #### Possible fixes #### * igt@gem_ctx_persistence@vcs1-hostile: - shard-iclb: [SKIP][46] ([fdo#109276] / [fdo#112080]) -> [PASS][47] [46]: 
https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb5/igt@gem_ctx_persistence@vcs1-hostile.html [47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb2/igt@gem_ctx_persistence@vcs1-hostile.html * igt@gem_ctx_shared@exec-single-timeline-bsd: - shard-iclb: [SKIP][48] ([fdo#110841]) -> [PASS][49] [48]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb2/igt@gem_ctx_shared@exec-single-timeline-bsd.html [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb6/igt@gem_ctx_shared@exec-single-timeline-bsd.html * igt@gem_exec_schedule@pi-distinct-iova-bsd: - shard-iclb: [SKIP][50] ([i915#677]) -> [PASS][51] [50]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb4/igt@gem_exec_schedule@pi-distinct-iova-bsd.html [51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb5/igt@gem_exec_schedule@pi-distinct-iova-bsd.html * igt@gem_exec_schedule@preempt-queue-chain-bsd2: - shard-iclb: [SKIP][52] ([fdo#109276]) -> [PASS][53] +10 similar issues [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb8/igt@gem_exec_schedule@preempt-queue-chain-bsd2.html [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb2/igt@gem_exec_schedule@preempt-queue-chain-bsd2.html * igt@gem_exec_schedule@preemptive-hang-bsd: - shard-iclb: [SKIP][54] ([fdo#112146]) -> [PASS][55] +5 similar issues [54]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb2/igt@gem_exec_schedule@preemptive-hang-bsd.html [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb6/igt@gem_exec_schedule@preemptive-hang-bsd.html * igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive: - shard-hsw: [TIMEOUT][56] ([fdo#112271] / [i915#530]) -> [PASS][57] [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-hsw2/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html [57]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-hsw5/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html * igt@gem_persistent_relocs@forked-faulting-reloc-thrashing: - shard-kbl: [INCOMPLETE][58] ([fdo#103665] / [i915#530]) -> [PASS][59] [58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-kbl1/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html * igt@gem_ppgtt@flink-and-close-vma-leak: - shard-apl: [FAIL][60] ([i915#644]) -> [PASS][61] [60]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-apl4/igt@gem_ppgtt@flink-and-close-vma-leak.html [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-apl3/igt@gem_ppgtt@flink-and-close-vma-leak.html * igt@i915_pm_dc@dc6-psr: - shard-iclb: [FAIL][62] ([i915#454]) -> [PASS][63] [62]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb8/igt@i915_pm_dc@dc6-psr.html [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb5/igt@i915_pm_dc@dc6-psr.html * igt@i915_pm_rpm@system-suspend: - shard-kbl: [INCOMPLETE][64] ([fdo#103665] / [i915#151]) -> [PASS][65] [64]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-kbl6/igt@i915_pm_rpm@system-suspend.html [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl7/igt@i915_pm_rpm@system-suspend.html * igt@i915_selftest@mock_requests: - shard-snb: [INCOMPLETE][66] ([i915#82]) -> [PASS][67] [66]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-snb5/igt@i915_selftest@mock_requests.html [67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-snb4/igt@i915_selftest@mock_requests.html - shard-skl: [INCOMPLETE][68] ([i915#198]) -> [PASS][69] [68]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-skl7/igt@i915_selftest@mock_requests.html [69]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-skl2/igt@i915_selftest@mock_requests.html - shard-glk: [INCOMPLETE][70] ([i915#58] / [k.org#198133]) -> [PASS][71] [70]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-glk7/igt@i915_selftest@mock_requests.html [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-glk8/igt@i915_selftest@mock_requests.html * igt@kms_flip@flip-vs-suspend: - shard-skl: [INCOMPLETE][72] ([i915#221]) -> [PASS][73] [72]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-skl5/igt@kms_flip@flip-vs-suspend.html [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-skl4/igt@kms_flip@flip-vs-suspend.html * igt@kms_lease@cursor_implicit_plane: - shard-snb: [SKIP][74] ([fdo#109271]) -> [PASS][75] +3 similar issues [74]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-snb2/igt@kms_lease@cursor_implicit_plane.html [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-snb2/igt@kms_lease@cursor_implicit_plane.html * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a: - shard-kbl: [DMESG-WARN][76] ([i915#180]) -> [PASS][77] +7 similar issues [76]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-kbl7/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html [77]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl3/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html * igt@kms_psr@psr2_primary_mmap_cpu: - shard-iclb: [SKIP][78] ([fdo#109441]) -> [PASS][79] +2 similar issues [78]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb6/igt@kms_psr@psr2_primary_mmap_cpu.html [79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb2/igt@kms_psr@psr2_primary_mmap_cpu.html * igt@kms_setmode@basic: - shard-apl: [FAIL][80] ([i915#31]) -> [PASS][81] [80]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-apl2/igt@kms_setmode@basic.html [81]: 
https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-apl4/igt@kms_setmode@basic.html * igt@perf_pmu@idle-vcs1: - shard-iclb: [SKIP][82] ([fdo#112080]) -> [PASS][83] +2 similar issues [82]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb6/igt@perf_pmu@idle-vcs1.html [83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb2/igt@perf_pmu@idle-vcs1.html #### Warnings #### * igt@gem_persistent_relocs@forked-interruptible-thrashing: - shard-apl: [TIMEOUT][84] ([fdo#112271] / [i915#530]) -> [INCOMPLETE][85] ([CI#80] / [fdo#103927] / [i915#530]) [84]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-apl1/igt@gem_persistent_relocs@forked-interruptible-thrashing.html [85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-apl2/igt@gem_persistent_relocs@forked-interruptible-thrashing.html * igt@kms_dp_dsc@basic-dsc-enable-edp: - shard-iclb: [SKIP][86] ([fdo#109349]) -> [DMESG-WARN][87] ([fdo#107724]) [86]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-iclb6/igt@kms_dp_dsc@basic-dsc-enable-edp.html [87]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html * igt@runner@aborted: - shard-kbl: ([FAIL][88], [FAIL][89]) ([i915#997]) -> ([FAIL][90], [FAIL][91], [FAIL][92], [FAIL][93]) ([fdo#103665] / [i915#716] / [i915#873] / [i915#997]) [88]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-kbl7/igt@runner@aborted.html [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-kbl7/igt@runner@aborted.html [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl4/igt@runner@aborted.html [91]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl7/igt@runner@aborted.html [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl4/igt@runner@aborted.html [93]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-kbl3/igt@runner@aborted.html - shard-glk: [FAIL][94] ([i915#873] / 
[k.org#202321]) -> ([FAIL][95], [FAIL][96], [FAIL][97]) ([i915#997] / [k.org#202321]) [94]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-glk7/igt@runner@aborted.html [95]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-glk9/igt@runner@aborted.html [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-glk7/igt@runner@aborted.html [97]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-glk9/igt@runner@aborted.html - shard-skl: ([FAIL][98], [FAIL][99], [FAIL][100]) ([i915#69] / [i915#873] / [i915#997]) -> ([FAIL][101], [FAIL][102]) ([i915#69] / [i915#997]) [98]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-skl10/igt@runner@aborted.html [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-skl8/igt@runner@aborted.html [100]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7772/shard-skl7/igt@runner@aborted.html [101]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-skl5/igt@runner@aborted.html [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/shard-skl6/igt@runner@aborted.html [CI#80]: https://gitlab.freedesktop.org/gfx-ci/i915-infra/issues/80 [fdo#103665]: https://bugs.freedesktop.org/show_bug.cgi?id=103665 [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927 [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724 [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145 [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271 [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276 [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349 [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441 [fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642 [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841 [fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068 [fdo#111764]: https://bugs.freedesktop.org/show_bug.cgi?id=111764 [fdo#112080]: 
https://bugs.freedesktop.org/show_bug.cgi?id=112080 [fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146 [fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271 [i915#109]: https://gitlab.freedesktop.org/drm/intel/issues/109 [i915#151]: https://gitlab.freedesktop.org/drm/intel/issues/151 [i915#173]: https://gitlab.freedesktop.org/drm/intel/issues/173 [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180 [i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198 [i915#221]: https://gitlab.freedesktop.org/drm/intel/issues/221 [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265 [i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31 [i915#413]: https://gitlab.freedesktop.org/drm/intel/issues/413 [i915#454]: https://gitlab.freedesktop.org/drm/intel/issues/454 [i915#530]: https://gitlab.freedesktop.org/drm/intel/issues/530 [i915#58]: https://gitlab.freedesktop.org/drm/intel/issues/58 [i915#644]: https://gitlab.freedesktop.org/drm/intel/issues/644 [i915#677]: https://gitlab.freedesktop.org/drm/intel/issues/677 [i915#679]: https://gitlab.freedesktop.org/drm/intel/issues/679 [i915#69]: https://gitlab.freedesktop.org/drm/intel/issues/69 [i915#716]: https://gitlab.freedesktop.org/drm/intel/issues/716 [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79 [i915#82]: https://gitlab.freedesktop.org/drm/intel/issues/82 [i915#873]: https://gitlab.freedesktop.org/drm/intel/issues/873 [i915#997]: https://gitlab.freedesktop.org/drm/intel/issues/997 [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133 [k.org#202321]: https://bugzilla.kernel.org/show_bug.cgi?id=202321 Participating hosts (10 -> 10) ------------------------------ No changes in participating hosts Build changes ------------- * CI: CI-20190529 -> None * Linux: CI_DRM_7772 -> Patchwork_16166 CI-20190529: 20190529 CI_DRM_7772: f65c394056d8637ff151fa83d5d1613adc0932d2 @ git://anongit.freedesktop.org/gfx-ci/linux IGT_5372: 
0d00a27fbbd4d4a77d24499ea9811e07e65eb0ac @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools Patchwork_16166: f9aa20565ce1008169c28802307c9dbd448a556e @ git://anongit.freedesktop.org/gfx-ci/linux piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit == Logs == For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16166/index.html _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2020-01-20 23:32 UTC | newest] Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2020-01-18 21:29 [Intel-gfx] [RFC 1/2] drm/i915/gem: Convert vm idr to xarray Chris Wilson 2020-01-18 21:29 ` [Intel-gfx] [RFC 2/2] drm/i915/gem: Introduce VM_WAIT, a futex-lite operation Chris Wilson 2020-01-18 22:17 ` Chris Wilson 2020-01-18 21:41 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [RFC,1/2] drm/i915/gem: Convert vm idr to xarray Patchwork 2020-01-18 22:06 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork 2020-01-18 22:07 ` [Intel-gfx] ✗ Fi.CI.BUILD: warning " Patchwork 2020-01-20 23:32 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).