intel-gfx.lists.freedesktop.org archive mirror
* [Intel-gfx] [PATCH 1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
@ 2020-01-20 10:49 Chris Wilson
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 2/5] drm/i915: Tighten atomicity of i915_active_acquire vs i915_active_release Chris Wilson
                   ` (6 more replies)
  0 siblings, 7 replies; 13+ messages in thread
From: Chris Wilson @ 2020-01-20 10:49 UTC (permalink / raw)
  To: intel-gfx; +Cc: matthew.auld

<0> [198.668822] gem_exec-1246    0.... 193899010us : timeline_advance: timeline_advance:387 GEM_BUG_ON(!atomic_read(&tl->pin_count))
<0> [198.668859] ---------------------------------
<4> [198.669619] ------------[ cut here ]------------
<2> [198.669621] kernel BUG at drivers/gpu/drm/i915/gt/intel_timeline.c:387!
<4> [198.669703] invalid opcode: 0000 [#1] PREEMPT SMP PTI
<4> [198.669712] CPU: 0 PID: 1246 Comm: gem_exec_create Tainted: G     U  W         5.5.0-rc6-CI-CI_DRM_7755+ #1
<4> [198.669723] Hardware name:  /NUC7i5BNB, BIOS BNKBL357.86A.0054.2017.1025.1822 10/25/2017
<4> [198.669776] RIP: 0010:timeline_advance+0x7b/0xe0 [i915]
<4> [198.669785] Code: 00 48 c7 c2 10 f1 46 a0 48 c7 c7 70 1b 32 a0 e8 bb dd e7 e0 bf 01 00 00 00 e8 d1 af e7 e0 31 f6 bf 09 00 00 00 e8 35 ef d8 e0 <0f> 0b 48 c7 c1 48 fa 49 a0 ba 84 01 00 00 48 c7 c6 10 f1 46 a0 48
<4> [198.669803] RSP: 0018:ffffc900004c3a38 EFLAGS: 00010296
<4> [198.669810] RAX: ffff888270b35140 RBX: ffff88826f32ee00 RCX: 0000000000000006
<4> [198.669818] RDX: 00000000000017c5 RSI: 0000000000000000 RDI: 0000000000000009
<4> [198.669826] RBP: ffffc900004c3a64 R08: 0000000000000000 R09: 0000000000000000
<4> [198.669834] R10: 0000000000000000 R11: 0000000000000000 R12: ffff88826f9b5980
<4> [198.669841] R13: 0000000000000cc0 R14: ffffc900004c3dc0 R15: ffff888253610068
<4> [198.669849] FS:  00007f63e663fe40(0000) GS:ffff888276c00000(0000) knlGS:0000000000000000
<4> [198.669857] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
<4> [198.669864] CR2: 00007f171f8e39a8 CR3: 000000026b1f6005 CR4: 00000000003606f0
<4> [198.669872] Call Trace:
<4> [198.669924]  intel_timeline_get_seqno+0x12/0x40 [i915]
<4> [198.669977]  __i915_request_create+0x76/0x5a0 [i915]
<4> [198.670024]  i915_request_create+0x86/0x1c0 [i915]
<4> [198.670068]  i915_gem_do_execbuffer+0xbf2/0x2500 [i915]
<4> [198.670082]  ? __lock_acquire+0x460/0x15d0
<4> [198.670128]  i915_gem_execbuffer2_ioctl+0x11f/0x470 [i915]
<4> [198.670171]  ? i915_gem_execbuffer_ioctl+0x300/0x300 [i915]
<4> [198.670181]  drm_ioctl_kernel+0xa7/0xf0
<4> [198.670188]  drm_ioctl+0x2e1/0x390
<4> [198.670233]  ? i915_gem_execbuffer_ioctl+0x300/0x300 [i915]

Fixes: 841350223816 ("drm/i915/gt: Drop mutex serialisation between context pin/unpin")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_context.c | 46 ++++++++++++++-----------
 drivers/gpu/drm/i915/i915_active.h      |  6 ++++
 2 files changed, 31 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 23137b2a8689..57e8a051ddc2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -67,21 +67,18 @@ static int intel_context_active_acquire(struct intel_context *ce)
 {
 	int err;
 
-	err = i915_active_acquire(&ce->active);
-	if (err)
-		return err;
+	__i915_active_acquire(&ce->active);
+
+	if (intel_context_is_barrier(ce))
+		return 0;
 
 	/* Preallocate tracking nodes */
-	if (!intel_context_is_barrier(ce)) {
-		err = i915_active_acquire_preallocate_barrier(&ce->active,
-							      ce->engine);
-		if (err) {
-			i915_active_release(&ce->active);
-			return err;
-		}
-	}
+	err = i915_active_acquire_preallocate_barrier(&ce->active,
+						      ce->engine);
+	if (err)
+		i915_active_release(&ce->active);
 
-	return 0;
+	return err;
 }
 
 static void intel_context_active_release(struct intel_context *ce)
@@ -101,13 +98,19 @@ int __intel_context_do_pin(struct intel_context *ce)
 			return err;
 	}
 
-	if (mutex_lock_interruptible(&ce->pin_mutex))
-		return -EINTR;
+	err = i915_active_acquire(&ce->active);
+	if (err)
+		return err;
+
+	if (mutex_lock_interruptible(&ce->pin_mutex)) {
+		err = -EINTR;
+		goto out_release;
+	}
 
-	if (likely(!atomic_read(&ce->pin_count))) {
+	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
 		err = intel_context_active_acquire(ce);
 		if (unlikely(err))
-			goto err;
+			goto out_unlock;
 
 		err = ce->ops->pin(ce);
 		if (unlikely(err))
@@ -117,18 +120,19 @@ int __intel_context_do_pin(struct intel_context *ce)
 			 ce->ring->head, ce->ring->tail);
 
 		smp_mb__before_atomic(); /* flush pin before it is visible */
+		atomic_inc(&ce->pin_count);
 	}
 
-	atomic_inc(&ce->pin_count);
 	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
-
-	mutex_unlock(&ce->pin_mutex);
-	return 0;
+	GEM_BUG_ON(i915_active_is_idle(&ce->active));
+	goto out_unlock;
 
 err_active:
 	intel_context_active_release(ce);
-err:
+out_unlock:
 	mutex_unlock(&ce->pin_mutex);
+out_release:
+	i915_active_release(&ce->active);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index b571f675c795..480f234c2011 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -188,6 +188,12 @@ int i915_active_acquire(struct i915_active *ref);
 bool i915_active_acquire_if_busy(struct i915_active *ref);
 void i915_active_release(struct i915_active *ref);
 
+static inline void __i915_active_acquire(struct i915_active*ref)
+{
+	GEM_BUG_ON(!atomic_read(&ref->count));
+	atomic_inc(&ref->count);
+}
+
 static inline bool
 i915_active_is_idle(const struct i915_active *ref)
 {
-- 
2.25.0

* [Intel-gfx] [PATCH 2/5] drm/i915: Tighten atomicity of i915_active_acquire vs i915_active_release
  2020-01-20 10:49 [Intel-gfx] [PATCH 1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Chris Wilson
@ 2020-01-20 10:49 ` Chris Wilson
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 3/5] drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list Chris Wilson
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2020-01-20 10:49 UTC (permalink / raw)
  To: intel-gfx; +Cc: matthew.auld

As we use a mutex to serialise the first acquire (as it may be a
lengthy operation) but only an atomic decrement for the release, we
have to be careful in case a second thread races and completes a full
acquire/release cycle while the first thread is still finishing its
acquire.
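
For illustration, the pattern being tightened here is roughly the
following (a simplified sketch, not the exact i915_active code; the
example_* names and struct are made up):

#include <linux/atomic.h>
#include <linux/mutex.h>

/* Hypothetical stand-in for the real i915_active, illustration only. */
struct example_ref {
	atomic_t count;
	struct mutex mutex;
	int (*activate)(struct example_ref *ref);
	void (*retire)(struct example_ref *ref);
};

static int example_acquire(struct example_ref *ref)
{
	int err = 0;

	/* Fast path: take another reference only if one is already held. */
	if (atomic_add_unless(&ref->count, 1, 0))
		return 0;

	/* Slow path: serialise the first (potentially lengthy) acquire. */
	mutex_lock(&ref->mutex);

	/*
	 * Re-check with an atomic add-unless rather than a plain read:
	 * a concurrent holder may drop the last reference between a
	 * non-zero read and our increment, and we must not resurrect a
	 * retired reference without running the activation again.
	 */
	if (!atomic_add_unless(&ref->count, 1, 0)) {
		err = ref->activate ? ref->activate(ref) : 0;
		if (!err)
			atomic_inc(&ref->count); /* first reference */
	}

	mutex_unlock(&ref->mutex);
	return err;
}

static void example_release(struct example_ref *ref)
{
	/* Release is only an atomic decrement; the last one out retires. */
	if (atomic_dec_and_test(&ref->count))
		ref->retire(ref);
}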

Fixes: c9ad602feabe ("drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_active.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index ace55d5d4ca7..9d6830885d2e 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -416,13 +416,15 @@ int i915_active_acquire(struct i915_active *ref)
 	if (err)
 		return err;
 
-	if (!atomic_read(&ref->count) && ref->active)
-		err = ref->active(ref);
-	if (!err) {
-		spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
-		debug_active_activate(ref);
-		atomic_inc(&ref->count);
-		spin_unlock_irq(&ref->tree_lock);
+	if (likely(!i915_active_acquire_if_busy(ref))) {
+		if (ref->active)
+			err = ref->active(ref);
+		if (!err) {
+			spin_lock_irq(&ref->tree_lock); /* __active_retire() */
+			debug_active_activate(ref);
+			atomic_inc(&ref->count);
+			spin_unlock_irq(&ref->tree_lock);
+		}
 	}
 
 	mutex_unlock(&ref->mutex);
-- 
2.25.0

* [Intel-gfx] [PATCH 3/5] drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list
  2020-01-20 10:49 [Intel-gfx] [PATCH 1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Chris Wilson
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 2/5] drm/i915: Tighten atomicity of i915_active_acquire vs i915_active_release Chris Wilson
@ 2020-01-20 10:49 ` Chris Wilson
  2020-01-20 15:24   ` Abdiel Janulgue
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray Chris Wilson
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2020-01-20 10:49 UTC (permalink / raw)
  To: intel-gfx; +Cc: matthew.auld

Currently we create a new mmap_offset for every call to
mmap_offset_ioctl. This exposes us to an abusive client that may
simply create new mmap_offsets ad infinitum, which will exhaust physical
memory and the virtual address space. In addition to the exhaustion, a
very long linear list of mmap_offsets causes other clients using the
object to incur long list walks -- these long lists can also be
generated by simply having many clients generate their own mmap_offset.

However, we can simply use the drm_vma_node itself to manage the file
association (allow/revoke), dropping our need to keep an mmo per file.
Then if we keep a small rbtree of per-type mmap_offsets, we can look up
duplicate requests quickly.
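
As a rough sketch of the file-association side, using the existing
drm_vma_manager helpers (the example_* wrappers below are made up, not
the actual i915 code):

#include <linux/errno.h>
#include <drm/drm_file.h>
#include <drm/drm_vma_manager.h>

/* Grant: when a client asks for a mmap offset, allow its drm_file. */
static int example_grant(struct drm_vma_offset_node *node,
			 struct drm_file *file)
{
	/* Reference counted per node/file pair, so duplicates are fine. */
	return drm_vma_node_allow(node, file);
}

/* Revoke: when that client's file is closed, drop its access again. */
static void example_revoke(struct drm_vma_offset_node *node,
			   struct drm_file *file)
{
	drm_vma_node_revoke(node, file);
}

/* Check: at mmap time, reject files that were never granted access. */
static int example_check(struct drm_vma_offset_node *node,
			 struct drm_file *file)
{
	return drm_vma_node_is_allowed(node, file) ? 0 : -EACCES;
}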

Fixes: cc662126b413 ("drm/i915: Introduce DRM_I915_GEM_MMAP_OFFSET")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      | 90 ++++++++++++++++---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    | 18 ++--
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  6 +-
 3 files changed, 85 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b9fdac2f9003..e9be2508c04f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -455,10 +455,11 @@ static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct i915_mmap_offset *mmo;
+	struct i915_mmap_offset *mmo, *mn;
 
 	spin_lock(&obj->mmo.lock);
-	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+	rbtree_postorder_for_each_entry_safe(mmo, mn,
+					     &obj->mmo.offsets, offset) {
 		/*
 		 * vma_node_unmap for GTT mmaps handled already in
 		 * __i915_gem_object_release_mmap_gtt
@@ -487,6 +488,67 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	i915_gem_object_release_mmap_offset(obj);
 }
 
+static struct i915_mmap_offset *
+lookup_mmo(struct drm_i915_gem_object *obj,
+	   enum i915_mmap_type mmap_type)
+{
+	struct rb_node *rb;
+
+	spin_lock(&obj->mmo.lock);
+	rb = obj->mmo.offsets.rb_node;
+	while (rb) {
+		struct i915_mmap_offset *mmo =
+			rb_entry(rb, typeof(*mmo), offset);
+
+		if (mmo->mmap_type == mmap_type) {
+			spin_unlock(&obj->mmo.lock);
+			return mmo;
+		}
+
+		if (mmo->mmap_type < mmap_type)
+			rb = rb->rb_right;
+		else
+			rb = rb->rb_left;
+	}
+	spin_unlock(&obj->mmo.lock);
+
+	return NULL;
+}
+
+static struct i915_mmap_offset *
+insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
+{
+	struct rb_node *rb, **p;
+
+	spin_lock(&obj->mmo.lock);
+	rb = NULL;
+	p = &obj->mmo.offsets.rb_node;
+	while (*p) {
+		struct i915_mmap_offset *pos;
+
+		rb = *p;
+		pos = rb_entry(rb, typeof(*pos), offset);
+
+		if (pos->mmap_type == mmo->mmap_type) {
+			spin_unlock(&obj->mmo.lock);
+			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+					      &mmo->vma_node);
+			kfree(mmo);
+			return pos;
+		}
+
+		if (pos->mmap_type < mmo->mmap_type)
+			p = &rb->rb_right;
+		else
+			p = &rb->rb_left;
+	}
+	rb_link_node(&mmo->offset, rb, p);
+	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
+	spin_unlock(&obj->mmo.lock);
+
+	return mmo;
+}
+
 static struct i915_mmap_offset *
 mmap_offset_attach(struct drm_i915_gem_object *obj,
 		   enum i915_mmap_type mmap_type,
@@ -496,20 +558,22 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
 	struct i915_mmap_offset *mmo;
 	int err;
 
+	mmo = lookup_mmo(obj, mmap_type);
+	if (mmo)
+		goto out;
+
 	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
 	if (!mmo)
 		return ERR_PTR(-ENOMEM);
 
 	mmo->obj = obj;
-	mmo->dev = obj->base.dev;
-	mmo->file = file;
 	mmo->mmap_type = mmap_type;
 	drm_vma_node_reset(&mmo->vma_node);
 
-	err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
-				 obj->base.size / PAGE_SIZE);
+	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
 	if (likely(!err))
-		goto out;
+		goto insert;
 
 	/* Attempt to reap some mmap space from dead objects */
 	err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
@@ -517,19 +581,17 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
 		goto err;
 
 	i915_gem_drain_freed_objects(i915);
-	err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
-				 obj->base.size / PAGE_SIZE);
+	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
 	if (err)
 		goto err;
 
+insert:
+	mmo = insert_mmo(obj, mmo);
+	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
 out:
 	if (file)
 		drm_vma_node_allow(&mmo->vma_node, file);
-
-	spin_lock(&obj->mmo.lock);
-	list_add(&mmo->offset, &obj->mmo.offsets);
-	spin_unlock(&obj->mmo.lock);
-
 	return mmo;
 
 err:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 46bacc82ddc4..35985218bd85 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -63,7 +63,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->lut_list);
 
 	spin_lock_init(&obj->mmo.lock);
-	INIT_LIST_HEAD(&obj->mmo.offsets);
+	obj->mmo.offsets = RB_ROOT;
 
 	init_rcu_head(&obj->rcu);
 
@@ -100,8 +100,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem);
 	struct drm_i915_file_private *fpriv = file->driver_priv;
+	struct i915_mmap_offset *mmo, *mn;
 	struct i915_lut_handle *lut, *ln;
-	struct i915_mmap_offset *mmo;
 	LIST_HEAD(close);
 
 	i915_gem_object_lock(obj);
@@ -117,14 +117,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 	i915_gem_object_unlock(obj);
 
 	spin_lock(&obj->mmo.lock);
-	list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
-		if (mmo->file != file)
-			continue;
-
-		spin_unlock(&obj->mmo.lock);
+	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
 		drm_vma_node_revoke(&mmo->vma_node, file);
-		spin_lock(&obj->mmo.lock);
-	}
 	spin_unlock(&obj->mmo.lock);
 
 	list_for_each_entry_safe(lut, ln, &close, obj_link) {
@@ -203,12 +197,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		i915_gem_object_release_mmap(obj);
 
-		list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+		rbtree_postorder_for_each_entry_safe(mmo, mn,
+						     &obj->mmo.offsets,
+						     offset) {
 			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
 					      &mmo->vma_node);
 			kfree(mmo);
 		}
-		INIT_LIST_HEAD(&obj->mmo.offsets);
+		obj->mmo.offsets = RB_ROOT;
 
 		GEM_BUG_ON(atomic_read(&obj->bind_count));
 		GEM_BUG_ON(obj->userfault_count);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 88e268633fdc..f64ad77e6b1e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -71,13 +71,11 @@ enum i915_mmap_type {
 };
 
 struct i915_mmap_offset {
-	struct drm_device *dev;
 	struct drm_vma_offset_node vma_node;
 	struct drm_i915_gem_object *obj;
-	struct drm_file *file;
 	enum i915_mmap_type mmap_type;
 
-	struct list_head offset;
+	struct rb_node offset;
 };
 
 struct drm_i915_gem_object {
@@ -137,7 +135,7 @@ struct drm_i915_gem_object {
 
 	struct {
 		spinlock_t lock; /* Protects access to mmo offsets */
-		struct list_head offsets;
+		struct rb_root offsets;
 	} mmo;
 
 	I915_SELFTEST_DECLARE(struct list_head st_link);
-- 
2.25.0

* [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray
  2020-01-20 10:49 [Intel-gfx] [PATCH 1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Chris Wilson
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 2/5] drm/i915: Tighten atomicity of i915_active_acquire vs i915_active_release Chris Wilson
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 3/5] drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list Chris Wilson
@ 2020-01-20 10:49 ` Chris Wilson
  2020-01-22 16:00   ` Ruhl, Michael J
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 5/5] drm/i915/display: Squelch kerneldoc complaints Chris Wilson
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2020-01-20 10:49 UTC (permalink / raw)
  To: intel-gfx; +Cc: matthew.auld

Replace the vm_idr + vm_idr_mutex with an XArray. The XArray data
structure is now used to implement IDRs, and provides its own locking.
We can simply remove the IDR wrapper and in the process also remove our
extra mutex.
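
The resulting XArray usage boils down to the pattern below (an
illustrative sketch with made-up example_* names, not the i915 code
itself):

#include <linux/types.h>
#include <linux/xarray.h>

/* Internal locking; ids are allocated from 1 so 0 stays invalid. */
static DEFINE_XARRAY_ALLOC1(example_xa);

static int example_insert(void *entry, u32 *id)
{
	/* Find a free non-zero 32-bit id and store entry under it. */
	return xa_alloc(&example_xa, id, entry, xa_limit_32b, GFP_KERNEL);
}

static void *example_lookup(u32 id)
{
	/* RCU-protected lookup, no external mutex required. */
	return xa_load(&example_xa, id);
}

static void *example_remove(u32 id)
{
	/* Erase the id and return the old entry (NULL if it was unused). */
	return xa_erase(&example_xa, id);
}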

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c | 80 ++++++---------------
 drivers/gpu/drm/i915/i915_drv.h             |  4 +-
 2 files changed, 22 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index a2e57e62af30..d2e4e8cbf4d4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -761,12 +761,6 @@ void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
 	flush_work(&i915->gem.contexts.free_work);
 }
 
-static int vm_idr_cleanup(int id, void *p, void *data)
-{
-	i915_vm_put(p);
-	return 0;
-}
-
 static int gem_context_register(struct i915_gem_context *ctx,
 				struct drm_i915_file_private *fpriv,
 				u32 *id)
@@ -803,9 +797,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 	u32 id;
 
 	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
-
-	mutex_init(&file_priv->vm_idr_lock);
-	idr_init_base(&file_priv->vm_idr, 1);
+	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
 
 	ctx = i915_gem_create_context(i915, 0);
 	if (IS_ERR(ctx)) {
@@ -823,9 +815,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 err_ctx:
 	context_close(ctx);
 err:
-	idr_destroy(&file_priv->vm_idr);
+	xa_destroy(&file_priv->vm_xa);
 	xa_destroy(&file_priv->context_xa);
-	mutex_destroy(&file_priv->vm_idr_lock);
 	return err;
 }
 
@@ -833,6 +824,7 @@ void i915_gem_context_close(struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_private *i915 = file_priv->dev_priv;
+	struct i915_address_space *vm;
 	struct i915_gem_context *ctx;
 	unsigned long idx;
 
@@ -840,9 +832,9 @@ void i915_gem_context_close(struct drm_file *file)
 		context_close(ctx);
 	xa_destroy(&file_priv->context_xa);
 
-	idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
-	idr_destroy(&file_priv->vm_idr);
-	mutex_destroy(&file_priv->vm_idr_lock);
+	xa_for_each(&file_priv->vm_xa, idx, vm)
+		i915_vm_put(vm);
+	xa_destroy(&file_priv->vm_xa);
 
 	contexts_flush_free(&i915->gem.contexts);
 }
@@ -876,23 +868,13 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
 			goto err_put;
 	}
 
-	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+	err = xa_alloc(&file_priv->vm_xa, &args->vm_id,
+		       &ppgtt->vm, xa_limit_32b, GFP_KERNEL);
 	if (err)
 		goto err_put;
 
-	err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
-	if (err < 0)
-		goto err_unlock;
-
-	GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
-
-	mutex_unlock(&file_priv->vm_idr_lock);
-
-	args->vm_id = err;
 	return 0;
 
-err_unlock:
-	mutex_unlock(&file_priv->vm_idr_lock);
 err_put:
 	i915_vm_put(&ppgtt->vm);
 	return err;
@@ -904,8 +886,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 	struct drm_i915_gem_vm_control *args = data;
 	struct i915_address_space *vm;
-	int err;
-	u32 id;
 
 	if (args->flags)
 		return -EINVAL;
@@ -913,17 +893,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
 	if (args->extensions)
 		return -EINVAL;
 
-	id = args->vm_id;
-	if (!id)
-		return -ENOENT;
-
-	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
-	if (err)
-		return err;
-
-	vm = idr_remove(&file_priv->vm_idr, id);
-
-	mutex_unlock(&file_priv->vm_idr_lock);
+	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
 	if (!vm)
 		return -ENOENT;
 
@@ -1021,35 +991,27 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
 		     struct drm_i915_gem_context_param *args)
 {
 	struct i915_address_space *vm;
-	int ret;
+	int err = -ENODEV;
+	u32 id;
 
 	if (!rcu_access_pointer(ctx->vm))
 		return -ENODEV;
 
 	rcu_read_lock();
 	vm = context_get_vm_rcu(ctx);
+	if (vm)
+		err = xa_alloc(&file_priv->vm_xa, &id, vm,
+			       xa_limit_32b, GFP_KERNEL);
 	rcu_read_unlock();
+	if (!err) {
+		i915_vm_open(vm);
 
-	ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
-	if (ret)
-		goto err_put;
-
-	ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
-	GEM_BUG_ON(!ret);
-	if (ret < 0)
-		goto err_unlock;
-
-	i915_vm_open(vm);
-
-	args->size = 0;
-	args->value = ret;
+		args->size = 0;
+		args->value = id;
+	}
 
-	ret = 0;
-err_unlock:
-	mutex_unlock(&file_priv->vm_idr_lock);
-err_put:
 	i915_vm_put(vm);
-	return ret;
+	return err;
 }
 
 static void set_ppgtt_barrier(void *data)
@@ -1151,7 +1113,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 		return -ENOENT;
 
 	rcu_read_lock();
-	vm = idr_find(&file_priv->vm_idr, args->value);
+	vm = xa_load(&file_priv->vm_xa, args->value);
 	if (vm && !kref_get_unless_zero(&vm->ref))
 		vm = NULL;
 	rcu_read_unlock();
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 077af22b8340..50abf9113b2f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -203,9 +203,7 @@ struct drm_i915_file_private {
 	} mm;
 
 	struct xarray context_xa;
-
-	struct idr vm_idr;
-	struct mutex vm_idr_lock; /* guards vm_idr */
+	struct xarray vm_xa;
 
 	unsigned int bsd_engine;
 
-- 
2.25.0

* [Intel-gfx] [PATCH 5/5] drm/i915/display: Squelch kerneldoc complaints
  2020-01-20 10:49 [Intel-gfx] [PATCH 1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Chris Wilson
                   ` (2 preceding siblings ...)
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray Chris Wilson
@ 2020-01-20 10:49 ` Chris Wilson
  2020-01-20 15:17 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Patchwork
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2020-01-20 10:49 UTC (permalink / raw)
  To: intel-gfx; +Cc: matthew.auld

drivers/gpu/drm/i915/display/intel_atomic.c:185: warning: Function parameter or member 'state' not described in 'intel_connector_needs_modeset'
drivers/gpu/drm/i915/display/intel_atomic.c:185: warning: Function parameter or member 'connector' not described in 'intel_connector_needs_modeset'

drivers/gpu/drm/i915/display/intel_fbc.c:1124: warning: Function parameter or member 'state' not described in 'intel_fbc_enable'
drivers/gpu/drm/i915/display/intel_fbc.c:1124: warning: Excess function parameter 'crtc_state' description in 'intel_fbc_enable'
drivers/gpu/drm/i915/display/intel_fbc.c:1124: warning: Excess function parameter 'plane_state' description in 'intel_fbc_enable'

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/display/intel_atomic.c | 2 ++
 drivers/gpu/drm/i915/display/intel_fbc.c    | 3 +--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index c362eecdd414..9921b1fa4e70 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -178,6 +178,8 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
 
 /**
  * intel_connector_needs_modeset - check if connector needs a modeset
+ * @state: the atomic state corresponding to this modeset
+ * @connector: the connector
  */
 bool
 intel_connector_needs_modeset(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 88a9c2fea695..d3be6f619b31 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1111,8 +1111,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
 /**
  * intel_fbc_enable: tries to enable FBC on the CRTC
  * @crtc: the CRTC
- * @crtc_state: corresponding &drm_crtc_state for @crtc
- * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
+ * @state: corresponding &drm_crtc_state for @crtc
  *
  * This function checks if the given CRTC was chosen for FBC, then enables it if
  * possible. Notice that it doesn't activate FBC. It is valid to call
-- 
2.25.0

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
  2020-01-20 10:49 [Intel-gfx] [PATCH 1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Chris Wilson
                   ` (3 preceding siblings ...)
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 5/5] drm/i915/display: Squelch kerneldoc complaints Chris Wilson
@ 2020-01-20 15:17 ` Patchwork
  2020-01-20 23:42 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
  2020-01-21  8:40 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
  6 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2020-01-20 15:17 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
URL   : https://patchwork.freedesktop.org/series/72269/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
6cbb3c0bda69 drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
-:7: WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#7: 
<0> [198.668822] gem_exec-1246    0.... 193899010us : timeline_advance: timeline_advance:387 GEM_BUG_ON(!atomic_read(&tl->pin_count))

-:132: ERROR:POINTER_LOCATION: "foo*bar" should be "foo *bar"
#132: FILE: drivers/gpu/drm/i915/i915_active.h:191:
+static inline void __i915_active_acquire(struct i915_active*ref)

total: 1 errors, 1 warnings, 0 checks, 89 lines checked
4fc48a860edc drm/i915: Tighten atomicity of i915_active_acquire vs i915_active_release
1ac032874ab5 drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list
570974a6f083 drm/i915/gem: Convert vm idr to xarray
0614af4d0caa drm/i915/display: Squelch kerneldoc complaints

* Re: [Intel-gfx] [PATCH 3/5] drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 3/5] drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list Chris Wilson
@ 2020-01-20 15:24   ` Abdiel Janulgue
  0 siblings, 0 replies; 13+ messages in thread
From: Abdiel Janulgue @ 2020-01-20 15:24 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: matthew.auld


On 20/01/2020 12.49, Chris Wilson wrote:
> Currently we create a new mmap_offset for every call to
> mmap_offset_ioctl. This exposes us to an abusive client that may
> simply create new mmap_offsets ad infinitum, which will exhaust physical
> memory and the virtual address space. In addition to the exhaustion, a
> very long linear list of mmap_offsets causes other clients using the
> object to incur long list walks -- these long lists can also be
> generated by simply having many clients generate their own mmap_offset.
> 
> However, we can simply use the drm_vma_node itself to manage the file
> association (allow/revoke), dropping our need to keep an mmo per file.
> Then if we keep a small rbtree of per-type mmap_offsets, we can look up
> duplicate requests quickly.

As discussed, the rbtree does do away with the limitation of the linear list.

Reviewed-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>

> Fixes: cc662126b413 ("drm/i915: Introduce DRM_I915_GEM_MMAP_OFFSET")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>

* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
  2020-01-20 10:49 [Intel-gfx] [PATCH 1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Chris Wilson
                   ` (4 preceding siblings ...)
  2020-01-20 15:17 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Patchwork
@ 2020-01-20 23:42 ` Patchwork
  2020-01-21  8:40 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
  6 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2020-01-20 23:42 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
URL   : https://patchwork.freedesktop.org/series/72269/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7779 -> Patchwork_16172
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/index.html

Known issues
------------

  Here are the changes found in Patchwork_16172 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@live_gem_contexts:
    - fi-cfl-8700k:       [PASS][1] -> [INCOMPLETE][2] ([i915#424])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html
    - fi-hsw-peppy:       [PASS][3] -> [DMESG-FAIL][4] ([i915#722])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/fi-hsw-peppy/igt@i915_selftest@live_gem_contexts.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/fi-hsw-peppy/igt@i915_selftest@live_gem_contexts.html

  
#### Possible fixes ####

  * igt@i915_module_load@reload-with-fault-injection:
    - fi-cfl-guc:         [INCOMPLETE][5] ([i915#505] / [i915#671]) -> [PASS][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/fi-cfl-guc/igt@i915_module_load@reload-with-fault-injection.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/fi-cfl-guc/igt@i915_module_load@reload-with-fault-injection.html

  * igt@i915_pm_rpm@module-reload:
    - fi-kbl-guc:         [SKIP][7] ([fdo#109271]) -> [PASS][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/fi-kbl-guc/igt@i915_pm_rpm@module-reload.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/fi-kbl-guc/igt@i915_pm_rpm@module-reload.html

  * igt@i915_selftest@live_mman:
    - fi-bxt-dsi:         [DMESG-WARN][9] ([i915#889]) -> [PASS][10] +23 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/fi-bxt-dsi/igt@i915_selftest@live_mman.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/fi-bxt-dsi/igt@i915_selftest@live_mman.html

  * igt@i915_selftest@live_reset:
    - fi-bxt-dsi:         [DMESG-FAIL][11] ([i915#889]) -> [PASS][12] +7 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/fi-bxt-dsi/igt@i915_selftest@live_reset.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/fi-bxt-dsi/igt@i915_selftest@live_reset.html

  
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [i915#424]: https://gitlab.freedesktop.org/drm/intel/issues/424
  [i915#505]: https://gitlab.freedesktop.org/drm/intel/issues/505
  [i915#671]: https://gitlab.freedesktop.org/drm/intel/issues/671
  [i915#722]: https://gitlab.freedesktop.org/drm/intel/issues/722
  [i915#889]: https://gitlab.freedesktop.org/drm/intel/issues/889


Participating hosts (42 -> 40)
------------------------------

  Additional (5): fi-tgl-u fi-byt-j1900 fi-kbl-7500u fi-ivb-3770 fi-skl-6600u 
  Missing    (7): fi-ilk-m540 fi-hsw-4200u fi-glk-dsi fi-bsw-cyan fi-snb-2520m fi-byt-n2820 fi-byt-clapper 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7779 -> Patchwork_16172

  CI-20190529: 20190529
  CI_DRM_7779: 9481f0a0238efb8a7f001e4cdc16388fc097a7e4 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5373: 224e565df36693ab8ae8f58eb6ae42600c2464e2 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16172: 0614af4d0caa263584b0d1f877c0618998f678e9 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

0614af4d0caa drm/i915/display: Squelch kerneldoc complaints
570974a6f083 drm/i915/gem: Convert vm idr to xarray
1ac032874ab5 drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list
4fc48a860edc drm/i915: Tighten atomicity of i915_active_acquire vs i915_active_release
6cbb3c0bda69 drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/index.html

* [Intel-gfx] ✓ Fi.CI.IGT: success for series starting with [1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
  2020-01-20 10:49 [Intel-gfx] [PATCH 1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Chris Wilson
                   ` (5 preceding siblings ...)
  2020-01-20 23:42 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
@ 2020-01-21  8:40 ` Patchwork
  6 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2020-01-21  8:40 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex
URL   : https://patchwork.freedesktop.org/series/72269/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7779_full -> Patchwork_16172_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_16172_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_isolation@bcs0-s3:
    - shard-skl:          [PASS][1] -> [INCOMPLETE][2] ([i915#69])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl8/igt@gem_ctx_isolation@bcs0-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl2/igt@gem_ctx_isolation@bcs0-s3.html

  * igt@gem_ctx_isolation@vcs1-none:
    - shard-iclb:         [PASS][3] -> [SKIP][4] ([fdo#109276] / [fdo#112080]) +3 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb4/igt@gem_ctx_isolation@vcs1-none.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb6/igt@gem_ctx_isolation@vcs1-none.html

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [PASS][5] -> [SKIP][6] ([fdo#110841])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb7/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb4/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_ctx_shared@q-smoketest-vebox:
    - shard-tglb:         [PASS][7] -> [INCOMPLETE][8] ([fdo#111735] / [i915#472])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-tglb7/igt@gem_ctx_shared@q-smoketest-vebox.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-tglb3/igt@gem_ctx_shared@q-smoketest-vebox.html

  * igt@gem_exec_parallel@vcs1-fds:
    - shard-iclb:         [PASS][9] -> [SKIP][10] ([fdo#112080]) +12 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb1/igt@gem_exec_parallel@vcs1-fds.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb3/igt@gem_exec_parallel@vcs1-fds.html

  * igt@gem_exec_schedule@pi-common-bsd:
    - shard-iclb:         [PASS][11] -> [SKIP][12] ([i915#677])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb3/igt@gem_exec_schedule@pi-common-bsd.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb2/igt@gem_exec_schedule@pi-common-bsd.html

  * igt@gem_exec_schedule@preempt-other-chain-bsd:
    - shard-iclb:         [PASS][13] -> [SKIP][14] ([fdo#112146]) +7 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb6/igt@gem_exec_schedule@preempt-other-chain-bsd.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb2/igt@gem_exec_schedule@preempt-other-chain-bsd.html

  * igt@gem_exec_schedule@smoketest-bsd1:
    - shard-tglb:         [PASS][15] -> [INCOMPLETE][16] ([i915#463] / [i915#472])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-tglb5/igt@gem_exec_schedule@smoketest-bsd1.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-tglb4/igt@gem_exec_schedule@smoketest-bsd1.html

  * igt@gem_exec_suspend@basic-s4-devices:
    - shard-tglb:         [PASS][17] -> [INCOMPLETE][18] ([i915#460] / [i915#472]) +1 similar issue
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-tglb1/igt@gem_exec_suspend@basic-s4-devices.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-tglb4/igt@gem_exec_suspend@basic-s4-devices.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive:
    - shard-hsw:          [PASS][19] -> [TIMEOUT][20] ([fdo#112271] / [i915#530])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-hsw5/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-hsw5/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-apl:          [PASS][21] -> [TIMEOUT][22] ([fdo#112271] / [i915#530]) +1 similar issue
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-apl1/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-apl4/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-thrash-inactive:
    - shard-kbl:          [PASS][23] -> [TIMEOUT][24] ([fdo#112271] / [i915#530]) +1 similar issue
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-kbl6/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-kbl2/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html

  * igt@gem_ppgtt@flink-and-close-vma-leak:
    - shard-glk:          [PASS][25] -> [FAIL][26] ([i915#644])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-glk6/igt@gem_ppgtt@flink-and-close-vma-leak.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-glk7/igt@gem_ppgtt@flink-and-close-vma-leak.html
    - shard-skl:          [PASS][27] -> [FAIL][28] ([i915#644])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl4/igt@gem_ppgtt@flink-and-close-vma-leak.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl3/igt@gem_ppgtt@flink-and-close-vma-leak.html

  * igt@gem_ringfill@basic-default-forked:
    - shard-tglb:         [PASS][29] -> [INCOMPLETE][30] ([i915#472]) +1 similar issue
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-tglb2/igt@gem_ringfill@basic-default-forked.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-tglb7/igt@gem_ringfill@basic-default-forked.html

  * igt@gem_tiled_blits@normal:
    - shard-hsw:          [PASS][31] -> [FAIL][32] ([i915#818]) +1 similar issue
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-hsw8/igt@gem_tiled_blits@normal.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-hsw5/igt@gem_tiled_blits@normal.html

  * igt@i915_selftest@mock_requests:
    - shard-snb:          [PASS][33] -> [INCOMPLETE][34] ([i915#82])
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-snb1/igt@i915_selftest@mock_requests.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-snb1/igt@i915_selftest@mock_requests.html
    - shard-glk:          [PASS][35] -> [INCOMPLETE][36] ([i915#58] / [k.org#198133])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-glk4/igt@i915_selftest@mock_requests.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-glk1/igt@i915_selftest@mock_requests.html
    - shard-apl:          [PASS][37] -> [INCOMPLETE][38] ([fdo#103927])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-apl2/igt@i915_selftest@mock_requests.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-apl8/igt@i915_selftest@mock_requests.html
    - shard-hsw:          [PASS][39] -> [INCOMPLETE][40] ([i915#61]) +1 similar issue
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-hsw4/igt@i915_selftest@mock_requests.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-hsw5/igt@i915_selftest@mock_requests.html

  * igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
    - shard-glk:          [PASS][41] -> [FAIL][42] ([i915#72])
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-glk7/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-glk9/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions:
    - shard-skl:          [PASS][43] -> [FAIL][44] ([IGT#5])
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl10/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl4/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - shard-kbl:          [PASS][45] -> [DMESG-WARN][46] ([i915#180]) +3 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-kbl3/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-kbl1/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
    - shard-apl:          [PASS][47] -> [DMESG-WARN][48] ([i915#180]) +1 similar issue
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-apl4/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-apl6/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html

  * igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min:
    - shard-skl:          [PASS][49] -> [FAIL][50] ([fdo#108145])
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl9/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl3/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html

  * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
    - shard-skl:          [PASS][51] -> [FAIL][52] ([fdo#108145] / [i915#265])
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl7/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl8/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html

  * igt@kms_psr@psr2_primary_mmap_gtt:
    - shard-iclb:         [PASS][53] -> [SKIP][54] ([fdo#109441]) +1 similar issue
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb2/igt@kms_psr@psr2_primary_mmap_gtt.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb6/igt@kms_psr@psr2_primary_mmap_gtt.html

  * igt@prime_vgem@fence-wait-bsd2:
    - shard-iclb:         [PASS][55] -> [SKIP][56] ([fdo#109276]) +25 similar issues
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb2/igt@prime_vgem@fence-wait-bsd2.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb8/igt@prime_vgem@fence-wait-bsd2.html

  
#### Possible fixes ####

  * igt@gem_busy@busy-vcs1:
    - shard-iclb:         [SKIP][57] ([fdo#112080]) -> [PASS][58] +12 similar issues
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb7/igt@gem_busy@busy-vcs1.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb4/igt@gem_busy@busy-vcs1.html

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-kbl:          [DMESG-WARN][59] ([i915#180]) -> [PASS][60] +7 similar issues
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-kbl4/igt@gem_ctx_isolation@rcs0-s3.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-kbl3/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_ctx_persistence@vcs1-queued:
    - shard-iclb:         [SKIP][61] ([fdo#109276] / [fdo#112080]) -> [PASS][62] +3 similar issues
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb6/igt@gem_ctx_persistence@vcs1-queued.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb1/igt@gem_ctx_persistence@vcs1-queued.html

  * igt@gem_ctx_shared@q-smoketest-bsd1:
    - shard-tglb:         [INCOMPLETE][63] ([fdo#111735] / [i915#472]) -> [PASS][64]
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-tglb3/igt@gem_ctx_shared@q-smoketest-bsd1.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-tglb2/igt@gem_ctx_shared@q-smoketest-bsd1.html

  * igt@gem_exec_nop@basic-parallel:
    - shard-tglb:         [INCOMPLETE][65] ([i915#472]) -> [PASS][66] +2 similar issues
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-tglb5/igt@gem_exec_nop@basic-parallel.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-tglb2/igt@gem_exec_nop@basic-parallel.html

  * igt@gem_exec_schedule@in-order-bsd:
    - shard-iclb:         [SKIP][67] ([fdo#112146]) -> [PASS][68] +7 similar issues
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb2/igt@gem_exec_schedule@in-order-bsd.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb8/igt@gem_exec_schedule@in-order-bsd.html

  * igt@gem_exec_schedule@pi-distinct-iova-bsd:
    - shard-iclb:         [SKIP][69] ([i915#677]) -> [PASS][70]
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb1/igt@gem_exec_schedule@pi-distinct-iova-bsd.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb3/igt@gem_exec_schedule@pi-distinct-iova-bsd.html

  * igt@gem_exec_schedule@preempt-queue-contexts-bsd1:
    - shard-tglb:         [INCOMPLETE][71] ([fdo#111606] / [fdo#111677] / [i915#472]) -> [PASS][72]
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-tglb6/igt@gem_exec_schedule@preempt-queue-contexts-bsd1.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-tglb7/igt@gem_exec_schedule@preempt-queue-contexts-bsd1.html

  * {igt@gem_mmap_offset@open-flood}:
    - shard-snb:          [INCOMPLETE][73] ([i915#82]) -> [PASS][74]
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-snb1/igt@gem_mmap_offset@open-flood.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-snb5/igt@gem_mmap_offset@open-flood.html

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrashing:
    - shard-hsw:          [INCOMPLETE][75] ([i915#530] / [i915#61]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-hsw1/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-hsw4/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive:
    - shard-apl:          [TIMEOUT][77] ([fdo#112271] / [i915#530]) -> [PASS][78]
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-apl3/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-apl4/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html

  * igt@gem_persistent_relocs@forked-interruptible-thrash-inactive:
    - shard-hsw:          [TIMEOUT][79] ([fdo#112271] / [i915#530]) -> [PASS][80] +1 similar issue
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-hsw7/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-hsw7/igt@gem_persistent_relocs@forked-interruptible-thrash-inactive.html

  * igt@gem_persistent_relocs@forked-interruptible-thrashing:
    - shard-skl:          [TIMEOUT][81] ([fdo#112271] / [i915#530]) -> [PASS][82]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl9/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl3/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
    - shard-kbl:          [FAIL][83] ([i915#520]) -> [PASS][84]
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-kbl4/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-kbl3/igt@gem_persistent_relocs@forked-interruptible-thrashing.html

  * igt@gem_persistent_relocs@forked-thrashing:
    - shard-kbl:          [INCOMPLETE][85] ([fdo#103665] / [i915#530]) -> [PASS][86] +1 similar issue
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-kbl3/igt@gem_persistent_relocs@forked-thrashing.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-kbl6/igt@gem_persistent_relocs@forked-thrashing.html

  * igt@gem_sync@basic-each:
    - shard-tglb:         [INCOMPLETE][87] ([i915#472] / [i915#707]) -> [PASS][88]
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-tglb3/igt@gem_sync@basic-each.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-tglb2/igt@gem_sync@basic-each.html

  * igt@i915_pm_rpm@system-suspend-execbuf:
    - shard-skl:          [INCOMPLETE][89] ([i915#151] / [i915#69]) -> [PASS][90]
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl1/igt@i915_pm_rpm@system-suspend-execbuf.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl9/igt@i915_pm_rpm@system-suspend-execbuf.html

  * igt@i915_selftest@mock_requests:
    - shard-iclb:         [INCOMPLETE][91] ([i915#140]) -> [PASS][92]
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb6/igt@i915_selftest@mock_requests.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb8/igt@i915_selftest@mock_requests.html

  * igt@kms_color@pipe-b-ctm-0-5:
    - shard-skl:          [DMESG-WARN][93] ([i915#109]) -> [PASS][94]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl7/igt@kms_color@pipe-b-ctm-0-5.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl8/igt@kms_color@pipe-b-ctm-0-5.html

  * igt@kms_cursor_crc@pipe-a-cursor-128x128-random:
    - shard-snb:          [INCOMPLETE][95] ([CI#80] / [i915#82]) -> [PASS][96]
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-snb1/igt@kms_cursor_crc@pipe-a-cursor-128x128-random.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-snb5/igt@kms_cursor_crc@pipe-a-cursor-128x128-random.html

  * igt@kms_cursor_crc@pipe-c-cursor-256x85-random:
    - shard-skl:          [FAIL][97] ([i915#54]) -> [PASS][98]
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl7/igt@kms_cursor_crc@pipe-c-cursor-256x85-random.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl5/igt@kms_cursor_crc@pipe-c-cursor-256x85-random.html

  * igt@kms_draw_crc@draw-method-xrgb2101010-mmap-cpu-ytiled:
    - shard-skl:          [FAIL][99] ([i915#52] / [i915#54]) -> [PASS][100]
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl2/igt@kms_draw_crc@draw-method-xrgb2101010-mmap-cpu-ytiled.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl8/igt@kms_draw_crc@draw-method-xrgb2101010-mmap-cpu-ytiled.html

  * igt@kms_flip@flip-vs-suspend-interruptible:
    - shard-apl:          [DMESG-WARN][101] ([i915#180]) -> [PASS][102] +3 similar issues
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-apl1/igt@kms_flip@flip-vs-suspend-interruptible.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-apl2/igt@kms_flip@flip-vs-suspend-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw:
    - shard-tglb:         [FAIL][103] ([i915#49]) -> [PASS][104] +2 similar issues
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-tglb4/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-tglb6/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes:
    - shard-skl:          [INCOMPLETE][105] ([i915#69]) -> [PASS][106]
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl10/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl4/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html

  * igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min:
    - shard-skl:          [FAIL][107] ([fdo#108145]) -> [PASS][108] +1 similar issue
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl7/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl5/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html

  * igt@kms_plane_multiple@atomic-pipe-a-tiling-y:
    - shard-skl:          [DMESG-WARN][109] ([IGT#6]) -> [PASS][110]
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-skl5/igt@kms_plane_multiple@atomic-pipe-a-tiling-y.html
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-skl2/igt@kms_plane_multiple@atomic-pipe-a-tiling-y.html

  * igt@kms_psr@psr2_basic:
    - shard-iclb:         [SKIP][111] ([fdo#109441]) -> [PASS][112]
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb3/igt@kms_psr@psr2_basic.html
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb2/igt@kms_psr@psr2_basic.html

  * igt@kms_setmode@basic:
    - shard-kbl:          [FAIL][113] ([i915#31]) -> [PASS][114]
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-kbl1/igt@kms_setmode@basic.html
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-kbl2/igt@kms_setmode@basic.html

  * igt@prime_busy@hang-bsd2:
    - shard-iclb:         [SKIP][115] ([fdo#109276]) -> [PASS][116] +24 similar issues
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb6/igt@prime_busy@hang-bsd2.html
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb2/igt@prime_busy@hang-bsd2.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv:
    - shard-iclb:         [FAIL][117] ([IGT#28]) -> [SKIP][118] ([fdo#109276] / [fdo#112080])
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb4/igt@gem_ctx_isolation@vcs1-nonpriv.html
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb6/igt@gem_ctx_isolation@vcs1-nonpriv.html

  * igt@kms_dp_dsc@basic-dsc-enable-edp:
    - shard-iclb:         [DMESG-WARN][119] ([fdo#107724]) -> [SKIP][120] ([fdo#109349])
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-iclb8/igt@kms_dp_dsc@basic-dsc-enable-edp.html

  * igt@runner@aborted:
    - shard-apl:          [FAIL][121] ([i915#997]) -> ([FAIL][122], [FAIL][123]) ([i915#873] / [i915#997])
   [121]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-apl3/igt@runner@aborted.html
   [122]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-apl8/igt@runner@aborted.html
   [123]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-apl1/igt@runner@aborted.html
    - shard-glk:          ([FAIL][124], [FAIL][125], [FAIL][126]) ([i915#997] / [k.org#202321]) -> ([FAIL][127], [FAIL][128], [FAIL][129], [FAIL][130]) ([i915#873] / [i915#997] / [k.org#202321])
   [124]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-glk7/igt@runner@aborted.html
   [125]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-glk6/igt@runner@aborted.html
   [126]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7779/shard-glk3/igt@runner@aborted.html
   [127]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-glk7/igt@runner@aborted.html
   [128]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-glk1/igt@runner@aborted.html
   [129]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-glk2/igt@runner@aborted.html
   [130]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/shard-glk2/igt@runner@aborted.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [CI#80]: https://gitlab.freedesktop.org/gfx-ci/i915-infra/issues/80
  [IGT#28]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/28
  [IGT#5]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/5
  [IGT#6]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/6
  [fdo#103665]: https://bugs.freedesktop.org/show_bug.cgi?id=103665
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
  [fdo#111606]: https://bugs.freedesktop.org/show_bug.cgi?id=111606
  [fdo#111677]: https://bugs.freedesktop.org/show_bug.cgi?id=111677
  [fdo#111735]: https://bugs.freedesktop.org/show_bug.cgi?id=111735
  [fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
  [fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146
  [fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271
  [i915#109]: https://gitlab.freedesktop.org/drm/intel/issues/109
  [i915#140]: https://gitlab.freedesktop.org/drm/intel/issues/140
  [i915#151]: https://gitlab.freedesktop.org/drm/intel/issues/151
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
  [i915#460]: https://gitlab.freedesktop.org/drm/intel/issues/460
  [i915#463]: https://gitlab.freedesktop.org/drm/intel/issues/463
  [i915#472]: https://gitlab.freedesktop.org/drm/intel/issues/472
  [i915#49]: https://gitlab.freedesktop.org/drm/intel/issues/49
  [i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
  [i915#520]: https://gitlab.freedesktop.org/drm/intel/issues/520
  [i915#530]: https://gitlab.freedesktop.org/drm/intel/issues/530
  [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
  [i915#58]: https://gitlab.freedesktop.org/drm/intel/issues/58
  [i915#61]: https://gitlab.freedesktop.org/drm/intel/issues/61
  [i915#644]: https://gitlab.freedesktop.org/drm/intel/issues/644
  [i915#677]: https://gitlab.freedesktop.org/drm/intel/issues/677
  [i915#69]: https://gitlab.freedesktop.org/drm/intel/issues/69
  [i915#707]: https://gitlab.freedesktop.org/drm/intel/issues/707
  [i915#72]: https://gitlab.freedesktop.org/drm/intel/issues/72
  [i915#818]: https://gitlab.freedesktop.org/drm/intel/issues/818
  [i915#82]: https://gitlab.freedesktop.org/drm/intel/issues/82
  [i915#873]: https://gitlab.freedesktop.org/drm/intel/issues/873
  [i915#997]: https://gitlab.freedesktop.org/drm/intel/issues/997
  [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133
  [k.org#202321]: https://bugzilla.kernel.org/show_bug.cgi?id=202321


Participating hosts (9 -> 10)
------------------------------

  Additional (1): pig-glk-j5005 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7779 -> Patchwork_16172

  CI-20190529: 20190529
  CI_DRM_7779: 9481f0a0238efb8a7f001e4cdc16388fc097a7e4 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5373: 224e565df36693ab8ae8f58eb6ae42600c2464e2 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16172: 0614af4d0caa263584b0d1f877c0618998f678e9 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16172/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray
  2020-01-20 10:49 ` [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray Chris Wilson
@ 2020-01-22 16:00   ` Ruhl, Michael J
  2020-01-22 16:08     ` Chris Wilson
  0 siblings, 1 reply; 13+ messages in thread
From: Ruhl, Michael J @ 2020-01-22 16:00 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: Auld, Matthew

>-----Original Message-----
>From: Intel-gfx <intel-gfx-bounces@lists.freedesktop.org> On Behalf Of Chris
>Wilson
>Sent: Monday, January 20, 2020 5:49 AM
>To: intel-gfx@lists.freedesktop.org
>Cc: Auld, Matthew <matthew.auld@intel.com>
>Subject: [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray
>
>Replace the vm_idr + vm_idr_mutex with an XArray. The XArray data
>structure is now used to implement IDRs, and provides its won locking.

s/won/own

>We can simply remove the IDR wrapper and in the process also remove our
>extra mutex.
>
>Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>---
> drivers/gpu/drm/i915/gem/i915_gem_context.c | 80 ++++++---------------
> drivers/gpu/drm/i915/i915_drv.h             |  4 +-
> 2 files changed, 22 insertions(+), 62 deletions(-)
>
>diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c
>b/drivers/gpu/drm/i915/gem/i915_gem_context.c
>index a2e57e62af30..d2e4e8cbf4d4 100644
>--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
>+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
>@@ -761,12 +761,6 @@ void i915_gem_driver_release__contexts(struct
>drm_i915_private *i915)
> 	flush_work(&i915->gem.contexts.free_work);
> }
>
>-static int vm_idr_cleanup(int id, void *p, void *data)
>-{
>-	i915_vm_put(p);
>-	return 0;
>-}
>-
> static int gem_context_register(struct i915_gem_context *ctx,
> 				struct drm_i915_file_private *fpriv,
> 				u32 *id)
>@@ -803,9 +797,7 @@ int i915_gem_context_open(struct drm_i915_private
>*i915,
> 	u32 id;
>
> 	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
>-
>-	mutex_init(&file_priv->vm_idr_lock);
>-	idr_init_base(&file_priv->vm_idr, 1);
>+	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
>
> 	ctx = i915_gem_create_context(i915, 0);
> 	if (IS_ERR(ctx)) {
>@@ -823,9 +815,8 @@ int i915_gem_context_open(struct drm_i915_private
>*i915,
> err_ctx:
> 	context_close(ctx);
> err:
>-	idr_destroy(&file_priv->vm_idr);
>+	xa_destroy(&file_priv->vm_xa);
> 	xa_destroy(&file_priv->context_xa);
>-	mutex_destroy(&file_priv->vm_idr_lock);
> 	return err;
> }
>
>@@ -833,6 +824,7 @@ void i915_gem_context_close(struct drm_file *file)
> {
> 	struct drm_i915_file_private *file_priv = file->driver_priv;
> 	struct drm_i915_private *i915 = file_priv->dev_priv;
>+	struct i915_address_space *vm;
> 	struct i915_gem_context *ctx;
> 	unsigned long idx;
>
>@@ -840,9 +832,9 @@ void i915_gem_context_close(struct drm_file *file)
> 		context_close(ctx);
> 	xa_destroy(&file_priv->context_xa);
>
>-	idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
>-	idr_destroy(&file_priv->vm_idr);
>-	mutex_destroy(&file_priv->vm_idr_lock);
>+	xa_for_each(&file_priv->vm_xa, idx, vm)
>+		i915_vm_put(vm);
>+	xa_destroy(&file_priv->vm_xa);
>
> 	contexts_flush_free(&i915->gem.contexts);
> }
>@@ -876,23 +868,13 @@ int i915_gem_vm_create_ioctl(struct drm_device
>*dev, void *data,
> 			goto err_put;
> 	}
>
>-	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
>+	err = xa_alloc(&file_priv->vm_xa, &args->vm_id,
>+		       &ppgtt->vm, xa_limit_32b, GFP_KERNEL);
> 	if (err)
> 		goto err_put;
>
>-	err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
>-	if (err < 0)
>-		goto err_unlock;
>-
>-	GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */

Moving this comment to the xa_init_flags() would help me understand
why the index started at 1.
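
Roughly what I am picturing -- a sketch only, reusing the init calls from
this patch rather than anything to apply verbatim:

        /* id 0 is reserved for an invalid/unassigned ppgtt, so hand out
         * vm ids starting at 1.
         */
        xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);

        /* context ids may legitimately start at 0, no reserved value */
        xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);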

>-	mutex_unlock(&file_priv->vm_idr_lock);
>-
>-	args->vm_id = err;
> 	return 0;
>
>-err_unlock:
>-	mutex_unlock(&file_priv->vm_idr_lock);
> err_put:
> 	i915_vm_put(&ppgtt->vm);
> 	return err;
>@@ -904,8 +886,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device
>*dev, void *data,
> 	struct drm_i915_file_private *file_priv = file->driver_priv;
> 	struct drm_i915_gem_vm_control *args = data;
> 	struct i915_address_space *vm;
>-	int err;
>-	u32 id;
>
> 	if (args->flags)
> 		return -EINVAL;
>@@ -913,17 +893,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device
>*dev, void *data,
> 	if (args->extensions)
> 		return -EINVAL;
>
>-	id = args->vm_id;
>-	if (!id)
>-		return -ENOENT;
>-
>-	err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
>-	if (err)
>-		return err;
>-
>-	vm = idr_remove(&file_priv->vm_idr, id);
>-
>-	mutex_unlock(&file_priv->vm_idr_lock);
>+	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
> 	if (!vm)
> 		return -ENOENT;
>
>@@ -1021,35 +991,27 @@ static int get_ppgtt(struct drm_i915_file_private
>*file_priv,
> 		     struct drm_i915_gem_context_param *args)
> {
> 	struct i915_address_space *vm;
>-	int ret;
>+	int err = -ENODEV;
>+	u32 id;
>
> 	if (!rcu_access_pointer(ctx->vm))
> 		return -ENODEV;
>
> 	rcu_read_lock();
> 	vm = context_get_vm_rcu(ctx);
>+	if (vm)
>+		err = xa_alloc(&file_priv->vm_xa, &id, vm,
>+			       xa_limit_32b, GFP_KERNEL);
> 	rcu_read_unlock();
>+	if (!err) {
>+		i915_vm_open(vm);

Why did you switch to success path in the if here?

Can you do:

if (err)
	goto err_put;

?

>-	ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
>-	if (ret)
>-		goto err_put;
>-
>-	ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
>-	GEM_BUG_ON(!ret);
>-	if (ret < 0)
>-		goto err_unlock;
>-
>-	i915_vm_open(vm);
>-
>-	args->size = 0;
>-	args->value = ret;
>+		args->size = 0;
>+		args->value = id;

Would passing args->value to the xa_alloc be useful?

Nice clean up.

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>

Mike


>+	}
>
>-	ret = 0;
>-err_unlock:
>-	mutex_unlock(&file_priv->vm_idr_lock);
>-err_put:
> 	i915_vm_put(vm);
>-	return ret;
>+	return err;
> }
>
> static void set_ppgtt_barrier(void *data)
>@@ -1151,7 +1113,7 @@ static int set_ppgtt(struct drm_i915_file_private
>*file_priv,
> 		return -ENOENT;
>
> 	rcu_read_lock();
>-	vm = idr_find(&file_priv->vm_idr, args->value);
>+	vm = xa_load(&file_priv->vm_xa, args->value);
> 	if (vm && !kref_get_unless_zero(&vm->ref))
> 		vm = NULL;
> 	rcu_read_unlock();
>diff --git a/drivers/gpu/drm/i915/i915_drv.h
>b/drivers/gpu/drm/i915/i915_drv.h
>index 077af22b8340..50abf9113b2f 100644
>--- a/drivers/gpu/drm/i915/i915_drv.h
>+++ b/drivers/gpu/drm/i915/i915_drv.h
>@@ -203,9 +203,7 @@ struct drm_i915_file_private {
> 	} mm;
>
> 	struct xarray context_xa;
>-
>-	struct idr vm_idr;
>-	struct mutex vm_idr_lock; /* guards vm_idr */
>+	struct xarray vm_xa;
>
> 	unsigned int bsd_engine;
>
>--
>2.25.0
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray
  2020-01-22 16:00   ` Ruhl, Michael J
@ 2020-01-22 16:08     ` Chris Wilson
  2020-01-22 16:15       ` Ruhl, Michael J
  0 siblings, 1 reply; 13+ messages in thread
From: Chris Wilson @ 2020-01-22 16:08 UTC (permalink / raw)
  To: Ruhl, Michael J, intel-gfx; +Cc: Auld, Matthew

Quoting Ruhl, Michael J (2020-01-22 16:00:25)
> >-----Original Message-----
> >From: Intel-gfx <intel-gfx-bounces@lists.freedesktop.org> On Behalf Of Chris
> >Wilson
> >@@ -876,23 +868,13 @@ int i915_gem_vm_create_ioctl(struct drm_device
> >*dev, void *data,
> >                       goto err_put;
> >       }
> >
> >-      err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
> >+      err = xa_alloc(&file_priv->vm_xa, &args->vm_id,
> >+                     &ppgtt->vm, xa_limit_32b, GFP_KERNEL);
> >       if (err)
> >               goto err_put;
> >
> >-      err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
> >-      if (err < 0)
> >-              goto err_unlock;
> >-
> >-      GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
> 
> Moving this comment to the xa_init_flags() would help me understand
> why the index started at 1.

Hey, I take 0 being reserved for granted, and had to think about why
the context_xa was not 1-biased!

> >@@ -1021,35 +991,27 @@ static int get_ppgtt(struct drm_i915_file_private
> >*file_priv,
> >                    struct drm_i915_gem_context_param *args)
> > {
> >       struct i915_address_space *vm;
> >-      int ret;
> >+      int err = -ENODEV;
> >+      u32 id;
> >
> >       if (!rcu_access_pointer(ctx->vm))
> >               return -ENODEV;
> >
> >       rcu_read_lock();
> >       vm = context_get_vm_rcu(ctx);
> >+      if (vm)
> >+              err = xa_alloc(&file_priv->vm_xa, &id, vm,
> >+                             xa_limit_32b, GFP_KERNEL);
> >       rcu_read_unlock();
> >+      if (!err) {
> >+              i915_vm_open(vm);
> 
> Why did you switch to success path in the if here?

No good reason, just simple enough to fit inside one if {}.

> Can you do:
> 
> if (err)
>         goto err_put;
> 
> ?
> 
> >-      ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
> >-      if (ret)
> >-              goto err_put;
> >-
> >-      ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
> >-      GEM_BUG_ON(!ret);
> >-      if (ret < 0)
> >-              goto err_unlock;
> >-
> >-      i915_vm_open(vm);
> >-
> >-      args->size = 0;
> >-      args->value = ret;
> >+              args->size = 0;
> >+              args->value = id;
> 
> Would passing args->value to the xa_alloc be useful?

General rule is not to alter user params except on success. While not
always required, the pattern does help to avoid common pitfalls where
userspace has to repeat an ioctl (e.g. SIGINT).
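
The shape in the hunk above follows that rule -- sketched here with the
relevant bits annotated, nothing beyond what the patch already does:

        err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
        if (!err) {
                i915_vm_open(vm);
                args->size = 0;         /* user params touched only on success */
                args->value = id;
        }
        i915_vm_put(vm);        /* drop the reference from context_get_vm_rcu() */
        return err;
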
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray
  2020-01-22 16:08     ` Chris Wilson
@ 2020-01-22 16:15       ` Ruhl, Michael J
  2020-01-22 16:17         ` Chris Wilson
  0 siblings, 1 reply; 13+ messages in thread
From: Ruhl, Michael J @ 2020-01-22 16:15 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: Auld, Matthew



>-----Original Message-----
>From: Chris Wilson <chris@chris-wilson.co.uk>
>Sent: Wednesday, January 22, 2020 11:09 AM
>To: Ruhl, Michael J <michael.j.ruhl@intel.com>; intel-
>gfx@lists.freedesktop.org
>Cc: Auld, Matthew <matthew.auld@intel.com>
>Subject: RE: [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray
>
>Quoting Ruhl, Michael J (2020-01-22 16:00:25)
>> >-----Original Message-----
>> >From: Intel-gfx <intel-gfx-bounces@lists.freedesktop.org> On Behalf Of
>Chris
>> >Wilson
>> >@@ -876,23 +868,13 @@ int i915_gem_vm_create_ioctl(struct drm_device
>> >*dev, void *data,
>> >                       goto err_put;
>> >       }
>> >
>> >-      err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
>> >+      err = xa_alloc(&file_priv->vm_xa, &args->vm_id,
>> >+                     &ppgtt->vm, xa_limit_32b, GFP_KERNEL);
>> >       if (err)
>> >               goto err_put;
>> >
>> >-      err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
>> >-      if (err < 0)
>> >-              goto err_unlock;
>> >-
>> >-      GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
>>
>> Moving this comment to the xa_init_flags() would help me understand
>> why the index started at 1.
>
>Hey, I take 0 being reserved for granted, and had to think about why
>the context_xa was not 1-biased!
>
>> >@@ -1021,35 +991,27 @@ static int get_ppgtt(struct drm_i915_file_private
>> >*file_priv,
>> >                    struct drm_i915_gem_context_param *args)
>> > {
>> >       struct i915_address_space *vm;
>> >-      int ret;
>> >+      int err = -ENODEV;
>> >+      u32 id;
>> >
>> >       if (!rcu_access_pointer(ctx->vm))
>> >               return -ENODEV;
>> >
>> >       rcu_read_lock();
>> >       vm = context_get_vm_rcu(ctx);
>> >+      if (vm)
>> >+              err = xa_alloc(&file_priv->vm_xa, &id, vm,
>> >+                             xa_limit_32b, GFP_KERNEL);
>> >       rcu_read_unlock();
>> >+      if (!err) {
>> >+              i915_vm_open(vm);
>>
>> Why did you switch to success path in the if here?
>
>No good reason, just simple enough to fit inside one if {}.
>
>> Can you do:
>>
>> if (err)
>>         goto err_put;
>>
>> ?
>>
>> >-      ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
>> >-      if (ret)
>> >-              goto err_put;
>> >-
>> >-      ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
>> >-      GEM_BUG_ON(!ret);
>> >-      if (ret < 0)
>> >-              goto err_unlock;
>> >-
>> >-      i915_vm_open(vm);
>> >-
>> >-      args->size = 0;
>> >-      args->value = ret;
>> >+              args->size = 0;
>> >+              args->value = id;
>>
>> Would passing args->value to the xa_alloc be useful?
>
>General rule is not to alter user params except on success. While not
>always required, the pattern does help to avoid common pitfalls where
>userspace has to repeat an ioctl (e.g. SIGINT).

Yup.  Following that rule, xa_alloc() will only update the value on success.

I was mostly commenting on it because you had done that in the previous
xa_alloc call.

Thanks,

Mike

>-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray
  2020-01-22 16:15       ` Ruhl, Michael J
@ 2020-01-22 16:17         ` Chris Wilson
  0 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2020-01-22 16:17 UTC (permalink / raw)
  To: Ruhl, Michael J, intel-gfx; +Cc: Auld, Matthew

Quoting Ruhl, Michael J (2020-01-22 16:15:17)
> 
> 
> >-----Original Message-----
> >From: Chris Wilson <chris@chris-wilson.co.uk>
> >Sent: Wednesday, January 22, 2020 11:09 AM
> >To: Ruhl, Michael J <michael.j.ruhl@intel.com>; intel-
> >gfx@lists.freedesktop.org
> >Cc: Auld, Matthew <matthew.auld@intel.com>
> >Subject: RE: [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray
> >
> >Quoting Ruhl, Michael J (2020-01-22 16:00:25)
> >> >-----Original Message-----
> >> >From: Intel-gfx <intel-gfx-bounces@lists.freedesktop.org> On Behalf Of
> >Chris
> >> >Wilson
> >> >@@ -876,23 +868,13 @@ int i915_gem_vm_create_ioctl(struct drm_device
> >> >*dev, void *data,
> >> >                       goto err_put;
> >> >       }
> >> >
> >> >-      err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
> >> >+      err = xa_alloc(&file_priv->vm_xa, &args->vm_id,
> >> >+                     &ppgtt->vm, xa_limit_32b, GFP_KERNEL);
> >> >       if (err)
> >> >               goto err_put;
> >> >
> >> >-      err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
> >> >-      if (err < 0)
> >> >-              goto err_unlock;
> >> >-
> >> >-      GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
> >>
> >> Moving this comment to the xa_init_flags() would help me understand
> >> why the index started at 1.
> >
> >Hey, I take 0 being reserved for granted, and had to think about why
> >the context_xa was not 1-biased!
> >
> >> >@@ -1021,35 +991,27 @@ static int get_ppgtt(struct drm_i915_file_private
> >> >*file_priv,
> >> >                    struct drm_i915_gem_context_param *args)
> >> > {
> >> >       struct i915_address_space *vm;
> >> >-      int ret;
> >> >+      int err = -ENODEV;
> >> >+      u32 id;
> >> >
> >> >       if (!rcu_access_pointer(ctx->vm))
> >> >               return -ENODEV;
> >> >
> >> >       rcu_read_lock();
> >> >       vm = context_get_vm_rcu(ctx);
> >> >+      if (vm)
> >> >+              err = xa_alloc(&file_priv->vm_xa, &id, vm,
> >> >+                             xa_limit_32b, GFP_KERNEL);
> >> >       rcu_read_unlock();
> >> >+      if (!err) {
> >> >+              i915_vm_open(vm);
> >>
> >> Why did you switch to success path in the if here?
> >
> >No good reason, just simple enough to fit inside one if {}.
> >
> >> Can you do:
> >>
> >> if (err)
> >>         goto err_put;
> >>
> >> ?
> >>
> >> >-      ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
> >> >-      if (ret)
> >> >-              goto err_put;
> >> >-
> >> >-      ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
> >> >-      GEM_BUG_ON(!ret);
> >> >-      if (ret < 0)
> >> >-              goto err_unlock;
> >> >-
> >> >-      i915_vm_open(vm);
> >> >-
> >> >-      args->size = 0;
> >> >-      args->value = ret;
> >> >+              args->size = 0;
> >> >+              args->value = id;
> >>
> >> Would passing args->value to the xa_alloc be useful?
> >
> >General rule is not to alter user params except on success. While not
> >always required, the pattern does help to avoid common pitfalls where
> >userspace has to repeat an ioctl (e.g. SIGINT).
> 
> Yup.  Following that rule, xa_alloc() will only update the value on success.
> 
> I was mostly commenting on it because you had done that in the previous
> xa_alloc call.

Went back and changed that so both paths look very similar, begging to
be refactored...
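
Something along the lines of a tiny helper both callers share -- purely a
hypothetical name and shape, just to sketch the direction:

        /* hypothetical: one place that hands out a 1-based id for a vm */
        static int vm_xa_alloc_id(struct drm_i915_file_private *file_priv,
                                  struct i915_address_space *vm, u32 *id)
        {
                return xa_alloc(&file_priv->vm_xa, id, vm,
                                xa_limit_32b, GFP_KERNEL);
        }
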
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2020-01-22 16:17 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-01-20 10:49 [Intel-gfx] [PATCH 1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Chris Wilson
2020-01-20 10:49 ` [Intel-gfx] [PATCH 2/5] drm/i915: Tighten atomicity of i915_active_acquire vs i915_active_release Chris Wilson
2020-01-20 10:49 ` [Intel-gfx] [PATCH 3/5] drm/i915/gem: Store mmap_offsets in an rbtree rather than a plain list Chris Wilson
2020-01-20 15:24   ` Abdiel Janulgue
2020-01-20 10:49 ` [Intel-gfx] [PATCH 4/5] drm/i915/gem: Convert vm idr to xarray Chris Wilson
2020-01-22 16:00   ` Ruhl, Michael J
2020-01-22 16:08     ` Chris Wilson
2020-01-22 16:15       ` Ruhl, Michael J
2020-01-22 16:17         ` Chris Wilson
2020-01-20 10:49 ` [Intel-gfx] [PATCH 5/5] drm/i915/display: Squelch kerneldoc complaints Chris Wilson
2020-01-20 15:17 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/5] drm/i915/gt: Acquire ce->active before ce->pin_count/ce->pin_mutex Patchwork
2020-01-20 23:42 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2020-01-21  8:40 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).