* [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler
@ 2019-10-10  7:14 Chris Wilson
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

Since writing the comment that the scheduler is entirely passive, we have
added minimal timeslicing, which introduces the most primitive of active
elements (a timeout and reschedule).
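
For illustration only (this is not part of the patch, and it uses hypothetical
types and helpers rather than the real i915 structures), a minimal sketch of
the kind of rotation the new comment below describes: on timeslice expiry the
running context is moved to the back of its priority level, and its dependents
are requeued behind it so the dependency ordering is preserved.

#include <linux/list.h>

struct sched_node {
	struct list_head run_link;   /* position in the priority/run queue */
	struct list_head dep_link;   /* membership in a parent's dependents list */
	struct list_head dependents; /* nodes that must run after this one */
	int priority;
};

static void timeslice_rotate(struct list_head *prio_queue,
			     struct sched_node *node)
{
	struct sched_node *dep;

	/* Put the expiring context at the back of its priority level... */
	list_move_tail(&node->run_link, prio_queue);

	/* ...then requeue its dependents behind it to keep the DAG order. */
	list_for_each_entry(dep, &node->dependents, dep_link)
		list_move_tail(&dep->run_link, prio_queue);
}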

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Ramalingam C <ramalingam.c@intel.com>
---
 drivers/gpu/drm/i915/i915_scheduler_types.h | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index aad81acba9dc..d18e70550054 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -49,6 +49,15 @@ struct i915_sched_attr {
  * DAG of each request, we are able to insert it into a sorted queue when it
  * is ready, and are able to reorder its portion of the graph to accommodate
  * dynamic priority changes.
+ *
+ * Ok, there is now one active element to the "scheduler" in the backends.
+ * We let a new context run for a small amount of time before re-evaluating
+ * the run order. As we re-evaluate, we maintain the strict ordering of
+ * dependencies, but attempt to rotate the active contexts (the current context
+ * is put to the back of its priority queue, then its dependents are reshuffled).
+ * This provides minimal timeslicing and prevents a userspace hog (e.g.
+ * something waiting on a user semaphore [VkEvent]) from denying service to
+ * others.
  */
 struct i915_sched_node {
 	struct list_head signalers_list; /* those before us, we depend upon */
-- 
2.23.0

* [PATCH 01/10] drm/i915/gem: Distinguish each object type
@ 2019-10-14 22:05 Chris Wilson
From: Chris Wilson @ 2019-10-14 22:05 UTC (permalink / raw)
  To: intel-gfx; +Cc: Matthew Auld

Give each object class its own lock class to avoid lockdep
cross-contamination between paths (e.g. userptr!).
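
As a generic illustration of the lockdep technique (hypothetical names only,
not the i915 API): each allocation path declares its own static struct
lock_class_key and hands it to __mutex_init(), so lockdep treats the
otherwise-identical per-object mutexes as distinct classes and no longer
conflates lock dependencies taken on different object types.

#include <linux/mutex.h>
#include <linux/slab.h>

struct example_object {
	struct mutex lock;
};

static void example_object_init(struct example_object *obj,
				struct lock_class_key *key)
{
	/* Same effect as mutex_init(), but with an explicit lockdep class. */
	__mutex_init(&obj->lock, "example_object->lock", key);
}

static struct example_object *example_create(void)
{
	static struct lock_class_key lock_class; /* one class per call-site */
	struct example_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	example_object_init(obj, &lock_class);
	return obj;
}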

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c           | 3 ++-
 drivers/gpu/drm/i915/gem/i915_gem_internal.c         | 3 ++-
 drivers/gpu/drm/i915/gem/i915_gem_object.c           | 5 +++--
 drivers/gpu/drm/i915/gem/i915_gem_object.h           | 3 ++-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c            | 3 ++-
 drivers/gpu/drm/i915/gem/i915_gem_stolen.c           | 3 ++-
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c          | 3 ++-
 drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c | 3 ++-
 drivers/gpu/drm/i915/gem/selftests/huge_pages.c      | 8 +++++---
 drivers/gpu/drm/i915/gvt/dmabuf.c                    | 3 ++-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c        | 3 ++-
 drivers/gpu/drm/i915/selftests/mock_region.c         | 3 ++-
 12 files changed, 28 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 96ce95c8ac5a..eaea49d08eb5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -256,6 +256,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 					     struct dma_buf *dma_buf)
 {
+	static struct lock_class_key lock_class;
 	struct dma_buf_attachment *attach;
 	struct drm_i915_gem_object *obj;
 	int ret;
@@ -287,7 +288,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 	}
 
 	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
-	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
+	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
 	obj->base.import_attach = attach;
 	obj->base.resv = dma_buf->resv;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 5ae694c24df4..9cfb0e41ff06 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -164,6 +164,7 @@ struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *i915,
 				phys_addr_t size)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
 	unsigned int cache_level;
 
@@ -178,7 +179,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
 		return ERR_PTR(-ENOMEM);
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &i915_gem_object_internal_ops);
+	i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class);
 
 	/*
 	 * Mark the object as volatile, such that the pages are marked as
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index dbf9be9a79f4..a50296cce0d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -47,9 +47,10 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
 }
 
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
-			  const struct drm_i915_gem_object_ops *ops)
+			  const struct drm_i915_gem_object_ops *ops,
+			  struct lock_class_key *key)
 {
-	mutex_init(&obj->mm.lock);
+	__mutex_init(&obj->mm.lock, "obj->mm.lock", key);
 
 	spin_lock_init(&obj->vma.lock);
 	INIT_LIST_HEAD(&obj->vma.list);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c5e14c9c805c..b0245585b4d5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -23,7 +23,8 @@ struct drm_i915_gem_object *i915_gem_object_alloc(void);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
-			  const struct drm_i915_gem_object_ops *ops);
+			  const struct drm_i915_gem_object_ops *ops,
+			  struct lock_class_key *key);
 struct drm_i915_gem_object *
 i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4c4954e8ce0a..f36f7d658380 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -458,6 +458,7 @@ static int create_shmem(struct drm_i915_private *i915,
 struct drm_i915_gem_object *
 i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	unsigned int cache_level;
@@ -494,7 +495,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size)
 	mapping_set_gfp_mask(mapping, mask);
 	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
 
-	i915_gem_object_init(obj, &i915_gem_shmem_ops);
+	i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class);
 
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index c76260ce13e3..a1f0cf2519b3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -551,6 +551,7 @@ static struct drm_i915_gem_object *
 _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 			       struct drm_mm_node *stolen)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
 	unsigned int cache_level;
 	int err = -ENOMEM;
@@ -560,7 +561,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
 		goto err;
 
 	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
-	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
+	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
 
 	obj->stolen = stolen;
 	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 4f970474013f..1e045c337044 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -725,6 +725,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		       void *data,
 		       struct drm_file *file)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_userptr *args = data;
 	struct drm_i915_gem_object *obj;
@@ -769,7 +770,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		return -ENOMEM;
 
 	drm_gem_private_object_init(dev, &obj->base, args->user_size);
-	i915_gem_object_init(obj, &i915_gem_userptr_ops);
+	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index 3c5d17b2b670..892d12db6c49 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -96,6 +96,7 @@ huge_gem_object(struct drm_i915_private *i915,
 		phys_addr_t phys_size,
 		dma_addr_t dma_size)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
 	unsigned int cache_level;
 
@@ -111,7 +112,7 @@ huge_gem_object(struct drm_i915_private *i915,
 		return ERR_PTR(-ENOMEM);
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
-	i915_gem_object_init(obj, &huge_ops);
+	i915_gem_object_init(obj, &huge_ops, &lock_class);
 
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index f27772f6779a..dac8344507c1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -149,6 +149,7 @@ huge_pages_object(struct drm_i915_private *i915,
 		  u64 size,
 		  unsigned int page_mask)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
 
 	GEM_BUG_ON(!size);
@@ -165,7 +166,7 @@ huge_pages_object(struct drm_i915_private *i915,
 		return ERR_PTR(-ENOMEM);
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &huge_page_ops);
+	i915_gem_object_init(obj, &huge_page_ops, &lock_class);
 
 	i915_gem_object_set_volatile(obj);
 
@@ -295,6 +296,7 @@ static const struct drm_i915_gem_object_ops fake_ops_single = {
 static struct drm_i915_gem_object *
 fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
 
 	GEM_BUG_ON(!size);
@@ -313,9 +315,9 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 
 	if (single)
-		i915_gem_object_init(obj, &fake_ops_single);
+		i915_gem_object_init(obj, &fake_ops_single, &lock_class);
 	else
-		i915_gem_object_init(obj, &fake_ops);
+		i915_gem_object_init(obj, &fake_ops, &lock_class);
 
 	i915_gem_object_set_volatile(obj);
 
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 13044c027f27..a816aef6142b 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -152,6 +152,7 @@ static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
 static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 		struct intel_vgpu_fb_info *info)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 
@@ -161,7 +162,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 
 	drm_gem_private_object_init(dev, &obj->base,
 		roundup(info->size, PAGE_SIZE));
-	i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
 
 	obj->read_domains = I915_GEM_DOMAIN_GTT;
 	obj->write_domain = 0;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index ebe735df6504..a1cb072e4a1b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -104,6 +104,7 @@ static const struct drm_i915_gem_object_ops fake_ops = {
 static struct drm_i915_gem_object *
 fake_dma_object(struct drm_i915_private *i915, u64 size)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_gem_object *obj;
 
 	GEM_BUG_ON(!size);
@@ -117,7 +118,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 		goto err;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &fake_ops);
+	i915_gem_object_init(obj, &fake_ops, &lock_class);
 
 	i915_gem_object_set_volatile(obj);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 7b0c99ddc2d5..b2ad41c27e67 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -19,6 +19,7 @@ mock_object_create(struct intel_memory_region *mem,
 		   resource_size_t size,
 		   unsigned int flags)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_private *i915 = mem->i915;
 	struct drm_i915_gem_object *obj;
 
@@ -30,7 +31,7 @@ mock_object_create(struct intel_memory_region *mem,
 		return ERR_PTR(-ENOMEM);
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &mock_region_obj_ops);
+	i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
 
 	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 
-- 
2.23.0
