All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code
@ 2015-09-03 12:01 Chris Wilson
  2015-09-03 12:01 ` [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset Chris Wilson
  2015-09-03 14:01 ` [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code Zanoni, Paulo R
  0 siblings, 2 replies; 12+ messages in thread
From: Chris Wilson @ 2015-09-03 12:01 UTC (permalink / raw)
  To: intel-gfx; +Cc: Mika Kuoppala

A small, very small, step to sharing the duplicate code between
execlists and legacy submission engines, starting with the ringbuffer
allocation code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Dave Gordon <david.s.gordon@intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c        | 49 +++++-------------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 89 ++++++++++++++++++++-------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  8 +--
 3 files changed, 70 insertions(+), 76 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 40cbba4ea4ba..28a712e7d2d0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2340,8 +2340,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 				i915_gem_object_ggtt_unpin(ctx_obj);
 			}
 			WARN_ON(ctx->engine[ring->id].pin_count);
-			intel_destroy_ringbuffer_obj(ringbuf);
-			kfree(ringbuf);
+			intel_ringbuffer_free(ringbuf);
 			drm_gem_object_unreference(&ctx_obj->base);
 		}
 	}
@@ -2442,42 +2441,20 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 			I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
 	}
 
-	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-	if (!ringbuf) {
-		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
-				ring->name);
-		ret = -ENOMEM;
+	ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
+	if (IS_ERR(ringbuf)) {
+		ret = PTR_ERR(ringbuf);
 		goto error_unpin_ctx;
 	}
 
-	ringbuf->ring = ring;
-
-	ringbuf->size = 4 * PAGE_SIZE;
-	ringbuf->effective_size = ringbuf->size;
-	ringbuf->head = 0;
-	ringbuf->tail = 0;
-	ringbuf->last_retired_head = -1;
-	intel_ring_update_space(ringbuf);
-
-	if (ringbuf->obj == NULL) {
-		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+	if (is_global_default_ctx) {
+		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
 		if (ret) {
-			DRM_DEBUG_DRIVER(
-				"Failed to allocate ringbuffer obj %s: %d\n",
-				ring->name, ret);
-			goto error_free_rbuf;
+			DRM_ERROR(
+				  "Failed to pin and map ringbuffer %s: %d\n",
+				  ring->name, ret);
+			goto error_ringbuf;
 		}
-
-		if (is_global_default_ctx) {
-			ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
-			if (ret) {
-				DRM_ERROR(
-					"Failed to pin and map ringbuffer %s: %d\n",
-					ring->name, ret);
-				goto error_destroy_rbuf;
-			}
-		}
-
 	}
 
 	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
@@ -2519,10 +2496,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 error:
 	if (is_global_default_ctx)
 		intel_unpin_ringbuffer_obj(ringbuf);
-error_destroy_rbuf:
-	intel_destroy_ringbuffer_obj(ringbuf);
-error_free_rbuf:
-	kfree(ringbuf);
+error_ringbuf:
+	intel_ringbuffer_free(ringbuf);
 error_unpin_ctx:
 	if (is_global_default_ctx)
 		i915_gem_object_ggtt_unpin(ctx_obj);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6e6b8db996ef..20a75bb516ac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
 	drm_gem_object_unreference(&ringbuf->obj->base);
 	ringbuf->obj = NULL;
 }
 
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-			       struct intel_ringbuffer *ringbuf)
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+				      struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+{
+	struct intel_ringbuffer *ring;
+	int ret;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ring->ring = engine;
+
+	ring->size = size;
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = size;
+	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+		ring->effective_size -= 2 * CACHELINE_BYTES;
+
+	ring->last_retired_head = -1;
+	intel_ring_update_space(ring);
+
+	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+	if (ret) {
+		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+			  engine->name, ret);
+		kfree(ring);
+		return ERR_PTR(ret);
+	}
+
+	return ring;
+}
+
+void
+intel_ringbuffer_free(struct intel_ringbuffer *ring)
+{
+	intel_destroy_ringbuffer_obj(ring);
+	kfree(ring);
+}
+
 static int intel_init_ring_buffer(struct drm_device *dev,
 				  struct intel_engine_cs *ring)
 {
@@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	WARN_ON(ring->buffer);
 
-	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-	if (!ringbuf)
-		return -ENOMEM;
-	ring->buffer = ringbuf;
-
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->execlist_queue);
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
-	ringbuf->size = 32 * PAGE_SIZE;
-	ringbuf->ring = ring;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
 
+	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+	if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
+	ring->buffer = ringbuf;
+
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 			goto error;
 	}
 
-	WARN_ON(ringbuf->obj);
-
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-				ring->name, ret);
-		goto error;
-	}
-
 	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
@@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	/* Workaround an erratum on the i830 which causes a hang if
-	 * the TAIL pointer points to within the last 2 cachelines
-	 * of the buffer.
-	 */
-	ringbuf->effective_size = ringbuf->size;
-	if (IS_I830(dev) || IS_845G(dev))
-		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
 		goto error;
@@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	return 0;
 
 error:
-	kfree(ringbuf);
+	intel_ringbuffer_free(ringbuf);
 	ring->buffer = NULL;
 	return ret;
 }
@@ -2098,19 +2121,18 @@ error:
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_ringbuffer *ringbuf;
 
 	if (!intel_ring_initialized(ring))
 		return;
 
 	dev_priv = to_i915(ring->dev);
-	ringbuf = ring->buffer;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	intel_unpin_ringbuffer_obj(ringbuf);
-	intel_destroy_ringbuffer_obj(ringbuf);
+	intel_unpin_ringbuffer_obj(ring->buffer);
+	intel_ringbuffer_free(ring->buffer);
+	ring->buffer = NULL;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
 	i915_cmd_parser_fini_ring(ring);
 	i915_gem_batch_pool_fini(&ring->batch_pool);
-
-	kfree(ringbuf);
-	ring->buffer = NULL;
 }
 
 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 95b0b4b55fa6..49fa41dc0eb6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -420,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 				     struct intel_ringbuffer *ringbuf);
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-			       struct intel_ringbuffer *ringbuf);
+void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+void intel_ringbuffer_free(struct intel_ringbuffer *ring);
 
 void intel_stop_ring_buffer(struct intel_engine_cs *ring);
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
-- 
2.5.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset
  2015-09-03 12:01 [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code Chris Wilson
@ 2015-09-03 12:01 ` Chris Wilson
  2015-09-28 15:25   ` Mika Kuoppala
  2015-10-23 11:07   ` Mika Kuoppala
  2015-09-03 14:01 ` [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code Zanoni, Paulo R
  1 sibling, 2 replies; 12+ messages in thread
From: Chris Wilson @ 2015-09-03 12:01 UTC (permalink / raw)
  To: intel-gfx; +Cc: Mika Kuoppala

Having flushed all requests from all queues, we know that all
ringbuffers must now be empty. However, since we do not reclaim
all space when retiring the request (to prevent HEADs colliding
with rapid ringbuffer wraparound) the amount of available space
on each ringbuffer upon reset is less than when we start. Do one
more pass over all the ringbuffers to reset the available space.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Dave Gordon <david.s.gordon@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c         | 14 ++++++++++++++
 drivers/gpu/drm/i915/intel_lrc.c        |  1 +
 drivers/gpu/drm/i915/intel_ringbuffer.c | 13 ++++++++++---
 drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
 4 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 41263cd4170c..3a42c350fec9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2738,6 +2738,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 					struct intel_engine_cs *ring)
 {
+	struct intel_ringbuffer *buffer;
+
 	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj;
 
@@ -2783,6 +2785,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 
 		i915_gem_request_retire(request);
 	}
+
+	/* Having flushed all requests from all queues, we know that all
+	 * ringbuffers must now be empty. However, since we do not reclaim
+	 * all space when retiring the request (to prevent HEADs colliding
+	 * with rapid ringbuffer wraparound) the amount of available space
+	 * upon reset is less than when we start. Do one more pass over
+	 * all the ringbuffers to reset last_retired_head.
+	 */
+	list_for_each_entry(buffer, &ring->buffers, link) {
+		buffer->last_retired_head = buffer->tail;
+		intel_ring_update_space(buffer);
+	}
 }
 
 void i915_gem_reset(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 28a712e7d2d0..de52ddc108a7 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1881,6 +1881,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
 	init_waitqueue_head(&ring->irq_queue);
 
+	INIT_LIST_HEAD(&ring->buffers);
 	INIT_LIST_HEAD(&ring->execlist_queue);
 	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
 	spin_lock_init(&ring->execlist_lock);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 20a75bb516ac..d2e0b3b7efbf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2030,10 +2030,14 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
-	if (ring == NULL)
+	if (ring == NULL) {
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
+				 engine->name);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	ring->ring = engine;
+	list_add(&ring->link, &engine->buffers);
 
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
@@ -2049,8 +2053,9 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 
 	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
 	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-			  engine->name, ret);
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
+				 engine->name, ret);
+		list_del(&ring->link);
 		kfree(ring);
 		return ERR_PTR(ret);
 	}
@@ -2062,6 +2067,7 @@ void
 intel_ringbuffer_free(struct intel_ringbuffer *ring)
 {
 	intel_destroy_ringbuffer_obj(ring);
+	list_del(&ring->link);
 	kfree(ring);
 }
 
@@ -2077,6 +2083,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->execlist_queue);
+	INIT_LIST_HEAD(&ring->buffers);
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 49fa41dc0eb6..58b1976a7d0a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -100,6 +100,7 @@ struct intel_ringbuffer {
 	void __iomem *virtual_start;
 
 	struct intel_engine_cs *ring;
+	struct list_head link;
 
 	u32 head;
 	u32 tail;
@@ -157,6 +158,7 @@ struct  intel_engine_cs {
 	u32		mmio_base;
 	struct		drm_device *dev;
 	struct intel_ringbuffer *buffer;
+	struct list_head buffers;
 
 	/*
 	 * A pool of objects to use as shadow copies of client batch buffers
-- 
2.5.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code
  2015-09-03 12:01 [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code Chris Wilson
  2015-09-03 12:01 ` [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset Chris Wilson
@ 2015-09-03 14:01 ` Zanoni, Paulo R
  2015-09-03 14:47   ` chris
  2015-09-03 14:56   ` Mika Kuoppala
  1 sibling, 2 replies; 12+ messages in thread
From: Zanoni, Paulo R @ 2015-09-03 14:01 UTC (permalink / raw)
  To: intel-gfx, chris; +Cc: Kuoppala, Mika

Em Qui, 2015-09-03 às 13:01 +0100, Chris Wilson escreveu:
> A small, very small, step to sharing the duplicate code between
> execlists and legacy submission engines, starting with the ringbuffer
> allocation code.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> Cc: Dave Gordon <david.s.gordon@intel.com>
> ---
>  drivers/gpu/drm/i915/intel_lrc.c        | 49 +++++-------------
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 89 ++++++++++++++++++++---
> ----------
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  8 +--
>  3 files changed, 70 insertions(+), 76 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c 
> b/drivers/gpu/drm/i915/intel_lrc.c
> index 40cbba4ea4ba..28a712e7d2d0 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -2340,8 +2340,7 @@ void intel_lr_context_free(struct intel_context 
> *ctx)
>  				i915_gem_object_ggtt_unpin(ctx_obj);
>  			}
>  			WARN_ON(ctx->engine[ring->id].pin_count);
> -			intel_destroy_ringbuffer_obj(ringbuf);
> -			kfree(ringbuf);
> +			intel_ringbuffer_free(ringbuf);
>  			drm_gem_object_unreference(&ctx_obj->base);
>  		}
>  	}
> @@ -2442,42 +2441,20 @@ int intel_lr_context_deferred_create(struct 
> intel_context *ctx,
>  			I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
>  	}
>  
> -	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
> -	if (!ringbuf) {
> -		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer 
> %s\n",

We got rid of this message, but I suppose it's not a problem, since it
was not a loud error message.


> -				ring->name);
> -		ret = -ENOMEM;
> +	ringbuf = intel_engine_create_ringbuffer(ring, 4 * 
> PAGE_SIZE);
> +	if (IS_ERR(ringbuf)) {
> +		ret = PTR_ERR(ringbuf);
>  		goto error_unpin_ctx;
>  	}
>  
> -	ringbuf->ring = ring;
> -
> -	ringbuf->size = 4 * PAGE_SIZE;
> -	ringbuf->effective_size = ringbuf->size;
> -	ringbuf->head = 0;
> -	ringbuf->tail = 0;
> -	ringbuf->last_retired_head = -1;
> -	intel_ring_update_space(ringbuf);
> -
> -	if (ringbuf->obj == NULL) {
> -		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
> +	if (is_global_default_ctx) {
> +		ret = intel_pin_and_map_ringbuffer_obj(dev, 
> ringbuf);
>  		if (ret) {
> -			DRM_DEBUG_DRIVER(
> -				"Failed to allocate ringbuffer obj 
> %s: %d\n",
> -				ring->name, ret);
> -			goto error_free_rbuf;
> +			DRM_ERROR(
> +				  "Failed to pin and map ringbuffer 
> %s: %d\n",
> +				  ring->name, ret);
> +			goto error_ringbuf;
>  		}
> -
> -		if (is_global_default_ctx) {
> -			ret = intel_pin_and_map_ringbuffer_obj(dev, 
> ringbuf);
> -			if (ret) {
> -				DRM_ERROR(
> -					"Failed to pin and map 
> ringbuffer %s: %d\n",
> -					ring->name, ret);
> -				goto error_destroy_rbuf;
> -			}
> -		}
> -
>  	}
>  
>  	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
> @@ -2519,10 +2496,8 @@ int intel_lr_context_deferred_create(struct 
> intel_context *ctx,
>  error:
>  	if (is_global_default_ctx)
>  		intel_unpin_ringbuffer_obj(ringbuf);
> -error_destroy_rbuf:
> -	intel_destroy_ringbuffer_obj(ringbuf);
> -error_free_rbuf:
> -	kfree(ringbuf);
> +error_ringbuf:
> +	intel_ringbuffer_free(ringbuf);
>  error_unpin_ctx:
>  	if (is_global_default_ctx)
>  		i915_gem_object_ggtt_unpin(ctx_obj);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c 
> b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 6e6b8db996ef..20a75bb516ac 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct 
> drm_device *dev,
>  	return 0;
>  }
>  
> -void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
> +static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer 
> *ringbuf)
>  {
>  	drm_gem_object_unreference(&ringbuf->obj->base);
>  	ringbuf->obj = NULL;
>  }
>  
> -int intel_alloc_ringbuffer_obj(struct drm_device *dev,
> -			       struct intel_ringbuffer *ringbuf)
> +static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
> +				      struct intel_ringbuffer 
> *ringbuf)
>  {
>  	struct drm_i915_gem_object *obj;
>  
> @@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct 
> drm_device *dev,
>  	return 0;
>  }
>  
> +struct intel_ringbuffer *
> +intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int 
> size)
> +{
> +	struct intel_ringbuffer *ring;
> +	int ret;
> +
> +	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
> +	if (ring == NULL)

The error message referenced above should probably be here.


> +		return ERR_PTR(-ENOMEM);
> +
> +	ring->ring = engine;
> +
> +	ring->size = size;
> +	/* Workaround an erratum on the i830 which causes a hang if
> +	 * the TAIL pointer points to within the last 2 cachelines
> +	 * of the buffer.
> +	 */
> +	ring->effective_size = size;
> +	if (IS_I830(engine->dev) || IS_845G(engine->dev))
> +		ring->effective_size -= 2 * CACHELINE_BYTES;
> +
> +	ring->last_retired_head = -1;
> +	intel_ring_update_space(ring);
> +
> +	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
> +	if (ret) {
> +		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",

Shouldn't this be "Failed to allocate ringbuffer obj %s: %d" ?

> +			  engine->name, ret);
> +		kfree(ring);
> +		return ERR_PTR(ret);
> +	}
> +
> +	return ring;
> +}
> +
> +void
> +intel_ringbuffer_free(struct intel_ringbuffer *ring)
> +{
> +	intel_destroy_ringbuffer_obj(ring);
> +	kfree(ring);
> +}
> +
>  static int intel_init_ring_buffer(struct drm_device *dev,
>  				  struct intel_engine_cs *ring)
>  {
> @@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct 
> drm_device *dev,
>  
>  	WARN_ON(ring->buffer);
>  
> -	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
> -	if (!ringbuf)
> -		return -ENOMEM;
> -	ring->buffer = ringbuf;
> -
>  	ring->dev = dev;
>  	INIT_LIST_HEAD(&ring->active_list);
>  	INIT_LIST_HEAD(&ring->request_list);
>  	INIT_LIST_HEAD(&ring->execlist_queue);
>  	i915_gem_batch_pool_init(dev, &ring->batch_pool);
> -	ringbuf->size = 32 * PAGE_SIZE;
> -	ringbuf->ring = ring;
>  	memset(ring->semaphore.sync_seqno, 0, sizeof(ring
> ->semaphore.sync_seqno));
>  
>  	init_waitqueue_head(&ring->irq_queue);
>  
> +	ringbuf = intel_engine_create_ringbuffer(ring, 32 * 
> PAGE_SIZE);
> +	if (IS_ERR(ringbuf))
> +		return PTR_ERR(ringbuf);
> +	ring->buffer = ringbuf;
> +
>  	if (I915_NEED_GFX_HWS(dev)) {
>  		ret = init_status_page(ring);
>  		if (ret)
> @@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct 
> drm_device *dev,
>  			goto error;
>  	}
>  
> -	WARN_ON(ringbuf->obj);
> -
> -	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
> -	if (ret) {
> -		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
> -				ring->name, ret);
> -		goto error;
> -	}
> -
>  	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
>  	if (ret) {
>  		DRM_ERROR("Failed to pin and map ringbuffer %s: 
> %d\n",
> @@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct 
> drm_device *dev,
>  		goto error;
>  	}
>  
> -	/* Workaround an erratum on the i830 which causes a hang if
> -	 * the TAIL pointer points to within the last 2 cachelines
> -	 * of the buffer.
> -	 */
> -	ringbuf->effective_size = ringbuf->size;
> -	if (IS_I830(dev) || IS_845G(dev))
> -		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
> -
>  	ret = i915_cmd_parser_init_ring(ring);
>  	if (ret)
>  		goto error;
> @@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct 
> drm_device *dev,
>  	return 0;

The "goto error" right above the return 0 changes its behavior: it
wasn't calling intel_destroy_ringbuffer_obj(), but I suppose this is a
fix. I'm just pointing it since I'm not 100% sure.

Everything seems correct, so with or without changes:
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>


>  
>  error:
> -	kfree(ringbuf);
> +	intel_ringbuffer_free(ringbuf);
>  	ring->buffer = NULL;
>  	return ret;
>  }
> @@ -2098,19 +2121,18 @@ error:
>  void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
>  {
>  	struct drm_i915_private *dev_priv;
> -	struct intel_ringbuffer *ringbuf;
>  
>  	if (!intel_ring_initialized(ring))
>  		return;
>  
>  	dev_priv = to_i915(ring->dev);
> -	ringbuf = ring->buffer;
>  
>  	intel_stop_ring_buffer(ring);
>  	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & 
> MODE_IDLE) == 0);
>  
> -	intel_unpin_ringbuffer_obj(ringbuf);
> -	intel_destroy_ringbuffer_obj(ringbuf);
> +	intel_unpin_ringbuffer_obj(ring->buffer);
> +	intel_ringbuffer_free(ring->buffer);
> +	ring->buffer = NULL;
>  
>  	if (ring->cleanup)
>  		ring->cleanup(ring);
> @@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct 
> intel_engine_cs *ring)
>  
>  	i915_cmd_parser_fini_ring(ring);
>  	i915_gem_batch_pool_fini(&ring->batch_pool);
> -
> -	kfree(ringbuf);
> -	ring->buffer = NULL;
>  }
>  
>  static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
> b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 95b0b4b55fa6..49fa41dc0eb6 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -420,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs 
> *ring,
>  #define I915_GEM_HWS_SCRATCH_INDEX	0x40
>  #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << 
> MI_STORE_DWORD_INDEX_SHIFT)
>  
> -void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
> +struct intel_ringbuffer *
> +intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int 
> size);
>  int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
>  				     struct intel_ringbuffer 
> *ringbuf);
> -void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
> -int intel_alloc_ringbuffer_obj(struct drm_device *dev,
> -			       struct intel_ringbuffer *ringbuf);
> +void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
> +void intel_ringbuffer_free(struct intel_ringbuffer *ring);
>  
>  void intel_stop_ring_buffer(struct intel_engine_cs *ring);
>  void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code
  2015-09-03 14:01 ` [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code Zanoni, Paulo R
@ 2015-09-03 14:47   ` chris
  2015-09-03 14:56   ` Mika Kuoppala
  1 sibling, 0 replies; 12+ messages in thread
From: chris @ 2015-09-03 14:47 UTC (permalink / raw)
  To: Zanoni, Paulo R; +Cc: intel-gfx, Kuoppala, Mika

On Thu, Sep 03, 2015 at 02:01:51PM +0000, Zanoni, Paulo R wrote:
> Em Qui, 2015-09-03 às 13:01 +0100, Chris Wilson escreveu:
> > A small, very small, step to sharing the duplicate code between
> > execlists and legacy submission engines, starting with the ringbuffer
> > allocation code.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
> > Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> > Cc: Dave Gordon <david.s.gordon@intel.com>
> > ---
> >  drivers/gpu/drm/i915/intel_lrc.c        | 49 +++++-------------
> >  drivers/gpu/drm/i915/intel_ringbuffer.c | 89 ++++++++++++++++++++---
> > ----------
> >  drivers/gpu/drm/i915/intel_ringbuffer.h |  8 +--
> >  3 files changed, 70 insertions(+), 76 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/intel_lrc.c 
> > b/drivers/gpu/drm/i915/intel_lrc.c
> > index 40cbba4ea4ba..28a712e7d2d0 100644
> > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > @@ -2340,8 +2340,7 @@ void intel_lr_context_free(struct intel_context 
> > *ctx)
> >  				i915_gem_object_ggtt_unpin(ctx_obj);
> >  			}
> >  			WARN_ON(ctx->engine[ring->id].pin_count);
> > -			intel_destroy_ringbuffer_obj(ringbuf);
> > -			kfree(ringbuf);
> > +			intel_ringbuffer_free(ringbuf);
> >  			drm_gem_object_unreference(&ctx_obj->base);
> >  		}
> >  	}
> > @@ -2442,42 +2441,20 @@ int intel_lr_context_deferred_create(struct 
> > intel_context *ctx,
> >  			I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
> >  	}
> >  
> > -	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
> > -	if (!ringbuf) {
> > -		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer 
> > %s\n",
> 
> We got rid of this message, but I suppose it's not a problem, since it
> was not a loud error message.

I additionally added these to patch 2. I removed the ones in intel_lrc
that were simply repeating the error message (albeit in a more generic
fashion due to loss of information). At this level an oom squalk
followed by ENOMEM reported to userspace is fairly obvious (and
definitely not our error).
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code
  2015-09-03 14:01 ` [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code Zanoni, Paulo R
  2015-09-03 14:47   ` chris
@ 2015-09-03 14:56   ` Mika Kuoppala
  2015-09-04  8:17     ` Daniel Vetter
  1 sibling, 1 reply; 12+ messages in thread
From: Mika Kuoppala @ 2015-09-03 14:56 UTC (permalink / raw)
  To: Zanoni, Paulo R, intel-gfx, chris

"Zanoni, Paulo R" <paulo.r.zanoni@intel.com> writes:

> Em Qui, 2015-09-03 às 13:01 +0100, Chris Wilson escreveu:
>> A small, very small, step to sharing the duplicate code between
>> execlists and legacy submission engines, starting with the ringbuffer
>> allocation code.
>> 
>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
>> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
>> Cc: Dave Gordon <david.s.gordon@intel.com>
>> ---
>>  drivers/gpu/drm/i915/intel_lrc.c        | 49 +++++-------------
>>  drivers/gpu/drm/i915/intel_ringbuffer.c | 89 ++++++++++++++++++++---
>> ----------
>>  drivers/gpu/drm/i915/intel_ringbuffer.h |  8 +--
>>  3 files changed, 70 insertions(+), 76 deletions(-)
>> 
>> diff --git a/drivers/gpu/drm/i915/intel_lrc.c 
>> b/drivers/gpu/drm/i915/intel_lrc.c
>> index 40cbba4ea4ba..28a712e7d2d0 100644
>> --- a/drivers/gpu/drm/i915/intel_lrc.c
>> +++ b/drivers/gpu/drm/i915/intel_lrc.c
>> @@ -2340,8 +2340,7 @@ void intel_lr_context_free(struct intel_context 
>> *ctx)
>>  				i915_gem_object_ggtt_unpin(ctx_obj);
>>  			}
>>  			WARN_ON(ctx->engine[ring->id].pin_count);
>> -			intel_destroy_ringbuffer_obj(ringbuf);
>> -			kfree(ringbuf);
>> +			intel_ringbuffer_free(ringbuf);
>>  			drm_gem_object_unreference(&ctx_obj->base);
>>  		}
>>  	}
>> @@ -2442,42 +2441,20 @@ int intel_lr_context_deferred_create(struct 
>> intel_context *ctx,
>>  			I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
>>  	}
>>  
>> -	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
>> -	if (!ringbuf) {
>> -		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer 
>> %s\n",
>
> We got rid of this message, but I suppose it's not a problem, since it
> was not a loud error message.
>
>
>> -				ring->name);
>> -		ret = -ENOMEM;
>> +	ringbuf = intel_engine_create_ringbuffer(ring, 4 * 
>> PAGE_SIZE);
>> +	if (IS_ERR(ringbuf)) {
>> +		ret = PTR_ERR(ringbuf);
>>  		goto error_unpin_ctx;
>>  	}
>>  
>> -	ringbuf->ring = ring;
>> -
>> -	ringbuf->size = 4 * PAGE_SIZE;
>> -	ringbuf->effective_size = ringbuf->size;
>> -	ringbuf->head = 0;
>> -	ringbuf->tail = 0;
>> -	ringbuf->last_retired_head = -1;
>> -	intel_ring_update_space(ringbuf);
>> -
>> -	if (ringbuf->obj == NULL) {
>> -		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
>> +	if (is_global_default_ctx) {
>> +		ret = intel_pin_and_map_ringbuffer_obj(dev, 
>> ringbuf);
>>  		if (ret) {
>> -			DRM_DEBUG_DRIVER(
>> -				"Failed to allocate ringbuffer obj 
>> %s: %d\n",
>> -				ring->name, ret);
>> -			goto error_free_rbuf;
>> +			DRM_ERROR(
>> +				  "Failed to pin and map ringbuffer 
>> %s: %d\n",
>> +				  ring->name, ret);
>> +			goto error_ringbuf;
>>  		}
>> -
>> -		if (is_global_default_ctx) {
>> -			ret = intel_pin_and_map_ringbuffer_obj(dev, 
>> ringbuf);
>> -			if (ret) {
>> -				DRM_ERROR(
>> -					"Failed to pin and map 
>> ringbuffer %s: %d\n",
>> -					ring->name, ret);
>> -				goto error_destroy_rbuf;
>> -			}
>> -		}
>> -
>>  	}
>>  
>>  	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
>> @@ -2519,10 +2496,8 @@ int intel_lr_context_deferred_create(struct 
>> intel_context *ctx,
>>  error:
>>  	if (is_global_default_ctx)
>>  		intel_unpin_ringbuffer_obj(ringbuf);
>> -error_destroy_rbuf:
>> -	intel_destroy_ringbuffer_obj(ringbuf);
>> -error_free_rbuf:
>> -	kfree(ringbuf);
>> +error_ringbuf:
>> +	intel_ringbuffer_free(ringbuf);
>>  error_unpin_ctx:
>>  	if (is_global_default_ctx)
>>  		i915_gem_object_ggtt_unpin(ctx_obj);
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c 
>> b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> index 6e6b8db996ef..20a75bb516ac 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> @@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct 
>> drm_device *dev,
>>  	return 0;
>>  }
>>  
>> -void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
>> +static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer 
>> *ringbuf)
>>  {
>>  	drm_gem_object_unreference(&ringbuf->obj->base);
>>  	ringbuf->obj = NULL;
>>  }
>>  
>> -int intel_alloc_ringbuffer_obj(struct drm_device *dev,
>> -			       struct intel_ringbuffer *ringbuf)
>> +static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
>> +				      struct intel_ringbuffer 
>> *ringbuf)
>>  {
>>  	struct drm_i915_gem_object *obj;
>>  
>> @@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct 
>> drm_device *dev,
>>  	return 0;
>>  }
>>  
>> +struct intel_ringbuffer *
>> +intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int 
>> size)
>> +{
>> +	struct intel_ringbuffer *ring;
>> +	int ret;
>> +
>> +	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
>> +	if (ring == NULL)
>
> The error message referenced above should probably be here.
>
>
>> +		return ERR_PTR(-ENOMEM);
>> +
>> +	ring->ring = engine;
>> +
>> +	ring->size = size;
>> +	/* Workaround an erratum on the i830 which causes a hang if
>> +	 * the TAIL pointer points to within the last 2 cachelines
>> +	 * of the buffer.
>> +	 */
>> +	ring->effective_size = size;
>> +	if (IS_I830(engine->dev) || IS_845G(engine->dev))
>> +		ring->effective_size -= 2 * CACHELINE_BYTES;
>> +
>> +	ring->last_retired_head = -1;
>> +	intel_ring_update_space(ring);
>> +
>> +	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
>> +	if (ret) {
>> +		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
>
> Shouldn't this be "Failed to allocate ringbuffer obj %s: %d" ?
>
>> +			  engine->name, ret);
>> +		kfree(ring);
>> +		return ERR_PTR(ret);
>> +	}
>> +
>> +	return ring;
>> +}
>> +
>> +void
>> +intel_ringbuffer_free(struct intel_ringbuffer *ring)
>> +{
>> +	intel_destroy_ringbuffer_obj(ring);
>> +	kfree(ring);
>> +}
>> +
>>  static int intel_init_ring_buffer(struct drm_device *dev,
>>  				  struct intel_engine_cs *ring)
>>  {
>> @@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct 
>> drm_device *dev,
>>  
>>  	WARN_ON(ring->buffer);
>>  
>> -	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
>> -	if (!ringbuf)
>> -		return -ENOMEM;
>> -	ring->buffer = ringbuf;
>> -
>>  	ring->dev = dev;
>>  	INIT_LIST_HEAD(&ring->active_list);
>>  	INIT_LIST_HEAD(&ring->request_list);
>>  	INIT_LIST_HEAD(&ring->execlist_queue);
>>  	i915_gem_batch_pool_init(dev, &ring->batch_pool);
>> -	ringbuf->size = 32 * PAGE_SIZE;
>> -	ringbuf->ring = ring;
>>  	memset(ring->semaphore.sync_seqno, 0, sizeof(ring
>> ->semaphore.sync_seqno));
>>  
>>  	init_waitqueue_head(&ring->irq_queue);
>>  
>> +	ringbuf = intel_engine_create_ringbuffer(ring, 32 * 
>> PAGE_SIZE);
>> +	if (IS_ERR(ringbuf))
>> +		return PTR_ERR(ringbuf);
>> +	ring->buffer = ringbuf;
>> +
>>  	if (I915_NEED_GFX_HWS(dev)) {
>>  		ret = init_status_page(ring);
>>  		if (ret)
>> @@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct 
>> drm_device *dev,
>>  			goto error;
>>  	}
>>  
>> -	WARN_ON(ringbuf->obj);
>> -
>> -	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
>> -	if (ret) {
>> -		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
>> -				ring->name, ret);
>> -		goto error;
>> -	}
>> -
>>  	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
>>  	if (ret) {
>>  		DRM_ERROR("Failed to pin and map ringbuffer %s: 
>> %d\n",
>> @@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct 
>> drm_device *dev,
>>  		goto error;
>>  	}
>>  
>> -	/* Workaround an erratum on the i830 which causes a hang if
>> -	 * the TAIL pointer points to within the last 2 cachelines
>> -	 * of the buffer.
>> -	 */
>> -	ringbuf->effective_size = ringbuf->size;
>> -	if (IS_I830(dev) || IS_845G(dev))
>> -		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
>> -
>>  	ret = i915_cmd_parser_init_ring(ring);
>>  	if (ret)
>>  		goto error;
>> @@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct 
>> drm_device *dev,
>>  	return 0;
>
> The "goto error" right above the return 0 changes its behavior: it
> wasn't calling intel_destroy_ringbuffer_obj(), but I suppose this is a
> fix. I'm just pointing it since I'm not 100% sure.
>

I would say it's a fix because we lost the ref at that point.

Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>

> Everything seems correct, so with or without changes:
> Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
>
>
>>  
>>  error:
>> -	kfree(ringbuf);
>> +	intel_ringbuffer_free(ringbuf);
>>  	ring->buffer = NULL;
>>  	return ret;
>>  }
>> @@ -2098,19 +2121,18 @@ error:
>>  void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
>>  {
>>  	struct drm_i915_private *dev_priv;
>> -	struct intel_ringbuffer *ringbuf;
>>  
>>  	if (!intel_ring_initialized(ring))
>>  		return;
>>  
>>  	dev_priv = to_i915(ring->dev);
>> -	ringbuf = ring->buffer;
>>  
>>  	intel_stop_ring_buffer(ring);
>>  	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & 
>> MODE_IDLE) == 0);
>>  
>> -	intel_unpin_ringbuffer_obj(ringbuf);
>> -	intel_destroy_ringbuffer_obj(ringbuf);
>> +	intel_unpin_ringbuffer_obj(ring->buffer);
>> +	intel_ringbuffer_free(ring->buffer);
>> +	ring->buffer = NULL;
>>  
>>  	if (ring->cleanup)
>>  		ring->cleanup(ring);
>> @@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct 
>> intel_engine_cs *ring)
>>  
>>  	i915_cmd_parser_fini_ring(ring);
>>  	i915_gem_batch_pool_fini(&ring->batch_pool);
>> -
>> -	kfree(ringbuf);
>> -	ring->buffer = NULL;
>>  }
>>  
>>  static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
>> b/drivers/gpu/drm/i915/intel_ringbuffer.h
>> index 95b0b4b55fa6..49fa41dc0eb6 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
>> @@ -420,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs 
>> *ring,
>>  #define I915_GEM_HWS_SCRATCH_INDEX	0x40
>>  #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << 
>> MI_STORE_DWORD_INDEX_SHIFT)
>>  
>> -void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
>> +struct intel_ringbuffer *
>> +intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int 
>> size);
>>  int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
>>  				     struct intel_ringbuffer 
>> *ringbuf);
>> -void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
>> -int intel_alloc_ringbuffer_obj(struct drm_device *dev,
>> -			       struct intel_ringbuffer *ringbuf);
>> +void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
>> +void intel_ringbuffer_free(struct intel_ringbuffer *ring);
>>  
>>  void intel_stop_ring_buffer(struct intel_engine_cs *ring);
>>  void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code
  2015-09-03 14:56   ` Mika Kuoppala
@ 2015-09-04  8:17     ` Daniel Vetter
  0 siblings, 0 replies; 12+ messages in thread
From: Daniel Vetter @ 2015-09-04  8:17 UTC (permalink / raw)
  To: Mika Kuoppala; +Cc: intel-gfx

On Thu, Sep 03, 2015 at 05:56:20PM +0300, Mika Kuoppala wrote:
> "Zanoni, Paulo R" <paulo.r.zanoni@intel.com> writes:
> 
> > Em Qui, 2015-09-03 às 13:01 +0100, Chris Wilson escreveu:
> >> A small, very small, step to sharing the duplicate code between
> >> execlists and legacy submission engines, starting with the ringbuffer
> >> allocation code.
> >> 
> >> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >> Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
> >> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> >> Cc: Dave Gordon <david.s.gordon@intel.com>
> >> ---
> >>  drivers/gpu/drm/i915/intel_lrc.c        | 49 +++++-------------
> >>  drivers/gpu/drm/i915/intel_ringbuffer.c | 89 ++++++++++++++++++++---
> >> ----------
> >>  drivers/gpu/drm/i915/intel_ringbuffer.h |  8 +--
> >>  3 files changed, 70 insertions(+), 76 deletions(-)
> >> 
> >> diff --git a/drivers/gpu/drm/i915/intel_lrc.c 
> >> b/drivers/gpu/drm/i915/intel_lrc.c
> >> index 40cbba4ea4ba..28a712e7d2d0 100644
> >> --- a/drivers/gpu/drm/i915/intel_lrc.c
> >> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> >> @@ -2340,8 +2340,7 @@ void intel_lr_context_free(struct intel_context 
> >> *ctx)
> >>  				i915_gem_object_ggtt_unpin(ctx_obj);
> >>  			}
> >>  			WARN_ON(ctx->engine[ring->id].pin_count);
> >> -			intel_destroy_ringbuffer_obj(ringbuf);
> >> -			kfree(ringbuf);
> >> +			intel_ringbuffer_free(ringbuf);
> >>  			drm_gem_object_unreference(&ctx_obj->base);
> >>  		}
> >>  	}
> >> @@ -2442,42 +2441,20 @@ int intel_lr_context_deferred_create(struct 
> >> intel_context *ctx,
> >>  			I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
> >>  	}
> >>  
> >> -	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
> >> -	if (!ringbuf) {
> >> -		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer 
> >> %s\n",
> >
> > We got rid of this message, but I suppose it's not a problem, since it
> > was not a loud error message.
> >
> >
> >> -				ring->name);
> >> -		ret = -ENOMEM;
> >> +	ringbuf = intel_engine_create_ringbuffer(ring, 4 * 
> >> PAGE_SIZE);
> >> +	if (IS_ERR(ringbuf)) {
> >> +		ret = PTR_ERR(ringbuf);
> >>  		goto error_unpin_ctx;
> >>  	}
> >>  
> >> -	ringbuf->ring = ring;
> >> -
> >> -	ringbuf->size = 4 * PAGE_SIZE;
> >> -	ringbuf->effective_size = ringbuf->size;
> >> -	ringbuf->head = 0;
> >> -	ringbuf->tail = 0;
> >> -	ringbuf->last_retired_head = -1;
> >> -	intel_ring_update_space(ringbuf);
> >> -
> >> -	if (ringbuf->obj == NULL) {
> >> -		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
> >> +	if (is_global_default_ctx) {
> >> +		ret = intel_pin_and_map_ringbuffer_obj(dev, 
> >> ringbuf);
> >>  		if (ret) {
> >> -			DRM_DEBUG_DRIVER(
> >> -				"Failed to allocate ringbuffer obj 
> >> %s: %d\n",
> >> -				ring->name, ret);
> >> -			goto error_free_rbuf;
> >> +			DRM_ERROR(
> >> +				  "Failed to pin and map ringbuffer 
> >> %s: %d\n",
> >> +				  ring->name, ret);
> >> +			goto error_ringbuf;
> >>  		}
> >> -
> >> -		if (is_global_default_ctx) {
> >> -			ret = intel_pin_and_map_ringbuffer_obj(dev, 
> >> ringbuf);
> >> -			if (ret) {
> >> -				DRM_ERROR(
> >> -					"Failed to pin and map 
> >> ringbuffer %s: %d\n",
> >> -					ring->name, ret);
> >> -				goto error_destroy_rbuf;
> >> -			}
> >> -		}
> >> -
> >>  	}
> >>  
> >>  	ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
> >> @@ -2519,10 +2496,8 @@ int intel_lr_context_deferred_create(struct 
> >> intel_context *ctx,
> >>  error:
> >>  	if (is_global_default_ctx)
> >>  		intel_unpin_ringbuffer_obj(ringbuf);
> >> -error_destroy_rbuf:
> >> -	intel_destroy_ringbuffer_obj(ringbuf);
> >> -error_free_rbuf:
> >> -	kfree(ringbuf);
> >> +error_ringbuf:
> >> +	intel_ringbuffer_free(ringbuf);
> >>  error_unpin_ctx:
> >>  	if (is_global_default_ctx)
> >>  		i915_gem_object_ggtt_unpin(ctx_obj);
> >> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c 
> >> b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >> index 6e6b8db996ef..20a75bb516ac 100644
> >> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> >> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >> @@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct 
> >> drm_device *dev,
> >>  	return 0;
> >>  }
> >>  
> >> -void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
> >> +static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer 
> >> *ringbuf)
> >>  {
> >>  	drm_gem_object_unreference(&ringbuf->obj->base);
> >>  	ringbuf->obj = NULL;
> >>  }
> >>  
> >> -int intel_alloc_ringbuffer_obj(struct drm_device *dev,
> >> -			       struct intel_ringbuffer *ringbuf)
> >> +static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
> >> +				      struct intel_ringbuffer 
> >> *ringbuf)
> >>  {
> >>  	struct drm_i915_gem_object *obj;
> >>  
> >> @@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct 
> >> drm_device *dev,
> >>  	return 0;
> >>  }
> >>  
> >> +struct intel_ringbuffer *
> >> +intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int 
> >> size)
> >> +{
> >> +	struct intel_ringbuffer *ring;
> >> +	int ret;
> >> +
> >> +	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
> >> +	if (ring == NULL)
> >
> > The error message referenced above should probably be here.
> >
> >
> >> +		return ERR_PTR(-ENOMEM);
> >> +
> >> +	ring->ring = engine;
> >> +
> >> +	ring->size = size;
> >> +	/* Workaround an erratum on the i830 which causes a hang if
> >> +	 * the TAIL pointer points to within the last 2 cachelines
> >> +	 * of the buffer.
> >> +	 */
> >> +	ring->effective_size = size;
> >> +	if (IS_I830(engine->dev) || IS_845G(engine->dev))
> >> +		ring->effective_size -= 2 * CACHELINE_BYTES;
> >> +
> >> +	ring->last_retired_head = -1;
> >> +	intel_ring_update_space(ring);
> >> +
> >> +	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
> >> +	if (ret) {
> >> +		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
> >
> > Shouldn't this be "Failed to allocate ringbuffer obj %s: %d" ?
> >
> >> +			  engine->name, ret);
> >> +		kfree(ring);
> >> +		return ERR_PTR(ret);
> >> +	}
> >> +
> >> +	return ring;
> >> +}
> >> +
> >> +void
> >> +intel_ringbuffer_free(struct intel_ringbuffer *ring)
> >> +{
> >> +	intel_destroy_ringbuffer_obj(ring);
> >> +	kfree(ring);
> >> +}
> >> +
> >>  static int intel_init_ring_buffer(struct drm_device *dev,
> >>  				  struct intel_engine_cs *ring)
> >>  {
> >> @@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct 
> >> drm_device *dev,
> >>  
> >>  	WARN_ON(ring->buffer);
> >>  
> >> -	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
> >> -	if (!ringbuf)
> >> -		return -ENOMEM;
> >> -	ring->buffer = ringbuf;
> >> -
> >>  	ring->dev = dev;
> >>  	INIT_LIST_HEAD(&ring->active_list);
> >>  	INIT_LIST_HEAD(&ring->request_list);
> >>  	INIT_LIST_HEAD(&ring->execlist_queue);
> >>  	i915_gem_batch_pool_init(dev, &ring->batch_pool);
> >> -	ringbuf->size = 32 * PAGE_SIZE;
> >> -	ringbuf->ring = ring;
> >>  	memset(ring->semaphore.sync_seqno, 0, sizeof(ring
> >> ->semaphore.sync_seqno));
> >>  
> >>  	init_waitqueue_head(&ring->irq_queue);
> >>  
> >> +	ringbuf = intel_engine_create_ringbuffer(ring, 32 * 
> >> PAGE_SIZE);
> >> +	if (IS_ERR(ringbuf))
> >> +		return PTR_ERR(ringbuf);
> >> +	ring->buffer = ringbuf;
> >> +
> >>  	if (I915_NEED_GFX_HWS(dev)) {
> >>  		ret = init_status_page(ring);
> >>  		if (ret)
> >> @@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct 
> >> drm_device *dev,
> >>  			goto error;
> >>  	}
> >>  
> >> -	WARN_ON(ringbuf->obj);
> >> -
> >> -	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
> >> -	if (ret) {
> >> -		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
> >> -				ring->name, ret);
> >> -		goto error;
> >> -	}
> >> -
> >>  	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
> >>  	if (ret) {
> >>  		DRM_ERROR("Failed to pin and map ringbuffer %s: 
> >> %d\n",
> >> @@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct 
> >> drm_device *dev,
> >>  		goto error;
> >>  	}
> >>  
> >> -	/* Workaround an erratum on the i830 which causes a hang if
> >> -	 * the TAIL pointer points to within the last 2 cachelines
> >> -	 * of the buffer.
> >> -	 */
> >> -	ringbuf->effective_size = ringbuf->size;
> >> -	if (IS_I830(dev) || IS_845G(dev))
> >> -		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
> >> -
> >>  	ret = i915_cmd_parser_init_ring(ring);
> >>  	if (ret)
> >>  		goto error;
> >> @@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct 
> >> drm_device *dev,
> >>  	return 0;
> >
> > The "goto error" right above the return 0 changes its behavior: it
> > wasn't calling intel_destroy_ringbuffer_obj(), but I suppose this is a
> > fix. I'm just pointing it since I'm not 100% sure.
> >
> 
> I would say it's a fix because we lost the ref at that point.
> 
> Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
> 
> > Everything seems correct, so with or without changes:
> > Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>

Queued for -next, thanks for the patch.
-Daniel

> >
> >
> >>  
> >>  error:
> >> -	kfree(ringbuf);
> >> +	intel_ringbuffer_free(ringbuf);
> >>  	ring->buffer = NULL;
> >>  	return ret;
> >>  }
> >> @@ -2098,19 +2121,18 @@ error:
> >>  void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
> >>  {
> >>  	struct drm_i915_private *dev_priv;
> >> -	struct intel_ringbuffer *ringbuf;
> >>  
> >>  	if (!intel_ring_initialized(ring))
> >>  		return;
> >>  
> >>  	dev_priv = to_i915(ring->dev);
> >> -	ringbuf = ring->buffer;
> >>  
> >>  	intel_stop_ring_buffer(ring);
> >>  	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & 
> >> MODE_IDLE) == 0);
> >>  
> >> -	intel_unpin_ringbuffer_obj(ringbuf);
> >> -	intel_destroy_ringbuffer_obj(ringbuf);
> >> +	intel_unpin_ringbuffer_obj(ring->buffer);
> >> +	intel_ringbuffer_free(ring->buffer);
> >> +	ring->buffer = NULL;
> >>  
> >>  	if (ring->cleanup)
> >>  		ring->cleanup(ring);
> >> @@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct 
> >> intel_engine_cs *ring)
> >>  
> >>  	i915_cmd_parser_fini_ring(ring);
> >>  	i915_gem_batch_pool_fini(&ring->batch_pool);
> >> -
> >> -	kfree(ringbuf);
> >> -	ring->buffer = NULL;
> >>  }
> >>  
> >>  static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
> >> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h 
> >> b/drivers/gpu/drm/i915/intel_ringbuffer.h
> >> index 95b0b4b55fa6..49fa41dc0eb6 100644
> >> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> >> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> >> @@ -420,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs 
> >> *ring,
> >>  #define I915_GEM_HWS_SCRATCH_INDEX	0x40
> >>  #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << 
> >> MI_STORE_DWORD_INDEX_SHIFT)
> >>  
> >> -void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
> >> +struct intel_ringbuffer *
> >> +intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int 
> >> size);
> >>  int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
> >>  				     struct intel_ringbuffer 
> >> *ringbuf);
> >> -void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
> >> -int intel_alloc_ringbuffer_obj(struct drm_device *dev,
> >> -			       struct intel_ringbuffer *ringbuf);
> >> +void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
> >> +void intel_ringbuffer_free(struct intel_ringbuffer *ring);
> >>  
> >>  void intel_stop_ring_buffer(struct intel_engine_cs *ring);
> >>  void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset
  2015-09-03 12:01 ` [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset Chris Wilson
@ 2015-09-28 15:25   ` Mika Kuoppala
  2015-09-28 15:43     ` Chris Wilson
  2015-10-23 11:07   ` Mika Kuoppala
  1 sibling, 1 reply; 12+ messages in thread
From: Mika Kuoppala @ 2015-09-28 15:25 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


Hi,

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Having flushed all requests from all queues, we know that all
> ringbuffers must now be empty. However, since we do not reclaim
> all space when retiring the request (to prevent HEADs colliding
> with rapid ringbuffer wraparound) the amount of available space
> on each ringbuffer upon reset is less than when we start. Do one
> more pass over all the ringbuffers to reset the available space
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> Cc: Dave Gordon <david.s.gordon@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem.c         | 14 ++++++++++++++
>  drivers/gpu/drm/i915/intel_lrc.c        |  1 +
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 13 ++++++++++---
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
>  4 files changed, 27 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 41263cd4170c..3a42c350fec9 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2738,6 +2738,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
>  static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>  					struct intel_engine_cs *ring)
>  {
> +	struct intel_ringbuffer *buffer;
> +
>  	while (!list_empty(&ring->active_list)) {
>  		struct drm_i915_gem_object *obj;
>  
> @@ -2783,6 +2785,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>  
>  		i915_gem_request_retire(request);
>  	}
> +
> +	/* Having flushed all requests from all queues, we know that all
> +	 * ringbuffers must now be empty. However, since we do not reclaim
> +	 * all space when retiring the request (to prevent HEADs colliding
> +	 * with rapid ringbuffer wraparound) the amount of available space
> +	 * upon reset is less than when we start. Do one more pass over
> +	 * all the ringbuffers to reset last_retired_head.
> +	 */
> +	list_for_each_entry(buffer, &ring->buffers, link) {
> +		buffer->last_retired_head = buffer->tail;
> +		intel_ring_update_space(buffer);
> +	}
>  }
>  

As right after cleaning up the rings in i915_gem_reset(),
we have i915_gem_context_reset(). That will go through
all contexts and their ringbuffers and set tail and head to
zero.

If we do the space adjustment in intel_lr_context_reset(),
we can avoid adding a new ring->buffers list for this purpose:

diff --git a/drivers/gpu/drm/i915/intel_lrc.c
b/drivers/gpu/drm/i915/intel_lrc.c
index 256167b..e110d6b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2532,15 +2532,16 @@ void intel_lr_context_reset(struct drm_device
*dev,
                        WARN(1, "Failed get_pages for context obj\n");
                        continue;
                }
+
+               ringbuf->last_retired_head = ringbuf->tail;
+               intel_ring_update_space(ringbuf);
+
                page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
                reg_state = kmap_atomic(page);
 
-               reg_state[CTX_RING_HEAD+1] = 0;
-               reg_state[CTX_RING_TAIL+1] = 0;
+               reg_state[CTX_RING_HEAD+1] = ringbuf->head;
+               reg_state[CTX_RING_TAIL+1] = ringbuf->tail;
 
                kunmap_atomic(reg_state);
-
-               ringbuf->head = 0;
-               ringbuf->tail = 0;
        }


Thanks,
--Mika

>  void i915_gem_reset(struct drm_device *dev)
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 28a712e7d2d0..de52ddc108a7 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -1881,6 +1881,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
>  	i915_gem_batch_pool_init(dev, &ring->batch_pool);
>  	init_waitqueue_head(&ring->irq_queue);
>  
> +	INIT_LIST_HEAD(&ring->buffers);
>  	INIT_LIST_HEAD(&ring->execlist_queue);
>  	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
>  	spin_lock_init(&ring->execlist_lock);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 20a75bb516ac..d2e0b3b7efbf 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2030,10 +2030,14 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
>  	int ret;
>  
>  	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
> -	if (ring == NULL)
> +	if (ring == NULL) {
> +		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
> +				 engine->name);
>  		return ERR_PTR(-ENOMEM);
> +	}
>  
>  	ring->ring = engine;
> +	list_add(&ring->link, &engine->buffers);
>  
>  	ring->size = size;
>  	/* Workaround an erratum on the i830 which causes a hang if
> @@ -2049,8 +2053,9 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
>  
>  	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
>  	if (ret) {
> -		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
> -			  engine->name, ret);
> +		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
> +				 engine->name, ret);
> +		list_del(&ring->link);
>  		kfree(ring);
>  		return ERR_PTR(ret);
>  	}
> @@ -2062,6 +2067,7 @@ void
>  intel_ringbuffer_free(struct intel_ringbuffer *ring)
>  {
>  	intel_destroy_ringbuffer_obj(ring);
> +	list_del(&ring->link);
>  	kfree(ring);
>  }
>  
> @@ -2077,6 +2083,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
>  	INIT_LIST_HEAD(&ring->active_list);
>  	INIT_LIST_HEAD(&ring->request_list);
>  	INIT_LIST_HEAD(&ring->execlist_queue);
> +	INIT_LIST_HEAD(&ring->buffers);
>  	i915_gem_batch_pool_init(dev, &ring->batch_pool);
>  	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 49fa41dc0eb6..58b1976a7d0a 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -100,6 +100,7 @@ struct intel_ringbuffer {
>  	void __iomem *virtual_start;
>  
>  	struct intel_engine_cs *ring;
> +	struct list_head link;
>  
>  	u32 head;
>  	u32 tail;
> @@ -157,6 +158,7 @@ struct  intel_engine_cs {
>  	u32		mmio_base;
>  	struct		drm_device *dev;
>  	struct intel_ringbuffer *buffer;
> +	struct list_head buffers;
>  
>  	/*
>  	 * A pool of objects to use as shadow copies of client batch buffers
> -- 
> 2.5.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset
  2015-09-28 15:25   ` Mika Kuoppala
@ 2015-09-28 15:43     ` Chris Wilson
  0 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2015-09-28 15:43 UTC (permalink / raw)
  To: Mika Kuoppala; +Cc: intel-gfx

On Mon, Sep 28, 2015 at 06:25:04PM +0300, Mika Kuoppala wrote:
> 
> Hi,
> 
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > Having flushed all requests from all queues, we know that all
> > ringbuffers must now be empty. However, since we do not reclaim
> > all space when retiring the request (to prevent HEADs colliding
> > with rapid ringbuffer wraparound) the amount of available space
> > on each ringbuffer upon reset is less than when we start. Do one
> > more pass over all the ringbuffers to reset the available space
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
> > Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> > Cc: Dave Gordon <david.s.gordon@intel.com>
> > ---
> >  drivers/gpu/drm/i915/i915_gem.c         | 14 ++++++++++++++
> >  drivers/gpu/drm/i915/intel_lrc.c        |  1 +
> >  drivers/gpu/drm/i915/intel_ringbuffer.c | 13 ++++++++++---
> >  drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
> >  4 files changed, 27 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 41263cd4170c..3a42c350fec9 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -2738,6 +2738,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
> >  static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
> >  					struct intel_engine_cs *ring)
> >  {
> > +	struct intel_ringbuffer *buffer;
> > +
> >  	while (!list_empty(&ring->active_list)) {
> >  		struct drm_i915_gem_object *obj;
> >  
> > @@ -2783,6 +2785,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
> >  
> >  		i915_gem_request_retire(request);
> >  	}
> > +
> > +	/* Having flushed all requests from all queues, we know that all
> > +	 * ringbuffers must now be empty. However, since we do not reclaim
> > +	 * all space when retiring the request (to prevent HEADs colliding
> > +	 * with rapid ringbuffer wraparound) the amount of available space
> > +	 * upon reset is less than when we start. Do one more pass over
> > +	 * all the ringbuffers to reset last_retired_head.
> > +	 */
> > +	list_for_each_entry(buffer, &ring->buffers, link) {
> > +		buffer->last_retired_head = buffer->tail;
> > +		intel_ring_update_space(buffer);
> > +	}
> >  }
> >  
> 
> As right after cleaning up the rings in i915_gem_reset(),
> we have i915_gem_context_reset(). That will go through
> all contexts and their ringbuffers and set tail and head to
> zero.
> 
> If we do the space adjustment in intel_lr_context_reset(),
> we can avoid adding new ring->buffers list for this purpose:

No. The point is that we want to do it in a generic manner so that we can
remove intel_lr_context_reset() (the legacy code is just a degenerate
case of execlists, look at the patches I sent last year to see how simple
the code becomes after applying that transformation).
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset
  2015-09-03 12:01 ` [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset Chris Wilson
  2015-09-28 15:25   ` Mika Kuoppala
@ 2015-10-23 11:07   ` Mika Kuoppala
  2015-10-23 11:12     ` Chris Wilson
  1 sibling, 1 reply; 12+ messages in thread
From: Mika Kuoppala @ 2015-10-23 11:07 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Having flushed all requests from all queues, we know that all
> ringbuffers must now be empty. However, since we do not reclaim
> all space when retiring the request (to prevent HEADs colliding
> with rapid ringbuffer wraparound) the amount of available space
> on each ringbuffer upon reset is less than when we start. Do one
> more pass over all the ringbuffers to reset the available space
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> Cc: Dave Gordon <david.s.gordon@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem.c         | 14 ++++++++++++++
>  drivers/gpu/drm/i915/intel_lrc.c        |  1 +
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 13 ++++++++++---
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
>  4 files changed, 27 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 41263cd4170c..3a42c350fec9 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2738,6 +2738,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
>  static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>  					struct intel_engine_cs *ring)
>  {
> +	struct intel_ringbuffer *buffer;
> +
>  	while (!list_empty(&ring->active_list)) {
>  		struct drm_i915_gem_object *obj;
>  
> @@ -2783,6 +2785,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>  
>  		i915_gem_request_retire(request);
>  	}
> +
> +	/* Having flushed all requests from all queues, we know that all
> +	 * ringbuffers must now be empty. However, since we do not reclaim
> +	 * all space when retiring the request (to prevent HEADs colliding
> +	 * with rapid ringbuffer wraparound) the amount of available space
> +	 * upon reset is less than when we start. Do one more pass over
> +	 * all the ringbuffers to reset last_retired_head.
> +	 */
> +	list_for_each_entry(buffer, &ring->buffers, link) {
> +		buffer->last_retired_head = buffer->tail;
> +		intel_ring_update_space(buffer);
> +	}

This is all in vain as the i915_gem_context_reset() ->
intel_lr_context_reset still sets head and tail to zero.

So your last_retired_head will still dangle in a pre-reset
world when the rest of the ringbuf items will be set to post
reset world.


-Mika


>  }
>  
>  void i915_gem_reset(struct drm_device *dev)
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 28a712e7d2d0..de52ddc108a7 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -1881,6 +1881,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
>  	i915_gem_batch_pool_init(dev, &ring->batch_pool);
>  	init_waitqueue_head(&ring->irq_queue);
>  
> +	INIT_LIST_HEAD(&ring->buffers);
>  	INIT_LIST_HEAD(&ring->execlist_queue);
>  	INIT_LIST_HEAD(&ring->execlist_retired_req_list);
>  	spin_lock_init(&ring->execlist_lock);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 20a75bb516ac..d2e0b3b7efbf 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2030,10 +2030,14 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
>  	int ret;
>  
>  	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
> -	if (ring == NULL)
> +	if (ring == NULL) {
> +		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
> +				 engine->name);
>  		return ERR_PTR(-ENOMEM);
> +	}
>  
>  	ring->ring = engine;
> +	list_add(&ring->link, &engine->buffers);
>  
>  	ring->size = size;
>  	/* Workaround an erratum on the i830 which causes a hang if
> @@ -2049,8 +2053,9 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
>  
>  	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
>  	if (ret) {
> -		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
> -			  engine->name, ret);
> +		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
> +				 engine->name, ret);
> +		list_del(&ring->link);
>  		kfree(ring);
>  		return ERR_PTR(ret);
>  	}
> @@ -2062,6 +2067,7 @@ void
>  intel_ringbuffer_free(struct intel_ringbuffer *ring)
>  {
>  	intel_destroy_ringbuffer_obj(ring);
> +	list_del(&ring->link);
>  	kfree(ring);
>  }
>  
> @@ -2077,6 +2083,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
>  	INIT_LIST_HEAD(&ring->active_list);
>  	INIT_LIST_HEAD(&ring->request_list);
>  	INIT_LIST_HEAD(&ring->execlist_queue);
> +	INIT_LIST_HEAD(&ring->buffers);
>  	i915_gem_batch_pool_init(dev, &ring->batch_pool);
>  	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 49fa41dc0eb6..58b1976a7d0a 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -100,6 +100,7 @@ struct intel_ringbuffer {
>  	void __iomem *virtual_start;
>  
>  	struct intel_engine_cs *ring;
> +	struct list_head link;
>  
>  	u32 head;
>  	u32 tail;
> @@ -157,6 +158,7 @@ struct  intel_engine_cs {
>  	u32		mmio_base;
>  	struct		drm_device *dev;
>  	struct intel_ringbuffer *buffer;
> +	struct list_head buffers;
>  
>  	/*
>  	 * A pool of objects to use as shadow copies of client batch buffers
> -- 
> 2.5.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset
  2015-10-23 11:07   ` Mika Kuoppala
@ 2015-10-23 11:12     ` Chris Wilson
  2015-10-23 11:46       ` Mika Kuoppala
  0 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2015-10-23 11:12 UTC (permalink / raw)
  To: Mika Kuoppala; +Cc: intel-gfx

On Fri, Oct 23, 2015 at 02:07:35PM +0300, Mika Kuoppala wrote:
> Chris Wilson <chris@chris-wilson.co.uk> writes:
> 
> > Having flushed all requests from all queues, we know that all
> > ringbuffers must now be empty. However, since we do not reclaim
> > all space when retiring the request (to prevent HEADs colliding
> > with rapid ringbuffer wraparound) the amount of available space
> > on each ringbuffer upon reset is less than when we start. Do one
> > more pass over all the ringbuffers to reset the available space
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
> > Cc: Mika Kuoppala <mika.kuoppala@intel.com>
> > Cc: Dave Gordon <david.s.gordon@intel.com>
> > ---
> >  drivers/gpu/drm/i915/i915_gem.c         | 14 ++++++++++++++
> >  drivers/gpu/drm/i915/intel_lrc.c        |  1 +
> >  drivers/gpu/drm/i915/intel_ringbuffer.c | 13 ++++++++++---
> >  drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
> >  4 files changed, 27 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 41263cd4170c..3a42c350fec9 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -2738,6 +2738,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
> >  static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
> >  					struct intel_engine_cs *ring)
> >  {
> > +	struct intel_ringbuffer *buffer;
> > +
> >  	while (!list_empty(&ring->active_list)) {
> >  		struct drm_i915_gem_object *obj;
> >  
> > @@ -2783,6 +2785,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
> >  
> >  		i915_gem_request_retire(request);
> >  	}
> > +
> > +	/* Having flushed all requests from all queues, we know that all
> > +	 * ringbuffers must now be empty. However, since we do not reclaim
> > +	 * all space when retiring the request (to prevent HEADs colliding
> > +	 * with rapid ringbuffer wraparound) the amount of available space
> > +	 * upon reset is less than when we start. Do one more pass over
> > +	 * all the ringbuffers to reset last_retired_head.
> > +	 */
> > +	list_for_each_entry(buffer, &ring->buffers, link) {
> > +		buffer->last_retired_head = buffer->tail;
> > +		intel_ring_update_space(buffer);
> > +	}
> 
> This is all in vain as the i915_gem_context_reset() ->
> intel_lr_context_reset still sets head and tail to zero.
> 
> So your last_retired_head will still dangle in a pre-reset
> world when the rest of the ringbuf items will be set to post
> reset world.

It's only setting that so that we computed the full ring space as
available and then we set last_retired_head back to -1. So what's
dangling?
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset
  2015-10-23 11:12     ` Chris Wilson
@ 2015-10-23 11:46       ` Mika Kuoppala
  2015-10-28 17:18         ` Tvrtko Ursulin
  0 siblings, 1 reply; 12+ messages in thread
From: Mika Kuoppala @ 2015-10-23 11:46 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> On Fri, Oct 23, 2015 at 02:07:35PM +0300, Mika Kuoppala wrote:
>> Chris Wilson <chris@chris-wilson.co.uk> writes:
>> 
>> > Having flushed all requests from all queues, we know that all
>> > ringbuffers must now be empty. However, since we do not reclaim
>> > all space when retiring the request (to prevent HEADs colliding
>> > with rapid ringbuffer wraparound) the amount of available space
>> > on each ringbuffer upon reset is less than when we start. Do one
>> > more pass over all the ringbuffers to reset the available space
>> >
>> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> > Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
>> > Cc: Mika Kuoppala <mika.kuoppala@intel.com>
>> > Cc: Dave Gordon <david.s.gordon@intel.com>
>> > ---
>> >  drivers/gpu/drm/i915/i915_gem.c         | 14 ++++++++++++++
>> >  drivers/gpu/drm/i915/intel_lrc.c        |  1 +
>> >  drivers/gpu/drm/i915/intel_ringbuffer.c | 13 ++++++++++---
>> >  drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
>> >  4 files changed, 27 insertions(+), 3 deletions(-)
>> >
>> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>> > index 41263cd4170c..3a42c350fec9 100644
>> > --- a/drivers/gpu/drm/i915/i915_gem.c
>> > +++ b/drivers/gpu/drm/i915/i915_gem.c
>> > @@ -2738,6 +2738,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
>> >  static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>> >  					struct intel_engine_cs *ring)
>> >  {
>> > +	struct intel_ringbuffer *buffer;
>> > +
>> >  	while (!list_empty(&ring->active_list)) {
>> >  		struct drm_i915_gem_object *obj;
>> >  
>> > @@ -2783,6 +2785,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>> >  
>> >  		i915_gem_request_retire(request);
>> >  	}
>> > +
>> > +	/* Having flushed all requests from all queues, we know that all
>> > +	 * ringbuffers must now be empty. However, since we do not reclaim
>> > +	 * all space when retiring the request (to prevent HEADs colliding
>> > +	 * with rapid ringbuffer wraparound) the amount of available space
>> > +	 * upon reset is less than when we start. Do one more pass over
>> > +	 * all the ringbuffers to reset last_retired_head.
>> > +	 */
>> > +	list_for_each_entry(buffer, &ring->buffers, link) {
>> > +		buffer->last_retired_head = buffer->tail;
>> > +		intel_ring_update_space(buffer);
>> > +	}
>> 
>> This is all in vain as the i915_gem_context_reset() ->
>> intel_lr_context_reset still sets head and tail to zero.
>> 
>> So your last_retired_head will still dangle in a pre-reset
>> world when the rest of the ringbuf items will be set to post
>> reset world.
>
> It's only setting that so that we computed the full ring space as
> available and then we set last_retired_head back to -1. So what's
> dangling?
> -Chris

My understanding of the ringbuffer code was dangling. It is all
clear now. We set head = tail and thus reset the ring space to full.

References: https://bugs.freedesktop.org/show_bug.cgi?id=91634

should be added as this very likely fixes that one.

Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>

>
> -- 
> Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset
  2015-10-23 11:46       ` Mika Kuoppala
@ 2015-10-28 17:18         ` Tvrtko Ursulin
  0 siblings, 0 replies; 12+ messages in thread
From: Tvrtko Ursulin @ 2015-10-28 17:18 UTC (permalink / raw)
  To: Mika Kuoppala, Chris Wilson; +Cc: intel-gfx


On 23/10/15 12:46, Mika Kuoppala wrote:
> Chris Wilson <chris@chris-wilson.co.uk> writes:
>
>> On Fri, Oct 23, 2015 at 02:07:35PM +0300, Mika Kuoppala wrote:
>>> Chris Wilson <chris@chris-wilson.co.uk> writes:
>>>
>>>> Having flushed all requests from all queues, we know that all
>>>> ringbuffers must now be empty. However, since we do not reclaim
>>>> all space when retiring the request (to prevent HEADs colliding
>>>> with rapid ringbuffer wraparound) the amount of available space
>>>> on each ringbuffer upon reset is less than when we start. Do one
>>>> more pass over all the ringbuffers to reset the available space
>>>>
>>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>>> Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
>>>> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
>>>> Cc: Dave Gordon <david.s.gordon@intel.com>
>>>> ---
>>>>   drivers/gpu/drm/i915/i915_gem.c         | 14 ++++++++++++++
>>>>   drivers/gpu/drm/i915/intel_lrc.c        |  1 +
>>>>   drivers/gpu/drm/i915/intel_ringbuffer.c | 13 ++++++++++---
>>>>   drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
>>>>   4 files changed, 27 insertions(+), 3 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>>>> index 41263cd4170c..3a42c350fec9 100644
>>>> --- a/drivers/gpu/drm/i915/i915_gem.c
>>>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>>>> @@ -2738,6 +2738,8 @@ static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
>>>>   static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>>>>   					struct intel_engine_cs *ring)
>>>>   {
>>>> +	struct intel_ringbuffer *buffer;
>>>> +
>>>>   	while (!list_empty(&ring->active_list)) {
>>>>   		struct drm_i915_gem_object *obj;
>>>>
>>>> @@ -2783,6 +2785,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
>>>>
>>>>   		i915_gem_request_retire(request);
>>>>   	}
>>>> +
>>>> +	/* Having flushed all requests from all queues, we know that all
>>>> +	 * ringbuffers must now be empty. However, since we do not reclaim
>>>> +	 * all space when retiring the request (to prevent HEADs colliding
>>>> +	 * with rapid ringbuffer wraparound) the amount of available space
>>>> +	 * upon reset is less than when we start. Do one more pass over
>>>> +	 * all the ringbuffers to reset last_retired_head.
>>>> +	 */
>>>> +	list_for_each_entry(buffer, &ring->buffers, link) {
>>>> +		buffer->last_retired_head = buffer->tail;
>>>> +		intel_ring_update_space(buffer);
>>>> +	}
>>>
>>> This is all in vain as the i915_gem_context_reset() ->
>>> intel_lr_context_reset still sets head and tail to zero.
>>>
>>> So your last_retired_head will still dangle in a pre-reset
>>> world when the rest of the ringbuf items will be set to post
>>> reset world.
>>
>> It's only setting that so that we computed the full ring space as
>> available and then we set last_retired_head back to -1. So what's
>> dangling?
>> -Chris
>
> My understanding of the ringbuffer code was dandling. It is all
> clear now. We set head = tail and thus reset the ring space to full.
>
> References: https://bugs.freedesktop.org/show_bug.cgi?id=91634
>
> should be added as this very likely fixes that one.
>
> Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>

I've merged this one to try out dim - seems to have worked.

Regards,

Tvrtko

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2015-10-28 17:18 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-09-03 12:01 [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code Chris Wilson
2015-09-03 12:01 ` [PATCH 2/2] drm/i915: Recover all available ringbuffer space following reset Chris Wilson
2015-09-28 15:25   ` Mika Kuoppala
2015-09-28 15:43     ` Chris Wilson
2015-10-23 11:07   ` Mika Kuoppala
2015-10-23 11:12     ` Chris Wilson
2015-10-23 11:46       ` Mika Kuoppala
2015-10-28 17:18         ` Tvrtko Ursulin
2015-09-03 14:01 ` [PATCH 1/2] drm/i915: Refactor common ringbuffer allocation code Zanoni, Paulo R
2015-09-03 14:47   ` chris
2015-09-03 14:56   ` Mika Kuoppala
2015-09-04  8:17     ` Daniel Vetter

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.