* [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation
@ 2020-03-10 11:27 Nirmoy Das
  2020-03-10 11:27 ` [PATCH 2/2] drm/amdgpu: do not set nil entry in compute_prio_sched Nirmoy Das
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Nirmoy Das @ 2020-03-10 11:27 UTC (permalink / raw)
  To: amd-gfx; +Cc: alexander.deucher, Ray.Huang, nirmoy.das, christian.koenig

Move amdgpu_ctx_init_sched(), which initializes the struct
drm_gpu_scheduler arrays, to amdgpu_ring.c.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c    | 68 -------------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h    |  3 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c   | 77 ++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h   |  2 +
 5 files changed, 80 insertions(+), 72 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 3b2370ad1e47..06d151c0fe4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -661,71 +661,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 	idr_destroy(&mgr->ctx_handles);
 	mutex_destroy(&mgr->lock);
 }
-
-
-static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
-{
-	int num_compute_sched_normal = 0;
-	int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
-	int i;
-
-	/* use one drm sched array, gfx.compute_sched to store both high and
-	 * normal priority drm compute schedulers */
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		if (!adev->gfx.compute_ring[i].has_high_prio)
-			adev->gfx.compute_sched[num_compute_sched_normal++] =
-				&adev->gfx.compute_ring[i].sched;
-		else
-			adev->gfx.compute_sched[num_compute_sched_high--] =
-				&adev->gfx.compute_ring[i].sched;
-	}
-
-	/* compute ring only has two priority for now */
-	i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
-	adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
-	adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-
-	i = AMDGPU_GFX_PIPE_PRIO_HIGH;
-	adev->gfx.compute_prio_sched[i] =
-		&adev->gfx.compute_sched[num_compute_sched_high - 1];
-	adev->gfx.num_compute_sched[i] =
-		adev->gfx.num_compute_rings - num_compute_sched_normal;
-}
-
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
-{
-	int i, j;
-
-	amdgpu_ctx_init_compute_sched(adev);
-	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
-		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
-		adev->gfx.num_gfx_sched++;
-	}
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
-		adev->sdma.num_sdma_sched++;
-	}
-
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->vcn.harvest_config & (1 << i))
-			continue;
-		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
-			&adev->vcn.inst[i].ring_dec.sched;
-	}
-
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->vcn.harvest_config & (1 << i))
-			continue;
-		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
-			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
-				&adev->vcn.inst[i].ring_enc[j].sched;
-	}
-
-	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
-		if (adev->jpeg.harvest_config & (1 << i))
-			continue;
-		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
-			&adev->jpeg.inst[i].ring_dec.sched;
-	}
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index de490f183af2..f54e10314661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
-
-
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 572eb6ea8eab..b2a99f9fc223 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3092,7 +3092,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			adev->gfx.config.max_cu_per_sh,
 			adev->gfx.cu_info.number);
 
-	amdgpu_ctx_init_sched(adev);
+	amdgpu_ring_init_sched(adev);
 
 	adev->accel_working = true;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a7e1d0425ed0..99875dd633e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -454,3 +454,80 @@ int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
 	ring->sched.ready = !r;
 	return r;
 }
+
+static void amdgpu_ring_init_compute_sched(struct amdgpu_device *adev)
+{
+	int num_compute_sched_normal = 0;
+	int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+	int i;
+
+	/* use one drm sched array, gfx.compute_sched to store both high and
+	 * normal priority drm compute schedulers */
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		if (!adev->gfx.compute_ring[i].has_high_prio)
+			adev->gfx.compute_sched[num_compute_sched_normal++] =
+				&adev->gfx.compute_ring[i].sched;
+		else
+			adev->gfx.compute_sched[num_compute_sched_high--] =
+				&adev->gfx.compute_ring[i].sched;
+	}
+
+	/* compute rings only have two priorities for now */
+	i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+	adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+	adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+
+	i = AMDGPU_GFX_PIPE_PRIO_HIGH;
+	adev->gfx.compute_prio_sched[i] =
+		&adev->gfx.compute_sched[num_compute_sched_high - 1];
+	adev->gfx.num_compute_sched[i] =
+		adev->gfx.num_compute_rings - num_compute_sched_normal;
+}
+
+/**
+ * amdgpu_ring_init_sched - populate array of drm scheds for each HW IP
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Populate an array of struct drm_gpu_scheduler pointers for each HW IP,
+ * which can be used by amdgpu_ctx_get_entity() to initialize an entity.
+ *
+ */
+
+void amdgpu_ring_init_sched(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	amdgpu_ring_init_compute_sched(adev);
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
+		adev->gfx.num_gfx_sched++;
+	}
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
+		adev->sdma.num_sdma_sched++;
+	}
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
+			&adev->vcn.inst[i].ring_dec.sched;
+	}
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
+			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
+				&adev->vcn.inst[i].ring_enc[j].sched;
+	}
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->jpeg.harvest_config & (1 << i))
+			continue;
+		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
+			&adev->jpeg.inst[i].ring_dec.sched;
+	}
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 9a443013d70d..4ccd056d4353 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -326,4 +326,6 @@ int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
 			     struct amdgpu_ring *ring);
 void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
 
+void amdgpu_ring_init_sched(struct amdgpu_device *adev);
+
 #endif
-- 
2.25.0


* [PATCH 2/2] drm/amdgpu: do not set nil entry in compute_prio_sched
  2020-03-10 11:27 [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation Nirmoy Das
@ 2020-03-10 11:27 ` Nirmoy Das
  2020-03-10 11:27 ` [PATCH 2/2] drm/amdgpu: fix assigning " Nirmoy Das
  2020-03-10 11:39 ` [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation Nirmoy
  2 siblings, 0 replies; 8+ messages in thread
From: Nirmoy Das @ 2020-03-10 11:27 UTC (permalink / raw)
  To: amd-gfx; +Cc: alexander.deucher, Ray.Huang, nirmoy.das, christian.koenig

If there are no high priority compute queues available, then point
compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_HIGH] at the normal priority
sched array.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 99875dd633e6..01faeb8b4ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -478,10 +478,18 @@ static void amdgpu_ring_init_compute_sched(struct amdgpu_device *adev)
 	adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
 
 	i = AMDGPU_GFX_PIPE_PRIO_HIGH;
-	adev->gfx.compute_prio_sched[i] =
-		&adev->gfx.compute_sched[num_compute_sched_high - 1];
-	adev->gfx.num_compute_sched[i] =
-		adev->gfx.num_compute_rings - num_compute_sched_normal;
+	if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
+		/* if there are no high priority compute rings, use the
+		 * normal priority sched array */
+		adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+		adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+	} else {
+
+		adev->gfx.compute_prio_sched[i] =
+			&adev->gfx.compute_sched[num_compute_sched_high - 1];
+		adev->gfx.num_compute_sched[i] =
+			adev->gfx.num_compute_rings - num_compute_sched_normal;
+	}
 }
 
 /**
-- 
2.25.0


* [PATCH 2/2] drm/amdgpu: fix assigning nil entry in compute_prio_sched
  2020-03-10 11:27 [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation Nirmoy Das
  2020-03-10 11:27 ` [PATCH 2/2] drm/amdgpu: do not set nil entry in compute_prio_sched Nirmoy Das
@ 2020-03-10 11:27 ` Nirmoy Das
  2020-03-10 11:29   ` Nirmoy
  2020-03-10 11:42   ` Christian König
  2020-03-10 11:39 ` [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation Nirmoy
  2 siblings, 2 replies; 8+ messages in thread
From: Nirmoy Das @ 2020-03-10 11:27 UTC (permalink / raw)
  To: amd-gfx; +Cc: alexander.deucher, Ray.Huang, nirmoy.das, christian.koenig

If there is no high priority compute queue, then point
compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_HIGH] at the normal priority
sched array.

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 99875dd633e6..01faeb8b4ef2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -478,10 +478,18 @@ static void amdgpu_ring_init_compute_sched(struct amdgpu_device *adev)
 	adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
 
 	i = AMDGPU_GFX_PIPE_PRIO_HIGH;
-	adev->gfx.compute_prio_sched[i] =
-		&adev->gfx.compute_sched[num_compute_sched_high - 1];
-	adev->gfx.num_compute_sched[i] =
-		adev->gfx.num_compute_rings - num_compute_sched_normal;
+	if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
+		/* if there are no high priority compute rings, use the
+		 * normal priority sched array */
+		adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+		adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+	} else {
+
+		adev->gfx.compute_prio_sched[i] =
+			&adev->gfx.compute_sched[num_compute_sched_high - 1];
+		adev->gfx.num_compute_sched[i] =
+			adev->gfx.num_compute_rings - num_compute_sched_normal;
+	}
 }
 
 /**
-- 
2.25.0


* Re: [PATCH 2/2] drm/amdgpu: fix assigning nil entry in compute_prio_sched
  2020-03-10 11:27 ` [PATCH 2/2] drm/amdgpu: fix assigning " Nirmoy Das
@ 2020-03-10 11:29   ` Nirmoy
  2020-03-10 11:42   ` Christian König
  1 sibling, 0 replies; 8+ messages in thread
From: Nirmoy @ 2020-03-10 11:29 UTC (permalink / raw)
  To: Nirmoy Das, amd-gfx
  Cc: alexander.deucher, Ray.Huang, nirmoy.das, christian.koenig

Please ignore this stale patch.

On 3/10/20 12:27 PM, Nirmoy Das wrote:
> If there is no high priority compute queue, then point
> compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_HIGH] at the normal priority
> sched array.
>
> [...]

* Re: [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation
  2020-03-10 11:27 [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation Nirmoy Das
  2020-03-10 11:27 ` [PATCH 2/2] drm/amdgpu: do not set nil entry in compute_prio_sched Nirmoy Das
  2020-03-10 11:27 ` [PATCH 2/2] drm/amdgpu: fix assigning " Nirmoy Das
@ 2020-03-10 11:39 ` Nirmoy
  2020-03-10 11:41   ` Christian König
  2 siblings, 1 reply; 8+ messages in thread
From: Nirmoy @ 2020-03-10 11:39 UTC (permalink / raw)
  To: Nirmoy Das, amd-gfx
  Cc: alexander.deucher, Ray.Huang, nirmoy.das, christian.koenig

Hi Christian,

I think we still need the amdgpu_ring.has_high_prio bool. I was thinking
of using amdgpu_gfx_is_high_priority_compute_queue() to check whether a
ring is set to high priority, but then I realized we don't support high
priority gfx queues on gfx7 and older.
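
For illustration, the alternative I looked at would have been roughly
this (the helper's signature is assumed here, not copied from the tree):

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		/* hypothetical stand-in for ring->has_high_prio */
		if (amdgpu_gfx_is_high_priority_compute_queue(adev,
							      ring->queue))
			adev->gfx.compute_sched[num_compute_sched_high--] =
				&ring->sched;
		else
			adev->gfx.compute_sched[num_compute_sched_normal++] =
				&ring->sched;
	}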


Regards,

Nirmoy

On 3/10/20 12:27 PM, Nirmoy Das wrote:
> Move amdgpu_ctx_init_sched(), which initializes the struct
> drm_gpu_scheduler arrays, to amdgpu_ring.c.
>
> [...]

* Re: [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation
  2020-03-10 11:39 ` [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation Nirmoy
@ 2020-03-10 11:41   ` Christian König
  2020-03-10 11:55     ` Nirmoy
  0 siblings, 1 reply; 8+ messages in thread
From: Christian König @ 2020-03-10 11:41 UTC (permalink / raw)
  To: Nirmoy, Nirmoy Das, amd-gfx; +Cc: alexander.deucher, Ray.Huang, nirmoy.das

Hi Nirmoy,

you can stick with that for now.

In the long term we should make the priority a parameter of 
amdgpu_ring_init(). And then amdgpu_ring_init() can gather the rings by 
priority and type.

That in turn would make amdgpu_ring_init_sched() and 
amdgpu_ring_init_compute_sched() superfluous.
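
Something like this, as a rough sketch (keeping the current parameters
and just adding a priority; the exact form is up for discussion):

	int amdgpu_ring_init(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring, unsigned int max_dw,
			     struct amdgpu_irq_src *irq_src,
			     unsigned int irq_type, unsigned int hw_prio);

Each caller would then pass its ring's priority, and amdgpu_ring_init()
could append &ring->sched to the matching per-type, per-priority array
itself.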

But fixing the bug with the NULL pointer dereference should come first;
everything else is just cleanup and has lower urgency.
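
To spell the bug out: when no compute ring has high priority,
num_compute_sched_high never moves from AMDGPU_MAX_COMPUTE_RINGS - 1, so
compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_HIGH] ends up pointing at a slot
of gfx.compute_sched that was never filled in and is still NULL. A
minimal userspace sketch of the arithmetic (AMDGPU_MAX_COMPUTE_RINGS
taken as 8 with four normal priority rings, numbers picked only for
illustration):

	#include <stdio.h>

	#define AMDGPU_MAX_COMPUTE_RINGS 8

	int main(void)
	{
		/* stands in for adev->gfx.compute_sched[]; slots start NULL */
		void *compute_sched[AMDGPU_MAX_COMPUTE_RINGS] = { NULL };
		int num_compute_sched_normal = 0;
		int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
		int dummy, i;

		/* four compute rings, none of them high priority */
		for (i = 0; i < 4; i++)
			compute_sched[num_compute_sched_normal++] = &dummy;

		/* the old HIGH assignment picks index high - 1 */
		printf("HIGH -> compute_sched[%d] = %p\n",
		       num_compute_sched_high - 1,
		       compute_sched[num_compute_sched_high - 1]);
		/* glibc prints: HIGH -> compute_sched[6] = (nil) */
		return 0;
	}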

Regards,
Christian.

Am 10.03.20 um 12:39 schrieb Nirmoy:
> Hi Christian,
>
> I think we still need the amdgpu_ring.has_high_prio bool. I was thinking
> of using amdgpu_gfx_is_high_priority_compute_queue() to check whether a
> ring is set to high priority, but then I realized we don't support high
> priority gfx queues on gfx7 and older.
>
> Regards,
>
> Nirmoy
>
> On 3/10/20 12:27 PM, Nirmoy Das wrote:
>> Move amdgpu_ctx_init_sched(), which initializes the struct
>> drm_gpu_scheduler arrays, to amdgpu_ring.c.
>>
>> [...]

* Re: [PATCH 2/2] drm/amdgpu: fix assigning nil entry in compute_prio_sched
  2020-03-10 11:27 ` [PATCH 2/2] drm/amdgpu: fix assigning " Nirmoy Das
  2020-03-10 11:29   ` Nirmoy
@ 2020-03-10 11:42   ` Christian König
  1 sibling, 0 replies; 8+ messages in thread
From: Christian König @ 2020-03-10 11:42 UTC (permalink / raw)
  To: Nirmoy Das, amd-gfx; +Cc: alexander.deucher, Ray.Huang, nirmoy.das

Am 10.03.20 um 12:27 schrieb Nirmoy Das:
> If there is no high priority compute queue, then point
> compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_HIGH] at the normal priority
> sched array.

Please move that patch to the beginning of the series since it is a bug fix.

Thanks,
Christian.

>
> [...]


* Re: [PATCH 1/2] drm/amdgpu: cleanup drm_gpu_scheduler array creation
  2020-03-10 11:41   ` Christian König
@ 2020-03-10 11:55     ` Nirmoy
  0 siblings, 0 replies; 8+ messages in thread
From: Nirmoy @ 2020-03-10 11:55 UTC (permalink / raw)
  To: Christian König, Nirmoy Das, amd-gfx
  Cc: alexander.deucher, Ray.Huang, nirmoy.das


On 3/10/20 12:41 PM, Christian König wrote:
> Hi Nirmoy,
>
> you can stick with that for now.
>
> In the long term we should make the priority a parameter of 
> amdgpu_ring_init(). And then amdgpu_ring_init() can gather the rings 
> by priority and type.
>
> That in turn would make amdgpu_ring_init_sched() and 
> amdgpu_ring_init_compute_sched() superfluous.


Yes that would be even better.

>
> But fixing the bug with the NULL pointer dereference should come first;
> everything else is just cleanup and has lower urgency.


I will swap these two patches.


Thanks,

Nirmoy

>
> Regards,
> Christian.
>
> Am 10.03.20 um 12:39 schrieb Nirmoy:
>> [...]
