* [PATCH 1/4] drm/scheduler: rework entity creation
@ 2019-12-09 21:53 Nirmoy Das
  2019-12-09 21:53 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Nirmoy Das @ 2019-12-09 21:53 UTC (permalink / raw)
  To: alexander.deucher, kenny.ho, christian.koenig; +Cc: nirmoy.das, amd-gfx

An entity currently keeps a copy of the run_queue list and modifies it in
drm_sched_entity_set_priority(). Entities shouldn't modify the run_queue
list. Use a drm_gpu_scheduler list instead of a drm_sched_rq list in the
drm_sched_entity struct. This way we can select a run queue of a drm
scheduler based on the entity's/ctx's priority.
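
For illustration, a before/after sketch of a single-scheduler caller
(entity/ring/r are placeholders, mirroring the hunks below):

	/* old: the caller picked a run queue up front */
	struct drm_sched_rq *rq =
		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&entity, &rq, 1, NULL);

	/* new: the caller passes the scheduler(s) plus a priority, and
	 * the entity resolves sched_rq[priority] internally
	 */
	struct drm_gpu_scheduler *sched = &ring->sched;
	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);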

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  |  7 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  |  8 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  |  7 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  |  7 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   | 14 +++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c    |  7 ++-
 drivers/gpu/drm/lima/lima_sched.c        |  5 +-
 drivers/gpu/drm/panfrost/panfrost_job.c  |  8 ++-
 drivers/gpu/drm/scheduler/sched_entity.c | 74 ++++++++++--------------
 drivers/gpu/drm/v3d/v3d_drv.c            |  8 ++-
 include/drm/gpu_scheduler.h              |  8 ++-
 11 files changed, 78 insertions(+), 75 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a0d3d7b756eb..1d6850af9908 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -122,7 +122,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
 		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
-		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
+		struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
 		unsigned num_rings = 0;
 		unsigned num_rqs = 0;
 
@@ -181,12 +181,13 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			if (!rings[j]->adev)
 				continue;
 
-			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
+			sched_list[num_rqs++] = &rings[j]->sched;
 		}
 
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
 			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
-						  rqs, num_rqs, &ctx->guilty);
+						  priority, sched_list,
+						  num_rqs, &ctx->guilty);
 		if (r)
 			goto error_cleanup_entities;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 19ffe00d9072..2b6e35893918 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1957,11 +1957,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
 	if (enable) {
 		struct amdgpu_ring *ring;
-		struct drm_sched_rq *rq;
+		struct drm_gpu_scheduler *sched;
 
 		ring = adev->mman.buffer_funcs_ring;
-		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+		sched = &ring->sched;
+		r = drm_sched_entity_init(&adev->mman.entity,
+				          DRM_SCHED_PRIORITY_KERNEL, &sched,
+					  1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
 				  r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e324bfe6c58f..a1a110f5513d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -330,12 +330,13 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int r;
 
 	ring = &adev->uvd.inst[0].ring;
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+	sched = &ring->sched;
+	r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+				  &sched, 1, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up UVD kernel entity.\n");
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 46b590af2fd2..ceb0dbf685f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -240,12 +240,13 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 int amdgpu_vce_entity_init(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int r;
 
 	ring = &adev->vce.ring[0];
-	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+	sched = &ring->sched;
+	r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+				  &sched, 1, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index a94c4faa5af1..5e78db30c722 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2687,6 +2687,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 {
 	struct amdgpu_bo_param bp;
 	struct amdgpu_bo *root;
+	struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
 	int r, i;
 
 	vm->va = RB_ROOT_CACHED;
@@ -2700,14 +2701,19 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	spin_lock_init(&vm->invalidated_lock);
 	INIT_LIST_HEAD(&vm->freed);
 
+	for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
+		sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
+
 	/* create scheduler entities for page table updates */
-	r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
-				  adev->vm_manager.vm_pte_num_rqs, NULL);
+	r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+				  sched_list, adev->vm_manager.vm_pte_num_rqs,
+				  NULL);
 	if (r)
 		return r;
 
-	r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
-				  adev->vm_manager.vm_pte_num_rqs, NULL);
+	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+				  sched_list, adev->vm_manager.vm_pte_num_rqs,
+				  NULL);
 	if (r)
 		goto error_free_direct;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 1f9c01be40d7..76ecdf8bd31c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -65,12 +65,13 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 
 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
 		struct etnaviv_gpu *gpu = priv->gpu[i];
-		struct drm_sched_rq *rq;
+		struct drm_gpu_scheduler *sched;
 
 		if (gpu) {
-			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+			sched = &gpu->sched;
 			drm_sched_entity_init(&ctx->sched_entity[i],
-					      &rq, 1, NULL);
+					      DRM_SCHED_PRIORITY_NORMAL, &sched,
+					      1, NULL);
 			}
 	}
 
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index f522c5f99729..fc8362e4149b 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -159,9 +159,10 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
 			    struct lima_sched_context *context,
 			    atomic_t *guilty)
 {
-	struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+	struct drm_gpu_scheduler *sched = &pipe->base;
 
-	return drm_sched_entity_init(&context->base, &rq, 1, guilty);
+	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
+				     &sched, 1, guilty);
 }
 
 void lima_sched_context_fini(struct lima_sched_pipe *pipe,
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index d411eb6c8eb9..4f9ae5a12090 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -542,12 +542,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 {
 	struct panfrost_device *pfdev = panfrost_priv->pfdev;
 	struct panfrost_job_slot *js = pfdev->js;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int ret, i;
 
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
-		rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+		sched = &js->queue[i].sched;
+		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
+					    DRM_SCHED_PRIORITY_NORMAL, &sched,
+					    1, NULL);
 		if (WARN_ON(ret))
 			return ret;
 	}
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 461a7a8129f4..f9b6ce29c58f 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -38,9 +38,10 @@
  * submit to HW ring.
  *
  * @entity: scheduler entity to init
- * @rq_list: the list of run queue on which jobs from this
+ * @priority: priority of the entity
+ * @sched_list: the list of drm scheds on which jobs from this
  *           entity can be submitted
- * @num_rq_list: number of run queue in rq_list
+ * @num_sched_list: number of drm sched in sched_list
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
@@ -50,32 +51,35 @@
  * Returns 0 on success or a negative error code on failure.
  */
 int drm_sched_entity_init(struct drm_sched_entity *entity,
-			  struct drm_sched_rq **rq_list,
-			  unsigned int num_rq_list,
+			  enum drm_sched_priority priority,
+			  struct drm_gpu_scheduler **sched_list,
+			  unsigned int num_sched_list,
 			  atomic_t *guilty)
 {
 	int i;
 
-	if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
+	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
 		return -EINVAL;
 
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
 	entity->rq = NULL;
 	entity->guilty = guilty;
-	entity->num_rq_list = num_rq_list;
-	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
-				GFP_KERNEL);
-	if (!entity->rq_list)
+	entity->num_sched_list = num_sched_list;
+	entity->priority = priority;
+	entity->sched_list =  kcalloc(num_sched_list,
+				      sizeof(struct drm_gpu_scheduler *), GFP_KERNEL);
+
+	if(!entity->sched_list)
 		return -ENOMEM;
 
 	init_completion(&entity->entity_idle);
 
-	for (i = 0; i < num_rq_list; ++i)
-		entity->rq_list[i] = rq_list[i];
+	for (i = 0; i < num_sched_list; i++)
+		entity->sched_list[i] = sched_list[i];
 
-	if (num_rq_list)
-		entity->rq = rq_list[0];
+	if (num_sched_list)
+		entity->rq = &entity->sched_list[0]->sched_rq[entity->priority];
 
 	entity->last_scheduled = NULL;
 
@@ -139,10 +143,10 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
 	unsigned int min_jobs = UINT_MAX, num_jobs;
 	int i;
 
-	for (i = 0; i < entity->num_rq_list; ++i) {
-		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+	for (i = 0; i < entity->num_sched_list; ++i) {
+		struct drm_gpu_scheduler *sched = entity->sched_list[i];
 
-		if (!entity->rq_list[i]->sched->ready) {
+		if (!entity->sched_list[i]->ready) {
 			DRM_WARN("sched%s is not ready, skipping", sched->name);
 			continue;
 		}
@@ -150,7 +154,7 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
 		num_jobs = atomic_read(&sched->num_jobs);
 		if (num_jobs < min_jobs) {
 			min_jobs = num_jobs;
-			rq = entity->rq_list[i];
+			rq = &entity->sched_list[i]->sched_rq[entity->priority];
 		}
 	}
 
@@ -308,7 +312,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 
 	dma_fence_put(entity->last_scheduled);
 	entity->last_scheduled = NULL;
-	kfree(entity->rq_list);
+	kfree(entity->sched_list);
 }
 EXPORT_SYMBOL(drm_sched_entity_fini);
 
@@ -353,15 +357,6 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
 	drm_sched_wakeup(entity->rq->sched);
 }
 
-/**
- * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
- */
-static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
-					     enum drm_sched_priority priority)
-{
-	*rq = &(*rq)->sched->sched_rq[priority];
-}
-
 /**
  * drm_sched_entity_set_priority - Sets priority of the entity
  *
@@ -373,19 +368,8 @@ static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
 void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
 				   enum drm_sched_priority priority)
 {
-	unsigned int i;
-
 	spin_lock(&entity->rq_lock);
-
-	for (i = 0; i < entity->num_rq_list; ++i)
-		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
-
-	if (entity->rq) {
-		drm_sched_rq_remove_entity(entity->rq, entity);
-		drm_sched_entity_set_rq_priority(&entity->rq, priority);
-		drm_sched_rq_add_entity(entity->rq, entity);
-	}
-
+	entity->priority = priority;
 	spin_unlock(&entity->rq_lock);
 }
 EXPORT_SYMBOL(drm_sched_entity_set_priority);
@@ -490,20 +474,20 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
 	struct dma_fence *fence;
 	struct drm_sched_rq *rq;
 
-	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
+	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
 		return;
 
 	fence = READ_ONCE(entity->last_scheduled);
 	if (fence && !dma_fence_is_signaled(fence))
 		return;
 
+	spin_lock(&entity->rq_lock);
 	rq = drm_sched_entity_get_free_sched(entity);
-	if (rq == entity->rq)
-		return;
+	if (rq != entity->rq) {
+		drm_sched_rq_remove_entity(entity->rq, entity);
+		entity->rq = rq;
+	}
 
-	spin_lock(&entity->rq_lock);
-	drm_sched_rq_remove_entity(entity->rq, entity);
-	entity->rq = rq;
 	spin_unlock(&entity->rq_lock);
 }
 
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1a07462b4528..eaa8e9682373 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -140,7 +140,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv;
-	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler *sched;
 	int i;
 
 	v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -150,8 +150,10 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 	v3d_priv->v3d = v3d;
 
 	for (i = 0; i < V3D_MAX_QUEUES; i++) {
-		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
+		sched = &v3d->queue[i].sched;
+		drm_sched_entity_init(&v3d_priv->sched_entity[i],
+				      DRM_SCHED_PRIORITY_NORMAL, &sched,
+				      1, NULL);
 	}
 
 	file->driver_priv = v3d_priv;
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 684692a8ed76..96a1a1b7526e 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -81,8 +81,9 @@ enum drm_sched_priority {
 struct drm_sched_entity {
 	struct list_head		list;
 	struct drm_sched_rq		*rq;
-	struct drm_sched_rq		**rq_list;
-	unsigned int                    num_rq_list;
+	unsigned int                    num_sched_list;
+	struct drm_gpu_scheduler        **sched_list;
+	enum drm_sched_priority         priority;
 	spinlock_t			rq_lock;
 
 	struct spsc_queue		job_queue;
@@ -312,7 +313,8 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 				struct drm_sched_entity *entity);
 
 int drm_sched_entity_init(struct drm_sched_entity *entity,
-			  struct drm_sched_rq **rq_list,
+			  enum drm_sched_priority priority,
+			  struct drm_gpu_scheduler **sched_list,
 			  unsigned int num_rq_list,
 			  atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
-- 
2.24.0


* [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list
  2019-12-09 21:53 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
@ 2019-12-09 21:53 ` Nirmoy Das
  2019-12-09 21:53 ` [PATCH 3/4] amd/amdgpu: add sched list to IPs with multiple run-queues Nirmoy Das
  2019-12-09 21:53 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
  2 siblings, 0 replies; 7+ messages in thread
From: Nirmoy Das @ 2019-12-09 21:53 UTC (permalink / raw)
  To: alexander.deucher, kenny.ho, christian.koenig; +Cc: nirmoy.das, amd-gfx

drm_sched_entity_init() now takes a drm gpu scheduler list instead of a
drm_sched_rq list. This makes the conversion of the drm_sched_rq list to
a drm gpu scheduler list in amdgpu_vm_init() unnecessary.
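
Illustrative sketch of the resulting pattern (field names are the ones
introduced below in amdgpu_vm.h and the SDMA blocks):

	/* IP blocks now publish schedulers instead of run queues ... */
	adev->vm_manager.vm_pte_scheds[i] = &adev->sdma.instance[i].ring.sched;
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;

	/* ... which amdgpu_vm_init() hands to the entities directly */
	r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);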

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     | 11 ++++-------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h     |  4 ++--
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c      |  8 +++-----
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c     |  8 +++-----
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c     |  8 +++-----
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     |  5 ++---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c     |  8 +++-----
 drivers/gpu/drm/amd/amdgpu/si_dma.c        |  8 +++-----
 9 files changed, 24 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f85007382093..cf4953c4e2cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2779,7 +2779,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->mman.buffer_funcs = NULL;
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
-	adev->vm_manager.vm_pte_num_rqs = 0;
+	adev->vm_manager.vm_pte_num_scheds = 0;
 	adev->gmc.gmc_funcs = NULL;
 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5e78db30c722..0e1ed8ef2ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2687,7 +2687,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 {
 	struct amdgpu_bo_param bp;
 	struct amdgpu_bo *root;
-	struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
 	int r, i;
 
 	vm->va = RB_ROOT_CACHED;
@@ -2701,19 +2700,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	spin_lock_init(&vm->invalidated_lock);
 	INIT_LIST_HEAD(&vm->freed);
 
-	for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
-		sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
 
 	/* create scheduler entities for page table updates */
 	r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
-				  sched_list, adev->vm_manager.vm_pte_num_rqs,
-				  NULL);
+				  adev->vm_manager.vm_pte_scheds,
+				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
 		return r;
 
 	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
-				  sched_list, adev->vm_manager.vm_pte_num_rqs,
-				  NULL);
+				  adev->vm_manager.vm_pte_scheds,
+				  adev->vm_manager.vm_pte_num_scheds, NULL);
 	if (r)
 		goto error_free_direct;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 76fcf853035c..5eaba8645a43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -322,8 +322,8 @@ struct amdgpu_vm_manager {
 	u64					vram_base_offset;
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
-	struct drm_sched_rq			*vm_pte_rqs[AMDGPU_MAX_RINGS];
-	unsigned				vm_pte_num_rqs;
+	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
+	unsigned				vm_pte_num_scheds;
 	struct amdgpu_ring			*page_fault;
 
 	/* partial resident texture handling */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 82cdb8f57bfd..1f22a8d0f7f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1373,16 +1373,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
 
 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			&adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version cik_sdma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 89e8c74a40f4..606b621145a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -1261,16 +1261,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
 
 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			&adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 011fd12c41fe..a559573ec8fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1699,16 +1699,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
 
 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			 &adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 350b2c99fefc..bd9ed33bab43 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2411,10 +2411,9 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 			sched = &adev->sdma.instance[i].page.sched;
 		else
 			sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] = sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 64c53eed7fac..63f667cfe3f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1723,17 +1723,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
 
 static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
 		for (i = 0; i < adev->sdma.num_instances; i++) {
-			sched = &adev->sdma.instance[i].ring.sched;
-			adev->vm_manager.vm_pte_rqs[i] =
-				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+			adev->vm_manager.vm_pte_scheds[i] =
+				&adev->sdma.instance[i].ring.sched;
 		}
-		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 	}
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 122df0732f0c..9ad85eddf9c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -835,16 +835,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
 
 static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
-	struct drm_gpu_scheduler *sched;
 	unsigned i;
 
 	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].ring.sched;
-		adev->vm_manager.vm_pte_rqs[i] =
-			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+		adev->vm_manager.vm_pte_scheds[i] =
+			&adev->sdma.instance[i].ring.sched;
 	}
-	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
+	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
 }
 
 const struct amdgpu_ip_block_version si_dma_ip_block =
-- 
2.24.0


* [PATCH 3/4] amd/amdgpu: add sched list to IPs with multiple run-queues
  2019-12-09 21:53 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
  2019-12-09 21:53 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
@ 2019-12-09 21:53 ` Nirmoy Das
  2019-12-10 11:28   ` Christian König
  2019-12-09 21:53 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
  2 siblings, 1 reply; 7+ messages in thread
From: Nirmoy Das @ 2019-12-09 21:53 UTC (permalink / raw)
  To: alexander.deucher, kenny.ho, christian.koenig; +Cc: nirmoy.das, amd-gfx

These sched lists can be passed to the entity creation routine instead of
manually building such a sched list on every context creation.
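
Rough sketch of the idea, using the compute arrays added in this patch:

	/* built once, at device init time ... */
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_sched_list[i] =
			&adev->gfx.compute_ring[i].sched;

	/* ... and only looked up in amdgpu_ctx_init() afterwards */
	case AMDGPU_HW_IP_COMPUTE:
		sched_list = adev->gfx.compute_sched_list;
		num_scheds = adev->gfx.num_compute_rings;
		break;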

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c    | 69 ++++++++--------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 44 ++++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h    |  4 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h   |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h   |  2 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h    |  9 ++-
 6 files changed, 85 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 1d6850af9908..c1fc75299b7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -74,7 +74,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 			   struct amdgpu_ctx *ctx)
 {
 	unsigned num_entities = amdgpu_ctx_total_num_entities();
-	unsigned i, j, k;
+	unsigned i, j;
 	int r;
 
 	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
@@ -121,73 +121,56 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
-		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
-		struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
-		unsigned num_rings = 0;
-		unsigned num_rqs = 0;
+		struct drm_gpu_scheduler **sched_list;
+		struct drm_gpu_scheduler *sched;
+		unsigned num_scheds = 0;
 
 		switch (i) {
 		case AMDGPU_HW_IP_GFX:
-			rings[0] = &adev->gfx.gfx_ring[0];
-			num_rings = 1;
+			sched_list = adev->gfx.gfx_sched_list;
+			num_scheds = 1;
 			break;
 		case AMDGPU_HW_IP_COMPUTE:
-			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
-				rings[j] = &adev->gfx.compute_ring[j];
-			num_rings = adev->gfx.num_compute_rings;
+			sched_list = adev->gfx.compute_sched_list;
+			num_scheds = adev->gfx.num_compute_rings;
 			break;
 		case AMDGPU_HW_IP_DMA:
-			for (j = 0; j < adev->sdma.num_instances; ++j)
-				rings[j] = &adev->sdma.instance[j].ring;
-			num_rings = adev->sdma.num_instances;
+			sched_list = adev->sdma.sdma_sched_list;
+			num_scheds = adev->sdma.num_instances;
 			break;
 		case AMDGPU_HW_IP_UVD:
-			rings[0] = &adev->uvd.inst[0].ring;
-			num_rings = 1;
+			sched = &adev->uvd.inst[0].ring.sched;
+			sched_list = &sched;
+			num_scheds = 1;
 			break;
 		case AMDGPU_HW_IP_VCE:
-			rings[0] = &adev->vce.ring[0];
-			num_rings = 1;
+			sched = &adev->vce.ring[0].sched;
+			sched_list = &sched;
+			num_scheds = 1;
 			break;
 		case AMDGPU_HW_IP_UVD_ENC:
-			rings[0] = &adev->uvd.inst[0].ring_enc[0];
-			num_rings = 1;
+			sched = &adev->uvd.inst[0].ring_enc[0].sched;
+			sched_list = &sched;
+			num_scheds = 1;
 			break;
 		case AMDGPU_HW_IP_VCN_DEC:
-			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-				if (adev->vcn.harvest_config & (1 << j))
-					continue;
-				rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
-			}
+			sched_list = adev->vcn.vcn_dec_sched_list;
+			num_scheds =  adev->vcn.num_vcn_dec_sched_list;
 			break;
 		case AMDGPU_HW_IP_VCN_ENC:
-			for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
-				if (adev->vcn.harvest_config & (1 << j))
-					continue;
-				for (k = 0; k < adev->vcn.num_enc_rings; ++k)
-					rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
-			}
+			sched_list = adev->vcn.vcn_enc_sched_list;
+			num_scheds =  adev->vcn.num_vcn_enc_sched_list;
 			break;
 		case AMDGPU_HW_IP_VCN_JPEG:
-			for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) {
-				if (adev->vcn.harvest_config & (1 << j))
-					continue;
-				rings[num_rings++] = &adev->jpeg.inst[j].ring_dec;
-			}
+			sched_list = adev->jpeg.jpeg_sched_list;
+			num_scheds =  adev->jpeg.num_jpeg_sched_list;
 			break;
 		}
 
-		for (j = 0; j < num_rings; ++j) {
-			if (!rings[j]->adev)
-				continue;
-
-			sched_list[num_rqs++] = &rings[j]->sched;
-		}
-
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
 			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
 						  priority, sched_list,
-						  num_rqs, &ctx->guilty);
+						  num_scheds, &ctx->guilty);
 		if (r)
 			goto error_cleanup_entities;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cf4953c4e2cf..f973b61a26da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2738,6 +2738,48 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
 	return ret;
 }
 
+static void amdgpu_device_init_sched_list(struct amdgpu_device *adev)
+{
+	int i, j;
+
+	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+		adev->gfx.gfx_sched_list[i] = &adev->gfx.gfx_ring[i].sched;
+		adev->gfx.num_gfx_sched_list++;
+	}
+
+	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+		adev->gfx.compute_sched_list[i] = &adev->gfx.compute_ring[i].sched;
+		adev->gfx.num_compute_sched_list++;
+	}
+
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		adev->sdma.sdma_sched_list[i] = &adev->sdma.instance[i].ring.sched;
+		adev->sdma.num_sdma_sched_list++;
+	}
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+		adev->vcn.vcn_dec_sched_list[adev->vcn.num_vcn_dec_sched_list++] =
+			&adev->vcn.inst[i].ring_dec.sched;
+	}
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
+			adev->vcn.vcn_enc_sched_list[adev->vcn.num_vcn_enc_sched_list++] =
+				&adev->vcn.inst[i].ring_enc[j].sched;
+	}
+
+	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+		if (adev->vcn.harvest_config & (1 << i))
+			continue;
+		adev->jpeg.jpeg_sched_list[adev->jpeg.num_jpeg_sched_list++] =
+			&adev->jpeg.inst[i].ring_dec.sched;
+	}
+}
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -3024,6 +3066,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		goto failed;
 	}
 
+	amdgpu_device_init_sched_list(adev);
+
 	adev->accel_working = true;
 
 	amdgpu_vm_check_compute_bug(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 0ae0a2715b0d..b2264d4cd510 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -269,8 +269,12 @@ struct amdgpu_gfx {
 	bool				me_fw_write_wait;
 	bool				cp_fw_write_wait;
 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
+	struct drm_gpu_scheduler	*gfx_sched_list[AMDGPU_MAX_GFX_RINGS];
+	uint32_t			num_gfx_sched_list;
 	unsigned			num_gfx_rings;
 	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+	uint32_t			num_compute_sched_list;
+	struct drm_gpu_scheduler	*compute_sched_list[AMDGPU_MAX_COMPUTE_RINGS];
 	unsigned			num_compute_rings;
 	struct amdgpu_irq_src		eop_irq;
 	struct amdgpu_irq_src		priv_reg_irq;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 5131a0a1bc8a..5ce6ab05eeac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -43,6 +43,8 @@ struct amdgpu_jpeg {
 	uint8_t	num_jpeg_inst;
 	struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
 	struct amdgpu_jpeg_reg internal;
+	struct drm_gpu_scheduler *jpeg_sched_list[AMDGPU_MAX_JPEG_INSTANCES];
+	uint32_t num_jpeg_sched_list;
 	unsigned harvest_config;
 	struct delayed_work idle_work;
 	enum amd_powergating_state cur_state;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index b3134655789f..0f6cb6768398 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -52,6 +52,8 @@ struct amdgpu_sdma_instance {
 
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct drm_gpu_scheduler    *sdma_sched_list[AMDGPU_MAX_SDMA_INSTANCES];
+	uint32_t		    num_sdma_sched_list;
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
 	struct amdgpu_irq_src	ecc_irq;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 402a5046b985..ae65cb8f07e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -31,6 +31,7 @@
 #define AMDGPU_VCN_MAX_ENC_RINGS	3
 
 #define AMDGPU_MAX_VCN_INSTANCES	2
+#define AMDGPU_MAX_VCN_ENC_RINGS  AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES
 
 #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
 #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
@@ -186,8 +187,12 @@ struct amdgpu_vcn {
 	uint32_t		*dpg_sram_curr_addr;
 
 	uint8_t	num_vcn_inst;
-	struct amdgpu_vcn_inst	inst[AMDGPU_MAX_VCN_INSTANCES];
-	struct amdgpu_vcn_reg	internal;
+	struct amdgpu_vcn_inst	 inst[AMDGPU_MAX_VCN_INSTANCES];
+	struct amdgpu_vcn_reg	 internal;
+	struct drm_gpu_scheduler *vcn_enc_sched_list[AMDGPU_MAX_VCN_ENC_RINGS];
+	struct drm_gpu_scheduler *vcn_dec_sched_list[AMDGPU_MAX_VCN_INSTANCES];
+	uint32_t		 num_vcn_enc_sched_list;
+	uint32_t		 num_vcn_dec_sched_list;
 
 	unsigned	harvest_config;
 	int (*pause_dpg_mode)(struct amdgpu_device *adev,
-- 
2.24.0


* [PATCH 4/4] drm/scheduler: do not keep a copy of sched list
  2019-12-09 21:53 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
  2019-12-09 21:53 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
  2019-12-09 21:53 ` [PATCH 3/4] amd/amdgpu: add sched list to IPs with multiple run-queues Nirmoy Das
@ 2019-12-09 21:53 ` Nirmoy Das
  2 siblings, 0 replies; 7+ messages in thread
From: Nirmoy Das @ 2019-12-09 21:53 UTC (permalink / raw)
  To: alexander.deucher, kenny.ho, christian.koenig; +Cc: nirmoy.das, amd-gfx

The entity should not keep and maintain its own copy of the sched list;
it can simply reference the list provided by the caller.
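
In drm_sched_entity_init() this boils down to the following (note that
the caller-provided array must now outlive the entity):

	/* before: allocate and copy the caller's array */
	entity->sched_list = kcalloc(num_sched_list,
				     sizeof(struct drm_gpu_scheduler *),
				     GFP_KERNEL);

	/* after: just reference the caller-owned array */
	entity->sched_list = sched_list;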

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
---
 drivers/gpu/drm/scheduler/sched_entity.c | 10 +---------
 1 file changed, 1 insertion(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index f9b6ce29c58f..a5f729f421f8 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -67,17 +67,10 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	entity->guilty = guilty;
 	entity->num_sched_list = num_sched_list;
 	entity->priority = priority;
-	entity->sched_list =  kcalloc(num_sched_list,
-				      sizeof(struct drm_gpu_scheduler *), GFP_KERNEL);
+	entity->sched_list =  sched_list;
 
-	if(!entity->sched_list)
-		return -ENOMEM;
 
 	init_completion(&entity->entity_idle);
-
-	for (i = 0; i < num_sched_list; i++)
-		entity->sched_list[i] = sched_list[i];
-
 	if (num_sched_list)
 		entity->rq = &entity->sched_list[0]->sched_rq[entity->priority];
 
@@ -312,7 +305,6 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 
 	dma_fence_put(entity->last_scheduled);
 	entity->last_scheduled = NULL;
-	kfree(entity->sched_list);
 }
 EXPORT_SYMBOL(drm_sched_entity_fini);
 
-- 
2.24.0


* Re: [PATCH 3/4] amd/amdgpu: add sched list to IPs with multiple run-queues
  2019-12-09 21:53 ` [PATCH 3/4] amd/amdgpu: add sched list to IPs with multiple run-queues Nirmoy Das
@ 2019-12-10 11:28   ` Christian König
  2019-12-10 12:55     ` Nirmoy
  0 siblings, 1 reply; 7+ messages in thread
From: Christian König @ 2019-12-10 11:28 UTC (permalink / raw)
  To: Nirmoy Das, alexander.deucher, kenny.ho; +Cc: nirmoy.das, amd-gfx

Am 09.12.19 um 22:53 schrieb Nirmoy Das:
> This sched list can be passed on to entity creation routine
> instead of manually creating such sched list on every context creation.

Please drop the "_list" from the names here. A list usually means a 
linked list and those are actually arrays.
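
E.g. something like this (just illustrative):

	struct drm_gpu_scheduler	*gfx_sched[AMDGPU_MAX_GFX_RINGS];
	uint32_t			num_gfx_sched;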

In addition to that, amdgpu_device_init_sched_list() should probably go
into amdgpu_ctx.c instead. That is actually not really device related, 
but more UAPI/ctx stuff.

Apart from that looks good to me,
Christian.



* Re: [PATCH 3/4] amd/amdgpu: add sched list to IPs with multiple run-queues
  2019-12-10 11:28   ` Christian König
@ 2019-12-10 12:55     ` Nirmoy
  2019-12-10 12:56       ` Christian König
  0 siblings, 1 reply; 7+ messages in thread
From: Nirmoy @ 2019-12-10 12:55 UTC (permalink / raw)
  To: Christian König, Nirmoy Das, alexander.deucher, kenny.ho
  Cc: nirmoy.das, amd-gfx

Thanks Christian. That makes sense; I resent the modified patches.

On 12/10/19 12:28 PM, Christian König wrote:
> Am 09.12.19 um 22:53 schrieb Nirmoy Das:
>> This sched list can be passed on to entity creation routine
>> instead of manually creating such sched list on every context creation.
>
> Please drop the "_list" from the names here. A list usually means a 
> linked list and those are actually arrays.
>
> Additional to that amdgpu_device_init_sched_list() should probably go 
> into amdgpu_ctx.c instead. That is actually not really device related, 
> but more UAPI/ctx stuff.
>
> Apart from that looks good to me,
> Christian.
>
>>
>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c    | 69 ++++++++--------------
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 44 ++++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h    |  4 ++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h   |  2 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h   |  2 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h    |  9 ++-
>>   6 files changed, 85 insertions(+), 45 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> index 1d6850af9908..c1fc75299b7d 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>> @@ -74,7 +74,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
>>                  struct amdgpu_ctx *ctx)
>>   {
>>       unsigned num_entities = amdgpu_ctx_total_num_entities();
>> -    unsigned i, j, k;
>> +    unsigned i, j;
>>       int r;
>>         if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
>> @@ -121,73 +121,56 @@ static int amdgpu_ctx_init(struct amdgpu_device 
>> *adev,
>>       ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
>>         for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
>> -        struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
>> -        struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
>> -        unsigned num_rings = 0;
>> -        unsigned num_rqs = 0;
>> +        struct drm_gpu_scheduler **sched_list;
>> +        struct drm_gpu_scheduler *sched;
>> +        unsigned num_scheds = 0;
>>             switch (i) {
>>           case AMDGPU_HW_IP_GFX:
>> -            rings[0] = &adev->gfx.gfx_ring[0];
>> -            num_rings = 1;
>> +            sched_list = adev->gfx.gfx_sched_list;
>> +            num_scheds = 1;
>>               break;
>>           case AMDGPU_HW_IP_COMPUTE:
>> -            for (j = 0; j < adev->gfx.num_compute_rings; ++j)
>> -                rings[j] = &adev->gfx.compute_ring[j];
>> -            num_rings = adev->gfx.num_compute_rings;
>> +            sched_list = adev->gfx.compute_sched_list;
>> +            num_scheds = adev->gfx.num_compute_rings;
>>               break;
>>           case AMDGPU_HW_IP_DMA:
>> -            for (j = 0; j < adev->sdma.num_instances; ++j)
>> -                rings[j] = &adev->sdma.instance[j].ring;
>> -            num_rings = adev->sdma.num_instances;
>> +            sched_list = adev->sdma.sdma_sched_list;
>> +            num_scheds = adev->sdma.num_instances;
>>               break;
>>           case AMDGPU_HW_IP_UVD:
>> -            rings[0] = &adev->uvd.inst[0].ring;
>> -            num_rings = 1;
>> +            sched = &adev->uvd.inst[0].ring.sched;
>> +            sched_list = &sched;
>> +            num_scheds = 1;
>>               break;
>>           case AMDGPU_HW_IP_VCE:
>> -            rings[0] = &adev->vce.ring[0];
>> -            num_rings = 1;
>> +            sched = &adev->vce.ring[0].sched;
>> +            sched_list = &sched;
>> +            num_scheds = 1;
>>               break;
>>           case AMDGPU_HW_IP_UVD_ENC:
>> -            rings[0] = &adev->uvd.inst[0].ring_enc[0];
>> -            num_rings = 1;
>> +            sched = &adev->uvd.inst[0].ring_enc[0].sched;
>> +            sched_list = &sched;
>> +            num_scheds = 1;
>>               break;
>>           case AMDGPU_HW_IP_VCN_DEC:
>> -            for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
>> -                if (adev->vcn.harvest_config & (1 << j))
>> -                    continue;
>> -                rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
>> -            }
>> +            sched_list = adev->vcn.vcn_dec_sched_list;
>> +            num_scheds =  adev->vcn.num_vcn_dec_sched_list;
>>               break;
>>           case AMDGPU_HW_IP_VCN_ENC:
>> -            for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
>> -                if (adev->vcn.harvest_config & (1 << j))
>> -                    continue;
>> -                for (k = 0; k < adev->vcn.num_enc_rings; ++k)
>> -                    rings[num_rings++] = 
>> &adev->vcn.inst[j].ring_enc[k];
>> -            }
>> +            sched_list = adev->vcn.vcn_enc_sched_list;
>> +            num_scheds =  adev->vcn.num_vcn_enc_sched_list;
>>               break;
>>           case AMDGPU_HW_IP_VCN_JPEG:
>> -            for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) {
>> -                if (adev->vcn.harvest_config & (1 << j))
>> -                    continue;
>> -                rings[num_rings++] = &adev->jpeg.inst[j].ring_dec;
>> -            }
>> +            sched_list = adev->jpeg.jpeg_sched_list;
>> +            num_scheds =  adev->jpeg.num_jpeg_sched_list;
>>               break;
>>           }
>>   -        for (j = 0; j < num_rings; ++j) {
>> -            if (!rings[j]->adev)
>> -                continue;
>> -
>> -            sched_list[num_rqs++] = &rings[j]->sched;
>> -        }
>> -
>>           for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
>>               r = drm_sched_entity_init(&ctx->entities[i][j].entity,
>>                             priority, sched_list,
>> -                          num_rqs, &ctx->guilty);
>> +                          num_scheds, &ctx->guilty);
>>           if (r)
>>               goto error_cleanup_entities;
>>       }
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index cf4953c4e2cf..f973b61a26da 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -2738,6 +2738,48 @@ static int 
>> amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
>>       return ret;
>>   }
>>   +static void amdgpu_device_init_sched_list(struct amdgpu_device *adev)
>> +{
>> +    int i, j;
>> +
>> +    for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
>> +        adev->gfx.gfx_sched_list[i] = &adev->gfx.gfx_ring[i].sched;
>> +        adev->gfx.num_gfx_sched_list++;
>> +    }
>> +
>> +    for (i = 0; i < adev->gfx.num_compute_rings; i++) {
>> +        adev->gfx.compute_sched_list[i] = 
>> &adev->gfx.compute_ring[i].sched;
>> +        adev->gfx.num_compute_sched_list++;
>> +    }
>> +
>> +    for (i = 0; i < adev->sdma.num_instances; i++) {
>> +        adev->sdma.sdma_sched_list[i] = 
>> &adev->sdma.instance[i].ring.sched;
>> +        adev->sdma.num_sdma_sched_list++;
>> +    }
>> +
>> +    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
>> +        if (adev->vcn.harvest_config & (1 << i))
>> +            continue;
>> + adev->vcn.vcn_dec_sched_list[adev->vcn.num_vcn_dec_sched_list++] =
>> +            &adev->vcn.inst[i].ring_dec.sched;
>> +    }
>> +
>> +    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
>> +        if (adev->vcn.harvest_config & (1 << i))
>> +            continue;
>> +        for (j = 0; j < adev->vcn.num_enc_rings; ++j)
>> + adev->vcn.vcn_enc_sched_list[adev->vcn.num_vcn_enc_sched_list++] =
>> +                &adev->vcn.inst[i].ring_enc[j].sched;
>> +    }
>> +
>> +    for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
>> +        if (adev->vcn.harvest_config & (1 << i))
>> +            continue;
>> + adev->jpeg.jpeg_sched_list[adev->jpeg.num_jpeg_sched_list++] =
>> +            &adev->jpeg.inst[i].ring_dec.sched;
>> +    }
>> +}
>> +
>>   /**
>>    * amdgpu_device_init - initialize the driver
>>    *
>> @@ -3024,6 +3066,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>           goto failed;
>>       }
>>
>> +    amdgpu_device_init_sched_list(adev);
>> +
>>       adev->accel_working = true;
>>         amdgpu_vm_check_compute_bug(adev);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
>> index 0ae0a2715b0d..b2264d4cd510 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
>> @@ -269,8 +269,12 @@ struct amdgpu_gfx {
>>       bool                me_fw_write_wait;
>>       bool                cp_fw_write_wait;
>>       struct amdgpu_ring        gfx_ring[AMDGPU_MAX_GFX_RINGS];
>> +    struct drm_gpu_scheduler *gfx_sched_list[AMDGPU_MAX_GFX_RINGS];
>> +    uint32_t            num_gfx_sched_list;
>>       unsigned            num_gfx_rings;
>>       struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
>> +    uint32_t            num_compute_sched_list;
>> +    struct drm_gpu_scheduler *compute_sched_list[AMDGPU_MAX_COMPUTE_RINGS];
>>       unsigned            num_compute_rings;
>>       struct amdgpu_irq_src        eop_irq;
>>       struct amdgpu_irq_src        priv_reg_irq;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
>> index 5131a0a1bc8a..5ce6ab05eeac 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
>> @@ -43,6 +43,8 @@ struct amdgpu_jpeg {
>>       uint8_t    num_jpeg_inst;
>>       struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
>>       struct amdgpu_jpeg_reg internal;
>> +    struct drm_gpu_scheduler *jpeg_sched_list[AMDGPU_MAX_JPEG_INSTANCES];
>> +    uint32_t num_jpeg_sched_list;
>>       unsigned harvest_config;
>>       struct delayed_work idle_work;
>>       enum amd_powergating_state cur_state;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
>> index b3134655789f..0f6cb6768398 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
>> @@ -52,6 +52,8 @@ struct amdgpu_sdma_instance {
>>     struct amdgpu_sdma {
>>       struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
>> +    struct drm_gpu_scheduler *sdma_sched_list[AMDGPU_MAX_SDMA_INSTANCES];
>> +    uint32_t            num_sdma_sched_list;
>>       struct amdgpu_irq_src    trap_irq;
>>       struct amdgpu_irq_src    illegal_inst_irq;
>>       struct amdgpu_irq_src    ecc_irq;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
>> index 402a5046b985..ae65cb8f07e8 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
>> @@ -31,6 +31,7 @@
>>   #define AMDGPU_VCN_MAX_ENC_RINGS    3
>>     #define AMDGPU_MAX_VCN_INSTANCES    2
>> +#define AMDGPU_MAX_VCN_ENC_RINGS  AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES
>>     #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
>>   #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
>> @@ -186,8 +187,12 @@ struct amdgpu_vcn {
>>       uint32_t        *dpg_sram_curr_addr;
>>         uint8_t    num_vcn_inst;
>> -    struct amdgpu_vcn_inst    inst[AMDGPU_MAX_VCN_INSTANCES];
>> -    struct amdgpu_vcn_reg    internal;
>> +    struct amdgpu_vcn_inst     inst[AMDGPU_MAX_VCN_INSTANCES];
>> +    struct amdgpu_vcn_reg     internal;
>> +    struct drm_gpu_scheduler *vcn_enc_sched_list[AMDGPU_MAX_VCN_ENC_RINGS];
>> +    struct drm_gpu_scheduler *vcn_dec_sched_list[AMDGPU_MAX_VCN_INSTANCES];
>> +    uint32_t         num_vcn_enc_sched_list;
>> +    uint32_t         num_vcn_dec_sched_list;
>>         unsigned    harvest_config;
>>       int (*pause_dpg_mode)(struct amdgpu_device *adev,
>
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

* Re: [PATCH 3/4] amd/amdgpu: add sched list to IPs with multiple run-queues
  2019-12-10 12:55     ` Nirmoy
@ 2019-12-10 12:56       ` Christian König
  0 siblings, 0 replies; 7+ messages in thread
From: Christian König @ 2019-12-10 12:56 UTC (permalink / raw)
  To: Nirmoy, Nirmoy Das, alexander.deucher, kenny.ho; +Cc: nirmoy.das, amd-gfx

Yeah, but you are too fast for me. I was still looking into comments for
patch #4 :)

Christian.

On 10.12.19 at 13:55, Nirmoy wrote:
> Thanks Christian. That makes sense; I resent the modified patches.
>
> On 12/10/19 12:28 PM, Christian König wrote:
>> On 09.12.19 at 22:53, Nirmoy Das wrote:
>>> This sched list can be passed to the entity creation routine
>>> instead of manually creating such a sched list on every context creation.
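
For reference, a minimal fragment of the call pattern the commit message describes,
taken from the AMDGPU_HW_IP_COMPUTE case in the diff below; the surrounding loop
variables are the ones used in amdgpu_ctx_init():

    /* Reuse the scheduler array built once at device init instead of
     * assembling a run-queue list for every new context. */
    struct drm_gpu_scheduler **sched_list = adev->gfx.compute_sched_list;
    unsigned num_scheds = adev->gfx.num_compute_rings;

    r = drm_sched_entity_init(&ctx->entities[i][j].entity, priority,
                              sched_list, num_scheds, &ctx->guilty);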
>>
>> Please drop the "_list" from the names here. A list usually means a
>> linked list, and these are actually arrays.
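
As an illustration only, the suggested rename could look roughly like this
(the exact names are a guess at the suggestion, not upstream code):

    /* Sketch: drop the "_list" suffix, since these are plain arrays of
     * scheduler pointers rather than linked lists. */
    struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS];         /* was gfx_sched_list */
    uint32_t                 num_gfx_sched;                            /* was num_gfx_sched_list */
    struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS]; /* was compute_sched_list */
    uint32_t                 num_compute_sched;                        /* was num_compute_sched_list */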
>>
>> In addition to that, amdgpu_device_init_sched_list() should probably go
>> into amdgpu_ctx.c instead. That is not really device related, but more
>> UAPI/ctx stuff.
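
A rough sketch of that move, with a hypothetical amdgpu_ctx_init_sched_list()
living in amdgpu_ctx.c; the helper name and call site are assumptions, only the
fields are the ones added by the patch:

    /* Hypothetical: same initialization as amdgpu_device_init_sched_list()
     * in the patch, just kept next to the rest of the ctx/UAPI code and
     * called from there during init instead of from amdgpu_device_init(). */
    static void amdgpu_ctx_init_sched_list(struct amdgpu_device *adev)
    {
            int i;

            for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                    adev->gfx.gfx_sched_list[adev->gfx.num_gfx_sched_list++] =
                            &adev->gfx.gfx_ring[i].sched;

            for (i = 0; i < adev->gfx.num_compute_rings; i++)
                    adev->gfx.compute_sched_list[adev->gfx.num_compute_sched_list++] =
                            &adev->gfx.compute_ring[i].sched;

            /* SDMA, VCN dec/enc and JPEG would follow the same pattern,
             * skipping harvested instances, as in the original patch. */
    }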
>>
>> Apart from that looks good to me,
>> Christian.
>>
>>>
>>> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
>>> ---
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c    | 69 ++++++++--------------
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 44 ++++++++++++++
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h    |  4 ++
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h   |  2 +
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h   |  2 +
>>>   drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h    |  9 ++-
>>>   6 files changed, 85 insertions(+), 45 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>>> index 1d6850af9908..c1fc75299b7d 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
>>> @@ -74,7 +74,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
>>>                  struct amdgpu_ctx *ctx)
>>>   {
>>>       unsigned num_entities = amdgpu_ctx_total_num_entities();
>>> -    unsigned i, j, k;
>>> +    unsigned i, j;
>>>       int r;
>>>         if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
>>> @@ -121,73 +121,56 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
>>>       ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
>>>         for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
>>> -        struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
>>> -        struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
>>> -        unsigned num_rings = 0;
>>> -        unsigned num_rqs = 0;
>>> +        struct drm_gpu_scheduler **sched_list;
>>> +        struct drm_gpu_scheduler *sched;
>>> +        unsigned num_scheds = 0;
>>>             switch (i) {
>>>           case AMDGPU_HW_IP_GFX:
>>> -            rings[0] = &adev->gfx.gfx_ring[0];
>>> -            num_rings = 1;
>>> +            sched_list = adev->gfx.gfx_sched_list;
>>> +            num_scheds = 1;
>>>               break;
>>>           case AMDGPU_HW_IP_COMPUTE:
>>> -            for (j = 0; j < adev->gfx.num_compute_rings; ++j)
>>> -                rings[j] = &adev->gfx.compute_ring[j];
>>> -            num_rings = adev->gfx.num_compute_rings;
>>> +            sched_list = adev->gfx.compute_sched_list;
>>> +            num_scheds = adev->gfx.num_compute_rings;
>>>               break;
>>>           case AMDGPU_HW_IP_DMA:
>>> -            for (j = 0; j < adev->sdma.num_instances; ++j)
>>> -                rings[j] = &adev->sdma.instance[j].ring;
>>> -            num_rings = adev->sdma.num_instances;
>>> +            sched_list = adev->sdma.sdma_sched_list;
>>> +            num_scheds = adev->sdma.num_instances;
>>>               break;
>>>           case AMDGPU_HW_IP_UVD:
>>> -            rings[0] = &adev->uvd.inst[0].ring;
>>> -            num_rings = 1;
>>> +            sched = &adev->uvd.inst[0].ring.sched;
>>> +            sched_list = &sched;
>>> +            num_scheds = 1;
>>>               break;
>>>           case AMDGPU_HW_IP_VCE:
>>> -            rings[0] = &adev->vce.ring[0];
>>> -            num_rings = 1;
>>> +            sched = &adev->vce.ring[0].sched;
>>> +            sched_list = &sched;
>>> +            num_scheds = 1;
>>>               break;
>>>           case AMDGPU_HW_IP_UVD_ENC:
>>> -            rings[0] = &adev->uvd.inst[0].ring_enc[0];
>>> -            num_rings = 1;
>>> +            sched = &adev->uvd.inst[0].ring_enc[0].sched;
>>> +            sched_list = &sched;
>>> +            num_scheds = 1;
>>>               break;
>>>           case AMDGPU_HW_IP_VCN_DEC:
>>> -            for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
>>> -                if (adev->vcn.harvest_config & (1 << j))
>>> -                    continue;
>>> -                rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
>>> -            }
>>> +            sched_list = adev->vcn.vcn_dec_sched_list;
>>> +            num_scheds = adev->vcn.num_vcn_dec_sched_list;
>>>               break;
>>>           case AMDGPU_HW_IP_VCN_ENC:
>>> -            for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
>>> -                if (adev->vcn.harvest_config & (1 << j))
>>> -                    continue;
>>> -                for (k = 0; k < adev->vcn.num_enc_rings; ++k)
>>> -                    rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
>>> -            }
>>> +            sched_list = adev->vcn.vcn_enc_sched_list;
>>> +            num_scheds = adev->vcn.num_vcn_enc_sched_list;
>>>               break;
>>>           case AMDGPU_HW_IP_VCN_JPEG:
>>> -            for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) {
>>> -                if (adev->vcn.harvest_config & (1 << j))
>>> -                    continue;
>>> -                rings[num_rings++] = &adev->jpeg.inst[j].ring_dec;
>>> -            }
>>> +            sched_list = adev->jpeg.jpeg_sched_list;
>>> +            num_scheds =  adev->jpeg.num_jpeg_sched_list;
>>>               break;
>>>           }
>>>
>>> -        for (j = 0; j < num_rings; ++j) {
>>> -            if (!rings[j]->adev)
>>> -                continue;
>>> -
>>> -            sched_list[num_rqs++] = &rings[j]->sched;
>>> -        }
>>> -
>>>           for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j)
>>>               r = drm_sched_entity_init(&ctx->entities[i][j].entity,
>>>                             priority, sched_list,
>>> -                          num_rqs, &ctx->guilty);
>>> +                          num_scheds, &ctx->guilty);
>>>           if (r)
>>>               goto error_cleanup_entities;
>>>       }
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> index cf4953c4e2cf..f973b61a26da 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> @@ -2738,6 +2738,48 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
>>>       return ret;
>>>   }
>>>
>>> +static void amdgpu_device_init_sched_list(struct amdgpu_device *adev)
>>> +{
>>> +    int i, j;
>>> +
>>> +    for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
>>> +        adev->gfx.gfx_sched_list[i] = &adev->gfx.gfx_ring[i].sched;
>>> +        adev->gfx.num_gfx_sched_list++;
>>> +    }
>>> +
>>> +    for (i = 0; i < adev->gfx.num_compute_rings; i++) {
>>> +        adev->gfx.compute_sched_list[i] = &adev->gfx.compute_ring[i].sched;
>>> +        adev->gfx.num_compute_sched_list++;
>>> +    }
>>> +
>>> +    for (i = 0; i < adev->sdma.num_instances; i++) {
>>> +        adev->sdma.sdma_sched_list[i] = &adev->sdma.instance[i].ring.sched;
>>> +        adev->sdma.num_sdma_sched_list++;
>>> +    }
>>> +
>>> +    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
>>> +        if (adev->vcn.harvest_config & (1 << i))
>>> +            continue;
>>> +        adev->vcn.vcn_dec_sched_list[adev->vcn.num_vcn_dec_sched_list++] =
>>> +            &adev->vcn.inst[i].ring_dec.sched;
>>> +    }
>>> +
>>> +    for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
>>> +        if (adev->vcn.harvest_config & (1 << i))
>>> +            continue;
>>> +        for (j = 0; j < adev->vcn.num_enc_rings; ++j)
>>> +            adev->vcn.vcn_enc_sched_list[adev->vcn.num_vcn_enc_sched_list++] =
>>> +                &adev->vcn.inst[i].ring_enc[j].sched;
>>> +    }
>>> +
>>> +    for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
>>> +        if (adev->vcn.harvest_config & (1 << i))
>>> +            continue;
>>> +        adev->jpeg.jpeg_sched_list[adev->jpeg.num_jpeg_sched_list++] =
>>> +            &adev->jpeg.inst[i].ring_dec.sched;
>>> +    }
>>> +}
>>> +
>>>   /**
>>>    * amdgpu_device_init - initialize the driver
>>>    *
>>> @@ -3024,6 +3066,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>>>           goto failed;
>>>       }
>>>
>>> +    amdgpu_device_init_sched_list(adev);
>>> +
>>>       adev->accel_working = true;
>>>         amdgpu_vm_check_compute_bug(adev);
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
>>> index 0ae0a2715b0d..b2264d4cd510 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
>>> @@ -269,8 +269,12 @@ struct amdgpu_gfx {
>>>       bool                me_fw_write_wait;
>>>       bool                cp_fw_write_wait;
>>>       struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
>>> +    struct drm_gpu_scheduler *gfx_sched_list[AMDGPU_MAX_GFX_RINGS];
>>> +    uint32_t            num_gfx_sched_list;
>>>       unsigned            num_gfx_rings;
>>>       struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
>>> +    uint32_t            num_compute_sched_list;
>>> +    struct drm_gpu_scheduler *compute_sched_list[AMDGPU_MAX_COMPUTE_RINGS];
>>>       unsigned            num_compute_rings;
>>>       struct amdgpu_irq_src        eop_irq;
>>>       struct amdgpu_irq_src        priv_reg_irq;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
>>> index 5131a0a1bc8a..5ce6ab05eeac 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
>>> @@ -43,6 +43,8 @@ struct amdgpu_jpeg {
>>>       uint8_t    num_jpeg_inst;
>>>       struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
>>>       struct amdgpu_jpeg_reg internal;
>>> +    struct drm_gpu_scheduler *jpeg_sched_list[AMDGPU_MAX_JPEG_INSTANCES];
>>> +    uint32_t num_jpeg_sched_list;
>>>       unsigned harvest_config;
>>>       struct delayed_work idle_work;
>>>       enum amd_powergating_state cur_state;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
>>> index b3134655789f..0f6cb6768398 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
>>> @@ -52,6 +52,8 @@ struct amdgpu_sdma_instance {
>>>     struct amdgpu_sdma {
>>>       struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
>>> +    struct drm_gpu_scheduler *sdma_sched_list[AMDGPU_MAX_SDMA_INSTANCES];
>>> +    uint32_t            num_sdma_sched_list;
>>>       struct amdgpu_irq_src    trap_irq;
>>>       struct amdgpu_irq_src    illegal_inst_irq;
>>>       struct amdgpu_irq_src    ecc_irq;
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
>>> index 402a5046b985..ae65cb8f07e8 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
>>> @@ -31,6 +31,7 @@
>>>   #define AMDGPU_VCN_MAX_ENC_RINGS    3
>>>     #define AMDGPU_MAX_VCN_INSTANCES    2
>>> +#define AMDGPU_MAX_VCN_ENC_RINGS  AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES
>>>     #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
>>>   #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
>>> @@ -186,8 +187,12 @@ struct amdgpu_vcn {
>>>       uint32_t        *dpg_sram_curr_addr;
>>>         uint8_t    num_vcn_inst;
>>> -    struct amdgpu_vcn_inst    inst[AMDGPU_MAX_VCN_INSTANCES];
>>> -    struct amdgpu_vcn_reg    internal;
>>> +    struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
>>> +    struct amdgpu_vcn_reg     internal;
>>> +    struct drm_gpu_scheduler *vcn_enc_sched_list[AMDGPU_MAX_VCN_ENC_RINGS];
>>> +    struct drm_gpu_scheduler *vcn_dec_sched_list[AMDGPU_MAX_VCN_INSTANCES];
>>> +    uint32_t         num_vcn_enc_sched_list;
>>> +    uint32_t         num_vcn_dec_sched_list;
>>>         unsigned    harvest_config;
>>>       int (*pause_dpg_mode)(struct amdgpu_device *adev,
>>

end of thread, other threads:[~2019-12-10 12:56 UTC | newest]

Thread overview: 7+ messages
2019-12-09 21:53 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-09 21:53 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
2019-12-09 21:53 ` [PATCH 3/4] amd/amdgpu: add sched list to IPs with multiple run-queues Nirmoy Das
2019-12-10 11:28   ` Christian König
2019-12-10 12:55     ` Nirmoy
2019-12-10 12:56       ` Christian König
2019-12-09 21:53 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
