* [PATCH 1/4] drm/scheduler: add a list of run queues to the entity
@ 2018-08-01  8:19 Nayan Deshmukh
  2018-08-01  8:20 ` [PATCH 4/4] drm/scheduler: move idle entities to scheduler with less load v2 Nayan Deshmukh
       [not found] ` <20180801082002.20696-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 2 replies; 15+ messages in thread
From: Nayan Deshmukh @ 2018-08-01  8:19 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Nayan Deshmukh, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg, alexdeucher-Re5JQEeQqe8AvxtiuMwx3w,
	christian.koenig-5C7GfCeVMHo, l.stach-bIcnvbaLZ9MEGnE8C9+IrQ

These are the potential run queues on which the jobs from this
entity can be scheduled. We will use this to do load balancing.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 8 ++++++++
 include/drm/gpu_scheduler.h               | 7 ++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 3f2fc5e8242a..a3eacc35cf98 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -179,6 +179,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 			  unsigned int num_rq_list,
 			  atomic_t *guilty)
 {
+	int i;
+
 	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
 		return -EINVAL;
 
@@ -186,6 +188,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	INIT_LIST_HEAD(&entity->list);
 	entity->rq = rq_list[0];
 	entity->guilty = guilty;
+	entity->num_rq_list = num_rq_list;
+	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
+				GFP_KERNEL);
+	for (i = 0; i < num_rq_list; ++i)
+		entity->rq_list[i] = rq_list[i];
 	entity->last_scheduled = NULL;
 
 	spin_lock_init(&entity->rq_lock);
@@ -363,6 +370,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 
 	dma_fence_put(entity->last_scheduled);
 	entity->last_scheduled = NULL;
+	kfree(entity->rq_list);
 }
 EXPORT_SYMBOL(drm_sched_entity_fini);
 
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 091b9afcd184..a60896222a3e 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -50,7 +50,10 @@ enum drm_sched_priority {
  *
  * @list: used to append this struct to the list of entities in the
  *        runqueue.
- * @rq: runqueue to which this entity belongs.
+ * @rq: runqueue on which this entity is currently scheduled.
+ * @rq_list: a list of run queues on which jobs from this entity can
+ *           be scheduled
+ * @num_rq_list: number of run queues in the rq_list
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
@@ -74,6 +77,8 @@ enum drm_sched_priority {
 struct drm_sched_entity {
 	struct list_head		list;
 	struct drm_sched_rq		*rq;
+	struct drm_sched_rq		**rq_list;
+	unsigned int                    num_rq_list;
 	spinlock_t			rq_lock;
 
 	struct spsc_queue		job_queue;
-- 
2.14.3

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
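
For drivers opting in to the balancing added later in this series, the
entity would be initialized with one run queue per scheduler of the
same ring family. Below is a minimal driver-side sketch; the helper
name, the array sizes and the choice of priority level are illustrative
assumptions, not part of this series, and the per-priority sched_rq
indexing follows the existing drm_gpu_scheduler layout:

/*
 * Sketch only: build an rq array spanning all schedulers of one ring
 * family and pass it to drm_sched_entity_init(), which (with this
 * patch) copies the pointers into entity->rq_list.
 */
static int example_entity_init(struct drm_sched_entity *entity,
			       struct drm_gpu_scheduler **scheds,
			       unsigned int num_scheds)
{
	struct drm_sched_rq *rq_list[8];
	unsigned int i;

	if (WARN_ON(num_scheds == 0 || num_scheds > ARRAY_SIZE(rq_list)))
		return -EINVAL;

	/* Use the same priority level on every scheduler of the family. */
	for (i = 0; i < num_scheds; ++i)
		rq_list[i] = &scheds[i]->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	return drm_sched_entity_init(entity, rq_list, num_scheds, NULL);
}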

* [PATCH 2/4] drm/scheduler: add counter for total jobs in scheduler
       [not found] ` <20180801082002.20696-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2018-08-01  8:20   ` Nayan Deshmukh
       [not found]     ` <20180801082002.20696-2-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2018-08-01  8:20   ` [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2 Nayan Deshmukh
  2018-08-01  9:35   ` [PATCH 1/4] drm/scheduler: add a list of run queues to the entity Christian König
  2 siblings, 1 reply; 15+ messages in thread
From: Nayan Deshmukh @ 2018-08-01  8:20 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Nayan Deshmukh, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg, alexdeucher-Re5JQEeQqe8AvxtiuMwx3w,
	christian.koenig-5C7GfCeVMHo, l.stach-bIcnvbaLZ9MEGnE8C9+IrQ

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
 include/drm/gpu_scheduler.h               | 2 ++
 2 files changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index a3eacc35cf98..375f6f7f6a93 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
 	trace_drm_sched_job(sched_job, entity);
 
+	atomic_inc(&entity->rq->sched->num_jobs);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
 	/* first job wakes up scheduler */
@@ -836,6 +837,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 
 	dma_fence_get(&s_fence->finished);
 	atomic_dec(&sched->hw_rq_count);
+	atomic_dec(&sched->num_jobs);
 	drm_sched_fence_finished(s_fence);
 
 	trace_drm_sched_process_job(s_fence);
@@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
+	atomic_set(&sched->num_jobs, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a seperate kernel thread */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index a60896222a3e..89881ce974a5 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will be considered for scheduling further.
+ * @num_jobs: the number of jobs in queue in the scheduler
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
+	atomic_t                        num_jobs;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
-- 
2.14.3

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

* [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2
       [not found] ` <20180801082002.20696-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2018-08-01  8:20   ` [PATCH 2/4] drm/scheduler: add counter for total jobs in scheduler Nayan Deshmukh
@ 2018-08-01  8:20   ` Nayan Deshmukh
  2018-08-01 15:35     ` Andrey Grodzovsky
  2018-08-01  9:35   ` [PATCH 1/4] drm/scheduler: add a list of run queues to the entity Christian König
  2 siblings, 1 reply; 15+ messages in thread
From: Nayan Deshmukh @ 2018-08-01  8:20 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Nayan Deshmukh, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg, alexdeucher-Re5JQEeQqe8AvxtiuMwx3w,
	christian.koenig-5C7GfCeVMHo, l.stach-bIcnvbaLZ9MEGnE8C9+IrQ

The function selects the run queue from the rq_list with the
least load. The load is decided by the number of jobs in a
scheduler.

v2: avoid using atomic read twice consecutively, instead store
    it locally

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 375f6f7f6a93..fb4e542660b0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -255,6 +255,31 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
 	return true;
 }
 
+/**
+ * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
+ *
+ * @entity: scheduler entity
+ *
+ * Return the pointer to the rq with least load.
+ */
+static struct drm_sched_rq *
+drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
+{
+	struct drm_sched_rq *rq = NULL;
+	unsigned int min_jobs = UINT_MAX, num_jobs;
+	int i;
+
+	for (i = 0; i < entity->num_rq_list; ++i) {
+		num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
+		if (num_jobs < min_jobs) {
+			min_jobs = num_jobs;
+			rq = entity->rq_list[i];
+		}
+	}
+
+	return rq;
+}
+
 static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 				    struct dma_fence_cb *cb)
 {
-- 
2.14.3

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
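
The selection is a plain arg-min over the per-scheduler num_jobs
counters introduced in patch 2; on a tie the first run queue in rq_list
wins, because only a strictly smaller count replaces the current
choice. A stand-alone toy model of the same loop (ordinary userspace C
with made-up job counts, not kernel code) for illustration:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* Hypothetical per-scheduler queue depths for one ring family. */
	unsigned int num_jobs[] = { 4, 1, 3 };
	unsigned int min_jobs = UINT_MAX, best = 0, i;

	for (i = 0; i < sizeof(num_jobs) / sizeof(num_jobs[0]); ++i) {
		if (num_jobs[i] < min_jobs) {	/* strict '<': first minimum wins */
			min_jobs = num_jobs[i];
			best = i;
		}
	}

	printf("least loaded scheduler: %u (%u jobs)\n", best, min_jobs);
	return 0;
}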

* [PATCH 4/4] drm/scheduler: move idle entities to scheduler with less load v2
  2018-08-01  8:19 [PATCH 1/4] drm/scheduler: add a list of run queues to the entity Nayan Deshmukh
@ 2018-08-01  8:20 ` Nayan Deshmukh
       [not found] ` <20180801082002.20696-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  1 sibling, 0 replies; 15+ messages in thread
From: Nayan Deshmukh @ 2018-08-01  8:20 UTC (permalink / raw)
  To: dri-devel; +Cc: Nayan Deshmukh, amd-gfx, christian.koenig

This is the first attempt to move entities between schedulers to
achieve dynamic load balancing. For now we only move entities that have
no jobs queued, as moving entities with pending jobs would lead to
further complications, such as ensuring that the other scheduler does
not remove a job from the current entity while it is being moved.

v2: remove unused variable and an unnecessary check

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index fb4e542660b0..087fa479f7e0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -539,6 +539,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 	if (!sched_job)
 		return NULL;
 
+	sched_job->sched = sched;
+	sched_job->s_fence->sched = sched;
 	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
 		if (drm_sched_entity_add_dependency_cb(entity))
 			return NULL;
@@ -569,11 +571,23 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = sched_job->sched;
-	bool first = false;
+	struct drm_sched_rq *rq = entity->rq;
+	bool first = false, reschedule, idle;
 
-	trace_drm_sched_job(sched_job, entity);
+	idle = entity->last_scheduled == NULL ||
+		dma_fence_is_signaled(entity->last_scheduled);
+	first = spsc_queue_count(&entity->job_queue) == 0;
+	reschedule = idle && first && (entity->num_rq_list > 1);
 
+	if (reschedule) {
+		rq = drm_sched_entity_get_free_sched(entity);
+		spin_lock(&entity->rq_lock);
+		drm_sched_rq_remove_entity(entity->rq, entity);
+		entity->rq = rq;
+		spin_unlock(&entity->rq_lock);
+	}
+
+	trace_drm_sched_job(sched_job, entity);
 	atomic_inc(&entity->rq->sched->num_jobs);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
@@ -588,7 +602,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 		}
 		drm_sched_rq_add_entity(entity->rq, entity);
 		spin_unlock(&entity->rq_lock);
-		drm_sched_wakeup(sched);
+		drm_sched_wakeup(entity->rq->sched);
 	}
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
-- 
2.14.3

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

* Re: [PATCH 1/4] drm/scheduler: add a list of run queues to the entity
       [not found] ` <20180801082002.20696-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2018-08-01  8:20   ` [PATCH 2/4] drm/scheduler: add counter for total jobs in scheduler Nayan Deshmukh
  2018-08-01  8:20   ` [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2 Nayan Deshmukh
@ 2018-08-01  9:35   ` Christian König
  2 siblings, 0 replies; 15+ messages in thread
From: Christian König @ 2018-08-01  9:35 UTC (permalink / raw)
  To: Nayan Deshmukh, dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: alexdeucher-Re5JQEeQqe8AvxtiuMwx3w, eric-WhKQ6XTQaPysTnJN9+BGXg,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	l.stach-bIcnvbaLZ9MEGnE8C9+IrQ

On 01.08.2018 at 10:19, Nayan Deshmukh wrote:
> These are the potential run queues on which the jobs from this
> entity can be scheduled. We will use this to do load balancing.
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>

Reviewed-by: Christian König <christian.koenig@amd.com> for the whole 
series.

I also just pushed them into our internal branch for upstreaming.

Thanks for all the work,
Christian.

> ---
>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 8 ++++++++
>   include/drm/gpu_scheduler.h               | 7 ++++++-
>   2 files changed, 14 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 3f2fc5e8242a..a3eacc35cf98 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -179,6 +179,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>   			  unsigned int num_rq_list,
>   			  atomic_t *guilty)
>   {
> +	int i;
> +
>   	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
>   		return -EINVAL;
>   
> @@ -186,6 +188,11 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>   	INIT_LIST_HEAD(&entity->list);
>   	entity->rq = rq_list[0];
>   	entity->guilty = guilty;
> +	entity->num_rq_list = num_rq_list;
> +	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
> +				GFP_KERNEL);
> +	for (i = 0; i < num_rq_list; ++i)
> +		entity->rq_list[i] = rq_list[i];
>   	entity->last_scheduled = NULL;
>   
>   	spin_lock_init(&entity->rq_lock);
> @@ -363,6 +370,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
>   
>   	dma_fence_put(entity->last_scheduled);
>   	entity->last_scheduled = NULL;
> +	kfree(entity->rq_list);
>   }
>   EXPORT_SYMBOL(drm_sched_entity_fini);
>   
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index 091b9afcd184..a60896222a3e 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -50,7 +50,10 @@ enum drm_sched_priority {
>    *
>    * @list: used to append this struct to the list of entities in the
>    *        runqueue.
> - * @rq: runqueue to which this entity belongs.
> + * @rq: runqueue on which this entity is currently scheduled.
> + * @rq_list: a list of run queues on which jobs from this entity can
> + *           be scheduled
> + * @num_rq_list: number of run queues in the rq_list
>    * @rq_lock: lock to modify the runqueue to which this entity belongs.
>    * @job_queue: the list of jobs of this entity.
>    * @fence_seq: a linearly increasing seqno incremented with each
> @@ -74,6 +77,8 @@ enum drm_sched_priority {
>   struct drm_sched_entity {
>   	struct list_head		list;
>   	struct drm_sched_rq		*rq;
> +	struct drm_sched_rq		**rq_list;
> +	unsigned int                    num_rq_list;
>   	spinlock_t			rq_lock;
>   
>   	struct spsc_queue		job_queue;

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

* Re: [PATCH 2/4] drm/scheduler: add counter for total jobs in scheduler
  2018-08-01 13:15       ` Huang Rui
@ 2018-08-01 13:06         ` Christian König
  2018-08-02  2:57           ` Huang Rui
  0 siblings, 1 reply; 15+ messages in thread
From: Christian König @ 2018-08-01 13:06 UTC (permalink / raw)
  To: Huang Rui, Nayan Deshmukh
  Cc: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	christian.koenig-5C7GfCeVMHo

Yeah, I've actually added one before pushing it to amd-staging-drm-next.

But thanks for the reminder, wanted to note that to Nayan as well :)

Christian.

On 01.08.2018 at 15:15, Huang Rui wrote:
> On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:
>
> This should need a commmit message.
>
> Thanks,
> Ray
>
>> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
>> ---
>>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
>>   include/drm/gpu_scheduler.h               | 2 ++
>>   2 files changed, 5 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>> index a3eacc35cf98..375f6f7f6a93 100644
>> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>> @@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
>>   
>>   	trace_drm_sched_job(sched_job, entity);
>>   
>> +	atomic_inc(&entity->rq->sched->num_jobs);
>>   	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
>>   
>>   	/* first job wakes up scheduler */
>> @@ -836,6 +837,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
>>   
>>   	dma_fence_get(&s_fence->finished);
>>   	atomic_dec(&sched->hw_rq_count);
>> +	atomic_dec(&sched->num_jobs);
>>   	drm_sched_fence_finished(s_fence);
>>   
>>   	trace_drm_sched_process_job(s_fence);
>> @@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>>   	INIT_LIST_HEAD(&sched->ring_mirror_list);
>>   	spin_lock_init(&sched->job_list_lock);
>>   	atomic_set(&sched->hw_rq_count, 0);
>> +	atomic_set(&sched->num_jobs, 0);
>>   	atomic64_set(&sched->job_id_count, 0);
>>   
>>   	/* Each scheduler will run on a seperate kernel thread */
>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>> index a60896222a3e..89881ce974a5 100644
>> --- a/include/drm/gpu_scheduler.h
>> +++ b/include/drm/gpu_scheduler.h
>> @@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
>>    * @job_list_lock: lock to protect the ring_mirror_list.
>>    * @hang_limit: once the hangs by a job crosses this limit then it is marked
>>    *              guilty and it will be considered for scheduling further.
>> + * @num_jobs: the number of jobs in queue in the scheduler
>>    *
>>    * One scheduler is implemented for each hardware ring.
>>    */
>> @@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
>>   	struct list_head		ring_mirror_list;
>>   	spinlock_t			job_list_lock;
>>   	int				hang_limit;
>> +	atomic_t                        num_jobs;
>>   };
>>   
>>   int drm_sched_init(struct drm_gpu_scheduler *sched,
>> -- 
>> 2.14.3
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

* Re: [PATCH 2/4] drm/scheduler: add counter for total jobs in scheduler
       [not found]     ` <20180801082002.20696-2-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2018-08-01 13:15       ` Huang Rui
  2018-08-01 13:06         ` Christian König
  0 siblings, 1 reply; 15+ messages in thread
From: Huang Rui @ 2018-08-01 13:15 UTC (permalink / raw)
  To: Nayan Deshmukh
  Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg,
	dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	alexdeucher-Re5JQEeQqe8AvxtiuMwx3w, christian.koenig-5C7GfCeVMHo,
	l.stach-bIcnvbaLZ9MEGnE8C9+IrQ

On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:

This needs a commit message.

Thanks,
Ray

> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> ---
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
>  include/drm/gpu_scheduler.h               | 2 ++
>  2 files changed, 5 insertions(+)
> 
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index a3eacc35cf98..375f6f7f6a93 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
>  
>  	trace_drm_sched_job(sched_job, entity);
>  
> +	atomic_inc(&entity->rq->sched->num_jobs);
>  	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
>  
>  	/* first job wakes up scheduler */
> @@ -836,6 +837,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
>  
>  	dma_fence_get(&s_fence->finished);
>  	atomic_dec(&sched->hw_rq_count);
> +	atomic_dec(&sched->num_jobs);
>  	drm_sched_fence_finished(s_fence);
>  
>  	trace_drm_sched_process_job(s_fence);
> @@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>  	INIT_LIST_HEAD(&sched->ring_mirror_list);
>  	spin_lock_init(&sched->job_list_lock);
>  	atomic_set(&sched->hw_rq_count, 0);
> +	atomic_set(&sched->num_jobs, 0);
>  	atomic64_set(&sched->job_id_count, 0);
>  
>  	/* Each scheduler will run on a seperate kernel thread */
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index a60896222a3e..89881ce974a5 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
>   * @job_list_lock: lock to protect the ring_mirror_list.
>   * @hang_limit: once the hangs by a job crosses this limit then it is marked
>   *              guilty and it will be considered for scheduling further.
> + * @num_jobs: the number of jobs in queue in the scheduler
>   *
>   * One scheduler is implemented for each hardware ring.
>   */
> @@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
>  	struct list_head		ring_mirror_list;
>  	spinlock_t			job_list_lock;
>  	int				hang_limit;
> +	atomic_t                        num_jobs;
>  };
>  
>  int drm_sched_init(struct drm_gpu_scheduler *sched,
> -- 
> 2.14.3
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

* Re: [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2
  2018-08-01  8:20   ` [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2 Nayan Deshmukh
@ 2018-08-01 15:35     ` Andrey Grodzovsky
  2018-08-01 16:06       ` Nayan Deshmukh
  0 siblings, 1 reply; 15+ messages in thread
From: Andrey Grodzovsky @ 2018-08-01 15:35 UTC (permalink / raw)
  To: Nayan Deshmukh, dri-devel; +Cc: christian.koenig, amd-gfx

Clarification question - if the run queues belong to different
schedulers they effectively point to different rings; that means we
allow moving (rescheduling) a drm_sched_entity from one ring to
another. I assume that was the idea in the first place: you have a set
of HW rings and you can utilize any of them for your jobs (like compute
rings). Correct?

Andrey


On 08/01/2018 04:20 AM, Nayan Deshmukh wrote:
> The function selects the run queue from the rq_list with the
> least load. The load is decided by the number of jobs in a
> scheduler.
>
> v2: avoid using atomic read twice consecutively, instead store
>      it locally
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> ---
>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 25 +++++++++++++++++++++++++
>   1 file changed, 25 insertions(+)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 375f6f7f6a93..fb4e542660b0 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -255,6 +255,31 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
>   	return true;
>   }
>   
> +/**
> + * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
> + *
> + * @entity: scheduler entity
> + *
> + * Return the pointer to the rq with least load.
> + */
> +static struct drm_sched_rq *
> +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
> +{
> +	struct drm_sched_rq *rq = NULL;
> +	unsigned int min_jobs = UINT_MAX, num_jobs;
> +	int i;
> +
> +	for (i = 0; i < entity->num_rq_list; ++i) {
> +		num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
> +		if (num_jobs < min_jobs) {
> +			min_jobs = num_jobs;
> +			rq = entity->rq_list[i];
> +		}
> +	}
> +
> +	return rq;
> +}
> +
>   static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
>   				    struct dma_fence_cb *cb)
>   {

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

* Re: [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2
  2018-08-01 15:35     ` Andrey Grodzovsky
@ 2018-08-01 16:06       ` Nayan Deshmukh
       [not found]         ` <CAFd4ddx8D2iKquRu4YVh1gnRMpLFgWf3CBPdk5SD-nJ8dNXEPQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 15+ messages in thread
From: Nayan Deshmukh @ 2018-08-01 16:06 UTC (permalink / raw)
  To: Andrey Grodzovsky; +Cc: amd-gfx, Maling list - DRI developers, christian.koenig


Yes, that is correct.

Nayan

On Wed, Aug 1, 2018, 9:05 PM Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
wrote:

> Clarification question -  if the run queues belong to different
> schedulers they effectively point to different rings,
>
> it means we allow to move (reschedule) a drm_sched_entity from one ring
> to another - i assume that the idea int the first place, that
>
> you have a set of HW rings and you can utilize any of them for your jobs
> (like compute rings). Correct ?
>
> Andrey
>
>
> On 08/01/2018 04:20 AM, Nayan Deshmukh wrote:
> > The function selects the run queue from the rq_list with the
> > least load. The load is decided by the number of jobs in a
> > scheduler.
> >
> > v2: avoid using atomic read twice consecutively, instead store
> >      it locally
> >
> > Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> > ---
> >   drivers/gpu/drm/scheduler/gpu_scheduler.c | 25
> +++++++++++++++++++++++++
> >   1 file changed, 25 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > index 375f6f7f6a93..fb4e542660b0 100644
> > --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > @@ -255,6 +255,31 @@ static bool drm_sched_entity_is_ready(struct
> drm_sched_entity *entity)
> >       return true;
> >   }
> >
> > +/**
> > + * drm_sched_entity_get_free_sched - Get the rq from rq_list with least
> load
> > + *
> > + * @entity: scheduler entity
> > + *
> > + * Return the pointer to the rq with least load.
> > + */
> > +static struct drm_sched_rq *
> > +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
> > +{
> > +     struct drm_sched_rq *rq = NULL;
> > +     unsigned int min_jobs = UINT_MAX, num_jobs;
> > +     int i;
> > +
> > +     for (i = 0; i < entity->num_rq_list; ++i) {
> > +             num_jobs =
> atomic_read(&entity->rq_list[i]->sched->num_jobs);
> > +             if (num_jobs < min_jobs) {
> > +                     min_jobs = num_jobs;
> > +                     rq = entity->rq_list[i];
> > +             }
> > +     }
> > +
> > +     return rq;
> > +}
> > +
> >   static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
> >                                   struct dma_fence_cb *cb)
> >   {
>
>

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

* Re: [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2
       [not found]         ` <CAFd4ddx8D2iKquRu4YVh1gnRMpLFgWf3CBPdk5SD-nJ8dNXEPQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-08-01 17:51           ` Andrey Grodzovsky
  2018-08-02  2:51           ` Zhou, David(ChunMing)
  1 sibling, 0 replies; 15+ messages in thread
From: Andrey Grodzovsky @ 2018-08-01 17:51 UTC (permalink / raw)
  To: Nayan Deshmukh
  Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg, Maling list - DRI developers,
	alexdeucher-Re5JQEeQqe8AvxtiuMwx3w, christian.koenig-5C7GfCeVMHo,
	l.stach-bIcnvbaLZ9MEGnE8C9+IrQ


Series is Acked-by: Andrey Grodzovsky <andrey.grodzovsky-5C7GfCeVMHo@public.gmane.org>

Andrey


On 08/01/2018 12:06 PM, Nayan Deshmukh wrote:
> Yes, that is correct.
>
> Nayan
>
> On Wed, Aug 1, 2018, 9:05 PM Andrey Grodzovsky 
> <Andrey.Grodzovsky-5C7GfCeVMHo@public.gmane.org <mailto:Andrey.Grodzovsky-5C7GfCeVMHo@public.gmane.org>> wrote:
>
>     Clarification question -  if the run queues belong to different
>     schedulers they effectively point to different rings,
>
>     it means we allow to move (reschedule) a drm_sched_entity from one
>     ring
>     to another - i assume that the idea int the first place, that
>
>     you have a set of HW rings and you can utilize any of them for
>     your jobs
>     (like compute rings). Correct ?
>
>     Andrey
>
>
>     On 08/01/2018 04:20 AM, Nayan Deshmukh wrote:
>     > The function selects the run queue from the rq_list with the
>     > least load. The load is decided by the number of jobs in a
>     > scheduler.
>     >
>     > v2: avoid using atomic read twice consecutively, instead store
>     >      it locally
>     >
>     > Signed-off-by: Nayan Deshmukh <nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org
>     <mailto:nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>>
>     > ---
>     >   drivers/gpu/drm/scheduler/gpu_scheduler.c | 25
>     +++++++++++++++++++++++++
>     >   1 file changed, 25 insertions(+)
>     >
>     > diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>     b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>     > index 375f6f7f6a93..fb4e542660b0 100644
>     > --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>     > +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>     > @@ -255,6 +255,31 @@ static bool
>     drm_sched_entity_is_ready(struct drm_sched_entity *entity)
>     >       return true;
>     >   }
>     >
>     > +/**
>     > + * drm_sched_entity_get_free_sched - Get the rq from rq_list
>     with least load
>     > + *
>     > + * @entity: scheduler entity
>     > + *
>     > + * Return the pointer to the rq with least load.
>     > + */
>     > +static struct drm_sched_rq *
>     > +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
>     > +{
>     > +     struct drm_sched_rq *rq = NULL;
>     > +     unsigned int min_jobs = UINT_MAX, num_jobs;
>     > +     int i;
>     > +
>     > +     for (i = 0; i < entity->num_rq_list; ++i) {
>     > +             num_jobs =
>     atomic_read(&entity->rq_list[i]->sched->num_jobs);
>     > +             if (num_jobs < min_jobs) {
>     > +                     min_jobs = num_jobs;
>     > +                     rq = entity->rq_list[i];
>     > +             }
>     > +     }
>     > +
>     > +     return rq;
>     > +}
>     > +
>     >   static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
>     >                                   struct dma_fence_cb *cb)
>     >   {
>


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

* RE: [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2
       [not found]         ` <CAFd4ddx8D2iKquRu4YVh1gnRMpLFgWf3CBPdk5SD-nJ8dNXEPQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2018-08-01 17:51           ` Andrey Grodzovsky
@ 2018-08-02  2:51           ` Zhou, David(ChunMing)
  2018-08-02  6:01             ` Nayan Deshmukh
  1 sibling, 1 reply; 15+ messages in thread
From: Zhou, David(ChunMing) @ 2018-08-02  2:51 UTC (permalink / raw)
  To: Nayan Deshmukh, Grodzovsky, Andrey
  Cc: Maling list - DRI developers,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Koenig, Christian


Another big question:
I agree the general idea of balancing scheduler load within the same ring family is good.
But when jobs from the same entity run on different schedulers, the later job could complete ahead of the earlier one, right?
That would break the fence design: a later fence must be signaled after an earlier fence in the same fence context.

Anything I missed?

Regards,
David Zhou

From: dri-devel <dri-devel-bounces@lists.freedesktop.org> On Behalf Of Nayan Deshmukh
Sent: Thursday, August 02, 2018 12:07 AM
To: Grodzovsky, Andrey <Andrey.Grodzovsky@amd.com>
Cc: amd-gfx@lists.freedesktop.org; Maling list - DRI developers <dri-devel@lists.freedesktop.org>; Koenig, Christian <Christian.Koenig@amd.com>
Subject: Re: [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2

Yes, that is correct.

Nayan

On Wed, Aug 1, 2018, 9:05 PM Andrey Grodzovsky <Andrey.Grodzovsky@amd.com<mailto:Andrey.Grodzovsky@amd.com>> wrote:
Clarification question -  if the run queues belong to different
schedulers they effectively point to different rings,

it means we allow to move (reschedule) a drm_sched_entity from one ring
to another - i assume that the idea int the first place, that

you have a set of HW rings and you can utilize any of them for your jobs
(like compute rings). Correct ?

Andrey


On 08/01/2018 04:20 AM, Nayan Deshmukh wrote:
> The function selects the run queue from the rq_list with the
> least load. The load is decided by the number of jobs in a
> scheduler.
>
> v2: avoid using atomic read twice consecutively, instead store
>      it locally
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com<mailto:nayan26deshmukh@gmail.com>>
> ---
>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 25 +++++++++++++++++++++++++
>   1 file changed, 25 insertions(+)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 375f6f7f6a93..fb4e542660b0 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -255,6 +255,31 @@ static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
>       return true;
>   }
>
> +/**
> + * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
> + *
> + * @entity: scheduler entity
> + *
> + * Return the pointer to the rq with least load.
> + */
> +static struct drm_sched_rq *
> +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
> +{
> +     struct drm_sched_rq *rq = NULL;
> +     unsigned int min_jobs = UINT_MAX, num_jobs;
> +     int i;
> +
> +     for (i = 0; i < entity->num_rq_list; ++i) {
> +             num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
> +             if (num_jobs < min_jobs) {
> +                     min_jobs = num_jobs;
> +                     rq = entity->rq_list[i];
> +             }
> +     }
> +
> +     return rq;
> +}
> +
>   static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
>                                   struct dma_fence_cb *cb)
>   {

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

* Re: [PATCH 2/4] drm/scheduler: add counter for total jobs in scheduler
  2018-08-01 13:06         ` Christian König
@ 2018-08-02  2:57           ` Huang Rui
  2018-08-02  6:11             ` Nayan Deshmukh
  0 siblings, 1 reply; 15+ messages in thread
From: Huang Rui @ 2018-08-02  2:57 UTC (permalink / raw)
  To: Koenig, Christian; +Cc: Nayan Deshmukh, dri-devel, amd-gfx

On Wed, Aug 01, 2018 at 09:06:29PM +0800, Christian König wrote:
> Yeah, I've actually added one before pushing it to amd-staging-drm-next.
> 
> But thanks for the reminder, wanted to note that to Nayan as well :)
> 

Yes, a soft reminder to Nayan. Thanks Nayan for the contribution. :-)

Thanks,
Ray

> Christian.
> 
> Am 01.08.2018 um 15:15 schrieb Huang Rui:
> > On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:
> >
> > This should need a commmit message.
> >
> > Thanks,
> > Ray
> >
> >> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> >> ---
> >>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
> >>   include/drm/gpu_scheduler.h               | 2 ++
> >>   2 files changed, 5 insertions(+)
> >>
> >> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> >> index a3eacc35cf98..375f6f7f6a93 100644
> >> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> >> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> >> @@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
> >>   
> >>   	trace_drm_sched_job(sched_job, entity);
> >>   
> >> +	atomic_inc(&entity->rq->sched->num_jobs);
> >>   	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
> >>   
> >>   	/* first job wakes up scheduler */
> >> @@ -836,6 +837,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
> >>   
> >>   	dma_fence_get(&s_fence->finished);
> >>   	atomic_dec(&sched->hw_rq_count);
> >> +	atomic_dec(&sched->num_jobs);
> >>   	drm_sched_fence_finished(s_fence);
> >>   
> >>   	trace_drm_sched_process_job(s_fence);
> >> @@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
> >>   	INIT_LIST_HEAD(&sched->ring_mirror_list);
> >>   	spin_lock_init(&sched->job_list_lock);
> >>   	atomic_set(&sched->hw_rq_count, 0);
> >> +	atomic_set(&sched->num_jobs, 0);
> >>   	atomic64_set(&sched->job_id_count, 0);
> >>   
> >>   	/* Each scheduler will run on a seperate kernel thread */
> >> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> >> index a60896222a3e..89881ce974a5 100644
> >> --- a/include/drm/gpu_scheduler.h
> >> +++ b/include/drm/gpu_scheduler.h
> >> @@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
> >>    * @job_list_lock: lock to protect the ring_mirror_list.
> >>    * @hang_limit: once the hangs by a job crosses this limit then it is marked
> >>    *              guilty and it will be considered for scheduling further.
> >> + * @num_jobs: the number of jobs in queue in the scheduler
> >>    *
> >>    * One scheduler is implemented for each hardware ring.
> >>    */
> >> @@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
> >>   	struct list_head		ring_mirror_list;
> >>   	spinlock_t			job_list_lock;
> >>   	int				hang_limit;
> >> +	atomic_t                        num_jobs;
> >>   };
> >>   
> >>   int drm_sched_init(struct drm_gpu_scheduler *sched,
> >> -- 
> >> 2.14.3
> >>
> >> _______________________________________________
> >> amd-gfx mailing list
> >> amd-gfx@lists.freedesktop.org
> >> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> > _______________________________________________
> > dri-devel mailing list
> > dri-devel@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/dri-devel
> 
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

* Re: [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2
  2018-08-02  2:51           ` Zhou, David(ChunMing)
@ 2018-08-02  6:01             ` Nayan Deshmukh
       [not found]               ` <CAFd4ddyf=EhJ7pmzq3sEGa6U1sQ7Ga7fUH+sW+VKeBxJABjnKQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  0 siblings, 1 reply; 15+ messages in thread
From: Nayan Deshmukh @ 2018-08-02  6:01 UTC (permalink / raw)
  To: David1.Zhou; +Cc: Maling list - DRI developers, amd-gfx, Christian König


Hi David,

On Thu, Aug 2, 2018 at 8:22 AM Zhou, David(ChunMing) <David1.Zhou@amd.com>
wrote:

> Another big question:
>
> I agree the general idea is good to balance scheduler load for same ring
> family.
>
> But, when same entity job run on different scheduler, that means the later
> job could be completed ahead of front, Right?
>
Really good question. To avoid this scenario we do not move an entity
which already has a job in the hardware queue. We only move entities
whose last_scheduled fence has been signalled, which means that the
last submitted job of this entity has finished executing.

Moving an entity which already has a job in the hardware queue would
also hinder the dependency optimization that we are using and hence
would not lead to better performance anyway. I have talked about the
issue in more detail here [1]. Please let me know if you have any more
doubts regarding this.

Cheers,
Nayan

[1]
http://ndesh26.github.io/gsoc/2018/06/14/GSoC-Update-A-Curious-Case-of-Dependency-Handling/
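
For reference, the gate used before rebalancing is essentially the
check below; this is a condensed sketch of the logic already added to
drm_sched_entity_push_job() in patch 4, not new behaviour:

	bool idle, first, reschedule;
	struct drm_sched_rq *rq;

	/* Only move the entity when it is idle (its last scheduled job has
	 * completed, or it never ran) and its software queue is empty, so no
	 * in-flight or queued job can be reordered by the move.
	 */
	idle = entity->last_scheduled == NULL ||
	       dma_fence_is_signaled(entity->last_scheduled);
	first = spsc_queue_count(&entity->job_queue) == 0;
	reschedule = idle && first && (entity->num_rq_list > 1);

	if (reschedule) {
		rq = drm_sched_entity_get_free_sched(entity);
		spin_lock(&entity->rq_lock);
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
		spin_unlock(&entity->rq_lock);
	}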

That will break fence design, later fence must be signaled after front
> fence in same fence context.
>
>
>
> Anything I missed?
>
>
>
> Regards,
>
> David Zhou
>
>
>
> *From:* dri-devel <dri-devel-bounces@lists.freedesktop.org> *On Behalf Of
> *Nayan Deshmukh
> *Sent:* Thursday, August 02, 2018 12:07 AM
> *To:* Grodzovsky, Andrey <Andrey.Grodzovsky@amd.com>
> *Cc:* amd-gfx@lists.freedesktop.org; Maling list - DRI developers <
> dri-devel@lists.freedesktop.org>; Koenig, Christian <
> Christian.Koenig@amd.com>
> *Subject:* Re: [PATCH 3/4] drm/scheduler: add new function to get least
> loaded sched v2
>
>
>
> Yes, that is correct.
>
>
>
> Nayan
>
>
>
> On Wed, Aug 1, 2018, 9:05 PM Andrey Grodzovsky <Andrey.Grodzovsky@amd.com>
> wrote:
>
> Clarification question -  if the run queues belong to different
> schedulers they effectively point to different rings,
>
> it means we allow to move (reschedule) a drm_sched_entity from one ring
> to another - i assume that the idea int the first place, that
>
> you have a set of HW rings and you can utilize any of them for your jobs
> (like compute rings). Correct ?
>
> Andrey
>
>
> On 08/01/2018 04:20 AM, Nayan Deshmukh wrote:
> > The function selects the run queue from the rq_list with the
> > least load. The load is decided by the number of jobs in a
> > scheduler.
> >
> > v2: avoid using atomic read twice consecutively, instead store
> >      it locally
> >
> > Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> > ---
> >   drivers/gpu/drm/scheduler/gpu_scheduler.c | 25
> +++++++++++++++++++++++++
> >   1 file changed, 25 insertions(+)
> >
> > diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > index 375f6f7f6a93..fb4e542660b0 100644
> > --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > @@ -255,6 +255,31 @@ static bool drm_sched_entity_is_ready(struct
> drm_sched_entity *entity)
> >       return true;
> >   }
> >
> > +/**
> > + * drm_sched_entity_get_free_sched - Get the rq from rq_list with least
> load
> > + *
> > + * @entity: scheduler entity
> > + *
> > + * Return the pointer to the rq with least load.
> > + */
> > +static struct drm_sched_rq *
> > +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
> > +{
> > +     struct drm_sched_rq *rq = NULL;
> > +     unsigned int min_jobs = UINT_MAX, num_jobs;
> > +     int i;
> > +
> > +     for (i = 0; i < entity->num_rq_list; ++i) {
> > +             num_jobs =
> atomic_read(&entity->rq_list[i]->sched->num_jobs);
> > +             if (num_jobs < min_jobs) {
> > +                     min_jobs = num_jobs;
> > +                     rq = entity->rq_list[i];
> > +             }
> > +     }
> > +
> > +     return rq;
> > +}
> > +
> >   static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
> >                                   struct dma_fence_cb *cb)
> >   {
>
>

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

* Re: [PATCH 2/4] drm/scheduler: add counter for total jobs in scheduler
  2018-08-02  2:57           ` Huang Rui
@ 2018-08-02  6:11             ` Nayan Deshmukh
  0 siblings, 0 replies; 15+ messages in thread
From: Nayan Deshmukh @ 2018-08-02  6:11 UTC (permalink / raw)
  To: ray.huang-5C7GfCeVMHo
  Cc: Maling list - DRI developers, Christian König,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW


Thanks for the reminders. I felt that the commit header was sufficient,
but I guess it didn't cover the motivation for the change.

Thanks Christian for adding the commit message.

Regards,
Nayan

On Thu, Aug 2, 2018 at 8:16 AM Huang Rui <ray.huang-5C7GfCeVMHo@public.gmane.org> wrote:

> On Wed, Aug 01, 2018 at 09:06:29PM +0800, Christian König wrote:
> > Yeah, I've actually added one before pushing it to amd-staging-drm-next.
> >
> > But thanks for the reminder, wanted to note that to Nayan as well :)
> >
>
> Yes, a soft reminder to Nayan. Thanks Nayan for the contribution. :-)
>
> Thanks,
> Ray
>
> > Christian.
> >
> > Am 01.08.2018 um 15:15 schrieb Huang Rui:
> > > On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:
> > >
> > > This should need a commmit message.
> > >
> > > Thanks,
> > > Ray
> > >
> > >> Signed-off-by: Nayan Deshmukh <nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
> > >> ---
> > >>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
> > >>   include/drm/gpu_scheduler.h               | 2 ++
> > >>   2 files changed, 5 insertions(+)
> > >>
> > >> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > >> index a3eacc35cf98..375f6f7f6a93 100644
> > >> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > >> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > >> @@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct
> drm_sched_job *sched_job,
> > >>
> > >>    trace_drm_sched_job(sched_job, entity);
> > >>
> > >> +  atomic_inc(&entity->rq->sched->num_jobs);
> > >>    first = spsc_queue_push(&entity->job_queue,
> &sched_job->queue_node);
> > >>
> > >>    /* first job wakes up scheduler */
> > >> @@ -836,6 +837,7 @@ static void drm_sched_process_job(struct
> dma_fence *f, struct dma_fence_cb *cb)
> > >>
> > >>    dma_fence_get(&s_fence->finished);
> > >>    atomic_dec(&sched->hw_rq_count);
> > >> +  atomic_dec(&sched->num_jobs);
> > >>    drm_sched_fence_finished(s_fence);
> > >>
> > >>    trace_drm_sched_process_job(s_fence);
> > >> @@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler
> *sched,
> > >>    INIT_LIST_HEAD(&sched->ring_mirror_list);
> > >>    spin_lock_init(&sched->job_list_lock);
> > >>    atomic_set(&sched->hw_rq_count, 0);
> > >> +  atomic_set(&sched->num_jobs, 0);
> > >>    atomic64_set(&sched->job_id_count, 0);
> > >>
> > >>    /* Each scheduler will run on a seperate kernel thread */
> > >> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> > >> index a60896222a3e..89881ce974a5 100644
> > >> --- a/include/drm/gpu_scheduler.h
> > >> +++ b/include/drm/gpu_scheduler.h
> > >> @@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
> > >>    * @job_list_lock: lock to protect the ring_mirror_list.
> > >>    * @hang_limit: once the hangs by a job crosses this limit then it
> is marked
> > >>    *              guilty and it will be considered for scheduling
> further.
> > >> + * @num_jobs: the number of jobs in queue in the scheduler
> > >>    *
> > >>    * One scheduler is implemented for each hardware ring.
> > >>    */
> > >> @@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
> > >>    struct list_head                ring_mirror_list;
> > >>    spinlock_t                      job_list_lock;
> > >>    int                             hang_limit;
> > >> +  atomic_t                        num_jobs;
> > >>   };
> > >>
> > >>   int drm_sched_init(struct drm_gpu_scheduler *sched,
> > >> --
> > >> 2.14.3
> > >>
> > >> _______________________________________________
> > >> amd-gfx mailing list
> > >> amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> > >> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> > > _______________________________________________
> > > dri-devel mailing list
> > > dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
> > > https://lists.freedesktop.org/mailman/listinfo/dri-devel
> >
>

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

* Re: [PATCH 3/4] drm/scheduler: add new function to get least loaded sched v2
       [not found]               ` <CAFd4ddyf=EhJ7pmzq3sEGa6U1sQ7Ga7fUH+sW+VKeBxJABjnKQ-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-08-02  6:42                 ` zhoucm1
  0 siblings, 0 replies; 15+ messages in thread
From: zhoucm1 @ 2018-08-02  6:42 UTC (permalink / raw)
  To: Nayan Deshmukh, David1.Zhou-5C7GfCeVMHo
  Cc: Andrey.Grodzovsky-5C7GfCeVMHo, Maling list - DRI developers,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW, Christian König


On 2018-08-02 14:01, Nayan Deshmukh wrote:
> Hi David,
>
> On Thu, Aug 2, 2018 at 8:22 AM Zhou, David(ChunMing) 
> <David1.Zhou-5C7GfCeVMHo@public.gmane.org <mailto:David1.Zhou-5C7GfCeVMHo@public.gmane.org>> wrote:
>
>     Another big question:
>
>     I agree the general idea is good to balance scheduler load for
>     same ring family.
>
>     But, when same entity job run on different scheduler, that means
>     the later job could be completed ahead of front, Right?
>
> Really good question. To avoid this senario we do not move an entity 
> which already has a job in the hardware queue. We only move entities 
> whose last_scheduled fence has been signalled which means that the 
> last submitted job of this entity has finished executing.
Good handling; I missed that when reviewing them.

Cheers,
David Zhou
>
> Moving an entity which already has a job in the hardware queue will 
> hinder the dependency optimization that we are using and hence will 
> not anyway lead to a better performance. I have talked about the issue 
> in more detail here [1]. Please let me know if you have any more 
> doubts regarding this.
>
> Cheers,
> Nayan
>
> [1] 
> http://ndesh26.github.io/gsoc/2018/06/14/GSoC-Update-A-Curious-Case-of-Dependency-Handling/
>
>     That will break fence design, later fence must be signaled after
>     front fence in same fence context.
>
>     Anything I missed?
>
>     Regards,
>
>     David Zhou
>
>     *From:* dri-devel <dri-devel-bounces-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>     <mailto:dri-devel-bounces-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org>> *On Behalf Of
>     *Nayan Deshmukh
>     *Sent:* Thursday, August 02, 2018 12:07 AM
>     *To:* Grodzovsky, Andrey <Andrey.Grodzovsky-5C7GfCeVMHo@public.gmane.org
>     <mailto:Andrey.Grodzovsky-5C7GfCeVMHo@public.gmane.org>>
>     *Cc:* amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>     <mailto:amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org>; Maling list - DRI
>     developers <dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org
>     <mailto:dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW@public.gmane.org>>; Koenig, Christian
>     <Christian.Koenig-5C7GfCeVMHo@public.gmane.org <mailto:Christian.Koenig-5C7GfCeVMHo@public.gmane.org>>
>     *Subject:* Re: [PATCH 3/4] drm/scheduler: add new function to get
>     least loaded sched v2
>
>     Yes, that is correct.
>
>     Nayan
>
>     On Wed, Aug 1, 2018, 9:05 PM Andrey Grodzovsky
>     <Andrey.Grodzovsky-5C7GfCeVMHo@public.gmane.org <mailto:Andrey.Grodzovsky-5C7GfCeVMHo@public.gmane.org>> wrote:
>
>         Clarification question -  if the run queues belong to different
>         schedulers they effectively point to different rings,
>
>         it means we allow to move (reschedule) a drm_sched_entity from
>         one ring
>         to another - i assume that the idea int the first place, that
>
>         you have a set of HW rings and you can utilize any of them for
>         your jobs
>         (like compute rings). Correct ?
>
>         Andrey
>
>
>         On 08/01/2018 04:20 AM, Nayan Deshmukh wrote:
>         > The function selects the run queue from the rq_list with the
>         > least load. The load is decided by the number of jobs in a
>         > scheduler.
>         >
>         > v2: avoid using atomic read twice consecutively, instead store
>         >      it locally
>         >
>         > Signed-off-by: Nayan Deshmukh <nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org
>         <mailto:nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>>
>         > ---
>         >   drivers/gpu/drm/scheduler/gpu_scheduler.c | 25
>         +++++++++++++++++++++++++
>         >   1 file changed, 25 insertions(+)
>         >
>         > diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>         b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>         > index 375f6f7f6a93..fb4e542660b0 100644
>         > --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>         > +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>         > @@ -255,6 +255,31 @@ static bool
>         drm_sched_entity_is_ready(struct drm_sched_entity *entity)
>         >       return true;
>         >   }
>         >
>         > +/**
>         > + * drm_sched_entity_get_free_sched - Get the rq from
>         rq_list with least load
>         > + *
>         > + * @entity: scheduler entity
>         > + *
>         > + * Return the pointer to the rq with least load.
>         > + */
>         > +static struct drm_sched_rq *
>         > +drm_sched_entity_get_free_sched(struct drm_sched_entity
>         *entity)
>         > +{
>         > +     struct drm_sched_rq *rq = NULL;
>         > +     unsigned int min_jobs = UINT_MAX, num_jobs;
>         > +     int i;
>         > +
>         > +     for (i = 0; i < entity->num_rq_list; ++i) {
>         > +             num_jobs =
>         atomic_read(&entity->rq_list[i]->sched->num_jobs);
>         > +             if (num_jobs < min_jobs) {
>         > +                     min_jobs = num_jobs;
>         > +                     rq = entity->rq_list[i];
>         > +             }
>         > +     }
>         > +
>         > +     return rq;
>         > +}
>         > +
>         >   static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
>         >                                   struct dma_fence_cb *cb)
>         >   {
>


_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
