* [PATCH] drm/sched: Extend the documentation.
From: Eric Anholt @ 2018-04-04 22:32 UTC
  To: dri-devel
  Cc: linux-kernel, Lucas Stach, Alex Deucher, christian.koenig, Eric Anholt

These comments answer all the questions I had for myself when
implementing a driver using the GPU scheduler.

Signed-off-by: Eric Anholt <eric@anholt.net>
---
 include/drm/gpu_scheduler.h | 46 +++++++++++++++++++++++++++++++++----
 1 file changed, 42 insertions(+), 4 deletions(-)

diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index dfd54fb94e10..c053a32341bf 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -43,10 +43,12 @@ enum drm_sched_priority {
 };
 
 /**
- * A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their
- * job queues to corresponding hardware ring based on scheduling
- * policy.
+ * drm_sched_entity - A wrapper around a job queue (typically attached
+ * to the DRM file_priv).
+ *
+ * Entities will emit jobs in order to their corresponding hardware
+ * ring, and the scheduler will alternate between entities based on
+ * scheduling policy.
 */
 struct drm_sched_entity {
 	struct list_head		list;
@@ -78,7 +80,18 @@ struct drm_sched_rq {
 
 struct drm_sched_fence {
 	struct dma_fence		scheduled;
+
+	/* This fence is what will be signaled by the scheduler when
+	 * the job is completed.
+	 *
+	 * When setting up an out fence for the job, you should use
+	 * this, since it's available immediately upon
+	 * drm_sched_job_init(), and the fence returned by the driver
+	 * from run_job() won't be created until the dependencies have
+	 * resolved.
+	 */
 	struct dma_fence		finished;
+
 	struct dma_fence_cb		cb;
 	struct dma_fence		*parent;
 	struct drm_gpu_scheduler	*sched;
@@ -88,6 +101,13 @@ struct drm_sched_fence {
 
 struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
 
+/**
+ * drm_sched_job - A job to be run by an entity.
+ *
+ * A job is created by the driver using drm_sched_job_init(), and
+ * should call drm_sched_entity_push_job() once it wants the scheduler
+ * to schedule the job.
+ */
 struct drm_sched_job {
 	struct spsc_node		queue_node;
 	struct drm_gpu_scheduler	*sched;
@@ -112,10 +132,28 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
  * these functions should be implemented in driver side
 */
 struct drm_sched_backend_ops {
+	/* Called when the scheduler is considering scheduling this
+	 * job next, to get another struct dma_fence for this job to
+	 * block on.  Once it returns NULL, run_job() may be called.
+	 */
 	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
 					struct drm_sched_entity *s_entity);
+
+	/* Called to execute the job once all of the dependencies have
+	 * been resolved.  This may be called multiple times, if
+	 * timedout_job() has happened and drm_sched_job_recovery()
+	 * decides to try it again.
+	 */
 	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
+
+	/* Called when a job has taken too long to execute, to trigger
+	 * GPU recovery.
+	 */
 	void (*timedout_job)(struct drm_sched_job *sched_job);
+
+	/* Called once the job's finished fence has been signaled and
+	 * it's time to clean it up.
+	 */
 	void (*free_job)(struct drm_sched_job *sched_job);
 };
 
-- 
2.17.0

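The comments above describe the driver-side flow: create the job with
drm_sched_job_init(), use the finished fence as the out fence, then hand
the job to the scheduler with drm_sched_entity_push_job(). As a concrete
illustration, here is a rough sketch of that flow for a hypothetical
driver. All foo_* names are invented, and the calls assume the
drm_sched_job_init() and drm_sched_entity_push_job() prototypes in the
tree at the time of this patch.

/* Sketch of a driver submit path following the comments above.
 * struct foo_job is assumed to embed a struct drm_sched_job as its
 * "base" member and to keep a "done_fence" pointer for the out fence.
 */
static int foo_job_submit(struct foo_dev *foo, struct foo_file_priv *priv,
			  struct foo_job *job)
{
	int ret;

	ret = drm_sched_job_init(&job->base, &foo->sched,
				 &priv->sched_entity, priv);
	if (ret)
		return ret;

	/* The finished fence exists as soon as drm_sched_job_init()
	 * returns, so it can be installed as the job's out fence even
	 * though run_job() has not produced a hardware fence yet.
	 */
	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	drm_sched_entity_push_job(&job->base, &priv->sched_entity);

	return 0;
}

Once drm_sched_entity_push_job() returns, the scheduler owns the job and
the driver only hears about it again through the backend ops.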
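
The drm_sched_backend_ops comments at the end of the patch are the other
half of the contract. A minimal implementation might look like the sketch
below; the foo_* helpers are invented, and the semantics are taken from
the comments in the patch rather than from any real driver.

static inline struct foo_job *to_foo_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct foo_job, base);
}

static struct dma_fence *foo_job_dependency(struct drm_sched_job *sched_job,
					    struct drm_sched_entity *s_entity)
{
	/* Hand back one fence to wait on per call; returning NULL
	 * tells the scheduler the job is ready for run_job().
	 */
	return foo_job_pop_dependency(to_foo_job(sched_job));
}

static struct dma_fence *foo_job_run(struct drm_sched_job *sched_job)
{
	/* Kick the hardware and return a fence that signals when the
	 * job completes.  May be called again after GPU recovery.
	 */
	return foo_hw_submit(to_foo_job(sched_job));
}

static void foo_job_timedout(struct drm_sched_job *sched_job)
{
	/* The job ran too long: reset the GPU so that
	 * drm_sched_job_recovery() can resubmit the surviving jobs.
	 */
	foo_gpu_reset(to_foo_job(sched_job)->dev);
}

static void foo_job_free(struct drm_sched_job *sched_job)
{
	/* The finished fence has signaled; drop the driver's reference. */
	foo_job_put(to_foo_job(sched_job));
}

static const struct drm_sched_backend_ops foo_sched_ops = {
	.dependency	= foo_job_dependency,
	.run_job	= foo_job_run,
	.timedout_job	= foo_job_timedout,
	.free_job	= foo_job_free,
};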

* Re: [PATCH] drm/sched: Extend the documentation.
From: Daniel Vetter @ 2018-04-05  6:16 UTC
  To: Eric Anholt
  Cc: dri-devel, Alex Deucher, Christian König, Linux Kernel Mailing List

On Thu, Apr 5, 2018 at 12:32 AM, Eric Anholt <eric@anholt.net> wrote:
> These comments answer all the questions I had for myself when
> implementing a driver using the GPU scheduler.
>
> Signed-off-by: Eric Anholt <eric@anholt.net>

Pulling all these comments into the generated kerneldoc would be
awesome, maybe as a new "GPU Scheduler" chapter at the end of
drm-mm.rst? Would mean a bit of busywork to convert the existing raw
comments into proper kerneldoc. Also has the benefit that 0day will
complain when you forget to update the comment when editing the
function prototype - kerneldoc which isn't included anywhere in .rst
won't be checked automatically.
-Daniel

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch

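The conversion Daniel suggests amounts to reshaping the raw comments into
kernel-doc. As a sketch (the wording is illustrative, not final text),
the backend ops comments would become in-line member docs:

/**
 * struct drm_sched_backend_ops - scheduler callbacks implemented by the driver
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering
	 * scheduling this job next, to get another struct dma_fence
	 * for this job to block on.  Once it returns NULL, run_job()
	 * may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);
	...
};

and the header would then be pulled into the generated docs, where 0day
can check it, with a kernel-doc directive in whichever .rst file ends up
hosting the chapter:

.. kernel-doc:: include/drm/gpu_scheduler.h
   :internal: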

* Re: [PATCH] drm/sched: Extend the documentation.
From: Christian König @ 2018-04-05 12:33 UTC
  To: Eric Anholt, dri-devel; +Cc: linux-kernel, Lucas Stach, Alex Deucher

Am 05.04.2018 um 00:32 schrieb Eric Anholt:
> These comments answer all the questions I had for myself when
> implementing a driver using the GPU scheduler.
>
> Signed-off-by: Eric Anholt <eric@anholt.net>

Reviewed-by: Christian König <christian.koenig@amd.com>

Already pushed to amd-staging-drm-next as well.

Thanks,
Christian.


* Re: [PATCH] drm/sched: Extend the documentation.
From: Alex Deucher @ 2018-04-05 13:27 UTC
  To: Daniel Vetter
  Cc: Eric Anholt, Alex Deucher, Christian König, dri-devel,
	Linux Kernel Mailing List

On Thu, Apr 5, 2018 at 2:16 AM, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Thu, Apr 5, 2018 at 12:32 AM, Eric Anholt <eric@anholt.net> wrote:
>> These comments answer all the questions I had for myself when
>> implementing a driver using the GPU scheduler.
>>
>> Signed-off-by: Eric Anholt <eric@anholt.net>
>
> Pulling all these comments into the generated kerneldoc would be
> awesome, maybe as a new "GPU Scheduler" chapter at the end of
> drm-mm.rst? Would mean a bit of busywork to convert the existing raw
> comments into proper kerneldoc. Also has the benefit that 0day will
> complain when you forget to update the comment when editing the
> function prototype - kerneldoc which isn't included anywhere in .rst
> won't be checked automatically.

I was actually planning to do this myself, but Nayan wanted to do this
as prep work for his proposed GSoC project, so I was going to see how
far he got first.

Alex


* Re: [PATCH] drm/sched: Extend the documentation.
From: Daniel Vetter @ 2018-04-05 13:29 UTC
  To: Alex Deucher
  Cc: Eric Anholt, Alex Deucher, Christian König, dri-devel,
	Linux Kernel Mailing List

On Thu, Apr 5, 2018 at 3:27 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
> On Thu, Apr 5, 2018 at 2:16 AM, Daniel Vetter <daniel@ffwll.ch> wrote:
>> On Thu, Apr 5, 2018 at 12:32 AM, Eric Anholt <eric@anholt.net> wrote:
>>> These comments answer all the questions I had for myself when
>>> implementing a driver using the GPU scheduler.
>>>
>>> Signed-off-by: Eric Anholt <eric@anholt.net>
>>
>> Pulling all these comments into the generated kerneldoc would be
>> awesome, maybe as a new "GPU Scheduler" chapter at the end of
>> drm-mm.rst? Would mean a bit of busywork to convert the existing raw
>> comments into proper kerneldoc. Also has the benefit that 0day will
>> complain when you forget to update the comment when editing the
>> function prototype - kerneldoc which isn't included anywhere in .rst
>> won't be checked automatically.
>
> I was actually planning to do this myself, but Nayan wanted to do this
> as prep work for his proposed GSoC project, so I was going to see how
> far he got first.

Awesome. I'm also happy to help out with any kerneldoc questions and
best practices. Technically ofc no clue about the scheduler :-)

Cheers, Daniel

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch


* Re: [PATCH] drm/sched: Extend the documentation.
From: Nayan Deshmukh @ 2018-04-05 13:41 UTC
  To: Daniel Vetter
  Cc: Alex Deucher, Alex Deucher, Christian König, dri-devel,
	Linux Kernel Mailing List

On Thu, Apr 5, 2018 at 6:59 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Thu, Apr 5, 2018 at 3:27 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>> On Thu, Apr 5, 2018 at 2:16 AM, Daniel Vetter <daniel@ffwll.ch> wrote:
>>> On Thu, Apr 5, 2018 at 12:32 AM, Eric Anholt <eric@anholt.net> wrote:
>>>> These comments answer all the questions I had for myself when
>>>> implementing a driver using the GPU scheduler.
>>>>
>>>> Signed-off-by: Eric Anholt <eric@anholt.net>
>>>
>>> Pulling all these comments into the generated kerneldoc would be
>>> awesome, maybe as a new "GPU Scheduler" chapter at the end of
>>> drm-mm.rst? Would mean a bit of busywork to convert the existing raw
>>> comments into proper kerneldoc. Also has the benefit that 0day will
>>> complain when you forget to update the comment when editing the
>>> function prototype - kerneldoc which isn't included anywhere in .rst
>>> won't be checked automatically.
>>
>> I was actually planning to do this myself, but Nayan wanted to do this
>> as prep work for his proposed GSoC project, so I was going to see how
>> far he got first.

It is still on my TODO list. Just got a bit busy with my coursework. I
will try to look at it during the weekend.
>
> Awesome. I'm also happy to help out with any kerneldoc questions and
> best practices. Technically ofc no clue about the scheduler :-)
>
I was thinking of adding a different rst for the scheduler altogether. Would
it be better to add it in drm-mm.rst itself?


* Re: [PATCH] drm/sched: Extend the documentation.
From: Alex Deucher @ 2018-04-05 13:44 UTC
  To: Nayan Deshmukh
  Cc: Daniel Vetter, Alex Deucher, Christian König, dri-devel,
	Linux Kernel Mailing List

On Thu, Apr 5, 2018 at 9:41 AM, Nayan Deshmukh
<nayan26deshmukh@gmail.com> wrote:
> On Thu, Apr 5, 2018 at 6:59 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
>> On Thu, Apr 5, 2018 at 3:27 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>>> On Thu, Apr 5, 2018 at 2:16 AM, Daniel Vetter <daniel@ffwll.ch> wrote:
>>>> On Thu, Apr 5, 2018 at 12:32 AM, Eric Anholt <eric@anholt.net> wrote:
>>>>> These comments answer all the questions I had for myself when
>>>>> implementing a driver using the GPU scheduler.
>>>>>
>>>>> Signed-off-by: Eric Anholt <eric@anholt.net>
>>>>
>>>> Pulling all these comments into the generated kerneldoc would be
>>>> awesome, maybe as a new "GPU Scheduler" chapter at the end of
>>>> drm-mm.rst? Would mean a bit of busywork to convert the existing raw
>>>> comments into proper kerneldoc. Also has the benefit that 0day will
>>>> complain when you forget to update the comment when editing the
>>>> function prototype - kerneldoc which isn't included anywhere in .rst
>>>> won't be checked automatically.
>>>
>>> I was actually planning to do this myself, but Nayan wanted to do this
>>> as prep work for his proposed GSoC project, so I was going to see how
>>> far he got first.
>
> It is still on my TODO list. Just got a bit busy with my coursework. I
> will try to look at it during the weekend.

No worries.  Take your time.

>>
>> Awesome. I'm also happy to help out with any kerneldoc questions and
>> best practices. Technically ofc no clue about the scheduler :-)
>>
> I was thinking of adding a different rst for the scheduler altogether. Would
> it be better to add it in drm-mm.rst itself?

I had been planning to add a separate file too since it's a separate
entity.  Do whatever you think works best.

Alex

>
>> Cheers, Daniel
>>> Alex
>>>
>>>> -Daniel
>>>>
>>>>> ---
>>>>>  include/drm/gpu_scheduler.h | 46 +++++++++++++++++++++++++++++++++----
>>>>>  1 file changed, 42 insertions(+), 4 deletions(-)
>>>>>
>>>>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>>>>> index dfd54fb94e10..c053a32341bf 100644
>>>>> --- a/include/drm/gpu_scheduler.h
>>>>> +++ b/include/drm/gpu_scheduler.h
>>>>> @@ -43,10 +43,12 @@ enum drm_sched_priority {
>>>>>  };
>>>>>
>>>>>  /**
>>>>> - * A scheduler entity is a wrapper around a job queue or a group
>>>>> - * of other entities. Entities take turns emitting jobs from their
>>>>> - * job queues to corresponding hardware ring based on scheduling
>>>>> - * policy.
>>>>> + * drm_sched_entity - A wrapper around a job queue (typically attached
>>>>> + * to the DRM file_priv).
>>>>> + *
>>>>> + * Entities will emit jobs in order to their corresponding hardware
>>>>> + * ring, and the scheduler will alternate between entities based on
>>>>> + * scheduling policy.
>>>>>  */
>>>>>  struct drm_sched_entity {
>>>>>         struct list_head                list;
>>>>> @@ -78,7 +80,18 @@ struct drm_sched_rq {
>>>>>
>>>>>  struct drm_sched_fence {
>>>>>         struct dma_fence                scheduled;
>>>>> +
>>>>> +       /* This fence is what will be signaled by the scheduler when
>>>>> +        * the job is completed.
>>>>> +        *
>>>>> +        * When setting up an out fence for the job, you should use
>>>>> +        * this, since it's available immediately upon
>>>>> +        * drm_sched_job_init(), and the fence returned by the driver
>>>>> +        * from run_job() won't be created until the dependencies have
>>>>> +        * resolved.
>>>>> +        */
>>>>>         struct dma_fence                finished;
>>>>> +
>>>>>         struct dma_fence_cb             cb;
>>>>>         struct dma_fence                *parent;
>>>>>         struct drm_gpu_scheduler        *sched;
>>>>> @@ -88,6 +101,13 @@ struct drm_sched_fence {
>>>>>
>>>>>  struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
>>>>>
>>>>> +/**
>>>>> + * drm_sched_job - A job to be run by an entity.
>>>>> + *
>>>>> + * A job is created by the driver using drm_sched_job_init(), and
>>>>> + * should call drm_sched_entity_push_job() once it wants the scheduler
>>>>> + * to schedule the job.
>>>>> + */
>>>>>  struct drm_sched_job {
>>>>>         struct spsc_node                queue_node;
>>>>>         struct drm_gpu_scheduler        *sched;
>>>>> @@ -112,10 +132,28 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
>>>>>   * these functions should be implemented in driver side
>>>>>  */
>>>>>  struct drm_sched_backend_ops {
>>>>> +       /* Called when the scheduler is considering scheduling this
>>>>> +        * job next, to get another struct dma_fence for this job to
>>>>> +        * block on.  Once it returns NULL, run_job() may be called.
>>>>> +        */
>>>>>         struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
>>>>>                                         struct drm_sched_entity *s_entity);
>>>>> +
>>>>> +       /* Called to execute the job once all of the dependencies have
>>>>> +        * been resolved.  This may be called multiple times, if
>>>>> +        * timedout_job() has happened and drm_sched_job_recovery()
>>>>> +        * decides to try it again.
>>>>> +        */
>>>>>         struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
>>>>> +
>>>>> +       /* Called when a job has taken too long to execute, to trigger
>>>>> +        * GPU recovery.
>>>>> +        */
>>>>>         void (*timedout_job)(struct drm_sched_job *sched_job);
>>>>> +
>>>>> +       /* Called once the job's finished fence has been signaled and
>>>>> +        * it's time to clean it up.
>>>>> +        */
>>>>>         void (*free_job)(struct drm_sched_job *sched_job);
>>>>>  };
>>>>>
>>>>> --
>>>>> 2.17.0
>>>>>
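
As a concrete illustration of the drm_sched_job comments above, a
driver's submit path might look roughly like this. The my_* names are
hypothetical helpers, the snippet is untested, and it assumes the
scheduler API exactly as in the quoted patch (job init takes the
scheduler, entity and an owner pointer; push_job takes the entity):

#include <drm/gpu_scheduler.h>

int my_submit_job(struct my_context *ctx, struct my_job *job)
{
	int ret;

	/* After drm_sched_job_init() the finished fence exists, so it
	 * can already be installed as the out fence for userspace, even
	 * though run_job() hasn't produced a hardware fence yet.
	 */
	ret = drm_sched_job_init(&job->base, ctx->sched, &ctx->entity, ctx);
	if (ret)
		return ret;

	my_install_out_fence(job, &job->base.s_fence->finished);

	/* From here on the scheduler owns the job: dependency() and
	 * run_job() will be invoked from the scheduler thread.
	 */
	drm_sched_entity_push_job(&job->base, &ctx->entity);

	return 0;
}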

* Re: [PATCH] drm/sched: Extend the documentation.
  2018-04-05 13:44           ` Alex Deucher
@ 2018-04-05 14:03             ` Daniel Vetter
  -1 siblings, 0 replies; 15+ messages in thread
From: Daniel Vetter @ 2018-04-05 14:03 UTC (permalink / raw)
  To: Alex Deucher
  Cc: Nayan Deshmukh, Alex Deucher, Christian König, dri-devel,
	Linux Kernel Mailing List

On Thu, Apr 5, 2018 at 3:44 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
> On Thu, Apr 5, 2018 at 9:41 AM, Nayan Deshmukh
> <nayan26deshmukh@gmail.com> wrote:
>> On Thu, Apr 5, 2018 at 6:59 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
>>> On Thu, Apr 5, 2018 at 3:27 PM, Alex Deucher <alexdeucher@gmail.com> wrote:
>>>> On Thu, Apr 5, 2018 at 2:16 AM, Daniel Vetter <daniel@ffwll.ch> wrote:
>>>>> On Thu, Apr 5, 2018 at 12:32 AM, Eric Anholt <eric@anholt.net> wrote:
>>>>>> These comments answer all the questions I had for myself when
>>>>>> implementing a driver using the GPU scheduler.
>>>>>>
>>>>>> Signed-off-by: Eric Anholt <eric@anholt.net>
>>>>>
>>>>> Pulling all these comments into the generated kerneldoc would be
>>>>> awesome, maybe as a new "GPU Scheduler" chapter at the end of
>>>>> drm-mm.rst? Would mean a bit of busywork to convert the existing raw
>>>>> comments into proper kerneldoc. Also has the benefit that 0day will
>>>>> complain when you forget to update the comment when editing the
>>>>> function prototype - kerneldoc which isn't included anywhere in .rst
>>>>> won't be checked automatically.
>>>>
>>>> I was actually planning to do this myself, but Nayan wanted to do this
>>>> as prep work for his proposed GSoC project, so I was going to see how
>>>> far he got first.
>>
>> It is still on my TODO list. Just got a bit busy with my coursework. I
>> will try to look at it during the weekend.
>
> No worries.  Take your time.
>
>>>
>>> Awesome. I'm also happy to help out with any kerneldoc questions and
>>> best practices, though of course I have no clue about the scheduler itself :-)
>>>
>> I was thinking of adding a separate rst for the scheduler altogether. Would
>> it be better to add it to drm-mm.rst itself?
>
> I had been planning to add a separate file too since it's a separate
> entity.  Do whatever you think works best.

My recommendation is to put the gist of the docs into the source-code
comments themselves. That way there's a much higher chance that people
will spot them. The docs side then only needs to provide the chapter
structure (and I'm not sure the scheduler needs more than one chapter).
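
E.g. the raw comments from this patch would turn into something like
this as kerneldoc (a rough, untested sketch, just to show the @member
syntax that 0day can then check):

/**
 * struct drm_sched_backend_ops - hooks the driver implements for the scheduler
 *
 * @dependency: called when the scheduler considers running this job next;
 *	returns the next &struct dma_fence for the job to wait on, or NULL
 *	once all dependencies have resolved.
 * @run_job: called to hand the job to the hardware once the dependencies
 *	have resolved; returns the fence that signals on completion.
 * @timedout_job: called when the job has run too long, to trigger GPU
 *	recovery.
 * @free_job: called once the finished fence has signaled, to clean the
 *	job up.
 */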

drm-mm.rst is a bit misnamed, since it contains all the stuff for
handling rendering: MM, fences, dma-buf/prime, drm_syncobjs. I think
the scheduler fits very well in that topic range. We can of course
rename it to drm-rendering.rst or similar if the -mm.rst name is
misleading (plus adjust the title).
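
The .rst side then boils down to pulling those comments in, roughly
like this (a sketch only, exact file paths from memory):

GPU Scheduler
=============

.. kernel-doc:: include/drm/gpu_scheduler.h
   :internal:

.. kernel-doc:: drivers/gpu/drm/scheduler/gpu_scheduler.c
   :export: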
-Daniel

>
> Alex
>
>>
>>> Cheers, Daniel
>>>> Alex
>>>>
>>>>> -Daniel
>>>>>
>>>>>> ---
>>>>>>  include/drm/gpu_scheduler.h | 46 +++++++++++++++++++++++++++++++++----
>>>>>>  1 file changed, 42 insertions(+), 4 deletions(-)
>>>>>>
>>>>>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>>>>>> index dfd54fb94e10..c053a32341bf 100644
>>>>>> --- a/include/drm/gpu_scheduler.h
>>>>>> +++ b/include/drm/gpu_scheduler.h
>>>>>> @@ -43,10 +43,12 @@ enum drm_sched_priority {
>>>>>>  };
>>>>>>
>>>>>>  /**
>>>>>> - * A scheduler entity is a wrapper around a job queue or a group
>>>>>> - * of other entities. Entities take turns emitting jobs from their
>>>>>> - * job queues to corresponding hardware ring based on scheduling
>>>>>> - * policy.
>>>>>> + * drm_sched_entity - A wrapper around a job queue (typically attached
>>>>>> + * to the DRM file_priv).
>>>>>> + *
>>>>>> + * Entities will emit jobs in order to their corresponding hardware
>>>>>> + * ring, and the scheduler will alternate between entities based on
>>>>>> + * scheduling policy.
>>>>>>  */
>>>>>>  struct drm_sched_entity {
>>>>>>         struct list_head                list;
>>>>>> @@ -78,7 +80,18 @@ struct drm_sched_rq {
>>>>>>
>>>>>>  struct drm_sched_fence {
>>>>>>         struct dma_fence                scheduled;
>>>>>> +
>>>>>> +       /* This fence is what will be signaled by the scheduler when
>>>>>> +        * the job is completed.
>>>>>> +        *
>>>>>> +        * When setting up an out fence for the job, you should use
>>>>>> +        * this, since it's available immediately upon
>>>>>> +        * drm_sched_job_init(), and the fence returned by the driver
>>>>>> +        * from run_job() won't be created until the dependencies have
>>>>>> +        * resolved.
>>>>>> +        */
>>>>>>         struct dma_fence                finished;
>>>>>> +
>>>>>>         struct dma_fence_cb             cb;
>>>>>>         struct dma_fence                *parent;
>>>>>>         struct drm_gpu_scheduler        *sched;
>>>>>> @@ -88,6 +101,13 @@ struct drm_sched_fence {
>>>>>>
>>>>>>  struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
>>>>>>
>>>>>> +/**
>>>>>> + * drm_sched_job - A job to be run by an entity.
>>>>>> + *
>>>>>> + * A job is created by the driver using drm_sched_job_init(), and
>>>>>> + * should call drm_sched_entity_push_job() once it wants the scheduler
>>>>>> + * to schedule the job.
>>>>>> + */
>>>>>>  struct drm_sched_job {
>>>>>>         struct spsc_node                queue_node;
>>>>>>         struct drm_gpu_scheduler        *sched;
>>>>>> @@ -112,10 +132,28 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
>>>>>>   * these functions should be implemented in driver side
>>>>>>  */
>>>>>>  struct drm_sched_backend_ops {
>>>>>> +       /* Called when the scheduler is considering scheduling this
>>>>>> +        * job next, to get another struct dma_fence for this job to
>>>>>> +        * block on.  Once it returns NULL, run_job() may be called.
>>>>>> +        */
>>>>>>         struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
>>>>>>                                         struct drm_sched_entity *s_entity);
>>>>>> +
>>>>>> +       /* Called to execute the job once all of the dependencies have
>>>>>> +        * been resolved.  This may be called multiple times, if
>>>>>> +        * timedout_job() has happened and drm_sched_job_recovery()
>>>>>> +        * decides to try it again.
>>>>>> +        */
>>>>>>         struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
>>>>>> +
>>>>>> +       /* Called when a job has taken too long to execute, to trigger
>>>>>> +        * GPU recovery.
>>>>>> +        */
>>>>>>         void (*timedout_job)(struct drm_sched_job *sched_job);
>>>>>> +
>>>>>> +       /* Called once the job's finished fence has been signaled and
>>>>>> +        * it's time to clean it up.
>>>>>> +        */
>>>>>>         void (*free_job)(struct drm_sched_job *sched_job);
>>>>>>  };
>>>>>>
>>>>>> --
>>>>>> 2.17.0
>>>>>>



-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch


end of thread (newest: 2018-04-05 14:03 UTC)

Thread overview:
2018-04-04 22:32 [PATCH] drm/sched: Extend the documentation Eric Anholt
2018-04-05  6:16 ` Daniel Vetter
2018-04-05 13:27   ` Alex Deucher
2018-04-05 13:29     ` Daniel Vetter
2018-04-05 13:41       ` Nayan Deshmukh
2018-04-05 13:44         ` Alex Deucher
2018-04-05 14:03           ` Daniel Vetter
2018-04-05 12:33 ` Christian König
