From: Luben Tuikov <luben.tuikov@amd.com>
To: "Andrey Grodzovsky" <Andrey.Grodzovsky@amd.com>,
	"Christian König" <christian.koenig@amd.com>,
	"Lucas Stach" <l.stach@pengutronix.de>,
	"Alexander Deucher" <Alexander.Deucher@amd.com>
Cc: Emily Deng <Emily.Deng@amd.com>,
	Luben Tuikov <luben.tuikov@amd.com>,
	amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
	steven.price@arm.com
Subject: [PATCH 1/6] drm/scheduler: "node" --> "list"
Date: Tue, 24 Nov 2020 22:17:03 -0500
Message-ID: <20201125031708.6433-2-luben.tuikov@amd.com>
In-Reply-To: <20201125031708.6433-1-luben.tuikov@amd.com>

Rename "node" to "list" in struct drm_sched_job,
in order to make it consistent with what we see
being used throughout gpu_scheduler.h, for
instance in struct drm_sched_entity, as well as
the rest of DRM and the kernel.
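
For illustration only, and not part of the diff below: with the
rename applied, walking the scheduler's job list looks like the
following minimal sketch. It assumes a valid
struct drm_gpu_scheduler *sched and mirrors the loops already in
sched_main.c:

	struct drm_sched_job *s_job;

	/* job_list_lock protects ring_mirror_list, as in sched_main.c */
	spin_lock(&sched->job_list_lock);
	list_for_each_entry(s_job, &sched->ring_mirror_list, list) {
		/* each job is linked into the list via s_job->list */
	}
	spin_unlock(&sched->job_list_lock);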

Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c |  6 +++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c     |  2 +-
 drivers/gpu/drm/scheduler/sched_main.c      | 23 +++++++++++----------
 include/drm/gpu_scheduler.h                 |  4 ++--
 5 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 5c1f3725c741..8358cae0b5a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1427,7 +1427,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 	struct dma_fence *fence;
 
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry(s_job, &sched->ring_mirror_list, list) {
 		fence = sched->ops->run_job(s_job);
 		dma_fence_put(fence);
 	}
@@ -1459,10 +1459,10 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
 
 no_preempt:
 	spin_lock(&sched->job_list_lock);
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
 		if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
 			/* remove job from ring_mirror_list */
-			list_del_init(&s_job->node);
+			list_del_init(&s_job->list);
 			sched->ops->free_job(s_job);
 			continue;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7560b05e4ac1..4df6de81cd41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4128,7 +4128,7 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
 
 		spin_lock(&ring->sched.job_list_lock);
 		job = list_first_entry_or_null(&ring->sched.ring_mirror_list,
-				struct drm_sched_job, node);
+				struct drm_sched_job, list);
 		spin_unlock(&ring->sched.job_list_lock);
 		if (job)
 			return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index dcfe8a3b03ff..aca52a46b93d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	}
 
 	/* Signal all jobs already scheduled to HW */
-	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+	list_for_each_entry(s_job, &sched->ring_mirror_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c6332d75025e..c52eba407ebd 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -272,7 +272,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 	struct drm_gpu_scheduler *sched = s_job->sched;
 
 	spin_lock(&sched->job_list_lock);
-	list_add_tail(&s_job->node, &sched->ring_mirror_list);
+	list_add_tail(&s_job->list, &sched->ring_mirror_list);
 	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
@@ -287,7 +287,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
 	spin_lock(&sched->job_list_lock);
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
-				       struct drm_sched_job, node);
+				       struct drm_sched_job, list);
 
 	if (job) {
 		/*
@@ -295,7 +295,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		 * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
 		 * is parked at which point it's safe.
 		 */
-		list_del_init(&job->node);
+		list_del_init(&job->list);
 		spin_unlock(&sched->job_list_lock);
 
 		job->sched->ops->timedout_job(job);
@@ -392,7 +392,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 		 * Add at the head of the queue to reflect it was the earliest
 		 * job extracted.
 		 */
-		list_add(&bad->node, &sched->ring_mirror_list);
+		list_add(&bad->list, &sched->ring_mirror_list);
 
 	/*
 	 * Iterate the job list from later to  earlier one and either deactive
@@ -400,7 +400,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 	 * signaled.
 	 * This iteration is thread safe as sched thread is stopped.
 	 */
-	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list,
+					 list) {
 		if (s_job->s_fence->parent &&
 		    dma_fence_remove_callback(s_job->s_fence->parent,
 					      &s_job->cb)) {
@@ -411,7 +412,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 			 * Locking here is for concurrent resume timeout
 			 */
 			spin_lock(&sched->job_list_lock);
-			list_del_init(&s_job->node);
+			list_del_init(&s_job->list);
 			spin_unlock(&sched->job_list_lock);
 
 			/*
@@ -462,7 +463,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 	 * so no new jobs are being inserted or removed. Also concurrent
 	 * GPU recovers can't run in parallel.
 	 */
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
 		struct dma_fence *fence = s_job->s_fence->parent;
 
 		atomic_inc(&sched->hw_rq_count);
@@ -505,7 +506,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 	bool found_guilty = false;
 	struct dma_fence *fence;
 
-	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, list) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 
 		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
@@ -565,7 +566,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		return -ENOMEM;
 	job->id = atomic64_inc_return(&sched->job_id_count);
 
-	INIT_LIST_HEAD(&job->node);
+	INIT_LIST_HEAD(&job->list);
 
 	return 0;
 }
@@ -684,11 +685,11 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 	spin_lock(&sched->job_list_lock);
 
 	job = list_first_entry_or_null(&sched->ring_mirror_list,
-				       struct drm_sched_job, node);
+				       struct drm_sched_job, list);
 
 	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
 		/* remove job from ring_mirror_list */
-		list_del_init(&job->node);
+		list_del_init(&job->list);
 	} else {
 		job = NULL;
 		/* queue timeout for next job */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 92436553fd6a..3add0072bd37 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -189,14 +189,14 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
  */
 struct drm_sched_job {
 	struct spsc_node		queue_node;
+	struct list_head		list;
 	struct drm_gpu_scheduler	*sched;
 	struct drm_sched_fence		*s_fence;
 	struct dma_fence_cb		finish_cb;
-	struct list_head		node;
 	uint64_t			id;
 	atomic_t			karma;
 	enum drm_sched_priority		s_priority;
-	struct drm_sched_entity  *entity;
+	struct drm_sched_entity         *entity;
 	struct dma_fence_cb		cb;
 };
 
-- 
2.29.2.154.g7f7ebe054a

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Thread overview: 125+ messages

2019-11-25 20:51 [PATCH v4] drm/scheduler: Avoid accessing freed bad job Andrey Grodzovsky
2019-11-25 21:44 ` Deng, Emily
2019-11-26  0:09   ` Grodzovsky, Andrey
     [not found]     ` <MWHPR12MB1453C6FC45A83482232CA3EDEA450-Gy0DoCVfaSWZBIDmKHdw+wdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2019-11-26 15:36       ` Deucher, Alexander
2019-11-26 15:37 ` Andrey Grodzovsky
     [not found]   ` <b8b716a7-e235-38b2-ea6d-0a21881fa64e-5C7GfCeVMHo@public.gmane.org>
2019-11-27  0:41     ` Deng, Emily
     [not found]       ` <MN2PR12MB2975CA8858F21FDF325C33FE8F440-rweVpJHSKToFlvJWC7EAqwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2019-12-02 19:24         ` Deng, Emily
2019-12-03 19:10           ` Andrey Grodzovsky
2019-12-03 19:44             ` Deucher, Alexander
2019-12-03 19:57               ` Andrey Grodzovsky
2019-12-03 19:59                 ` Deucher, Alexander
2019-12-03 20:32                   ` Andrey Grodzovsky
2019-12-03 20:58                     ` Deng, Emily
2019-12-03 19:53             ` Deng, Emily
2020-02-05 18:24 ` Lucas Stach
2020-02-06 11:10   ` Lucas Stach
2020-02-06 11:49     ` Christian König
2020-02-06 14:49       ` Alex Deucher
2020-02-06 14:51         ` Christian König
2020-02-06 15:49           ` Andrey Grodzovsky
2020-02-10 16:55             ` Andrey Grodzovsky
2020-02-10 21:50               ` Luben Tuikov
2020-02-11 15:55                 ` Andrey Grodzovsky
2020-02-11 21:27                   ` Andrey Grodzovsky
2020-02-12  0:53                     ` Luben Tuikov
2020-02-12 16:33                       ` Andrey Grodzovsky
2020-07-21 11:03                         ` Lucas Stach
2020-07-21 13:36                           ` Andrey Grodzovsky
2020-07-21 13:39                             ` Christian König
2020-07-21 13:42                               ` Andrey Grodzovsky
2020-07-21 18:29                                 ` Luben Tuikov
2020-11-25  3:17                                   ` [PATCH 0/6] Allow to extend the timeout without jobs disappearing Luben Tuikov
2020-11-25  3:17                                     ` [PATCH 1/6] drm/scheduler: "node" --> "list" Luben Tuikov [this message]
2020-11-25  9:44                                       ` Christian König
2020-11-25  3:17                                     ` [PATCH 2/6] gpu/drm: ring_mirror_list --> pending_list Luben Tuikov
2020-11-25  9:47                                       ` Christian König
2020-11-25 16:42                                         ` Luben Tuikov
2020-11-25  3:17                                     ` [PATCH 3/6] drm/scheduler: Job timeout handler returns status Luben Tuikov
2020-11-25  4:41                                       ` kernel test robot
2020-11-25  9:50                                       ` Christian König
2020-11-25 16:48                                         ` Luben Tuikov
2020-11-25 11:04                                       ` Steven Price
2020-11-25 11:15                                         ` Lucas Stach
2020-11-25 11:22                                           ` Steven Price
2020-11-25 11:47                                             ` Lucas Stach
2020-11-25 12:41                                         ` Christian König
2020-11-26 15:06                                       ` Andrey Grodzovsky
2020-11-25  3:17                                     ` [PATCH 4/6] drm/scheduler: Essentialize the job done callback Luben Tuikov
2020-11-25  9:51                                       ` Christian König
2020-11-25  3:17                                     ` [PATCH 5/6] drm/amdgpu: Don't hardcode thread name length Luben Tuikov
2020-11-25  9:55                                       ` Christian König
2020-11-25 17:01                                         ` Luben Tuikov
2020-11-26  8:11                                           ` Christian König
2020-11-25  3:17                                     ` [PATCH 6/6] drm/sched: Make use of a "done" thread Luben Tuikov
2020-11-25 10:10                                       ` Christian König
2020-11-26  0:24                                         ` Luben Tuikov
2020-11-25 11:09                                       ` Steven Price
2020-11-26  0:30                                         ` Luben Tuikov
2020-02-07 15:26           ` [PATCH v4] drm/scheduler: Avoid accessing freed bad job Daniel Vetter
