* [PATCH 1/2] drm/scheduler: modify API to avoid redundancy
@ 2018-07-20 12:21 Nayan Deshmukh
  2018-07-20 14:47 ` Christian König
       [not found] ` <20180720122106.10344-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  0 siblings, 2 replies; 7+ messages in thread
From: Nayan Deshmukh @ 2018-07-20 12:21 UTC (permalink / raw)
  To: dri-devel; +Cc: Nayan Deshmukh, amd-gfx, christian.koenig

The entity has a scheduler field, so we don't need the sched argument
in any of the functions where an entity is provided.
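
For example, the call in amdgpu_cs_submit() goes from

    r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);

to

    r = drm_sched_job_init(&job->base, entity, p->filp);

with the scheduler now looked up through the entity inside the helper.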

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 13 +++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c     |  3 +--
 drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  4 ++--
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 +++++++++++---------
 drivers/gpu/drm/v3d/v3d_drv.c             |  4 +---
 drivers/gpu/drm/v3d/v3d_gem.c             |  2 --
 include/drm/gpu_scheduler.h               | 10 +++-------
 13 files changed, 30 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7c5cc33d0cda..7e5ebf823309 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job = p->job;
 	p->job = NULL;
 
-	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, p->filp);
 	if (r) {
 		amdgpu_job_free(job);
 		amdgpu_mn_unlock(p->mn);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 83e3b320a793..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 failed:
 	for (j = 0; j < i; j++)
-		drm_sched_entity_destroy(&adev->rings[j]->sched,
-				      &ctx->rings[j].entity);
+		drm_sched_entity_destroy(&ctx->rings[j].entity);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 	return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;
 
-		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-			&ctx->rings[i].entity);
+		drm_sched_entity_destroy(&ctx->rings[i].entity);
 	}
 
 	amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-					  &ctx->rings[i].entity, max_wait);
+			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+							  max_wait);
 		}
 	}
 	mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 				continue;
 
 			if (kref_read(&ctx->refcount) == 1)
-				drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-					&ctx->rings[i].entity);
+				drm_sched_entity_fini(&ctx->rings[i].entity);
 			else
 				DRM_ERROR("ctx %p is still alive\n", ctx);
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5a2c26a85984..631481a730e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	if (!f)
 		return -EINVAL;
 
-	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity, owner);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 13977ea6a097..913705d4dfd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1925,8 +1925,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 	} else {
-		drm_sched_entity_destroy(adev->mman.entity.sched,
-					 &adev->mman.entity);
+		drm_sched_entity_destroy(&adev->mman.entity);
 	}
 
 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 80b5c453f8c1..8e2c96da275e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;
 
-	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-				 &adev->uvd.entity);
+	drm_sched_entity_destroy(&adev->uvd.entity);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 86182c966ed6..b6ab4f5350c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
-	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.entity);
 
 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 		(void **)&adev->vce.cpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 098dd1ba751a..74b4a28a41d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2642,7 +2642,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-	drm_sched_entity_destroy(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	return r;
 }
@@ -2779,7 +2779,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 
-	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 36414ba56b22..207532c05eb8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 				gpu->lastctx = NULL;
 			mutex_unlock(&gpu->lock);
 
-			drm_sched_entity_destroy(&gpu->sched,
-						&ctx->sched_entity[i]);
+			drm_sched_entity_destroy(&ctx->sched_entity[i]);
 		}
 	}
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a74eb57af15b..590e44b0d963 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -118,8 +118,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 {
 	int ret;
 
-	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
-				 sched_entity, submit->cmdbuf.ctx);
+	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+				 submit->cmdbuf.ctx);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index dac71e3b4514..a3b55c542025 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -273,11 +273,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity, long timeout)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
+	struct drm_gpu_scheduler *sched;
 	long ret = timeout;
 
+	sched = entity->sched;
 	if (!drm_sched_entity_is_initialized(sched, entity))
 		return ret;
 	/**
@@ -312,10 +313,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
  * entity and signals all jobs with an error code if the process was killed.
  *
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
+	struct drm_gpu_scheduler *sched;
 
+	sched = entity->sched;
 	drm_sched_entity_set_rq(entity, NULL);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
@@ -373,11 +375,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
  *
  * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
  */
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-				struct drm_sched_entity *entity)
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
 {
-	drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
-	drm_sched_entity_fini(sched, entity);
+	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+	drm_sched_entity_fini(entity);
 }
 EXPORT_SYMBOL(drm_sched_entity_destroy);
 
@@ -740,10 +741,11 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
  * Returns 0 for success, negative error code otherwise.
  */
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
+	struct drm_gpu_scheduler *sched = entity->sched;
+
 	job->sched = sched;
 	job->entity = entity;
 	job->s_priority = entity->rq - sched->sched_rq;
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1dceba2b42fd..2a85fa68ffea 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -145,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 static void
 v3d_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv = file->driver_priv;
 	enum v3d_queue q;
 
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_entity_destroy(&v3d->queue[q].sched,
-				      &v3d_priv->sched_entity[q]);
+		drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
 	}
 
 	kfree(v3d_priv);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b513f9189caf..9029590267aa 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -552,7 +552,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
 	if (exec->bin.start != exec->bin.end) {
 		ret = drm_sched_job_init(&exec->bin.base,
-					 &v3d->queue[V3D_BIN].sched,
 					 &v3d_priv->sched_entity[V3D_BIN],
 					 v3d_priv);
 		if (ret)
@@ -567,7 +566,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	}
 
 	ret = drm_sched_job_init(&exec->render.base,
-				 &v3d->queue[V3D_RENDER].sched,
 				 &v3d_priv->sched_entity[V3D_RENDER],
 				 v3d_priv);
 	if (ret)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 2205e89722f6..728346abcc81 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -286,12 +286,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 			  struct drm_sched_rq **rq_list,
 			  unsigned int num_rq_list,
 			  atomic_t *guilty);
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity);
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity);
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -302,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_create(
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
-- 
2.14.3


* [PATCH 2/2] drm/scheduler: remove sched field from the entity
       [not found] ` <20180720122106.10344-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2018-07-20 12:21   ` Nayan Deshmukh
       [not found]     ` <20180720122106.10344-2-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2018-07-20 18:13   ` [PATCH 1/2] drm/scheduler: modify API to avoid redundancy Eric Anholt
  2018-08-09  8:57   ` Daniel Vetter
  2 siblings, 1 reply; 7+ messages in thread
From: Nayan Deshmukh @ 2018-07-20 12:21 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: Nayan Deshmukh, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	eric-WhKQ6XTQaPysTnJN9+BGXg, alexdeucher-Re5JQEeQqe8AvxtiuMwx3w,
	christian.koenig-5C7GfCeVMHo, l.stach-bIcnvbaLZ9MEGnE8C9+IrQ

The scheduler of an entity is determined by the run queue on which
it is queued. Dropping the sched field spares us the effort of
keeping rq and sched in sync once we start shifting entities among
different run queues.
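
Everything that used to read entity->sched now derives the scheduler
from the entity's current run queue instead, e.g. in
drm_sched_job_init():

    struct drm_gpu_scheduler *sched = entity->rq->sched;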

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  6 +++---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 19 +++++++++----------
 drivers/gpu/drm/scheduler/sched_fence.c   |  2 +-
 include/drm/gpu_scheduler.h               |  2 --
 6 files changed, 16 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7e5ebf823309..9572ca1ac15e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1262,7 +1262,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
 	amdgpu_ring_priority_get(ring, priority);
 
 	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 631481a730e0..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -143,7 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
 
-	ring = to_amdgpu_ring(entity->sched);
+	ring = to_amdgpu_ring(entity->rq->sched);
 	amdgpu_ring_priority_get(ring, priority);
 
 	return 0;
@@ -167,7 +167,7 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
 					       struct drm_sched_entity *s_entity)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 	struct dma_fence *fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 74b4a28a41d6..5d7d7900ccab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -387,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 		ats_entries = 0;
 	}
 
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
 	if (r)
@@ -1113,7 +1113,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 		struct amdgpu_ring *ring;
 		struct dma_fence *fence;
 
-		ring = container_of(vm->entity.sched, struct amdgpu_ring,
+		ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
 				    sched);
 
 		amdgpu_ring_pad_ib(ring, params.ib);
@@ -1403,7 +1403,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 					   addr, flags);
 	}
 
-	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
 
 	nptes = last - start + 1;
 
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index a3b55c542025..3f2fc5e8242a 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -185,7 +185,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
 	entity->rq = rq_list[0];
-	entity->sched = rq_list[0]->sched;
 	entity->guilty = guilty;
 	entity->last_scheduled = NULL;
 
@@ -210,8 +209,8 @@ EXPORT_SYMBOL(drm_sched_entity_init);
 static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
 					    struct drm_sched_entity *entity)
 {
-	return entity->sched == sched &&
-		entity->rq != NULL;
+	return entity->rq != NULL &&
+		entity->rq->sched == sched;
 }
 
 /**
@@ -278,7 +277,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 	struct drm_gpu_scheduler *sched;
 	long ret = timeout;
 
-	sched = entity->sched;
+	sched = entity->rq->sched;
 	if (!drm_sched_entity_is_initialized(sched, entity))
 		return ret;
 	/**
@@ -317,7 +316,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
 	struct drm_gpu_scheduler *sched;
 
-	sched = entity->sched;
+	sched = entity->rq->sched;
 	drm_sched_entity_set_rq(entity, NULL);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
@@ -388,7 +387,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
 		container_of(cb, struct drm_sched_entity, cb);
 	entity->dependency = NULL;
 	dma_fence_put(f);
-	drm_sched_wakeup(entity->sched);
+	drm_sched_wakeup(entity->rq->sched);
 }
 
 static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -438,7 +437,7 @@ EXPORT_SYMBOL(drm_sched_entity_set_rq);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
 				    struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 	struct drm_sched_fence *s_fence;
 
 	if (!fence || dma_fence_is_signaled(fence))
@@ -455,7 +454,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
 static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 	struct dma_fence * fence = entity->dependency;
 	struct drm_sched_fence *s_fence;
 
@@ -500,7 +499,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
 static struct drm_sched_job *
 drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 	struct drm_sched_job *sched_job = to_drm_sched_job(
 						spsc_queue_peek(&entity->job_queue));
 
@@ -744,7 +743,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
-	struct drm_gpu_scheduler *sched = entity->sched;
+	struct drm_gpu_scheduler *sched = entity->rq->sched;
 
 	job->sched = sched;
 	job->entity = entity;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index df4461648e3f..4029312fdd81 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -172,7 +172,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
 		return NULL;
 
 	fence->owner = owner;
-	fence->sched = entity->sched;
+	fence->sched = entity->rq->sched;
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 728346abcc81..091b9afcd184 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -52,7 +52,6 @@ enum drm_sched_priority {
  *        runqueue.
  * @rq: runqueue to which this entity belongs.
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
- * @sched: the scheduler instance to which this entity is enqueued.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
  *             new &drm_sched_fence which is part of the entity.
@@ -76,7 +75,6 @@ struct drm_sched_entity {
 	struct list_head		list;
 	struct drm_sched_rq		*rq;
 	spinlock_t			rq_lock;
-	struct drm_gpu_scheduler	*sched;
 
 	struct spsc_queue		job_queue;
 
-- 
2.14.3


* Re: [PATCH 1/2] drm/scheduler: modify API to avoid redundancy
  2018-07-20 12:21 [PATCH 1/2] drm/scheduler: modify API to avoid redundancy Nayan Deshmukh
@ 2018-07-20 14:47 ` Christian König
       [not found] ` <20180720122106.10344-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  1 sibling, 0 replies; 7+ messages in thread
From: Christian König @ 2018-07-20 14:47 UTC (permalink / raw)
  To: Nayan Deshmukh, dri-devel; +Cc: amd-gfx

On 20.07.2018 14:21, Nayan Deshmukh wrote:
> The entity has a scheduler field, so we don't need the sched argument
> in any of the functions where an entity is provided.
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>

Reviewed-by: Christian König <christian.koenig@amd.com> for the series.


* Re: [PATCH 1/2] drm/scheduler: modify API to avoid redundancy
       [not found] ` <20180720122106.10344-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2018-07-20 12:21   ` [PATCH 2/2] drm/scheduler: remove sched field from the entity Nayan Deshmukh
@ 2018-07-20 18:13   ` Eric Anholt
  2018-08-09  8:57   ` Daniel Vetter
  2 siblings, 0 replies; 7+ messages in thread
From: Eric Anholt @ 2018-07-20 18:13 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: alexdeucher-Re5JQEeQqe8AvxtiuMwx3w, Nayan Deshmukh,
	christian.koenig-5C7GfCeVMHo,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	l.stach-bIcnvbaLZ9MEGnE8C9+IrQ


Nayan Deshmukh <nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> writes:

> The entity has a scheduler field, so we don't need the sched argument
> in any of the functions where an entity is provided.
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 13 +++++--------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  3 +--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  3 +--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  4 ++--
>  drivers/gpu/drm/etnaviv/etnaviv_drv.c     |  3 +--
>  drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  4 ++--
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 +++++++++++---------
>  drivers/gpu/drm/v3d/v3d_drv.c             |  4 +---
>  drivers/gpu/drm/v3d/v3d_gem.c             |  2 --
>  include/drm/gpu_scheduler.h               | 10 +++-------
>  13 files changed, 30 insertions(+), 42 deletions(-)
>

> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index dac71e3b4514..a3b55c542025 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -273,11 +273,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
>   *
>   * Returns the remaining time in jiffies left from the input timeout
>   */
> -long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
> -			   struct drm_sched_entity *entity, long timeout)
> +long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
>  {
> +	struct drm_gpu_scheduler *sched;
>  	long ret = timeout;
>  
> +	sched = entity->sched;
>  	if (!drm_sched_entity_is_initialized(sched, entity))
>  		return ret;
>  	/**
> @@ -312,10 +313,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
>   * entity and signals all jobs with an error code if the process was killed.
>   *
>   */
> -void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
> -			   struct drm_sched_entity *entity)
> +void drm_sched_entity_fini(struct drm_sched_entity *entity)
>  {
> +	struct drm_gpu_scheduler *sched;
>  
> +	sched = entity->sched;

Maybe fold the initialization into the declaration above, like you did
elsewhere?
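
I.e., something like this (untested, just spelling out the
suggestion):

	struct drm_gpu_scheduler *sched = entity->sched;

which would match what the patch already does in drm_sched_job_init().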

Regardless, this is a wonderful cleanup of the API.

Reviewed-by: Eric Anholt <eric-WhKQ6XTQaPysTnJN9+BGXg@public.gmane.org>


* Re: [PATCH 2/2] drm/scheduler: remove sched field from the entity
       [not found]     ` <20180720122106.10344-2-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2018-07-20 18:14       ` Eric Anholt
  0 siblings, 0 replies; 7+ messages in thread
From: Eric Anholt @ 2018-07-20 18:14 UTC (permalink / raw)
  To: dri-devel-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW
  Cc: alexdeucher-Re5JQEeQqe8AvxtiuMwx3w, Nayan Deshmukh,
	christian.koenig-5C7GfCeVMHo,
	amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	l.stach-bIcnvbaLZ9MEGnE8C9+IrQ


Nayan Deshmukh <nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> writes:

> The scheduler of an entity is determined by the run queue on which
> it is queued. Dropping the sched field spares us the effort of
> keeping rq and sched in sync once we start shifting entities among
> different run queues.

Reviewed-by: Eric Anholt <eric-WhKQ6XTQaPysTnJN9+BGXg@public.gmane.org>


* Re: [PATCH 1/2] drm/scheduler: modify API to avoid redundancy
       [not found] ` <20180720122106.10344-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  2018-07-20 12:21   ` [PATCH 2/2] drm/scheduler: remove sched field from the entity Nayan Deshmukh
  2018-07-20 18:13   ` [PATCH 1/2] drm/scheduler: modify API to avoid redundancy Eric Anholt
@ 2018-08-09  8:57   ` Daniel Vetter
       [not found]     ` <CAKMK7uHNEqh0Z+Wh7ZCPNvHHs6+yd+kywZFHvryAzipuTD0XAw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
  2 siblings, 1 reply; 7+ messages in thread
From: Daniel Vetter @ 2018-08-09  8:57 UTC (permalink / raw)
  To: Nayan Deshmukh; +Cc: amd-gfx list, dri-devel, Christian König

On Fri, Jul 20, 2018 at 2:21 PM, Nayan Deshmukh
<nayan26deshmukh@gmail.com> wrote:
> The entity has a scheduler field, so we don't need the sched argument
> in any of the functions where an entity is provided.
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>

This breaks the make htmldocs build a bit:

./drivers/gpu/drm/scheduler/gpu_scheduler.c:262: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_flush'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:303: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_fini'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:365: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_destroy'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:730: warning: Excess
function parameter 'sched' description in 'drm_sched_job_init'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:263: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_flush'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:304: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_fini'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:366: warning: Excess
function parameter 'sched' description in 'drm_sched_entity_destroy'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:731: warning: Excess
function parameter 'sched' description in 'drm_sched_job_init'
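
All of these point at kernel-doc comments that still document the
removed parameter, so the fix should just be deleting the stale
@sched line from each comment, e.g. for drm_sched_job_init()
(description text here is illustrative, not copied from the tree):

	  * @job: scheduler job to init
	- * @sched: scheduler instance
	  * @entity: scheduler entity to use
	  * @owner: owner of the job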

Care to fix it?

Thanks, Daniel


-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx


* Re: [PATCH 1/2] drm/scheduler: modify API to avoid redundancy
       [not found]     ` <CAKMK7uHNEqh0Z+Wh7ZCPNvHHs6+yd+kywZFHvryAzipuTD0XAw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2018-08-09  8:59       ` Nayan Deshmukh
  0 siblings, 0 replies; 7+ messages in thread
From: Nayan Deshmukh @ 2018-08-09  8:59 UTC (permalink / raw)
  To: Daniel Vetter
  Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW,
	Maling list - DRI developers, Christian König

Hi Daniel,


On Thu, Aug 9, 2018 at 2:27 PM Daniel Vetter <daniel@ffwll.ch> wrote:
>
> On Fri, Jul 20, 2018 at 2:21 PM, Nayan Deshmukh
> <nayan26deshmukh@gmail.com> wrote:
> > entity has a scheduler field and we don't need the sched argument
> > in any of the functions where entity is provided.
> >
> > Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
>
> This breaks the make htmldocs build a bit:
>
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:262: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_flush'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:303: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_fini'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:365: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_destroy'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:730: warning: Excess
> function parameter 'sched' description in 'drm_sched_job_init'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:263: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_flush'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:304: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_fini'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:366: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_destroy'
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:731: warning: Excess
> function parameter 'sched' description in 'drm_sched_job_init'
>
> Care to fix it?
My bad, thanks for pointing it out. I will send in a patch to fix it.
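
For reference, each of those warnings points at a kernel-doc comment that
still documents the parameter the patch removed, so the fix is a one-line
deletion per function. A minimal sketch against drm_sched_entity_flush()
(the comment wording shown here is assumed for illustration, not copied
from the tree):

 /**
  * drm_sched_entity_flush - Flush a context entity
  *
- * @sched: scheduler instance
  * @entity: scheduler entity
  * @timeout: time to wait in jiffies
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)

The same stale "@sched:" line needs to go from drm_sched_entity_fini(),
drm_sched_entity_destroy() and drm_sched_job_init() as well.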

Cheers,
Nayan
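
What makes the whole cleanup possible is that an entity caches a pointer to
its scheduler when it is initialized, so any call site that holds the entity
can recover the scheduler itself. A condensed sketch of that dependency,
trimmed to the fields actually touched by the diff above (everything else
elided, declaration details assumed from the 4.18-era scheduler):

struct drm_sched_entity {
        struct drm_gpu_scheduler        *sched; /* cached at entity init */
        struct drm_sched_rq             *rq;    /* run queue the entity sits on */
        /* ... */
};

/* ... which is what lets drm_sched_job_init() drop its sched argument: */
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner)
{
        struct drm_gpu_scheduler *sched = entity->sched;

        job->sched = sched;
        job->entity = entity;
        job->s_priority = entity->rq - sched->sched_rq;
        /* ... */
}

Judging by its subject line, patch 2/2 of the series then removes the cached
sched field itself and derives the scheduler from the entity's run queue
instead.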


end of thread

Thread overview: 7+ messages
2018-07-20 12:21 [PATCH 1/2] drm/scheduler: modify API to avoid redundancy Nayan Deshmukh
2018-07-20 14:47 ` Christian König
     [not found] ` <20180720122106.10344-1-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-07-20 12:21   ` [PATCH 2/2] drm/scheduler: remove sched field from the entity Nayan Deshmukh
     [not found]     ` <20180720122106.10344-2-nayan26deshmukh-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-07-20 18:14       ` Eric Anholt
2018-07-20 18:13   ` [PATCH 1/2] drm/scheduler: modify API to avoid redundancy Eric Anholt
2018-08-09  8:57   ` Daniel Vetter
     [not found]     ` <CAKMK7uHNEqh0Z+Wh7ZCPNvHHs6+yd+kywZFHvryAzipuTD0XAw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2018-08-09  8:59       ` Nayan Deshmukh
