From: "Christian König" <ckoenig.leichtzumerken@gmail.com>
To: Nirmoy Das <nirmoy.aiemd@gmail.com>,
	alexander.deucher@amd.com, kenny.ho@amd.com,
	christian.koenig@amd.com
Cc: nirmoy.das@amd.com, amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 4/4] drm/scheduler: do not keep a copy of sched list
Date: Fri, 6 Dec 2019 20:41:26 +0100
Message-ID: <6cc23df3-97f7-f961-12f3-d99677cd5304@gmail.com>
In-Reply-To: <20191206173304.3025-4-nirmoy.das@amd.com>

On 06.12.19 18:33, Nirmoy Das wrote:
> The entity should not keep and maintain its own copy of the
> sched list.

That is a good step, but we need to take this further.

The sched_list is static for the whole device and we shouldn't allocate 
it inside the context at all.
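
Since the entity now only borrows the pointer instead of copying it,
whoever owns the list must keep it alive for as long as any entity
uses it, and keeping it in the device gives us that for free. Just a
rough sketch of what I have in mind; the gpu_sched_list and
num_gpu_scheds names are made up for illustration and don't exist yet:

	/* in struct amdgpu_device, filled once during ring init */
	struct drm_gpu_scheduler	*gpu_sched_list[AMDGPU_HW_IP_NUM][AMDGPU_MAX_RINGS];
	unsigned			num_gpu_scheds[AMDGPU_HW_IP_NUM];

	/* amdgpu_ctx_init_entity() then just picks the per-IP list */
	r = drm_sched_entity_init(&ctx->entities[hw_ip][i].entity,
				  ctx->init_priority,
				  adev->gpu_sched_list[hw_ip],
				  adev->num_gpu_scheds[hw_ip],
				  &ctx->guilty);

That way the kcalloc()/kfree() disappears from the context code
entirely.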

Christian.

>
> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c  | 11 ++++++++---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h  |  1 +
>   drivers/gpu/drm/scheduler/sched_entity.c | 12 +-----------
>   3 files changed, 10 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index c7643af8827f..1939b414d23b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -71,7 +71,6 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
>   static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip)
>   {
>   	struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
> -	struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
>   	struct amdgpu_device *adev = ctx->adev;
>   	unsigned num_rings = 0;
>   	unsigned num_scheds = 0;
> @@ -129,16 +128,21 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip)
>   			break;
>   	}
>   
> +	ctx->sched_list = kcalloc(num_rings,
> +				  sizeof(struct drm_gpu_scheduler *), GFP_KERNEL);
> +	if (ctx->sched_list == NULL)
> +		return -ENOMEM;
> +
>   	for (i = 0; i < num_rings; ++i) {
>   		if (!rings[i]->adev)
>   			continue;
>   
> -		sched_list[num_scheds++] = &rings[i]->sched;
> +		ctx->sched_list[num_scheds++] = &rings[i]->sched;
>   	}
>   
>   	for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i)
>   		r = drm_sched_entity_init(&ctx->entities[hw_ip][i].entity,
> -				ctx->init_priority, sched_list, num_scheds, &ctx->guilty);
> +				ctx->init_priority, ctx->sched_list, num_scheds, &ctx->guilty);
>   	if (r)
>   		goto error_cleanup_entities;
>   
> @@ -228,6 +232,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
>   		for (j = 0; j < amdgpu_sched_jobs; ++j)
>   			dma_fence_put(ctx->entities[0][i].fences[j]);
>   	kfree(ctx->fences);
> +	kfree(ctx->sched_list);
>   	kfree(ctx->entities[0]);
>   
>   	mutex_destroy(&ctx->lock);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> index da808633732b..9fd02cc47352 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
> @@ -44,6 +44,7 @@ struct amdgpu_ctx {
>   	spinlock_t			ring_lock;
>   	struct dma_fence		**fences;
>   	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM];
> +	struct drm_gpu_scheduler	**sched_list;
>   	bool				preamble_presented;
>   	enum drm_sched_priority		init_priority;
>   	enum drm_sched_priority		override_priority;
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index f9b6ce29c58f..c84a9a66f7f0 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -56,8 +56,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>   			  unsigned int num_sched_list,
>   			  atomic_t *guilty)
>   {
> -	int i;
> -
>   	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
>   		return -EINVAL;
>   
> @@ -67,17 +65,10 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>   	entity->guilty = guilty;
>   	entity->num_sched_list = num_sched_list;
>   	entity->priority = priority;
> -	entity->sched_list =  kcalloc(num_sched_list,
> -				      sizeof(struct drm_gpu_scheduler *), GFP_KERNEL);
> -
> -	if(!entity->sched_list)
> -		return -ENOMEM;
> +	entity->sched_list = sched_list;
>   
>   	init_completion(&entity->entity_idle);
>   
> -	for (i = 0; i < num_sched_list; i++)
> -		entity->sched_list[i] = sched_list[i];
> -
>   	if (num_sched_list)
>   		entity->rq = &entity->sched_list[0]->sched_rq[entity->priority];
>   
> @@ -312,7 +303,6 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
>   
>   	dma_fence_put(entity->last_scheduled);
>   	entity->last_scheduled = NULL;
> -	kfree(entity->sched_list);
>   }
>   EXPORT_SYMBOL(drm_sched_entity_fini);
>   

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

Thread overview: 25+ messages
2019-12-06 17:33 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-06 17:33 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
2019-12-06 19:37   ` Christian König
2019-12-06 17:33 ` [PATCH 3/4] drm/amdgpu: allocate entities on demand Nirmoy Das
2019-12-06 19:40   ` Christian König
2019-12-06 17:33 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
2019-12-06 19:41   ` Christian König [this message]
2019-12-08 19:57     ` Nirmoy
2019-12-09 12:20       ` Christian König
2019-12-09 13:56         ` Nirmoy
2019-12-09 14:09           ` Nirmoy
2019-12-09 15:37             ` Christian König
2019-12-09 21:53 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-09 21:53 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
2019-12-10 12:52 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-10 12:53 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
2019-12-10 13:00   ` Christian König
2019-12-10 13:02     ` Christian König
2019-12-10 14:47     ` Nirmoy
2019-12-10 15:08       ` Nirmoy
2019-12-10 17:32         ` Christian König
2019-12-10 18:38           ` Nirmoy
2019-12-10 18:17 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-10 18:17 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
2019-12-11 12:25   ` Christian König
2019-12-11 14:24 [PATCH 1/4 v2] drm/scheduler: rework entity creation Nirmoy Das
2019-12-11 14:24 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
2019-12-11 15:42 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-11 15:42 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
2019-12-11 15:42   ` Nirmoy Das
