All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Christian König" <ckoenig.leichtzumerken@gmail.com>
To: Nirmoy Das <nirmoy.aiemd@gmail.com>,
	alexander.deucher@amd.com, kenny.ho@amd.com,
	christian.koenig@amd.com
Cc: nirmoy.das@amd.com, amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list
Date: Fri, 6 Dec 2019 20:37:29 +0100	[thread overview]
Message-ID: <edc041fa-5c2a-ddb4-a92e-10f1eafac207@gmail.com> (raw)
In-Reply-To: <20191206173304.3025-2-nirmoy.das@amd.com>

Am 06.12.19 um 18:33 schrieb Nirmoy Das:
> drm_sched_entity_init() takes a drm gpu scheduler list instead of a
> drm_sched_rq list. This makes the conversion of the drm_sched_rq list
> to a drm gpu scheduler list unnecessary.
>
> Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c     | 11 ++++-------
>   drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h     |  4 ++--
>   drivers/gpu/drm/amd/amdgpu/cik_sdma.c      |  8 +++-----
>   drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c     |  8 +++-----
>   drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c     |  8 +++-----
>   drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c     |  5 ++---
>   drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c     |  8 +++-----
>   drivers/gpu/drm/amd/amdgpu/si_dma.c        |  8 +++-----
>   9 files changed, 24 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index f85007382093..cf4953c4e2cf 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2779,7 +2779,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>   	adev->mman.buffer_funcs = NULL;
>   	adev->mman.buffer_funcs_ring = NULL;
>   	adev->vm_manager.vm_pte_funcs = NULL;
> -	adev->vm_manager.vm_pte_num_rqs = 0;
> +	adev->vm_manager.vm_pte_num_scheds = 0;
>   	adev->gmc.gmc_funcs = NULL;
>   	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
>   	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 5e78db30c722..0e1ed8ef2ce7 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -2687,7 +2687,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   {
>   	struct amdgpu_bo_param bp;
>   	struct amdgpu_bo *root;
> -	struct drm_gpu_scheduler *sched_list[AMDGPU_MAX_RINGS];
>   	int r, i;
>   
>   	vm->va = RB_ROOT_CACHED;
> @@ -2701,19 +2700,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>   	spin_lock_init(&vm->invalidated_lock);
>   	INIT_LIST_HEAD(&vm->freed);
>   
> -	for (i = 0; i < adev->vm_manager.vm_pte_num_rqs; i++)
> -		sched_list[i] = adev->vm_manager.vm_pte_rqs[i]->sched;
>   
>   	/* create scheduler entities for page table updates */
>   	r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
> -				  sched_list, adev->vm_manager.vm_pte_num_rqs,
> -				  NULL);
> +				  adev->vm_manager.vm_pte_scheds,
> +				  adev->vm_manager.vm_pte_num_scheds, NULL);
>   	if (r)
>   		return r;
>   
>   	r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
> -				  sched_list, adev->vm_manager.vm_pte_num_rqs,
> -				  NULL);
> +				  adev->vm_manager.vm_pte_scheds,
> +				  adev->vm_manager.vm_pte_num_scheds, NULL);
>   	if (r)
>   		goto error_free_direct;
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> index 76fcf853035c..5eaba8645a43 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
> @@ -322,8 +322,8 @@ struct amdgpu_vm_manager {
>   	u64					vram_base_offset;
>   	/* vm pte handling */
>   	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
> -	struct drm_sched_rq			*vm_pte_rqs[AMDGPU_MAX_RINGS];
> -	unsigned				vm_pte_num_rqs;
> +	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
> +	unsigned				vm_pte_num_scheds;
>   	struct amdgpu_ring			*page_fault;
>   
>   	/* partial resident texture handling */
> diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> index 82cdb8f57bfd..1f22a8d0f7f3 100644
> --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
> @@ -1373,16 +1373,14 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
>   
>   static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> -	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
>   	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		sched = &adev->sdma.instance[i].ring.sched;
> -		adev->vm_manager.vm_pte_rqs[i] =
> -			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		adev->vm_manager.vm_pte_scheds[i] =
> +			&adev->sdma.instance[i].ring.sched;
>   	}
> -	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
> +	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
>   }
>   
>   const struct amdgpu_ip_block_version cik_sdma_ip_block =
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> index 89e8c74a40f4..606b621145a1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
> @@ -1261,16 +1261,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
>   
>   static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> -	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
>   	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		sched = &adev->sdma.instance[i].ring.sched;
> -		adev->vm_manager.vm_pte_rqs[i] =
> -			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		adev->vm_manager.vm_pte_scheds[i] =
> +			&adev->sdma.instance[i].ring.sched;
>   	}
> -	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
> +	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
>   }
>   
>   const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> index 011fd12c41fe..a559573ec8fd 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
> @@ -1699,16 +1699,14 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
>   
>   static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> -	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
>   	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		sched = &adev->sdma.instance[i].ring.sched;
> -		adev->vm_manager.vm_pte_rqs[i] =
> -			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		adev->vm_manager.vm_pte_scheds[i] =
> +			 &adev->sdma.instance[i].ring.sched;
>   	}
> -	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
> +	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
>   }
>   
>   const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> index 350b2c99fefc..bd9ed33bab43 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
> @@ -2411,10 +2411,9 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>   			sched = &adev->sdma.instance[i].page.sched;
>   		else
>   			sched = &adev->sdma.instance[i].ring.sched;
> -		adev->vm_manager.vm_pte_rqs[i] =
> -			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		adev->vm_manager.vm_pte_scheds[i] = sched;
>   	}
> -	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
> +	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
>   }
>   
>   const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> index 64c53eed7fac..63f667cfe3f6 100644
> --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
> @@ -1723,17 +1723,15 @@ static const struct amdgpu_vm_pte_funcs sdma_v5_0_vm_pte_funcs = {
>   
>   static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> -	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	if (adev->vm_manager.vm_pte_funcs == NULL) {
>   		adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs;
>   		for (i = 0; i < adev->sdma.num_instances; i++) {
> -			sched = &adev->sdma.instance[i].ring.sched;
> -			adev->vm_manager.vm_pte_rqs[i] =
> -				&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +			adev->vm_manager.vm_pte_scheds[i] =
> +				&adev->sdma.instance[i].ring.sched;
>   		}
> -		adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
> +		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
>   	}
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
> index 122df0732f0c..9ad85eddf9c4 100644
> --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
> +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
> @@ -835,16 +835,14 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
>   
>   static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
>   {
> -	struct drm_gpu_scheduler *sched;
>   	unsigned i;
>   
>   	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
>   	for (i = 0; i < adev->sdma.num_instances; i++) {
> -		sched = &adev->sdma.instance[i].ring.sched;
> -		adev->vm_manager.vm_pte_rqs[i] =
> -			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> +		adev->vm_manager.vm_pte_scheds[i] =
> +			&adev->sdma.instance[i].ring.sched;
>   	}
> -	adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
> +	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
>   }
>   
>   const struct amdgpu_ip_block_version si_dma_ip_block =

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

  reply	other threads:[~2019-12-06 19:37 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-12-06 17:33 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-06 17:33 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
2019-12-06 19:37   ` Christian König [this message]
2019-12-06 17:33 ` [PATCH 3/4] drm/amdgpu: allocate entities on demand Nirmoy Das
2019-12-06 19:40   ` Christian König
2019-12-06 17:33 ` [PATCH 4/4] drm/scheduler: do not keep a copy of sched list Nirmoy Das
2019-12-06 19:41   ` Christian König
2019-12-08 19:57     ` Nirmoy
2019-12-09 12:20       ` Christian König
2019-12-09 13:56         ` Nirmoy
2019-12-09 14:09           ` Nirmoy
2019-12-09 15:37             ` Christian König
2019-12-09 21:53 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-09 21:53 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
2019-12-10 12:52 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-10 12:52 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
2019-12-10 18:17 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-10 18:17 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
2019-12-11 14:24 [PATCH 1/4 v2] drm/scheduler: rework entity creation Nirmoy Das
2019-12-11 14:24 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
2019-12-11 15:42 [PATCH 1/4] drm/scheduler: rework entity creation Nirmoy Das
2019-12-11 15:42 ` [PATCH 2/4] drm/amdgpu: replace vm_pte's run-queue list with drm gpu scheds list Nirmoy Das
2019-12-11 15:42   ` Nirmoy Das

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=edc041fa-5c2a-ddb4-a92e-10f1eafac207@gmail.com \
    --to=ckoenig.leichtzumerken@gmail.com \
    --cc=alexander.deucher@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=christian.koenig@amd.com \
    --cc=kenny.ho@amd.com \
    --cc=nirmoy.aiemd@gmail.com \
    --cc=nirmoy.das@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.