From: Leo Liu <leo.liu@amd.com>
To: "Christian König" <ckoenig.leichtzumerken@gmail.com>,
amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Subject: Re: [PATCH 3/3] drm/amdgpu: share scheduler score on VCN3 instances
Date: Thu, 4 Feb 2021 13:34:33 -0500 [thread overview]
Message-ID: <036c900b-df46-5259-dbd2-d882f9a7341b@amd.com> (raw)
In-Reply-To: <20210204144405.2737-3-christian.koenig@amd.com>
The series is:
Reviewed-and-Tested-by: Leo Liu <leo.liu@amd.com>
On 2021-02-04 9:44 a.m., Christian König wrote:
> The VCN3 instances can do both decode as well as encode.
>
> Share the scheduler load balancing score and remove fixing encode to
> only the second instance.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1 +
> drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 11 +++++++----
> 2 files changed, 8 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> index 13aa417f6be7..d10bc4f0a05f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> @@ -211,6 +211,7 @@ struct amdgpu_vcn_inst {
> void *saved_bo;
> struct amdgpu_ring ring_dec;
> struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
> + atomic_t sched_score;
> struct amdgpu_irq_src irq;
> struct amdgpu_vcn_reg external;
> struct amdgpu_bo *dpg_sram_bo;
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> index 239a4eb52c61..b33f513fd2ac 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> @@ -171,6 +171,7 @@ static int vcn_v3_0_sw_init(void *handle)
>
> for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> volatile struct amdgpu_fw_shared *fw_shared;
> +
> if (adev->vcn.harvest_config & (1 << i))
> continue;
>
> @@ -198,6 +199,8 @@ static int vcn_v3_0_sw_init(void *handle)
> if (r)
> return r;
>
> + atomic_set(&adev->vcn.inst[i].sched_score, 0);
> +
> ring = &adev->vcn.inst[i].ring_dec;
> ring->use_doorbell = true;
> if (amdgpu_sriov_vf(adev)) {
> @@ -209,7 +212,8 @@ static int vcn_v3_0_sw_init(void *handle)
> ring->no_scheduler = true;
> sprintf(ring->name, "vcn_dec_%d", i);
> r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
> - AMDGPU_RING_PRIO_DEFAULT, NULL);
> + AMDGPU_RING_PRIO_DEFAULT,
> + &adev->vcn.inst[i].sched_score);
> if (r)
> return r;
>
> @@ -227,11 +231,10 @@ static int vcn_v3_0_sw_init(void *handle)
> } else {
> ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
> }
> - if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
> - ring->no_scheduler = true;
> sprintf(ring->name, "vcn_enc_%d.%d", i, j);
> r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
> - AMDGPU_RING_PRIO_DEFAULT, NULL);
> + AMDGPU_RING_PRIO_DEFAULT,
> + &adev->vcn.inst[i].sched_score);
> if (r)
> return r;
> }
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
WARNING: multiple messages have this Message-ID (diff)
From: Leo Liu <leo.liu@amd.com>
To: "Christian König" <ckoenig.leichtzumerken@gmail.com>,
amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Subject: Re: [PATCH 3/3] drm/amdgpu: share scheduler score on VCN3 instances
Date: Thu, 4 Feb 2021 13:34:33 -0500 [thread overview]
Message-ID: <036c900b-df46-5259-dbd2-d882f9a7341b@amd.com> (raw)
In-Reply-To: <20210204144405.2737-3-christian.koenig@amd.com>
The series is:
Reviewed-and-Tested-by: Leo Liu <leo.liu@amd.com>
On 2021-02-04 9:44 a.m., Christian König wrote:
> The VCN3 instances can do both decode as well as encode.
>
> Share the scheduler load balancing score and remove fixing encode to
> only the second instance.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1 +
> drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 11 +++++++----
> 2 files changed, 8 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> index 13aa417f6be7..d10bc4f0a05f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> @@ -211,6 +211,7 @@ struct amdgpu_vcn_inst {
> void *saved_bo;
> struct amdgpu_ring ring_dec;
> struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
> + atomic_t sched_score;
> struct amdgpu_irq_src irq;
> struct amdgpu_vcn_reg external;
> struct amdgpu_bo *dpg_sram_bo;
> diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> index 239a4eb52c61..b33f513fd2ac 100644
> --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
> @@ -171,6 +171,7 @@ static int vcn_v3_0_sw_init(void *handle)
>
> for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> volatile struct amdgpu_fw_shared *fw_shared;
> +
> if (adev->vcn.harvest_config & (1 << i))
> continue;
>
> @@ -198,6 +199,8 @@ static int vcn_v3_0_sw_init(void *handle)
> if (r)
> return r;
>
> + atomic_set(&adev->vcn.inst[i].sched_score, 0);
> +
> ring = &adev->vcn.inst[i].ring_dec;
> ring->use_doorbell = true;
> if (amdgpu_sriov_vf(adev)) {
> @@ -209,7 +212,8 @@ static int vcn_v3_0_sw_init(void *handle)
> ring->no_scheduler = true;
> sprintf(ring->name, "vcn_dec_%d", i);
> r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
> - AMDGPU_RING_PRIO_DEFAULT, NULL);
> + AMDGPU_RING_PRIO_DEFAULT,
> + &adev->vcn.inst[i].sched_score);
> if (r)
> return r;
>
> @@ -227,11 +231,10 @@ static int vcn_v3_0_sw_init(void *handle)
> } else {
> ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
> }
> - if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
> - ring->no_scheduler = true;
> sprintf(ring->name, "vcn_enc_%d.%d", i, j);
> r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
> - AMDGPU_RING_PRIO_DEFAULT, NULL);
> + AMDGPU_RING_PRIO_DEFAULT,
> + &adev->vcn.inst[i].sched_score);
> if (r)
> return r;
> }
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
next prev parent reply other threads:[~2021-02-04 18:34 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-02-04 14:44 [PATCH 1/3] drm/scheduler: provide scheduler score externally Christian König
2021-02-04 14:44 ` Christian König
2021-02-04 14:44 ` [PATCH 2/3] drm/amdgpu: add the sched_score to amdgpu_ring_init Christian König
2021-02-04 14:44 ` Christian König
2021-02-04 14:44 ` [PATCH 3/3] drm/amdgpu: share scheduler score on VCN3 instances Christian König
2021-02-04 14:44 ` Christian König
2021-02-04 18:34 ` Leo Liu [this message]
2021-02-04 18:34 ` Leo Liu
2021-02-05 9:58 ` Christian König
2021-02-05 9:58 ` Christian König
2021-02-05 14:50 ` Deucher, Alexander
2021-02-05 14:50 ` Deucher, Alexander
2021-02-05 14:53 ` Christian König
2021-02-05 14:53 ` Christian König
2021-02-05 15:09 ` Deucher, Alexander
2021-02-05 15:09 ` Deucher, Alexander
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=036c900b-df46-5259-dbd2-d882f9a7341b@amd.com \
--to=leo.liu@amd.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=ckoenig.leichtzumerken@gmail.com \
--cc=dri-devel@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.