All of lore.kernel.org
 help / color / mirror / Atom feed
From: Lijo Lazar <lijo.lazar@amd.com>
To: David M Nieto <david.nieto@amd.com>, amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm
Date: Wed, 19 May 2021 11:13:55 +0530	[thread overview]
Message-ID: <476f8809-5521-98b4-e08f-1d06fc099468@amd.com> (raw)
In-Reply-To: <20210518040957.23266-3-david.nieto@amd.com>



On 5/18/2021 9:39 AM, David M Nieto wrote:
> Enable displaying DPM levels for VCN clocks
> in swsmu supported ASICs
> 
> Signed-off-by: David M Nieto <david.nieto@amd.com>
> ---
>   .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 46 ++++++++++++++++++
>   .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c   |  4 ++
>   .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c   |  8 ++++
>   .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c   | 38 +++++++++++++++
>   .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    | 48 +++++++++++++++++++
>   5 files changed, 144 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> index 77693bf0840c..1735a96dd307 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
> @@ -822,6 +822,52 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
>   				now) ? "*" : ""));
>   		break;
>   
> +	case SMU_VCLK:
> +		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
> +		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +				i, single_dpm_table->dpm_levels[i].value,
> +				(clocks.num_levels == 1) ? "*" :
> +				(arcturus_freqs_in_same_level(
> +				clocks.data[i].clocks_in_khz / 1000,
> +				now) ? "*" : ""));
> +		break;
> +
> +	case SMU_DCLK:
> +		ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
> +		ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +				i, single_dpm_table->dpm_levels[i].value,
> +				(clocks.num_levels == 1) ? "*" :
> +				(arcturus_freqs_in_same_level(
> +				clocks.data[i].clocks_in_khz / 1000,
> +				now) ? "*" : ""));
> +		break;
> +
>   	case SMU_PCIE:
>   		gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
>   		lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> index 9339fd24ae8c..2e801f2e42a9 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
> @@ -1273,6 +1273,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:
>   	case SMU_DCEFCLK:
>   		ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
>   		if (ret)
> @@ -1444,6 +1446,8 @@ static int navi10_force_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:

This is related to forcing clock levels, but the commit message only 
mentions displaying them. Skip this change or modify the commit message accordingly.

>   		/* There is only 2 levels for fine grained DPM */
>   		if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
>   			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> index 0c40a54c46d7..6da6d08d8858 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
> @@ -987,6 +987,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_VCLK1:
> +	case SMU_DCLK:
> +	case SMU_DCLK1:
>   	case SMU_DCEFCLK:
>   		ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
>   		if (ret)
> @@ -1150,6 +1154,10 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_UCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_VCLK1:
> +	case SMU_DCLK:
> +	case SMU_DCLK1:
>   		/* There is only 2 levels for fine grained DPM */
>   		if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
>   			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> index f43b4c623685..3a6b52b7b647 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
> @@ -109,6 +109,8 @@ static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
>   	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
>   	CLK_MAP(UCLK, CLOCK_FCLK),
>   	CLK_MAP(MCLK, CLOCK_FCLK),
> +	CLK_MAP(VCLK, CLOCK_VCLK),
> +	CLK_MAP(DCLK, CLOCK_DCLK),
>   };
>   
>   static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
> @@ -202,6 +204,17 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
>   			return -EINVAL;
>   		*freq = clk_table->FClocks[dpm_level].Freq;
>   		break;
> +	case SMU_VCLK:
> +		if (dpm_level >= NUM_VCN_DPM_LEVELS)
> +			return -EINVAL;
> +		*freq = clk_table->VClocks[dpm_level].Freq;
> +		break;
> +	case SMU_DCLK:
> +		if (dpm_level >= NUM_VCN_DPM_LEVELS)
> +			return -EINVAL;
> +		*freq = clk_table->DClocks[dpm_level].Freq;
> +		break;
> +
>   	default:
>   		return -EINVAL;
>   	}
> @@ -296,6 +309,8 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
>   		case SMU_UCLK:
>   		case SMU_FCLK:
>   		case SMU_MCLK:
> +		case SMU_VCLK:
> +		case SMU_DCLK:
>   			ret = renoir_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);

Please double-check whether this is the right place for V/D clocks. The 
mclk_mask argument suggests this path applies to memory clock values only.

>   			if (ret)
>   				goto failed;
> @@ -324,6 +339,8 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
>   		case SMU_UCLK:
>   		case SMU_FCLK:
>   		case SMU_MCLK:
> +		case SMU_DCLK:
> +		case SMU_VCLK:
>   			ret = renoir_get_dpm_clk_limited(smu, clk_type, NUM_MEMCLK_DPM_LEVELS - 1, min);

Please double-check whether this is the right place for V/D clocks. 
NUM_MEMCLK_DPM_LEVELS suggests this path applies to memory clock values only.

>   			if (ret)
>   				goto failed;
> @@ -532,6 +549,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
>   		count = NUM_FCLK_DPM_LEVELS;
>   		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
>   		break;
> +	case SMU_VCLK:
> +		count = NUM_VCN_DPM_LEVELS;
> +		cur_value = metrics.ClockFrequency[CLOCK_VCLK];
> +		break;
> +	case SMU_DCLK:
> +		count = NUM_VCN_DPM_LEVELS;
> +		cur_value = metrics.ClockFrequency[CLOCK_DCLK];
> +		break;
>   	default:
>   		break;
>   	}
> @@ -543,6 +568,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_DCEFCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:
>   		for (i = 0; i < count; i++) {
>   			ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
>   			if (ret)
> @@ -730,6 +757,17 @@ static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks
>   		clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
>   	}
>   
> +	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
> +		clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
> +		clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
> +	}
> +
> +	for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
> +		clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
> +		clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
> +	}
> +
> +
>   	return 0;
>   }
>   
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> index 7c191a5d6db9..bc628326776c 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
> @@ -816,6 +816,52 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
>   								       now) ? "*" : ""));
>   		break;
>   
> +	case SMU_VCLK:
> +		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current vclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
> +		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get vclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +					i, single_dpm_table->dpm_levels[i].value,
> +					(clocks.num_levels == 1) ? "*" :
> +					(aldebaran_freqs_in_same_level(
> +								       clocks.data[i].clocks_in_khz / 1000,
> +								       now) ? "*" : ""));
> +		break;
> +
> +	case SMU_DCLK:
> +		ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get current dclk Failed!");
> +			return ret;
> +		}
> +
> +		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
> +		ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
> +		if (ret) {
> +			dev_err(smu->adev->dev, "Attempt to get dclk levels Failed!");
> +			return ret;
> +		}
> +
> +		for (i = 0; i < single_dpm_table->count; i++)
> +			size += sprintf(buf + size, "%d: %uMhz %s\n",
> +					i, single_dpm_table->dpm_levels[i].value,
> +					(clocks.num_levels == 1) ? "*" :
> +					(aldebaran_freqs_in_same_level(
> +								       clocks.data[i].clocks_in_khz / 1000,
> +								       now) ? "*" : ""));
> +		break;
> +
>   	default:
>   		break;
>   	}
> @@ -920,6 +966,8 @@ static int aldebaran_force_clk_levels(struct smu_context *smu,
>   	case SMU_MCLK:
>   	case SMU_SOCCLK:
>   	case SMU_FCLK:
> +	case SMU_VCLK:
> +	case SMU_DCLK:

V/D clock forcing is not applicable on Aldebaran. No need to add 
anything here.

>   		/*
>   		 * Should not arrive here since aldebaran does not
>   		 * support mclk/socclk/fclk softmin/softmax settings
> 

-- 
Thanks,
Lijo
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

  reply	other threads:[~2021-05-19  5:44 UTC|newest]

Thread overview: 19+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-05-14 21:01 [PATCH 1/2] drm/amdgpu/pm: Update metrics table David M Nieto
2021-05-14 21:01 ` [PATCH 2/2] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
2021-05-17  6:28   ` Lazar, Lijo
2021-05-17 20:06     ` Nieto, David M
2021-05-18  4:09     ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
2021-05-18  4:09       ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
2021-05-19  5:35         ` Lijo Lazar
2021-05-18  4:09       ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
2021-05-19  5:43         ` Lijo Lazar [this message]
2021-05-19  6:02           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table David M Nieto
2021-05-19  6:02             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x David M Nieto
2021-05-19 15:43               ` Lijo Lazar
2021-05-19  6:02             ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm David M Nieto
2021-05-19 15:44               ` Lijo Lazar
2021-05-19 15:42             ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table Lijo Lazar
2021-05-19 17:39           ` [PATCH 1/3] drm/amdgpu/pm: Update metrics table (v2) David M Nieto
2021-05-19 17:39             ` [PATCH 2/3] drm/amdgpu/pm: add new fields for Navi1x (v3) David M Nieto
2021-05-19 17:39             ` [PATCH 3/3] drm/amdgpu/pm: display vcn pp dpm (v3) David M Nieto
2021-05-20  5:16           ` [PATCH] drm/amdgpu/pm: display vcn pp dpm (v4) David M Nieto

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=476f8809-5521-98b4-e08f-1d06fc099468@amd.com \
    --to=lijo.lazar@amd.com \
    --cc=amd-gfx@lists.freedesktop.org \
    --cc=david.nieto@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.