From: Alex Deucher <alexdeucher@gmail.com>
To: Evan Quan <evan.quan@amd.com>
Cc: "Deucher, Alexander" <Alexander.Deucher@amd.com>,
	amd-gfx list <amd-gfx@lists.freedesktop.org>
Subject: Re: [PATCH 1/2] drm/amd/pm: correct gpu metrics related data structures
Date: Mon, 22 Feb 2021 16:45:49 -0500	[thread overview]
Message-ID: <CADnq5_PzDTq-499hdRQ-VL4PCPaWafMS-a-eE-Xg-83gaXrCnw@mail.gmail.com> (raw)
In-Reply-To: <20210222040329.1280956-1-evan.quan@amd.com>

On Sun, Feb 21, 2021 at 11:03 PM Evan Quan <evan.quan@amd.com> wrote:
>
> To make sure the gpu metrics data structures are naturally aligned.
>
> Change-Id: I496a5b79158bdbd2e17f179098939e050b2ad489
> Signed-off-by: Evan Quan <evan.quan@amd.com>
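
For context, "naturally aligned" means each member's offset is a multiple of
its own size. The old header is only 4 bytes, so any 64-bit member that
follows it in the larger gpu_metrics structs would start at offset 4 and
force the compiler to insert hidden padding; the widened header is 8 bytes
and keeps subsequent members aligned. A minimal sketch of the before/after
layout (editorial, not part of the patch; the _old/_new suffixes are just
labels):

  #include <stdint.h>

  /* Before: 2 + 1 + 1 = 4 bytes. A uint64_t placed right after
   * this header would sit at offset 4 -- not 8-byte aligned --
   * so the compiler pads, skewing the advertised structure_size. */
  struct metrics_table_header_old {
          uint16_t structure_size;   /* offset 0 */
          uint8_t  format_revision;  /* offset 2 */
          uint8_t  content_revision; /* offset 3 */
  };

  /* After: 4 + 2 + 2 = 8 bytes; every member offset is a multiple
   * of the member size, and a following uint64_t starts at offset 8
   * with no hidden padding. */
  struct metrics_table_header_new {
          uint32_t structure_size;   /* offset 0 */
          uint16_t format_revision;  /* offset 4 */
          uint16_t content_revision; /* offset 6 */
  };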

Won't this break existing apps that query this info?  We need to make
sure umr and rocm-smi can handle this.

Alex
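
To make the concern above concrete: the metrics table is handed to userspace
verbatim, so a consumer built against the old layout will misparse buffers
produced with the new one. A hedged sketch of the failure mode on a
little-endian machine (hypothetical reader code, not taken from umr or
rocm-smi):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* Userspace view of the header as it was before the patch. */
  struct old_header {
          uint16_t structure_size;
          uint8_t  format_revision;
          uint8_t  content_revision;
  };

  /* A reader compiled against the old 4-byte header, handed a
   * buffer written with the new 8-byte one. */
  static int parse_metrics(const void *buf, size_t len)
  {
          struct old_header hdr;

          if (len < sizeof(hdr))
                  return -1;
          memcpy(&hdr, buf, sizeof(hdr));

          /* With the new layout, bytes 0-3 hold a uint32_t
           * structure_size, so the byte this reader takes as
           * format_revision is really byte 2 of the size field --
           * almost always zero -- and every revision check below
           * takes the wrong branch. */
          if (hdr.format_revision != 1) {
                  fprintf(stderr, "unknown metrics format %u\n",
                          hdr.format_revision);
                  return -1;
          }
          return 0;
  }

Bumping format_revision alongside the layout change would let rebuilt tools
detect the new header, but an old binary cannot even read the revision field
reliably, which is why the tools themselves have to be updated in lockstep.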


> ---
>  drivers/gpu/drm/amd/include/kgd_pp_interface.h        | 11 ++++++-----
>  drivers/gpu/drm/amd/pm/inc/smu_v11_0.h                |  4 ++--
>  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c |  8 ++++----
>  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c |  8 ++++----
>  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c        |  8 ++++----
>  5 files changed, 20 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> index 828513412e20..3a8f64e1a10c 100644
> --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
> @@ -332,9 +332,9 @@ struct amd_pm_funcs {
>  };
>
>  struct metrics_table_header {
> -       uint16_t                        structure_size;
> -       uint8_t                         format_revision;
> -       uint8_t                         content_revision;
> +       uint32_t                        structure_size;
> +       uint16_t                        format_revision;
> +       uint16_t                        content_revision;
>  };
>
>  struct gpu_metrics_v1_0 {
> @@ -385,8 +385,9 @@ struct gpu_metrics_v1_0 {
>         uint16_t                        current_fan_speed;
>
>         /* Link width/speed */
> -       uint8_t                         pcie_link_width;
> -       uint8_t                         pcie_link_speed; // in 0.1 GT/s
> +       uint16_t                        pcie_link_width;
> +       uint16_t                        pcie_link_speed; // in 0.1 GT/s
> +       uint8_t                         padding[2];
>  };
>
>  struct gpu_metrics_v2_0 {
> diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> index 50dd1529b994..f4e7a330f67f 100644
> --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
> @@ -284,11 +284,11 @@ int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
>
>  int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu);
>
> -int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
> +uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu);
>
>  int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu);
>
> -int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
> +uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu);
>
>  int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
>                               bool enablement);
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> index c0753029a8e2..95e905d8418d 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
> @@ -52,8 +52,8 @@
>
>  #define LINK_WIDTH_MAX                         6
>  #define LINK_SPEED_MAX                         3
> -static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> -static int link_speed[] = {25, 50, 80, 160};
> +static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
> +static uint16_t link_speed[] = {25, 50, 80, 160};
>
>  static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
>                 enum pp_clock_type type, uint32_t mask);
> @@ -2117,7 +2117,7 @@ static int vega12_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
>                 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
>  }
>
> -static int vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
> +static uint16_t vega12_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
>  {
>         uint32_t width_level;
>
> @@ -2137,7 +2137,7 @@ static int vega12_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
>                 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
>  }
>
> -static int vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
> +static uint16_t vega12_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
>  {
>         uint32_t speed_level;
>
> diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> index 87811b005b85..3d462405b572 100644
> --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
> @@ -57,8 +57,8 @@
>
>  #define LINK_WIDTH_MAX                         6
>  #define LINK_SPEED_MAX                         3
> -static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> -static int link_speed[] = {25, 50, 80, 160};
> +static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
> +static uint16_t link_speed[] = {25, 50, 80, 160};
>
>  static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
>  {
> @@ -3279,7 +3279,7 @@ static int vega20_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
>                 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
>  }
>
> -static int vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
> +static uint16_t vega20_get_current_pcie_link_width(struct pp_hwmgr *hwmgr)
>  {
>         uint32_t width_level;
>
> @@ -3299,7 +3299,7 @@ static int vega20_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
>                 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
>  }
>
> -static int vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
> +static uint16_t vega20_get_current_pcie_link_speed(struct pp_hwmgr *hwmgr)
>  {
>         uint32_t speed_level;
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> index 60ef63073ad4..86af9832ba9c 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
> @@ -99,8 +99,8 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
>  #define mmCG_THERMAL_STATUS_ARCT               0x90
>  #define mmCG_THERMAL_STATUS_ARCT_BASE_IDX      0
>
> -static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
> -static int link_speed[] = {25, 50, 80, 160};
> +static uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
> +static uint16_t link_speed[] = {25, 50, 80, 160};
>
>  int smu_v11_0_init_microcode(struct smu_context *smu)
>  {
> @@ -2134,7 +2134,7 @@ int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
>                 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
>  }
>
> -int smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
> +uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
>  {
>         uint32_t width_level;
>
> @@ -2154,7 +2154,7 @@ int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
>                 >> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
>  }
>
> -int smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
> +uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
>  {
>         uint32_t speed_level;
>
> --
> 2.29.0
>
