From: "Deucher, Alexander" <Alexander.Deucher@amd.com>
To: "Gao, Likun" <Likun.Gao@amd.com>,
	"amd-gfx@lists.freedesktop.org" <amd-gfx@lists.freedesktop.org>
Cc: "Feng, Kenneth" <Kenneth.Feng@amd.com>,
	"Zhang, Hawking" <Hawking.Zhang@amd.com>
Subject: Re: [PATCH] drm/amdgpu: skip vram operation for BAMACO runtime
Date: Fri, 11 Dec 2020 14:55:10 +0000
Message-ID: <MN2PR12MB4488786228BE12ADC2765418F7CA0@MN2PR12MB4488.namprd12.prod.outlook.com>
In-Reply-To: <20201211090448.113278-1-likun.gao@amd.com>



[AMD Public Use]

Instead of checking the global module parameter at every call site, let's check the runtime pm parameter once and set a per-device flag in adev.  That way we can have a mix of devices that support different runtime pm modes in the same system and everything still works.  Roughly along the lines of the sketch below.
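
A minimal sketch of what I mean (the field name, the init location and the
BACO capability check below are only illustrative placeholders, not existing
code):

    /* e.g. once during device init, after we know how runtime pm will
     * be handled for this particular device */
    adev->runpm_keeps_vram = (amdgpu_runtime_pm == 2) &&
                             amdgpu_device_supports_baco(adev_to_drm(adev));

Each call site then only looks at per-device state:

    /* VRAM contents survive BAMACO runtime suspend, so only evict when
     * this device will actually lose them */
    if (!adev->runpm_keeps_vram || !adev->in_runpm)
            amdgpu_bo_evict_vram(adev);

That keeps the module parameter lookup in one place instead of spreading it
across amdgpu_device.c and amdgpu_psp.c.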

Alex

________________________________
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> on behalf of Likun Gao <likun.gao@amd.com>
Sent: Friday, December 11, 2020 4:04 AM
To: amd-gfx@lists.freedesktop.org <amd-gfx@lists.freedesktop.org>
Cc: Gao, Likun <Likun.Gao@amd.com>; Feng, Kenneth <Kenneth.Feng@amd.com>; Zhang, Hawking <Hawking.Zhang@amd.com>
Subject: [PATCH] drm/amdgpu: skip vram operation for BAMACO runtime

From: Likun Gao <Likun.Gao@amd.com>

Skip VRAM-related operations for BAMACO runtime suspend and resume, as
VRAM stays powered while in BAMACO.
This saves about 32ms on suspend and about 15ms on resume.

Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Change-Id: I6ad39765de5ed1aac2dc51e96ed7a21a727272cd
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  9 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c    | 72 +++++++++++++---------
 2 files changed, 50 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0ec7c28c4d5a..66b790dfb151 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2464,7 +2464,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
         amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
         amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

-       amdgpu_device_fill_reset_magic(adev);
+       if ((amdgpu_runtime_pm != 2) || !adev->in_runpm)
+               amdgpu_device_fill_reset_magic(adev);

         r = amdgpu_device_enable_mgpu_fan_boost();
         if (r)
@@ -3706,7 +3707,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
         amdgpu_amdkfd_suspend(adev, !fbcon);

         /* evict vram memory */
-       amdgpu_bo_evict_vram(adev);
+       if ((amdgpu_runtime_pm != 2) || !adev->in_runpm)
+               amdgpu_bo_evict_vram(adev);

         amdgpu_fence_driver_suspend(adev);

@@ -3718,7 +3720,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
          * This second call to evict vram is to evict the gart page table
          * using the CPU.
          */
-       amdgpu_bo_evict_vram(adev);
+       if ((amdgpu_runtime_pm != 2) || !adev->in_runpm)
+               amdgpu_bo_evict_vram(adev);

         return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 523d22db094b..67e74b43a1ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -397,10 +397,12 @@ static int psp_tmr_init(struct psp_context *psp)
                 }
         }

-       pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-       ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
+       if ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm) {
+               pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+               ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
                                       AMDGPU_GEM_DOMAIN_VRAM,
                                       &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+       }

         return ret;
 }
@@ -504,8 +506,10 @@ static int psp_tmr_terminate(struct psp_context *psp)
                 return ret;

         /* free TMR memory buffer */
-       pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-       amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+       if ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm) {
+               pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+               amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+       }

         return 0;
 }
@@ -795,9 +799,10 @@ int psp_xgmi_terminate(struct psp_context *psp)
         psp->xgmi_context.initialized = 0;

         /* free xgmi shared memory */
-       amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
-                       &psp->xgmi_context.xgmi_shared_mc_addr,
-                       &psp->xgmi_context.xgmi_shared_buf);
+       if ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)
+               amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
+                               &psp->xgmi_context.xgmi_shared_mc_addr,
+                               &psp->xgmi_context.xgmi_shared_buf);

         return 0;
 }
@@ -812,7 +817,8 @@ int psp_xgmi_initialize(struct psp_context *psp)
             !psp->adev->psp.ta_xgmi_start_addr)
                 return -ENOENT;

-       if (!psp->xgmi_context.initialized) {
+       if (!psp->xgmi_context.initialized &&
+           ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)) {
                 ret = psp_xgmi_init_shared_buf(psp);
                 if (ret)
                         return ret;
@@ -1122,9 +1128,10 @@ static int psp_ras_terminate(struct psp_context *psp)
         psp->ras.ras_initialized = false;

         /* free ras shared memory */
-       amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
-                       &psp->ras.ras_shared_mc_addr,
-                       &psp->ras.ras_shared_buf);
+       if ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)
+               amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
+                               &psp->ras.ras_shared_mc_addr,
+                               &psp->ras.ras_shared_buf);

         return 0;
 }
@@ -1145,7 +1152,8 @@ static int psp_ras_initialize(struct psp_context *psp)
                 return 0;
         }

-       if (!psp->ras.ras_initialized) {
+       if (!psp->ras.ras_initialized &&
+           ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)) {
                 ret = psp_ras_init_shared_buf(psp);
                 if (ret)
                         return ret;
@@ -1257,7 +1265,8 @@ static int psp_hdcp_initialize(struct psp_context *psp)
                 return 0;
         }

-       if (!psp->hdcp_context.hdcp_initialized) {
+       if (!psp->hdcp_context.hdcp_initialized &&
+           ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)) {
                 ret = psp_hdcp_init_shared_buf(psp);
                 if (ret)
                         return ret;
@@ -1325,9 +1334,10 @@ static int psp_hdcp_terminate(struct psp_context *psp)
         psp->hdcp_context.hdcp_initialized = false;

         /* free hdcp shared memory */
-       amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
-                             &psp->hdcp_context.hdcp_shared_mc_addr,
-                             &psp->hdcp_context.hdcp_shared_buf);
+       if ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)
+               amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
+                                     &psp->hdcp_context.hdcp_shared_mc_addr,
+                                     &psp->hdcp_context.hdcp_shared_buf);

         return 0;
 }
@@ -1404,7 +1414,8 @@ static int psp_dtm_initialize(struct psp_context *psp)
                 return 0;
         }

-       if (!psp->dtm_context.dtm_initialized) {
+       if (!psp->dtm_context.dtm_initialized &&
+           ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)) {
                 ret = psp_dtm_init_shared_buf(psp);
                 if (ret)
                         return ret;
@@ -1472,9 +1483,10 @@ static int psp_dtm_terminate(struct psp_context *psp)
         psp->dtm_context.dtm_initialized = false;

         /* free hdcp shared memory */
-       amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
-                             &psp->dtm_context.dtm_shared_mc_addr,
-                             &psp->dtm_context.dtm_shared_buf);
+       if ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)
+               amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
+                                     &psp->dtm_context.dtm_shared_mc_addr,
+                                     &psp->dtm_context.dtm_shared_buf);

         return 0;
 }
@@ -1563,7 +1575,8 @@ static int psp_rap_initialize(struct psp_context *psp)
                 return 0;
         }

-       if (!psp->rap_context.rap_initialized) {
+       if (!psp->rap_context.rap_initialized &&
+           ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)) {
                 ret = psp_rap_init_shared_buf(psp);
                 if (ret)
                         return ret;
@@ -1602,9 +1615,10 @@ static int psp_rap_terminate(struct psp_context *psp)
         psp->rap_context.rap_initialized = false;

         /* free rap shared memory */
-       amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
-                             &psp->rap_context.rap_shared_mc_addr,
-                             &psp->rap_context.rap_shared_buf);
+       if ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm)
+               amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
+                                     &psp->rap_context.rap_shared_mc_addr,
+                                     &psp->rap_context.rap_shared_buf);

         return ret;
 }
@@ -2261,10 +2275,12 @@ static int psp_resume(void *handle)

         DRM_INFO("PSP is resuming...\n");

-       ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
-       if (ret) {
-               DRM_ERROR("Failed to process memory training!\n");
-               return ret;
+       if ((amdgpu_runtime_pm != 2) || !psp->adev->in_runpm) {
+               ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+               if (ret) {
+                       DRM_ERROR("Failed to process memory training!\n");
+                       return ret;
+               }
         }

         mutex_lock(&adev->firmware.mutex);
--
2.25.1
