All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/amdgpu: support dpm level modification under virtualization
@ 2019-04-09 15:18 Yintian Tao
       [not found] ` <1554823102-23667-1-git-send-email-yttao-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 11+ messages in thread
From: Yintian Tao @ 2019-04-09 15:18 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Yintian Tao

Under vega10 virtualization, the smu ip block will not be added.
Therefore, we need to add pp clk query and force dpm level functions
at amdgpu_virt_ops to support the feature.

Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
Signed-off-by: Yintian Tao <yttao@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
 7 files changed, 147 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3ff8899..bb0fd5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->virt.vf_errors.lock);
 	hash_init(adev->mn_hash);
 	mutex_init(&adev->lock_reset);
+	mutex_init(&adev->virt.dpm_mutex);
 
 	amdgpu_device_check_arguments(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6190495..1353955 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (adev->pm.dpm_enabled) {
 			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
 			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+		} else if (amdgpu_sriov_vf(adev)) {
+			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
+			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
 		} else {
 			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
 			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 5540259..0162d1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -380,6 +380,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 		goto fail;
 	}
 
+        if (amdgpu_sriov_vf(adev)) {
+                if (amdgim_is_hwperf(adev) &&
+                    adev->virt.ops->force_dpm_level) {
+                        mutex_lock(&adev->pm.mutex);
+                        adev->virt.ops->force_dpm_level(adev, level);
+                        mutex_unlock(&adev->pm.mutex);
+                        return count;
+                } else
+                        return -EINVAL;
+        }
+
 	if (current_level == level)
 		return count;
 
@@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
+	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+	    adev->virt.ops->get_pp_clk)
+		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
 	if (is_support_sw_smu(adev))
 		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 462a04e..ae4b2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 	}
 }
 
+static uint32_t parse_clk(char *buf, bool min)
+{
+        char *ptr = buf;
+        uint32_t clk = 0;
+
+        do {
+                ptr = strchr(ptr, ':');
+                if (!ptr)
+                        break;
+                ptr+=2;
+                clk = simple_strtoul(ptr, NULL, 10);
+        } while (!min);
+
+        return clk * 100;
+}
+
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
+{
+        char buf[512] = {0};
+
+        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
+        return parse_clk(buf, lowest);
+}
+
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
+{
+        char buf[512] = {0};
+
+        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+
+        return parse_clk(buf, lowest);
+}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 722deef..584947b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
 	int (*reset_gpu)(struct amdgpu_device *adev);
 	int (*wait_reset)(struct amdgpu_device *adev);
 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
+	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
 };
 
 /*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
 	/* VRAM LOST by GIM */
 	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+	/* HW PERF SIM in GIM */
+	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
 };
 
 struct amd_sriov_msg_pf2vf_info_header {
@@ -252,6 +256,8 @@ struct amdgpu_virt {
 	struct amdgpu_vf_error_buffer   vf_errors;
 	struct amdgpu_virt_fw_reserve	fw_reserve;
 	uint32_t gim_feature;
+	/* protect DPM events to GIM */
+	struct mutex                    dpm_mutex;
 };
 
 #define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
 #endif
 }
 
+#define amdgim_is_hwperf(adev) \
+	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
@@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
 					unsigned int key,
 					unsigned int chksum);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 73851eb..8dbad49 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
 	xgpu_ai_mailbox_set_valid(adev, false);
 }
 
+static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
+{
+        int r = 0;
+        u32 req, val, size;
+
+        if (!amdgim_is_hwperf(adev) || buf == NULL)
+                return -EBADRQC;
+
+        switch(type) {
+        case PP_SCLK:
+                req = IDH_IRQ_GET_PP_SCLK;
+                break;
+        case PP_MCLK:
+                req = IDH_IRQ_GET_PP_MCLK;
+                break;
+        default:
+                return -EBADRQC;
+        }
+
+        mutex_lock(&adev->virt.dpm_mutex);
+
+        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+        if (!r && adev->fw_vram_usage.va != NULL) {
+                val = RREG32_NO_KIQ(
+                        SOC15_REG_OFFSET(NBIO, 0,
+                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
+                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
+                                val), PAGE_SIZE);
+
+                if (size < PAGE_SIZE)
+                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
+                else
+                        size = 0;
+
+                r = size;
+                goto out;
+        }
+
+        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+        if(r)
+                pr_info("%s DPM request failed",
+                        (type == PP_SCLK)? "SCLK" : "MCLK");
+
+out:
+        mutex_unlock(&adev->virt.dpm_mutex);
+        return r;
+}
+
+static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
+{
+        int r = 0;
+        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
+
+        if (!amdgim_is_hwperf(adev))
+                return -EBADRQC;
+
+        mutex_lock(&adev->virt.dpm_mutex);
+        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
+
+        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+        if (!r)
+                goto out;
+
+        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+        if (!r)
+                pr_info("DPM request failed");
+        else
+                pr_info("Mailbox is broken");
+
+out:
+        mutex_unlock(&adev->virt.dpm_mutex);
+        return r;
+}
+
 static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 					enum idh_request req)
 {
@@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.reset_gpu = xgpu_ai_request_reset,
 	.wait_reset = NULL,
 	.trans_msg = xgpu_ai_mailbox_trans_msg,
+	.get_pp_clk = xgpu_ai_get_pp_clk,
+	.force_dpm_level = xgpu_ai_force_dpm_level,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index b4a9cee..39d151b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,6 +35,10 @@ enum idh_request {
 	IDH_REL_GPU_FINI_ACCESS,
 	IDH_REQ_GPU_RESET_ACCESS,
 
+	IDH_IRQ_FORCE_DPM_LEVEL = 10,
+	IDH_IRQ_GET_PP_SCLK,
+	IDH_IRQ_GET_PP_MCLK,
+
 	IDH_LOG_VF_ERROR       = 200,
 };
 
@@ -43,6 +47,8 @@ enum idh_event {
 	IDH_READY_TO_ACCESS_GPU,
 	IDH_FLR_NOTIFICATION,
 	IDH_FLR_NOTIFICATION_CMPL,
+	IDH_SUCCESS,
+	IDH_FAIL,
 	IDH_EVENT_MAX
 };
 
-- 
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found] ` <1554823102-23667-1-git-send-email-yttao-5C7GfCeVMHo@public.gmane.org>
@ 2019-04-10  8:38   ` Tao, Yintian
       [not found]     ` <MN2PR12MB3039532056AA164C6BC60E94E52E0-rweVpJHSKTr/fpeVZQY9RQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  2019-04-10  9:58   ` Quan, Evan
  2019-04-10 20:54   ` Kuehling, Felix
  2 siblings, 1 reply; 11+ messages in thread
From: Tao, Yintian @ 2019-04-10  8:38 UTC (permalink / raw)
  To: Koenig, Christian; +Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Ping....


Hi Christian 


Can you help review it? Thanks in advance.

Best Regards
Yintian Tao

-----Original Message-----
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Yintian Tao
Sent: Tuesday, April 09, 2019 11:18 PM
To: amd-gfx@lists.freedesktop.org
Cc: Tao, Yintian <Yintian.Tao@amd.com>
Subject: [PATCH] drm/amdgpu: support dpm level modification under virtualization

Under vega10 virtualization, the smu ip block will not be added.
Therefore, we need to add pp clk query and force dpm level functions at amdgpu_virt_ops to support the feature.

Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
Signed-off-by: Yintian Tao <yttao@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
 7 files changed, 147 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3ff8899..bb0fd5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->virt.vf_errors.lock);
 	hash_init(adev->mn_hash);
 	mutex_init(&adev->lock_reset);
+	mutex_init(&adev->virt.dpm_mutex);
 
 	amdgpu_device_check_arguments(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6190495..1353955 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (adev->pm.dpm_enabled) {
 			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
 			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+		} else if (amdgpu_sriov_vf(adev)) {
+			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
+			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
 		} else {
 			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
 			dev_info.max_memory_clock = adev->clock.default_mclk * 10; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 5540259..0162d1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -380,6 +380,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 		goto fail;
 	}
 
+        if (amdgpu_sriov_vf(adev)) {
+                if (amdgim_is_hwperf(adev) &&
+                    adev->virt.ops->force_dpm_level) {
+                        mutex_lock(&adev->pm.mutex);
+                        adev->virt.ops->force_dpm_level(adev, level);
+                        mutex_unlock(&adev->pm.mutex);
+                        return count;
+                } else
+                        return -EINVAL;
+        }
+
 	if (current_level == level)
 		return count;
 
@@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
+	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+	    adev->virt.ops->get_pp_clk)
+		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
 	if (is_support_sw_smu(adev))
 		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 462a04e..ae4b2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 	}
 }
 
+static uint32_t parse_clk(char *buf, bool min) {
+        char *ptr = buf;
+        uint32_t clk = 0;
+
+        do {
+                ptr = strchr(ptr, ':');
+                if (!ptr)
+                        break;
+                ptr+=2;
+                clk = simple_strtoul(ptr, NULL, 10);
+        } while (!min);
+
+        return clk * 100;
+}
+
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest) 
+{
+        char buf[512] = {0};
+
+        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
+        return parse_clk(buf, lowest);
+}
+
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) 
+{
+        char buf[512] = {0};
+
+        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+
+        return parse_clk(buf, lowest);
+}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 722deef..584947b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
 	int (*reset_gpu)(struct amdgpu_device *adev);
 	int (*wait_reset)(struct amdgpu_device *adev);
 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
+	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
 };
 
 /*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
 	/* VRAM LOST by GIM */
 	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+	/* HW PERF SIM in GIM */
+	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
 };
 
 struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct amdgpu_virt {
 	struct amdgpu_vf_error_buffer   vf_errors;
 	struct amdgpu_virt_fw_reserve	fw_reserve;
 	uint32_t gim_feature;
+	/* protect DPM events to GIM */
+	struct mutex                    dpm_mutex;
 };
 
 #define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)  #endif  }
 
+#define amdgim_is_hwperf(adev) \
+	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
 					unsigned int key,
 					unsigned int chksum);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); 
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 73851eb..8dbad49 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
 	xgpu_ai_mailbox_set_valid(adev, false);  }
 
+static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, 
+char *buf) {
+        int r = 0;
+        u32 req, val, size;
+
+        if (!amdgim_is_hwperf(adev) || buf == NULL)
+                return -EBADRQC;
+
+        switch(type) {
+        case PP_SCLK:
+                req = IDH_IRQ_GET_PP_SCLK;
+                break;
+        case PP_MCLK:
+                req = IDH_IRQ_GET_PP_MCLK;
+                break;
+        default:
+                return -EBADRQC;
+        }
+
+        mutex_lock(&adev->virt.dpm_mutex);
+
+        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+        if (!r && adev->fw_vram_usage.va != NULL) {
+                val = RREG32_NO_KIQ(
+                        SOC15_REG_OFFSET(NBIO, 0,
+                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
+                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
+                                val), PAGE_SIZE);
+
+                if (size < PAGE_SIZE)
+                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
+                else
+                        size = 0;
+
+                r = size;
+                goto out;
+        }
+
+        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+        if(r)
+                pr_info("%s DPM request failed",
+                        (type == PP_SCLK)? "SCLK" : "MCLK");
+
+out:
+        mutex_unlock(&adev->virt.dpm_mutex);
+        return r;
+}
+
+static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 
+level) {
+        int r = 0;
+        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
+
+        if (!amdgim_is_hwperf(adev))
+                return -EBADRQC;
+
+        mutex_lock(&adev->virt.dpm_mutex);
+        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
+
+        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+        if (!r)
+                goto out;
+
+        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+        if (!r)
+                pr_info("DPM request failed");
+        else
+                pr_info("Mailbox is broken");
+
+out:
+        mutex_unlock(&adev->virt.dpm_mutex);
+        return r;
+}
+
 static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 					enum idh_request req)
 {
@@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.reset_gpu = xgpu_ai_request_reset,
 	.wait_reset = NULL,
 	.trans_msg = xgpu_ai_mailbox_trans_msg,
+	.get_pp_clk = xgpu_ai_get_pp_clk,
+	.force_dpm_level = xgpu_ai_force_dpm_level,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index b4a9cee..39d151b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,6 +35,10 @@ enum idh_request {
 	IDH_REL_GPU_FINI_ACCESS,
 	IDH_REQ_GPU_RESET_ACCESS,
 
+	IDH_IRQ_FORCE_DPM_LEVEL = 10,
+	IDH_IRQ_GET_PP_SCLK,
+	IDH_IRQ_GET_PP_MCLK,
+
 	IDH_LOG_VF_ERROR       = 200,
 };
 
@@ -43,6 +47,8 @@ enum idh_event {
 	IDH_READY_TO_ACCESS_GPU,
 	IDH_FLR_NOTIFICATION,
 	IDH_FLR_NOTIFICATION_CMPL,
+	IDH_SUCCESS,
+	IDH_FAIL,
 	IDH_EVENT_MAX
 };
 
--
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found]     ` <MN2PR12MB3039532056AA164C6BC60E94E52E0-rweVpJHSKTr/fpeVZQY9RQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2019-04-10  8:43       ` Koenig, Christian
       [not found]         ` <62b84af0-e0c0-452a-9d83-26e221d37a85-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 11+ messages in thread
From: Koenig, Christian @ 2019-04-10  8:43 UTC (permalink / raw)
  To: Tao, Yintian; +Cc: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi Yintian,

sorry but this is power management and that is not something I'm very 
familiar with.

I can only say that at least on first glance the coding style looks good 
to me :)

Probably best to wait for Alex to wake up.

Regards,
Christian.

Am 10.04.19 um 10:38 schrieb Tao, Yintian:
> Ping....
>
>
> Hi Christian
>
>
> Can you help review it? Thanks in advance.
>
> Best Regards
> Yintian Tao
>
> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Yintian Tao
> Sent: Tuesday, April 09, 2019 11:18 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Tao, Yintian <Yintian.Tao@amd.com>
> Subject: [PATCH] drm/amdgpu: support dpm level modification under virtualization
>
> Under vega10 virtualization, the smu ip block will not be added.
> Therefore, we need to add pp clk query and force dpm level functions at amdgpu_virt_ops to support the feature.
>
> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
> Signed-off-by: Yintian Tao <yttao@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78 ++++++++++++++++++++++++++++++
>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
>   7 files changed, 147 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 3ff8899..bb0fd5a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>   	mutex_init(&adev->virt.vf_errors.lock);
>   	hash_init(adev->mn_hash);
>   	mutex_init(&adev->lock_reset);
> +	mutex_init(&adev->virt.dpm_mutex);
>   
>   	amdgpu_device_check_arguments(adev);
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 6190495..1353955 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
>   		if (adev->pm.dpm_enabled) {
>   			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
>   			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
> +		} else if (amdgpu_sriov_vf(adev)) {
> +			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
> +			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
>   		} else {
>   			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
>   			dev_info.max_memory_clock = adev->clock.default_mclk * 10; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index 5540259..0162d1e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -380,6 +380,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
>   		goto fail;
>   	}
>   
> +        if (amdgpu_sriov_vf(adev)) {
> +                if (amdgim_is_hwperf(adev) &&
> +                    adev->virt.ops->force_dpm_level) {
> +                        mutex_lock(&adev->pm.mutex);
> +                        adev->virt.ops->force_dpm_level(adev, level);
> +                        mutex_unlock(&adev->pm.mutex);
> +                        return count;
> +                } else
> +                        return -EINVAL;
> +        }
> +
>   	if (current_level == level)
>   		return count;
>   
> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
>   	struct drm_device *ddev = dev_get_drvdata(dev);
>   	struct amdgpu_device *adev = ddev->dev_private;
>   
> +	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
> +	    adev->virt.ops->get_pp_clk)
> +		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> +
>   	if (is_support_sw_smu(adev))
>   		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
>   	else if (adev->powerplay.pp_funcs->print_clock_levels)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 462a04e..ae4b2a1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
>   	}
>   }
>   
> +static uint32_t parse_clk(char *buf, bool min) {
> +        char *ptr = buf;
> +        uint32_t clk = 0;
> +
> +        do {
> +                ptr = strchr(ptr, ':');
> +                if (!ptr)
> +                        break;
> +                ptr+=2;
> +                clk = simple_strtoul(ptr, NULL, 10);
> +        } while (!min);
> +
> +        return clk * 100;
> +}
> +
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
> +{
> +        char buf[512] = {0};
> +
> +        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> +
> +        return parse_clk(buf, lowest);
> +}
> +
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
> +{
> +        char buf[512] = {0};
> +
> +        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
> +
> +        return parse_clk(buf, lowest);
> +}
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 722deef..584947b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
>   	int (*reset_gpu)(struct amdgpu_device *adev);
>   	int (*wait_reset)(struct amdgpu_device *adev);
>   	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
> +	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
> +	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
>   };
>   
>   /*
> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
>   	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
>   	/* VRAM LOST by GIM */
>   	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
> +	/* HW PERF SIM in GIM */
> +	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
>   };
>   
>   struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct amdgpu_virt {
>   	struct amdgpu_vf_error_buffer   vf_errors;
>   	struct amdgpu_virt_fw_reserve	fw_reserve;
>   	uint32_t gim_feature;
> +	/* protect DPM events to GIM */
> +	struct mutex                    dpm_mutex;
>   };
>   
>   #define amdgpu_sriov_enabled(adev) \
> @@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)  #endif  }
>   
> +#define amdgim_is_hwperf(adev) \
> +	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
> +
>   bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
>   					unsigned int key,
>   					unsigned int chksum);
>   void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
>   
>   #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> index 73851eb..8dbad49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
>   	xgpu_ai_mailbox_set_valid(adev, false);  }
>   
> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type,
> +char *buf) {
> +        int r = 0;
> +        u32 req, val, size;
> +
> +        if (!amdgim_is_hwperf(adev) || buf == NULL)
> +                return -EBADRQC;
> +
> +        switch(type) {
> +        case PP_SCLK:
> +                req = IDH_IRQ_GET_PP_SCLK;
> +                break;
> +        case PP_MCLK:
> +                req = IDH_IRQ_GET_PP_MCLK;
> +                break;
> +        default:
> +                return -EBADRQC;
> +        }
> +
> +        mutex_lock(&adev->virt.dpm_mutex);
> +
> +        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> +        if (!r && adev->fw_vram_usage.va != NULL) {
> +                val = RREG32_NO_KIQ(
> +                        SOC15_REG_OFFSET(NBIO, 0,
> +                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
> +                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
> +                                val), PAGE_SIZE);
> +
> +                if (size < PAGE_SIZE)
> +                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
> +                else
> +                        size = 0;
> +
> +                r = size;
> +                goto out;
> +        }
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> +        if(r)
> +                pr_info("%s DPM request failed",
> +                        (type == PP_SCLK)? "SCLK" : "MCLK");
> +
> +out:
> +        mutex_unlock(&adev->virt.dpm_mutex);
> +        return r;
> +}
> +
> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
> +level) {
> +        int r = 0;
> +        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
> +
> +        if (!amdgim_is_hwperf(adev))
> +                return -EBADRQC;
> +
> +        mutex_lock(&adev->virt.dpm_mutex);
> +        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> +        if (!r)
> +                goto out;
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> +        if (!r)
> +                pr_info("DPM request failed");
> +        else
> +                pr_info("Mailbox is broken");
> +
> +out:
> +        mutex_unlock(&adev->virt.dpm_mutex);
> +        return r;
> +}
> +
>   static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
>   					enum idh_request req)
>   {
> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
>   	.reset_gpu = xgpu_ai_request_reset,
>   	.wait_reset = NULL,
>   	.trans_msg = xgpu_ai_mailbox_trans_msg,
> +	.get_pp_clk = xgpu_ai_get_pp_clk,
> +	.force_dpm_level = xgpu_ai_force_dpm_level,
>   };
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> index b4a9cee..39d151b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> @@ -35,6 +35,10 @@ enum idh_request {
>   	IDH_REL_GPU_FINI_ACCESS,
>   	IDH_REQ_GPU_RESET_ACCESS,
>   
> +	IDH_IRQ_FORCE_DPM_LEVEL = 10,
> +	IDH_IRQ_GET_PP_SCLK,
> +	IDH_IRQ_GET_PP_MCLK,
> +
>   	IDH_LOG_VF_ERROR       = 200,
>   };
>   
> @@ -43,6 +47,8 @@ enum idh_event {
>   	IDH_READY_TO_ACCESS_GPU,
>   	IDH_FLR_NOTIFICATION,
>   	IDH_FLR_NOTIFICATION_CMPL,
> +	IDH_SUCCESS,
> +	IDH_FAIL,
>   	IDH_EVENT_MAX
>   };
>   
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found]         ` <62b84af0-e0c0-452a-9d83-26e221d37a85-5C7GfCeVMHo@public.gmane.org>
@ 2019-04-10  9:07           ` Tao, Yintian
  0 siblings, 0 replies; 11+ messages in thread
From: Tao, Yintian @ 2019-04-10  9:07 UTC (permalink / raw)
  To: Deucher, Alexander
  Cc: Koenig, Christian, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi  Christian

Many thanks. I got it.

Hi  Alex

Can you help review it? Many thanks in advance.


Best Regards
Yintian Tao

-----Original Message-----
From: Koenig, Christian <Christian.Koenig@amd.com> 
Sent: Wednesday, April 10, 2019 4:44 PM
To: Tao, Yintian <Yintian.Tao@amd.com>
Cc: amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization

Hi Yintian,

sorry but this is power management and that is not something I'm very familiar with.

I can only say that at least on first glance the coding style looks good to me :)

Probably best to wait for Alex to wake up.

Regards,
Christian.

Am 10.04.19 um 10:38 schrieb Tao, Yintian:
> Ping....
>
>
> Hi Christian
>
>
> Can you help review it? Thanks in advance.
>
> Best Regards
> Yintian Tao
>
> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of 
> Yintian Tao
> Sent: Tuesday, April 09, 2019 11:18 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Tao, Yintian <Yintian.Tao@amd.com>
> Subject: [PATCH] drm/amdgpu: support dpm level modification under 
> virtualization
>
> Under vega10 virtualization, the smu ip block will not be added.
> Therefore, we need to add a pp clk query and force dpm level function at amdgpu_virt_ops to support the feature.
>
> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
> Signed-off-by: Yintian Tao <yttao@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78 ++++++++++++++++++++++++++++++
>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
>   7 files changed, 147 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 3ff8899..bb0fd5a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
>   	mutex_init(&adev->virt.vf_errors.lock);
>   	hash_init(adev->mn_hash);
>   	mutex_init(&adev->lock_reset);
> +	mutex_init(&adev->virt.dpm_mutex);
>   
>   	amdgpu_device_check_arguments(adev);
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 6190495..1353955 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
>   		if (adev->pm.dpm_enabled) {
>   			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
>   			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 
> 10;
> +		} else if (amdgpu_sriov_vf(adev)) {
> +			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
> +			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 
> +10;
>   		} else {
>   			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
>   			dev_info.max_memory_clock = adev->clock.default_mclk * 10; diff 
> --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index 5540259..0162d1e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -380,6 +380,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
>   		goto fail;
>   	}
>   
> +        if (amdgpu_sriov_vf(adev)) {
> +                if (amdgim_is_hwperf(adev) &&
> +                    adev->virt.ops->force_dpm_level) {
> +                        mutex_lock(&adev->pm.mutex);
> +                        adev->virt.ops->force_dpm_level(adev, level);
> +                        mutex_unlock(&adev->pm.mutex);
> +                        return count;
> +                } else
> +                        return -EINVAL;
> +        }
> +
>   	if (current_level == level)
>   		return count;
>   
> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
>   	struct drm_device *ddev = dev_get_drvdata(dev);
>   	struct amdgpu_device *adev = ddev->dev_private;
>   
> +	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
> +	    adev->virt.ops->get_pp_clk)
> +		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> +
>   	if (is_support_sw_smu(adev))
>   		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
>   	else if (adev->powerplay.pp_funcs->print_clock_levels)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 462a04e..ae4b2a1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
>   	}
>   }
>   
> +static uint32_t parse_clk(char *buf, bool min) {
> +        char *ptr = buf;
> +        uint32_t clk = 0;
> +
> +        do {
> +                ptr = strchr(ptr, ':');
> +                if (!ptr)
> +                        break;
> +                ptr+=2;
> +                clk = simple_strtoul(ptr, NULL, 10);
> +        } while (!min);
> +
> +        return clk * 100;
> +}
> +
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool 
> +lowest) {
> +        char buf[512] = {0};
> +
> +        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> +
> +        return parse_clk(buf, lowest); }
> +
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool 
> +lowest) {
> +        char buf[512] = {0};
> +
> +        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
> +
> +        return parse_clk(buf, lowest); }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h 
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 722deef..584947b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
>   	int (*reset_gpu)(struct amdgpu_device *adev);
>   	int (*wait_reset)(struct amdgpu_device *adev);
>   	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, 
> u32 data2, u32 data3);
> +	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
> +	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
>   };
>   
>   /*
> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
>   	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
>   	/* VRAM LOST by GIM */
>   	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
> +	/* HW PERF SIM in GIM */
> +	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
>   };
>   
>   struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct amdgpu_virt {
>   	struct amdgpu_vf_error_buffer   vf_errors;
>   	struct amdgpu_virt_fw_reserve	fw_reserve;
>   	uint32_t gim_feature;
> +	/* protect DPM events to GIM */
> +	struct mutex                    dpm_mutex;
>   };
>   
>   #define amdgpu_sriov_enabled(adev) \ @@ -278,6 +284,9 @@ static 
> inline bool is_virtual_machine(void)  #endif  }
>   
> +#define amdgim_is_hwperf(adev) \
> +	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
> +
>   bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
>   					unsigned int key,
>   					unsigned int chksum);
>   void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool 
> +lowest); uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, 
> +bool lowest);
>   
>   #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c 
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> index 73851eb..8dbad49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
>   	xgpu_ai_mailbox_set_valid(adev, false);  }
>   
> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, 
> +char *buf) {
> +        int r = 0;
> +        u32 req, val, size;
> +
> +        if (!amdgim_is_hwperf(adev) || buf == NULL)
> +                return -EBADRQC;
> +
> +        switch(type) {
> +        case PP_SCLK:
> +                req = IDH_IRQ_GET_PP_SCLK;
> +                break;
> +        case PP_MCLK:
> +                req = IDH_IRQ_GET_PP_MCLK;
> +                break;
> +        default:
> +                return -EBADRQC;
> +        }
> +
> +        mutex_lock(&adev->virt.dpm_mutex);
> +
> +        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> +        if (!r && adev->fw_vram_usage.va != NULL) {
> +                val = RREG32_NO_KIQ(
> +                        SOC15_REG_OFFSET(NBIO, 0,
> +                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
> +                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
> +                                val), PAGE_SIZE);
> +
> +                if (size < PAGE_SIZE)
> +                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
> +                else
> +                        size = 0;
> +
> +                r = size;
> +                goto out;
> +        }
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> +        if(r)
> +                pr_info("%s DPM request failed",
> +                        (type == PP_SCLK)? "SCLK" : "MCLK");
> +
> +out:
> +        mutex_unlock(&adev->virt.dpm_mutex);
> +        return r;
> +}
> +
> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
> +level) {
> +        int r = 0;
> +        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
> +
> +        if (!amdgim_is_hwperf(adev))
> +                return -EBADRQC;
> +
> +        mutex_lock(&adev->virt.dpm_mutex);
> +        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> +        if (!r)
> +                goto out;
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> +        if (!r)
> +                pr_info("DPM request failed");
> +        else
> +                pr_info("Mailbox is broken");
> +
> +out:
> +        mutex_unlock(&adev->virt.dpm_mutex);
> +        return r;
> +}
> +
>   static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
>   					enum idh_request req)
>   {
> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
>   	.reset_gpu = xgpu_ai_request_reset,
>   	.wait_reset = NULL,
>   	.trans_msg = xgpu_ai_mailbox_trans_msg,
> +	.get_pp_clk = xgpu_ai_get_pp_clk,
> +	.force_dpm_level = xgpu_ai_force_dpm_level,
>   };
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h 
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> index b4a9cee..39d151b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> @@ -35,6 +35,10 @@ enum idh_request {
>   	IDH_REL_GPU_FINI_ACCESS,
>   	IDH_REQ_GPU_RESET_ACCESS,
>   
> +	IDH_IRQ_FORCE_DPM_LEVEL = 10,
> +	IDH_IRQ_GET_PP_SCLK,
> +	IDH_IRQ_GET_PP_MCLK,
> +
>   	IDH_LOG_VF_ERROR       = 200,
>   };
>   
> @@ -43,6 +47,8 @@ enum idh_event {
>   	IDH_READY_TO_ACCESS_GPU,
>   	IDH_FLR_NOTIFICATION,
>   	IDH_FLR_NOTIFICATION_CMPL,
> +	IDH_SUCCESS,
> +	IDH_FAIL,
>   	IDH_EVENT_MAX
>   };
>   
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found] ` <1554823102-23667-1-git-send-email-yttao-5C7GfCeVMHo@public.gmane.org>
  2019-04-10  8:38   ` Tao, Yintian
@ 2019-04-10  9:58   ` Quan, Evan
       [not found]     ` <MN2PR12MB3344FC85F11263060FDD478AE42E0-rweVpJHSKToDMgCC8P//OwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  2019-04-10 20:54   ` Kuehling, Felix
  2 siblings, 1 reply; 11+ messages in thread
From: Quan, Evan @ 2019-04-10  9:58 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tao, Yintian



> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of
> Yintian Tao
> Sent: 2019年4月9日 23:18
> To: amd-gfx@lists.freedesktop.org
> Cc: Tao, Yintian <Yintian.Tao@amd.com>
> Subject: [PATCH] drm/amdgpu: support dpm level modification under
> virtualization
> 
> Under vega10 virtualization, the smu ip block will not be added.
> Therefore, we need to add a pp clk query and force dpm level function at
> amdgpu_virt_ops to support the feature.
> 
> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
> Signed-off-by: Yintian Tao <yttao@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
>  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78
> ++++++++++++++++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
>  7 files changed, 147 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 3ff8899..bb0fd5a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device
> *adev,
>  	mutex_init(&adev->virt.vf_errors.lock);
>  	hash_init(adev->mn_hash);
>  	mutex_init(&adev->lock_reset);
> +	mutex_init(&adev->virt.dpm_mutex);
> 
>  	amdgpu_device_check_arguments(adev);
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 6190495..1353955 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev,
> void *data, struct drm_file
>  		if (adev->pm.dpm_enabled) {
>  			dev_info.max_engine_clock =
> amdgpu_dpm_get_sclk(adev, false) * 10;
>  			dev_info.max_memory_clock =
> amdgpu_dpm_get_mclk(adev, false) * 10;
> +		} else if (amdgpu_sriov_vf(adev)) {
> +			dev_info.max_engine_clock =
> amdgpu_virt_get_sclk(adev, false) * 10;
> +			dev_info.max_memory_clock =
> amdgpu_virt_get_mclk(adev, false) * 10;
>  		} else {
>  			dev_info.max_engine_clock = adev-
> >clock.default_sclk * 10;
>  			dev_info.max_memory_clock = adev-
> >clock.default_mclk * 10; diff --git
> a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index 5540259..0162d1e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -380,6 +380,17 @@ static ssize_t
> amdgpu_set_dpm_forced_performance_level(struct device *dev,
>  		goto fail;
>  	}
> 
> +        if (amdgpu_sriov_vf(adev)) {
> +                if (amdgim_is_hwperf(adev) &&
> +                    adev->virt.ops->force_dpm_level) {
> +                        mutex_lock(&adev->pm.mutex);
> +                        adev->virt.ops->force_dpm_level(adev, level);
> +                        mutex_unlock(&adev->pm.mutex);
> +                        return count;
> +                } else
> +                        return -EINVAL;
> +        }
> +
>  	if (current_level == level)
>  		return count;
> 
> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct
> device *dev,
>  	struct drm_device *ddev = dev_get_drvdata(dev);
>  	struct amdgpu_device *adev = ddev->dev_private;
> 
> +	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
> +	    adev->virt.ops->get_pp_clk)
> +		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> +
>  	if (is_support_sw_smu(adev))
>  		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
>  	else if (adev->powerplay.pp_funcs->print_clock_levels)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 462a04e..ae4b2a1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct
> amdgpu_device *adev)
>  	}
>  }
> 
> +static uint32_t parse_clk(char *buf, bool min) {
> +        char *ptr = buf;
> +        uint32_t clk = 0;
> +
> +        do {
> +                ptr = strchr(ptr, ':');
> +                if (!ptr)
> +                        break;
> +                ptr+=2;
> +                clk = simple_strtoul(ptr, NULL, 10);
> +        } while (!min);
> +
> +        return clk * 100;
> +}
> +
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
> +{
> +        char buf[512] = {0};
[Quan, Evan] In  xgpu_ai_get_pp_clk, the max  buffer size seems PAGE_SIZE. Maybe this should be aligned with that.
> +
> +        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
[Quan, Evan] Better to check existence of adev->virt.ops->get_pp_clk before dereference it. As what you did in amdgpu_set_dpm_forced_performance_level.
> +
> +        return parse_clk(buf, lowest);
> +}
> +
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
> +{
> +        char buf[512] = {0};
> +
> +        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
> +
> +        return parse_clk(buf, lowest);
> +}
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 722deef..584947b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
>  	int (*reset_gpu)(struct amdgpu_device *adev);
>  	int (*wait_reset)(struct amdgpu_device *adev);
>  	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1,
> u32 data2, u32 data3);
> +	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
> +	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
>  };
> 
>  /*
> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
>  	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
>  	/* VRAM LOST by GIM */
>  	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
> +	/* HW PERF SIM in GIM */
> +	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
>  };
> 
>  struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct
> amdgpu_virt {
>  	struct amdgpu_vf_error_buffer   vf_errors;
>  	struct amdgpu_virt_fw_reserve	fw_reserve;
>  	uint32_t gim_feature;
> +	/* protect DPM events to GIM */
> +	struct mutex                    dpm_mutex;
>  };
> 
>  #define amdgpu_sriov_enabled(adev) \
> @@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)  #endif  }
> 
> +#define amdgim_is_hwperf(adev) \
> +	((adev)->virt.gim_feature &
> AMDGIM_FEATURE_HW_PERF_SIMULATION)
> +
>  bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void
> amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t
> amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -
> 295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
> unsigned long obj_size,
>  					unsigned int key,
>  					unsigned int chksum);
>  void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
> 
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> index 73851eb..8dbad49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct
> amdgpu_device *adev,
>  	xgpu_ai_mailbox_set_valid(adev, false);  }
> 
> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type,
> +char *buf) {
> +        int r = 0;
> +        u32 req, val, size;
> +
> +        if (!amdgim_is_hwperf(adev) || buf == NULL)
> +                return -EBADRQC;
> +
> +        switch(type) {
> +        case PP_SCLK:
> +                req = IDH_IRQ_GET_PP_SCLK;
> +                break;
> +        case PP_MCLK:
> +                req = IDH_IRQ_GET_PP_MCLK;
> +                break;
> +        default:
> +                return -EBADRQC;
> +        }
> +
> +        mutex_lock(&adev->virt.dpm_mutex);
> +
> +        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> +        if (!r && adev->fw_vram_usage.va != NULL) {
> +                val = RREG32_NO_KIQ(
> +                        SOC15_REG_OFFSET(NBIO, 0,
> +                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
> +                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
> +                                val), PAGE_SIZE);
> +
> +                if (size < PAGE_SIZE)
> +                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
> +                else
> +                        size = 0;
> +
> +                r = size;
> +                goto out;
> +        }
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> +        if(r)
> +                pr_info("%s DPM request failed",
> +                        (type == PP_SCLK)? "SCLK" : "MCLK");
> +
> +out:
> +        mutex_unlock(&adev->virt.dpm_mutex);
> +        return r;
> +}
> +
> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
> +level) {
> +        int r = 0;
> +        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
> +
> +        if (!amdgim_is_hwperf(adev))
> +                return -EBADRQC;
> +
> +        mutex_lock(&adev->virt.dpm_mutex);
> +        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> +        if (!r)
> +                goto out;
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> +        if (!r)
> +                pr_info("DPM request failed");
> +        else
> +                pr_info("Mailbox is broken");
> +
> +out:
> +        mutex_unlock(&adev->virt.dpm_mutex);
> +        return r;
> +}
> +
>  static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
>  					enum idh_request req)
>  {
> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
>  	.reset_gpu = xgpu_ai_request_reset,
>  	.wait_reset = NULL,
>  	.trans_msg = xgpu_ai_mailbox_trans_msg,
> +	.get_pp_clk = xgpu_ai_get_pp_clk,
> +	.force_dpm_level = xgpu_ai_force_dpm_level,
>  };
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> index b4a9cee..39d151b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> @@ -35,6 +35,10 @@ enum idh_request {
>  	IDH_REL_GPU_FINI_ACCESS,
>  	IDH_REQ_GPU_RESET_ACCESS,
> 
> +	IDH_IRQ_FORCE_DPM_LEVEL = 10,
> +	IDH_IRQ_GET_PP_SCLK,
> +	IDH_IRQ_GET_PP_MCLK,
> +
>  	IDH_LOG_VF_ERROR       = 200,
>  };
> 
> @@ -43,6 +47,8 @@ enum idh_event {
>  	IDH_READY_TO_ACCESS_GPU,
>  	IDH_FLR_NOTIFICATION,
>  	IDH_FLR_NOTIFICATION_CMPL,
> +	IDH_SUCCESS,
> +	IDH_FAIL,
>  	IDH_EVENT_MAX
>  };
> 
> --
> 2.7.4
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found]     ` <MN2PR12MB3344FC85F11263060FDD478AE42E0-rweVpJHSKToDMgCC8P//OwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2019-04-10 10:00       ` Tao, Yintian
  2019-04-10 10:32       ` Christian König
  1 sibling, 0 replies; 11+ messages in thread
From: Tao, Yintian @ 2019-04-10 10:00 UTC (permalink / raw)
  To: Quan, Evan, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi  Evan 

Many thanks for your review.

I will submit a v2 patch according to your comments.

Best Regards
Yintian Tao

-----Original Message-----
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Quan, Evan
Sent: Wednesday, April 10, 2019 5:58 PM
To: Tao, Yintian <Yintian.Tao@amd.com>; amd-gfx@lists.freedesktop.org
Cc: Tao, Yintian <Yintian.Tao@amd.com>
Subject: RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization



> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of 
> Yintian Tao
> Sent: 2019年4月9日 23:18
> To: amd-gfx@lists.freedesktop.org
> Cc: Tao, Yintian <Yintian.Tao@amd.com>
> Subject: [PATCH] drm/amdgpu: support dpm level modification under 
> virtualization
> 
> Under vega10 virtualization, the smu ip block will not be added.
> Therefore, we need to add a pp clk query and force dpm level function at 
> amdgpu_virt_ops to support the feature.
> 
> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
> Signed-off-by: Yintian Tao <yttao@amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
>  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
>  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
>  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78
> ++++++++++++++++++++++++++++++
>  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
>  7 files changed, 147 insertions(+)
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 3ff8899..bb0fd5a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device 
> *adev,
>  	mutex_init(&adev->virt.vf_errors.lock);
>  	hash_init(adev->mn_hash);
>  	mutex_init(&adev->lock_reset);
> +	mutex_init(&adev->virt.dpm_mutex);
> 
>  	amdgpu_device_check_arguments(adev);
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 6190495..1353955 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device 
> *dev, void *data, struct drm_file
>  		if (adev->pm.dpm_enabled) {
>  			dev_info.max_engine_clock =
> amdgpu_dpm_get_sclk(adev, false) * 10;
>  			dev_info.max_memory_clock =
> amdgpu_dpm_get_mclk(adev, false) * 10;
> +		} else if (amdgpu_sriov_vf(adev)) {
> +			dev_info.max_engine_clock =
> amdgpu_virt_get_sclk(adev, false) * 10;
> +			dev_info.max_memory_clock =
> amdgpu_virt_get_mclk(adev, false) * 10;
>  		} else {
>  			dev_info.max_engine_clock = adev-
> >clock.default_sclk * 10;
>  			dev_info.max_memory_clock = adev-
> >clock.default_mclk * 10; diff --git
> a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index 5540259..0162d1e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -380,6 +380,17 @@ static ssize_t
> amdgpu_set_dpm_forced_performance_level(struct device *dev,
>  		goto fail;
>  	}
> 
> +        if (amdgpu_sriov_vf(adev)) {
> +                if (amdgim_is_hwperf(adev) &&
> +                    adev->virt.ops->force_dpm_level) {
> +                        mutex_lock(&adev->pm.mutex);
> +                        adev->virt.ops->force_dpm_level(adev, level);
> +                        mutex_unlock(&adev->pm.mutex);
> +                        return count;
> +                } else
> +                        return -EINVAL;
> +        }
> +
>  	if (current_level == level)
>  		return count;
> 
> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct 
> device *dev,
>  	struct drm_device *ddev = dev_get_drvdata(dev);
>  	struct amdgpu_device *adev = ddev->dev_private;
> 
> +	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
> +	    adev->virt.ops->get_pp_clk)
> +		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> +
>  	if (is_support_sw_smu(adev))
>  		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
>  	else if (adev->powerplay.pp_funcs->print_clock_levels)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 462a04e..ae4b2a1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct
> amdgpu_device *adev)
>  	}
>  }
> 
> +static uint32_t parse_clk(char *buf, bool min) {
> +        char *ptr = buf;
> +        uint32_t clk = 0;
> +
> +        do {
> +                ptr = strchr(ptr, ':');
> +                if (!ptr)
> +                        break;
> +                ptr+=2;
> +                clk = simple_strtoul(ptr, NULL, 10);
> +        } while (!min);
> +
> +        return clk * 100;
> +}
> +
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool 
> +lowest) {
> +        char buf[512] = {0};
[Quan, Evan] In  xgpu_ai_get_pp_clk, the max  buffer size seems PAGE_SIZE. Maybe this should be aligned with that.
> +
> +        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
[Quan, Evan] Better to check existence of adev->virt.ops->get_pp_clk before dereference it. As what you did in amdgpu_set_dpm_forced_performance_level.
> +
> +        return parse_clk(buf, lowest); }
> +
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool 
> +lowest) {
> +        char buf[512] = {0};
> +
> +        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
> +
> +        return parse_clk(buf, lowest); }
> 
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 722deef..584947b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
>  	int (*reset_gpu)(struct amdgpu_device *adev);
>  	int (*wait_reset)(struct amdgpu_device *adev);
>  	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1,
> u32 data2, u32 data3);
> +	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
> +	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
>  };
> 
>  /*
> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
>  	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
>  	/* VRAM LOST by GIM */
>  	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
> +	/* HW PERF SIM in GIM */
> +	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
>  };
> 
>  struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct 
> amdgpu_virt {
>  	struct amdgpu_vf_error_buffer   vf_errors;
>  	struct amdgpu_virt_fw_reserve	fw_reserve;
>  	uint32_t gim_feature;
> +	/* protect DPM events to GIM */
> +	struct mutex                    dpm_mutex;
>  };
> 
>  #define amdgpu_sriov_enabled(adev) \
> @@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)  
> #endif  }
> 
> +#define amdgim_is_hwperf(adev) \
> +	((adev)->virt.gim_feature &
> AMDGIM_FEATURE_HW_PERF_SIMULATION)
> +
>  bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void 
> amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t 
> amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -
> 295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, 
> unsigned long obj_size,
>  					unsigned int key,
>  					unsigned int chksum);
>  void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool 
> +lowest); uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, 
> +bool lowest);
> 
>  #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> index 73851eb..8dbad49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct 
> amdgpu_device *adev,
>  	xgpu_ai_mailbox_set_valid(adev, false);  }
> 
> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, 
> +char *buf) {
> +        int r = 0;
> +        u32 req, val, size;
> +
> +        if (!amdgim_is_hwperf(adev) || buf == NULL)
> +                return -EBADRQC;
> +
> +        switch(type) {
> +        case PP_SCLK:
> +                req = IDH_IRQ_GET_PP_SCLK;
> +                break;
> +        case PP_MCLK:
> +                req = IDH_IRQ_GET_PP_MCLK;
> +                break;
> +        default:
> +                return -EBADRQC;
> +        }
> +
> +        mutex_lock(&adev->virt.dpm_mutex);
> +
> +        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> +        if (!r && adev->fw_vram_usage.va != NULL) {
> +                val = RREG32_NO_KIQ(
> +                        SOC15_REG_OFFSET(NBIO, 0,
> +                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
> +                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
> +                                val), PAGE_SIZE);
> +
> +                if (size < PAGE_SIZE)
> +                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
> +                else
> +                        size = 0;
> +
> +                r = size;
> +                goto out;
> +        }
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> +        if(r)
> +                pr_info("%s DPM request failed",
> +                        (type == PP_SCLK)? "SCLK" : "MCLK");
> +
> +out:
> +        mutex_unlock(&adev->virt.dpm_mutex);
> +        return r;
> +}
> +
> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
> +level) {
> +        int r = 0;
> +        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
> +
> +        if (!amdgim_is_hwperf(adev))
> +                return -EBADRQC;
> +
> +        mutex_lock(&adev->virt.dpm_mutex);
> +        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> +        if (!r)
> +                goto out;
> +
> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> +        if (!r)
> +                pr_info("DPM request failed");
> +        else
> +                pr_info("Mailbox is broken");
> +
> +out:
> +        mutex_unlock(&adev->virt.dpm_mutex);
> +        return r;
> +}
> +
>  static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
>  					enum idh_request req)
>  {
> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
>  	.reset_gpu = xgpu_ai_request_reset,
>  	.wait_reset = NULL,
>  	.trans_msg = xgpu_ai_mailbox_trans_msg,
> +	.get_pp_clk = xgpu_ai_get_pp_clk,
> +	.force_dpm_level = xgpu_ai_force_dpm_level,
>  };
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> index b4a9cee..39d151b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> @@ -35,6 +35,10 @@ enum idh_request {
>  	IDH_REL_GPU_FINI_ACCESS,
>  	IDH_REQ_GPU_RESET_ACCESS,
> 
> +	IDH_IRQ_FORCE_DPM_LEVEL = 10,
> +	IDH_IRQ_GET_PP_SCLK,
> +	IDH_IRQ_GET_PP_MCLK,
> +
>  	IDH_LOG_VF_ERROR       = 200,
>  };
> 
> @@ -43,6 +47,8 @@ enum idh_event {
>  	IDH_READY_TO_ACCESS_GPU,
>  	IDH_FLR_NOTIFICATION,
>  	IDH_FLR_NOTIFICATION_CMPL,
> +	IDH_SUCCESS,
> +	IDH_FAIL,
>  	IDH_EVENT_MAX
>  };
> 
> --
> 2.7.4
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found]     ` <MN2PR12MB3344FC85F11263060FDD478AE42E0-rweVpJHSKToDMgCC8P//OwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  2019-04-10 10:00       ` Tao, Yintian
@ 2019-04-10 10:32       ` Christian König
       [not found]         ` <fd0591d3-18b0-d08c-b6d3-ecedae79f568-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
  1 sibling, 1 reply; 11+ messages in thread
From: Christian König @ 2019-04-10 10:32 UTC (permalink / raw)
  To: Quan, Evan, Tao, Yintian, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Am 10.04.19 um 11:58 schrieb Quan, Evan:
>
>> -----Original Message-----
>> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of
>> Yintian Tao
>> Sent: 2019年4月9日 23:18
>> To: amd-gfx@lists.freedesktop.org
>> Cc: Tao, Yintian <Yintian.Tao@amd.com>
>> Subject: [PATCH] drm/amdgpu: support dpm level modification under
>> virtualization
>>
>> Under vega10 virtualization, smu ip block will not be added.
>> Therefore, we need to add pp clk query and force dpm level function at
>> amdgpu_virt_ops to support the feature.
>>
>> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
>> Signed-off-by: Yintian Tao <yttao@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
>>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78
>> ++++++++++++++++++++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
>>   7 files changed, 147 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 3ff8899..bb0fd5a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device
>> *adev,
>>   	mutex_init(&adev->virt.vf_errors.lock);
>>   	hash_init(adev->mn_hash);
>>   	mutex_init(&adev->lock_reset);
>> +	mutex_init(&adev->virt.dpm_mutex);
>>
>>   	amdgpu_device_check_arguments(adev);
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> index 6190495..1353955 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> @@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev,
>> void *data, struct drm_file
>>   		if (adev->pm.dpm_enabled) {
>>   			dev_info.max_engine_clock =
>> amdgpu_dpm_get_sclk(adev, false) * 10;
>>   			dev_info.max_memory_clock =
>> amdgpu_dpm_get_mclk(adev, false) * 10;
>> +		} else if (amdgpu_sriov_vf(adev)) {
>> +			dev_info.max_engine_clock =
>> amdgpu_virt_get_sclk(adev, false) * 10;
>> +			dev_info.max_memory_clock =
>> amdgpu_virt_get_mclk(adev, false) * 10;
>>   		} else {
>>   			dev_info.max_engine_clock = adev-
>>> clock.default_sclk * 10;
>>   			dev_info.max_memory_clock = adev-
>>> clock.default_mclk * 10; diff --git
>> a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> index 5540259..0162d1e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> @@ -380,6 +380,17 @@ static ssize_t
>> amdgpu_set_dpm_forced_performance_level(struct device *dev,
>>   		goto fail;
>>   	}
>>
>> +        if (amdgpu_sriov_vf(adev)) {
>> +                if (amdgim_is_hwperf(adev) &&
>> +                    adev->virt.ops->force_dpm_level) {
>> +                        mutex_lock(&adev->pm.mutex);
>> +                        adev->virt.ops->force_dpm_level(adev, level);
>> +                        mutex_unlock(&adev->pm.mutex);
>> +                        return count;
>> +                } else
>> +                        return -EINVAL;
>> +        }
>> +
>>   	if (current_level == level)
>>   		return count;
>>
>> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct
>> device *dev,
>>   	struct drm_device *ddev = dev_get_drvdata(dev);
>>   	struct amdgpu_device *adev = ddev->dev_private;
>>
>> +	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
>> +	    adev->virt.ops->get_pp_clk)
>> +		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
>> +
>>   	if (is_support_sw_smu(adev))
>>   		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
>>   	else if (adev->powerplay.pp_funcs->print_clock_levels)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> index 462a04e..ae4b2a1 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> @@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct
>> amdgpu_device *adev)
>>   	}
>>   }
>>
>> +static uint32_t parse_clk(char *buf, bool min) {
>> +        char *ptr = buf;
>> +        uint32_t clk = 0;
>> +
>> +        do {
>> +                ptr = strchr(ptr, ':');
>> +                if (!ptr)
>> +                        break;
>> +                ptr+=2;
>> +                clk = simple_strtoul(ptr, NULL, 10);
>> +        } while (!min);
>> +
>> +        return clk * 100;
>> +}
>> +
>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
>> +{
>> +        char buf[512] = {0};
> [Quan, Evan] In  xgpu_ai_get_pp_clk, the max  buffer size seems PAGE_SIZE. Maybe this should be aligned with that.

Well at least on the stack that would be a really bad idea. Where have 
you seen that?

Saying so 512 is a bit large as well and additional to that please stop 
initializing things with "{0}", use memset for that.

Christian.

>> +
>> +        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> [Quan, Evan] Better to check existence of adev->virt.ops->get_pp_clk before dereference it. As what you did in amdgpu_set_dpm_forced_performance_level.
>> +
>> +        return parse_clk(buf, lowest);
>> +}
>> +
>> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
>> +{
>> +        char buf[512] = {0};
>> +
>> +        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
>> +
>> +        return parse_clk(buf, lowest);
>> +}
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> index 722deef..584947b 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
>>   	int (*reset_gpu)(struct amdgpu_device *adev);
>>   	int (*wait_reset)(struct amdgpu_device *adev);
>>   	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1,
>> u32 data2, u32 data3);
>> +	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
>> +	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
>>   };
>>
>>   /*
>> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
>>   	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
>>   	/* VRAM LOST by GIM */
>>   	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
>> +	/* HW PERF SIM in GIM */
>> +	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
>>   };
>>
>>   struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct
>> amdgpu_virt {
>>   	struct amdgpu_vf_error_buffer   vf_errors;
>>   	struct amdgpu_virt_fw_reserve	fw_reserve;
>>   	uint32_t gim_feature;
>> +	/* protect DPM events to GIM */
>> +	struct mutex                    dpm_mutex;
>>   };
>>
>>   #define amdgpu_sriov_enabled(adev) \
>> @@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)  #endif  }
>>
>> +#define amdgim_is_hwperf(adev) \
>> +	((adev)->virt.gim_feature &
>> AMDGIM_FEATURE_HW_PERF_SIMULATION)
>> +
>>   bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void
>> amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t
>> amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -
>> 295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
>> unsigned long obj_size,
>>   					unsigned int key,
>>   					unsigned int chksum);
>>   void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
>> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
>>
>>   #endif
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> index 73851eb..8dbad49 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct
>> amdgpu_device *adev,
>>   	xgpu_ai_mailbox_set_valid(adev, false);  }
>>
>> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type,
>> +char *buf) {
>> +        int r = 0;
>> +        u32 req, val, size;
>> +
>> +        if (!amdgim_is_hwperf(adev) || buf == NULL)
>> +                return -EBADRQC;
>> +
>> +        switch(type) {
>> +        case PP_SCLK:
>> +                req = IDH_IRQ_GET_PP_SCLK;
>> +                break;
>> +        case PP_MCLK:
>> +                req = IDH_IRQ_GET_PP_MCLK;
>> +                break;
>> +        default:
>> +                return -EBADRQC;
>> +        }
>> +
>> +        mutex_lock(&adev->virt.dpm_mutex);
>> +
>> +        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>> +        if (!r && adev->fw_vram_usage.va != NULL) {
>> +                val = RREG32_NO_KIQ(
>> +                        SOC15_REG_OFFSET(NBIO, 0,
>> +                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
>> +                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
>> +                                val), PAGE_SIZE);
>> +
>> +                if (size < PAGE_SIZE)
>> +                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
>> +                else
>> +                        size = 0;
>> +
>> +                r = size;
>> +                goto out;
>> +        }
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>> +        if(r)
>> +                pr_info("%s DPM request failed",
>> +                        (type == PP_SCLK)? "SCLK" : "MCLK");
>> +
>> +out:
>> +        mutex_unlock(&adev->virt.dpm_mutex);
>> +        return r;
>> +}
>> +
>> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
>> +level) {
>> +        int r = 0;
>> +        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
>> +
>> +        if (!amdgim_is_hwperf(adev))
>> +                return -EBADRQC;
>> +
>> +        mutex_lock(&adev->virt.dpm_mutex);
>> +        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>> +        if (!r)
>> +                goto out;
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>> +        if (!r)
>> +                pr_info("DPM request failed");
>> +        else
>> +                pr_info("Mailbox is broken");
>> +
>> +out:
>> +        mutex_unlock(&adev->virt.dpm_mutex);
>> +        return r;
>> +}
>> +
>>   static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
>>   					enum idh_request req)
>>   {
>> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
>>   	.reset_gpu = xgpu_ai_request_reset,
>>   	.wait_reset = NULL,
>>   	.trans_msg = xgpu_ai_mailbox_trans_msg,
>> +	.get_pp_clk = xgpu_ai_get_pp_clk,
>> +	.force_dpm_level = xgpu_ai_force_dpm_level,
>>   };
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> index b4a9cee..39d151b 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> @@ -35,6 +35,10 @@ enum idh_request {
>>   	IDH_REL_GPU_FINI_ACCESS,
>>   	IDH_REQ_GPU_RESET_ACCESS,
>>
>> +	IDH_IRQ_FORCE_DPM_LEVEL = 10,
>> +	IDH_IRQ_GET_PP_SCLK,
>> +	IDH_IRQ_GET_PP_MCLK,
>> +
>>   	IDH_LOG_VF_ERROR       = 200,
>>   };
>>
>> @@ -43,6 +47,8 @@ enum idh_event {
>>   	IDH_READY_TO_ACCESS_GPU,
>>   	IDH_FLR_NOTIFICATION,
>>   	IDH_FLR_NOTIFICATION_CMPL,
>> +	IDH_SUCCESS,
>> +	IDH_FAIL,
>>   	IDH_EVENT_MAX
>>   };
>>
>> --
>> 2.7.4
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found]         ` <fd0591d3-18b0-d08c-b6d3-ecedae79f568-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
@ 2019-04-10 12:09           ` Tao, Yintian
       [not found]             ` <MN2PR12MB3039AFAF64BDB7BA4CD2F4E3E52E0-rweVpJHSKTr/fpeVZQY9RQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
  0 siblings, 1 reply; 11+ messages in thread
From: Tao, Yintian @ 2019-04-10 12:09 UTC (permalink / raw)
  To: Koenig, Christian, Quan, Evan, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi  Christian


Thanks for your review. May I use the kzalloc to allocate the memory for the buffer to avoid the stack problem you said?

Because hypervisor driver will transfer the message through this page size memory.

Best Regards
Yintian Tao

-----Original Message-----
From: Christian König <ckoenig.leichtzumerken@gmail.com> 
Sent: Wednesday, April 10, 2019 6:32 PM
To: Quan, Evan <Evan.Quan@amd.com>; Tao, Yintian <Yintian.Tao@amd.com>; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization

Am 10.04.19 um 11:58 schrieb Quan, Evan:
>
>> -----Original Message-----
>> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of 
>> Yintian Tao
>> Sent: 2019年4月9日 23:18
>> To: amd-gfx@lists.freedesktop.org
>> Cc: Tao, Yintian <Yintian.Tao@amd.com>
>> Subject: [PATCH] drm/amdgpu: support dpm level modification under 
>> virtualization
>>
>> Under vega10 virtualization, smu ip block will not be added.
>> Therefore, we need to add pp clk query and force dpm level function at 
>> amdgpu_virt_ops to support the feature.
>>
>> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
>> Signed-off-by: Yintian Tao <yttao@amd.com>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
>>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78
>> ++++++++++++++++++++++++++++++
>>   drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
>>   7 files changed, 147 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> index 3ff8899..bb0fd5a 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device 
>> *adev,
>>   	mutex_init(&adev->virt.vf_errors.lock);
>>   	hash_init(adev->mn_hash);
>>   	mutex_init(&adev->lock_reset);
>> +	mutex_init(&adev->virt.dpm_mutex);
>>
>>   	amdgpu_device_check_arguments(adev);
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> index 6190495..1353955 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> @@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device 
>> *dev, void *data, struct drm_file
>>   		if (adev->pm.dpm_enabled) {
>>   			dev_info.max_engine_clock =
>> amdgpu_dpm_get_sclk(adev, false) * 10;
>>   			dev_info.max_memory_clock =
>> amdgpu_dpm_get_mclk(adev, false) * 10;
>> +		} else if (amdgpu_sriov_vf(adev)) {
>> +			dev_info.max_engine_clock =
>> amdgpu_virt_get_sclk(adev, false) * 10;
>> +			dev_info.max_memory_clock =
>> amdgpu_virt_get_mclk(adev, false) * 10;
>>   		} else {
>>   			dev_info.max_engine_clock = adev-
>>> clock.default_sclk * 10;
>>   			dev_info.max_memory_clock = adev-
>>> clock.default_mclk * 10; diff --git
>> a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> index 5540259..0162d1e 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>> @@ -380,6 +380,17 @@ static ssize_t
>> amdgpu_set_dpm_forced_performance_level(struct device *dev,
>>   		goto fail;
>>   	}
>>
>> +        if (amdgpu_sriov_vf(adev)) {
>> +                if (amdgim_is_hwperf(adev) &&
>> +                    adev->virt.ops->force_dpm_level) {
>> +                        mutex_lock(&adev->pm.mutex);
>> +                        adev->virt.ops->force_dpm_level(adev, level);
>> +                        mutex_unlock(&adev->pm.mutex);
>> +                        return count;
>> +                } else
>> +                        return -EINVAL;
>> +        }
>> +
>>   	if (current_level == level)
>>   		return count;
>>
>> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct 
>> device *dev,
>>   	struct drm_device *ddev = dev_get_drvdata(dev);
>>   	struct amdgpu_device *adev = ddev->dev_private;
>>
>> +	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
>> +	    adev->virt.ops->get_pp_clk)
>> +		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
>> +
>>   	if (is_support_sw_smu(adev))
>>   		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
>>   	else if (adev->powerplay.pp_funcs->print_clock_levels)
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> index 462a04e..ae4b2a1 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>> @@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct
>> amdgpu_device *adev)
>>   	}
>>   }
>>
>> +static uint32_t parse_clk(char *buf, bool min) {
>> +        char *ptr = buf;
>> +        uint32_t clk = 0;
>> +
>> +        do {
>> +                ptr = strchr(ptr, ':');
>> +                if (!ptr)
>> +                        break;
>> +                ptr+=2;
>> +                clk = simple_strtoul(ptr, NULL, 10);
>> +        } while (!min);
>> +
>> +        return clk * 100;
>> +}
>> +
>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool 
>> +lowest) {
>> +        char buf[512] = {0};
> [Quan, Evan] In  xgpu_ai_get_pp_clk, the max  buffer size seems PAGE_SIZE. Maybe this should be aligned with that.

Well at least on the stack that would be a really bad idea. Where have you seen that?

Saying so 512 is a bit large as well and additional to that please stop initializing things with "{0}", use memset for that.

Christian.

>> +
>> +        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> [Quan, Evan] Better to check existence of adev->virt.ops->get_pp_clk before dereference it. As what you did in amdgpu_set_dpm_forced_performance_level.
>> +
>> +        return parse_clk(buf, lowest); }
>> +
>> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool 
>> +lowest) {
>> +        char buf[512] = {0};
>> +
>> +        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
>> +
>> +        return parse_clk(buf, lowest); }
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> index 722deef..584947b 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
>>   	int (*reset_gpu)(struct amdgpu_device *adev);
>>   	int (*wait_reset)(struct amdgpu_device *adev);
>>   	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1,
>> u32 data2, u32 data3);
>> +	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
>> +	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
>>   };
>>
>>   /*
>> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
>>   	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
>>   	/* VRAM LOST by GIM */
>>   	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
>> +	/* HW PERF SIM in GIM */
>> +	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
>>   };
>>
>>   struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct 
>> amdgpu_virt {
>>   	struct amdgpu_vf_error_buffer   vf_errors;
>>   	struct amdgpu_virt_fw_reserve	fw_reserve;
>>   	uint32_t gim_feature;
>> +	/* protect DPM events to GIM */
>> +	struct mutex                    dpm_mutex;
>>   };
>>
>>   #define amdgpu_sriov_enabled(adev) \ @@ -278,6 +284,9 @@ static 
>> inline bool is_virtual_machine(void)  #endif  }
>>
>> +#define amdgim_is_hwperf(adev) \
>> +	((adev)->virt.gim_feature &
>> AMDGIM_FEATURE_HW_PERF_SIMULATION)
>> +
>>   bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void 
>> amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t 
>> amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -
>> 295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, 
>> unsigned long obj_size,
>>   					unsigned int key,
>>   					unsigned int chksum);
>>   void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool 
>> +lowest); uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, 
>> +bool lowest);
>>
>>   #endif
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> index 73851eb..8dbad49 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct 
>> amdgpu_device *adev,
>>   	xgpu_ai_mailbox_set_valid(adev, false);  }
>>
>> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, 
>> +char *buf) {
>> +        int r = 0;
>> +        u32 req, val, size;
>> +
>> +        if (!amdgim_is_hwperf(adev) || buf == NULL)
>> +                return -EBADRQC;
>> +
>> +        switch(type) {
>> +        case PP_SCLK:
>> +                req = IDH_IRQ_GET_PP_SCLK;
>> +                break;
>> +        case PP_MCLK:
>> +                req = IDH_IRQ_GET_PP_MCLK;
>> +                break;
>> +        default:
>> +                return -EBADRQC;
>> +        }
>> +
>> +        mutex_lock(&adev->virt.dpm_mutex);
>> +
>> +        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>> +        if (!r && adev->fw_vram_usage.va != NULL) {
>> +                val = RREG32_NO_KIQ(
>> +                        SOC15_REG_OFFSET(NBIO, 0,
>> +                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
>> +                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
>> +                                val), PAGE_SIZE);
>> +
>> +                if (size < PAGE_SIZE)
>> +                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
>> +                else
>> +                        size = 0;
>> +
>> +                r = size;
>> +                goto out;
>> +        }
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>> +        if(r)
>> +                pr_info("%s DPM request failed",
>> +                        (type == PP_SCLK)? "SCLK" : "MCLK");
>> +
>> +out:
>> +        mutex_unlock(&adev->virt.dpm_mutex);
>> +        return r;
>> +}
>> +
>> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
>> +level) {
>> +        int r = 0;
>> +        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
>> +
>> +        if (!amdgim_is_hwperf(adev))
>> +                return -EBADRQC;
>> +
>> +        mutex_lock(&adev->virt.dpm_mutex);
>> +        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>> +        if (!r)
>> +                goto out;
>> +
>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>> +        if (!r)
>> +                pr_info("DPM request failed");
>> +        else
>> +                pr_info("Mailbox is broken");
>> +
>> +out:
>> +        mutex_unlock(&adev->virt.dpm_mutex);
>> +        return r;
>> +}
>> +
>>   static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
>>   					enum idh_request req)
>>   {
>> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
>>   	.reset_gpu = xgpu_ai_request_reset,
>>   	.wait_reset = NULL,
>>   	.trans_msg = xgpu_ai_mailbox_trans_msg,
>> +	.get_pp_clk = xgpu_ai_get_pp_clk,
>> +	.force_dpm_level = xgpu_ai_force_dpm_level,
>>   };
>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> index b4a9cee..39d151b 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>> @@ -35,6 +35,10 @@ enum idh_request {
>>   	IDH_REL_GPU_FINI_ACCESS,
>>   	IDH_REQ_GPU_RESET_ACCESS,
>>
>> +	IDH_IRQ_FORCE_DPM_LEVEL = 10,
>> +	IDH_IRQ_GET_PP_SCLK,
>> +	IDH_IRQ_GET_PP_MCLK,
>> +
>>   	IDH_LOG_VF_ERROR       = 200,
>>   };
>>
>> @@ -43,6 +47,8 @@ enum idh_event {
>>   	IDH_READY_TO_ACCESS_GPU,
>>   	IDH_FLR_NOTIFICATION,
>>   	IDH_FLR_NOTIFICATION_CMPL,
>> +	IDH_SUCCESS,
>> +	IDH_FAIL,
>>   	IDH_EVENT_MAX
>>   };
>>
>> --
>> 2.7.4
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found]             ` <MN2PR12MB3039AFAF64BDB7BA4CD2F4E3E52E0-rweVpJHSKTr/fpeVZQY9RQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
@ 2019-04-10 12:23               ` Koenig, Christian
       [not found]                 ` <f45055be-c7bd-0912-4946-f9673725fa51-5C7GfCeVMHo@public.gmane.org>
  0 siblings, 1 reply; 11+ messages in thread
From: Koenig, Christian @ 2019-04-10 12:23 UTC (permalink / raw)
  To: Tao, Yintian, Quan, Evan, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi Yintian,

yeah, kzalloc would obviously work.

But why do you need such a large buffer in the first place?

Rule of thumb is that each function should not use more than 1KB of 
stack, otherwise the compiler will raise a warning.

Christian.

Am 10.04.19 um 14:09 schrieb Tao, Yintian:
> Hi  Christian
>
>
> Thanks for your review. May I use the kzalloc to allocate the memory for the buffer to avoid the stack problem you said?
>
> Because hypervisor driver will transfer the message through this page size memory.
>
> Best Regards
> Yintian Tao
>
> -----Original Message-----
> From: Christian König <ckoenig.leichtzumerken@gmail.com>
> Sent: Wednesday, April 10, 2019 6:32 PM
> To: Quan, Evan <Evan.Quan@amd.com>; Tao, Yintian <Yintian.Tao@amd.com>; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization
>
> Am 10.04.19 um 11:58 schrieb Quan, Evan:
>>> -----Original Message-----
>>> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of
>>> Yintian Tao
>>> Sent: 2019年4月9日 23:18
>>> To: amd-gfx@lists.freedesktop.org
>>> Cc: Tao, Yintian <Yintian.Tao@amd.com>
>>> Subject: [PATCH] drm/amdgpu: support dpm level modification under
>>> virtualization
>>>
>>> Under vega10 virtualization, smu ip block will not be added.
>>> Therefore, we need to add pp clk query and force dpm level function at
>>> amdgpu_virt_ops to support the feature.
>>>
>>> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
>>> Signed-off-by: Yintian Tao <yttao@amd.com>
>>> ---
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
>>>    drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78
>>> ++++++++++++++++++++++++++++++
>>>    drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
>>>    7 files changed, 147 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> index 3ff8899..bb0fd5a 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device
>>> *adev,
>>>    	mutex_init(&adev->virt.vf_errors.lock);
>>>    	hash_init(adev->mn_hash);
>>>    	mutex_init(&adev->lock_reset);
>>> +	mutex_init(&adev->virt.dpm_mutex);
>>>
>>>    	amdgpu_device_check_arguments(adev);
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> index 6190495..1353955 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> @@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device
>>> *dev, void *data, struct drm_file
>>>    		if (adev->pm.dpm_enabled) {
>>>    			dev_info.max_engine_clock =
>>> amdgpu_dpm_get_sclk(adev, false) * 10;
>>>    			dev_info.max_memory_clock =
>>> amdgpu_dpm_get_mclk(adev, false) * 10;
>>> +		} else if (amdgpu_sriov_vf(adev)) {
>>> +			dev_info.max_engine_clock =
>>> amdgpu_virt_get_sclk(adev, false) * 10;
>>> +			dev_info.max_memory_clock =
>>> amdgpu_virt_get_mclk(adev, false) * 10;
>>>    		} else {
>>>    			dev_info.max_engine_clock = adev-
>>>> clock.default_sclk * 10;
>>>    			dev_info.max_memory_clock = adev-
>>>> clock.default_mclk * 10; diff --git
>>> a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> index 5540259..0162d1e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> @@ -380,6 +380,17 @@ static ssize_t
>>> amdgpu_set_dpm_forced_performance_level(struct device *dev,
>>>    		goto fail;
>>>    	}
>>>
>>> +        if (amdgpu_sriov_vf(adev)) {
>>> +                if (amdgim_is_hwperf(adev) &&
>>> +                    adev->virt.ops->force_dpm_level) {
>>> +                        mutex_lock(&adev->pm.mutex);
>>> +                        adev->virt.ops->force_dpm_level(adev, level);
>>> +                        mutex_unlock(&adev->pm.mutex);
>>> +                        return count;
>>> +                } else
>>> +                        return -EINVAL;
>>> +        }
>>> +
>>>    	if (current_level == level)
>>>    		return count;
>>>
>>> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct
>>> device *dev,
>>>    	struct drm_device *ddev = dev_get_drvdata(dev);
>>>    	struct amdgpu_device *adev = ddev->dev_private;
>>>
>>> +	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
>>> +	    adev->virt.ops->get_pp_clk)
>>> +		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
>>> +
>>>    	if (is_support_sw_smu(adev))
>>>    		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
>>>    	else if (adev->powerplay.pp_funcs->print_clock_levels)
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>>> index 462a04e..ae4b2a1 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>>> @@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct
>>> amdgpu_device *adev)
>>>    	}
>>>    }
>>>
>>> +static uint32_t parse_clk(char *buf, bool min) {
>>> +        char *ptr = buf;
>>> +        uint32_t clk = 0;
>>> +
>>> +        do {
>>> +                ptr = strchr(ptr, ':');
>>> +                if (!ptr)
>>> +                        break;
>>> +                ptr+=2;
>>> +                clk = simple_strtoul(ptr, NULL, 10);
>>> +        } while (!min);
>>> +
>>> +        return clk * 100;
>>> +}
>>> +
>>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool
>>> +lowest) {
>>> +        char buf[512] = {0};
>> [Quan, Evan] In  xgpu_ai_get_pp_clk, the max  buffer size seems PAGE_SIZE. Maybe this should be aligned with that.
> Well at least on the stack that would be a really bad idea. Where have you seen that?
>
> Saying so 512 is a bit large as well and additional to that please stop initializing things with "{0}", use memset for that.
>
> Christian.
>
>>> +
>>> +        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
>> [Quan, Evan] Better to check existence of adev->virt.ops->get_pp_clk before dereference it. As what you did in amdgpu_set_dpm_forced_performance_level.
>>> +
>>> +        return parse_clk(buf, lowest); }
>>> +
>>> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool
>>> +lowest) {
>>> +        char buf[512] = {0};
>>> +
>>> +        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
>>> +
>>> +        return parse_clk(buf, lowest); }
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>>> index 722deef..584947b 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>>> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
>>>    	int (*reset_gpu)(struct amdgpu_device *adev);
>>>    	int (*wait_reset)(struct amdgpu_device *adev);
>>>    	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1,
>>> u32 data2, u32 data3);
>>> +	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
>>> +	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
>>>    };
>>>
>>>    /*
>>> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
>>>    	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
>>>    	/* VRAM LOST by GIM */
>>>    	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
>>> +	/* HW PERF SIM in GIM */
>>> +	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
>>>    };
>>>
>>>    struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct
>>> amdgpu_virt {
>>>    	struct amdgpu_vf_error_buffer   vf_errors;
>>>    	struct amdgpu_virt_fw_reserve	fw_reserve;
>>>    	uint32_t gim_feature;
>>> +	/* protect DPM events to GIM */
>>> +	struct mutex                    dpm_mutex;
>>>    };
>>>
>>>    #define amdgpu_sriov_enabled(adev) \ @@ -278,6 +284,9 @@ static
>>> inline bool is_virtual_machine(void)  #endif  }
>>>
>>> +#define amdgim_is_hwperf(adev) \
>>> +	((adev)->virt.gim_feature &
>>> AMDGIM_FEATURE_HW_PERF_SIMULATION)
>>> +
>>>    bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void
>>> amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t
>>> amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -
>>> 295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
>>> unsigned long obj_size,
>>>    					unsigned int key,
>>>    					unsigned int chksum);
>>>    void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
>>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool
>>> +lowest); uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev,
>>> +bool lowest);
>>>
>>>    #endif
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>>> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>>> index 73851eb..8dbad49 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>>> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct
>>> amdgpu_device *adev,
>>>    	xgpu_ai_mailbox_set_valid(adev, false);  }
>>>
>>> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type,
>>> +char *buf) {
>>> +        int r = 0;
>>> +        u32 req, val, size;
>>> +
>>> +        if (!amdgim_is_hwperf(adev) || buf == NULL)
>>> +                return -EBADRQC;
>>> +
>>> +        switch(type) {
>>> +        case PP_SCLK:
>>> +                req = IDH_IRQ_GET_PP_SCLK;
>>> +                break;
>>> +        case PP_MCLK:
>>> +                req = IDH_IRQ_GET_PP_MCLK;
>>> +                break;
>>> +        default:
>>> +                return -EBADRQC;
>>> +        }
>>> +
>>> +        mutex_lock(&adev->virt.dpm_mutex);
>>> +
>>> +        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
>>> +
>>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>>> +        if (!r && adev->fw_vram_usage.va != NULL) {
>>> +                val = RREG32_NO_KIQ(
>>> +                        SOC15_REG_OFFSET(NBIO, 0,
>>> +                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
>>> +                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
>>> +                                val), PAGE_SIZE);
>>> +
>>> +                if (size < PAGE_SIZE)
>>> +                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
>>> +                else
>>> +                        size = 0;
>>> +
>>> +                r = size;
>>> +                goto out;
>>> +        }
>>> +
>>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>>> +        if(r)
>>> +                pr_info("%s DPM request failed",
>>> +                        (type == PP_SCLK)? "SCLK" : "MCLK");
>>> +
>>> +out:
>>> +        mutex_unlock(&adev->virt.dpm_mutex);
>>> +        return r;
>>> +}
>>> +
>>> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
>>> +level) {
>>> +        int r = 0;
>>> +        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
>>> +
>>> +        if (!amdgim_is_hwperf(adev))
>>> +                return -EBADRQC;
>>> +
>>> +        mutex_lock(&adev->virt.dpm_mutex);
>>> +        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
>>> +
>>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>>> +        if (!r)
>>> +                goto out;
>>> +
>>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>>> +        if (!r)
>>> +                pr_info("DPM request failed");
>>> +        else
>>> +                pr_info("Mailbox is broken");
>>> +
>>> +out:
>>> +        mutex_unlock(&adev->virt.dpm_mutex);
>>> +        return r;
>>> +}
>>> +
>>>    static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
>>>    					enum idh_request req)
>>>    {
>>> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
>>>    	.reset_gpu = xgpu_ai_request_reset,
>>>    	.wait_reset = NULL,
>>>    	.trans_msg = xgpu_ai_mailbox_trans_msg,
>>> +	.get_pp_clk = xgpu_ai_get_pp_clk,
>>> +	.force_dpm_level = xgpu_ai_force_dpm_level,
>>>    };
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>>> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>>> index b4a9cee..39d151b 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>>> @@ -35,6 +35,10 @@ enum idh_request {
>>>    	IDH_REL_GPU_FINI_ACCESS,
>>>    	IDH_REQ_GPU_RESET_ACCESS,
>>>
>>> +	IDH_IRQ_FORCE_DPM_LEVEL = 10,
>>> +	IDH_IRQ_GET_PP_SCLK,
>>> +	IDH_IRQ_GET_PP_MCLK,
>>> +
>>>    	IDH_LOG_VF_ERROR       = 200,
>>>    };
>>>
>>> @@ -43,6 +47,8 @@ enum idh_event {
>>>    	IDH_READY_TO_ACCESS_GPU,
>>>    	IDH_FLR_NOTIFICATION,
>>>    	IDH_FLR_NOTIFICATION_CMPL,
>>> +	IDH_SUCCESS,
>>> +	IDH_FAIL,
>>>    	IDH_EVENT_MAX
>>>    };
>>>
>>> --
>>> 2.7.4
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found]                 ` <f45055be-c7bd-0912-4946-f9673725fa51-5C7GfCeVMHo@public.gmane.org>
@ 2019-04-10 12:34                   ` Tao, Yintian
  0 siblings, 0 replies; 11+ messages in thread
From: Tao, Yintian @ 2019-04-10 12:34 UTC (permalink / raw)
  To: Koenig, Christian, Quan, Evan, amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW

Hi  Christian


Many thanks.

The reason is that the reserved buffer for pf2vf communication is allocated from visible VRAM and the allocation granularity from it is page size at amdgpu_ttm_fw_reserve_vram_init.


Best Regards
Yintian Tao


-----Original Message-----
From: Koenig, Christian <Christian.Koenig@amd.com> 
Sent: Wednesday, April 10, 2019 8:23 PM
To: Tao, Yintian <Yintian.Tao@amd.com>; Quan, Evan <Evan.Quan@amd.com>; amd-gfx@lists.freedesktop.org
Subject: Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization

Hi Yintian,

yeah, kzalloc would obviously work.

But why do you need such a large buffer in the first place?

Rule of thumb is that each function should not use more than 1KB of stack, otherwise the compiler will raise a warning.

Christian.

Am 10.04.19 um 14:09 schrieb Tao, Yintian:
> Hi  Christian
>
>
> Thanks for your review. May I use the kzalloc to allocate the memory for the buffer to avoid the stack problem you said?
>
> Because hypervisor driver will transfer the message through this page size memory.
>
> Best Regards
> Yintian Tao
>
> -----Original Message-----
> From: Christian König <ckoenig.leichtzumerken@gmail.com>
> Sent: Wednesday, April 10, 2019 6:32 PM
> To: Quan, Evan <Evan.Quan@amd.com>; Tao, Yintian 
> <Yintian.Tao@amd.com>; amd-gfx@lists.freedesktop.org
> Subject: Re: [PATCH] drm/amdgpu: support dpm level modification under 
> virtualization
>
> Am 10.04.19 um 11:58 schrieb Quan, Evan:
>>> -----Original Message-----
>>> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of 
>>> Yintian Tao
>>> Sent: 2019年4月9日 23:18
>>> To: amd-gfx@lists.freedesktop.org
>>> Cc: Tao, Yintian <Yintian.Tao@amd.com>
>>> Subject: [PATCH] drm/amdgpu: support dpm level modification under 
>>> virtualization
>>>
>>> Under vega10 virtualization, smu ip block will not be added.
>>> Therefore, we need to add pp clk query and force dpm level function at 
>>> amdgpu_virt_ops to support the feature.
>>>
>>> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
>>> Signed-off-by: Yintian Tao <yttao@amd.com>
>>> ---
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
>>>    drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
>>>    drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78
>>> ++++++++++++++++++++++++++++++
>>>    drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
>>>    7 files changed, 147 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> index 3ff8899..bb0fd5a 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
>>> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device 
>>> *adev,
>>>    	mutex_init(&adev->virt.vf_errors.lock);
>>>    	hash_init(adev->mn_hash);
>>>    	mutex_init(&adev->lock_reset);
>>> +	mutex_init(&adev->virt.dpm_mutex);
>>>
>>>    	amdgpu_device_check_arguments(adev);
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> index 6190495..1353955 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>>> @@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device 
>>> *dev, void *data, struct drm_file
>>>    		if (adev->pm.dpm_enabled) {
>>>    			dev_info.max_engine_clock =
>>> amdgpu_dpm_get_sclk(adev, false) * 10;
>>>    			dev_info.max_memory_clock =
>>> amdgpu_dpm_get_mclk(adev, false) * 10;
>>> +		} else if (amdgpu_sriov_vf(adev)) {
>>> +			dev_info.max_engine_clock =
>>> amdgpu_virt_get_sclk(adev, false) * 10;
>>> +			dev_info.max_memory_clock =
>>> amdgpu_virt_get_mclk(adev, false) * 10;
>>>    		} else {
>>>    			dev_info.max_engine_clock = adev-
>>>> clock.default_sclk * 10;
>>>    			dev_info.max_memory_clock = adev-
>>>> clock.default_mclk * 10; diff --git
>>> a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> index 5540259..0162d1e 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
>>> @@ -380,6 +380,17 @@ static ssize_t
>>> amdgpu_set_dpm_forced_performance_level(struct device *dev,
>>>    		goto fail;
>>>    	}
>>>
>>> +        if (amdgpu_sriov_vf(adev)) {
>>> +                if (amdgim_is_hwperf(adev) &&
>>> +                    adev->virt.ops->force_dpm_level) {
>>> +                        mutex_lock(&adev->pm.mutex);
>>> +                        adev->virt.ops->force_dpm_level(adev, level);
>>> +                        mutex_unlock(&adev->pm.mutex);
>>> +                        return count;
>>> +                } else
>>> +                        return -EINVAL;
>>> +        }
>>> +
>>>    	if (current_level == level)
>>>    		return count;
>>>
>>> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct 
>>> device *dev,
>>>    	struct drm_device *ddev = dev_get_drvdata(dev);
>>>    	struct amdgpu_device *adev = ddev->dev_private;
>>>
>>> +	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
>>> +	    adev->virt.ops->get_pp_clk)
>>> +		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
>>> +
>>>    	if (is_support_sw_smu(adev))
>>>    		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
>>>    	else if (adev->powerplay.pp_funcs->print_clock_levels)
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>>> index 462a04e..ae4b2a1 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
>>> @@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct
>>> amdgpu_device *adev)
>>>    	}
>>>    }
>>>
>>> +static uint32_t parse_clk(char *buf, bool min) {
>>> +        char *ptr = buf;
>>> +        uint32_t clk = 0;
>>> +
>>> +        do {
>>> +                ptr = strchr(ptr, ':');
>>> +                if (!ptr)
>>> +                        break;
>>> +                ptr+=2;
>>> +                clk = simple_strtoul(ptr, NULL, 10);
>>> +        } while (!min);
>>> +
>>> +        return clk * 100;
>>> +}
>>> +
>>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool
>>> +lowest) {
>>> +        char buf[512] = {0};
>> [Quan, Evan] In  xgpu_ai_get_pp_clk, the max  buffer size seems PAGE_SIZE. Maybe this should be aligned with that.
> Well at least on the stack that would be a really bad idea. Where have you seen that?
>
> Saying so 512 is a bit large as well and additional to that please stop initializing things with "{0}", use memset for that.
>
> Christian.
>
>>> +
>>> +        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
>> [Quan, Evan] Better to check existence of adev->virt.ops->get_pp_clk before dereference it. As what you did in amdgpu_set_dpm_forced_performance_level.
>>> +
>>> +        return parse_clk(buf, lowest); }
>>> +
>>> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool
>>> +lowest) {
>>> +        char buf[512] = {0};
>>> +
>>> +        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
>>> +
>>> +        return parse_clk(buf, lowest); }
>>>
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>>> index 722deef..584947b 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
>>> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
>>>    	int (*reset_gpu)(struct amdgpu_device *adev);
>>>    	int (*wait_reset)(struct amdgpu_device *adev);
>>>    	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 
>>> data1,
>>> u32 data2, u32 data3);
>>> +	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
>>> +	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
>>>    };
>>>
>>>    /*
>>> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
>>>    	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
>>>    	/* VRAM LOST by GIM */
>>>    	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
>>> +	/* HW PERF SIM in GIM */
>>> +	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
>>>    };
>>>
>>>    struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ 
>>> struct amdgpu_virt {
>>>    	struct amdgpu_vf_error_buffer   vf_errors;
>>>    	struct amdgpu_virt_fw_reserve	fw_reserve;
>>>    	uint32_t gim_feature;
>>> +	/* protect DPM events to GIM */
>>> +	struct mutex                    dpm_mutex;
>>>    };
>>>
>>>    #define amdgpu_sriov_enabled(adev) \ @@ -278,6 +284,9 @@ static 
>>> inline bool is_virtual_machine(void)  #endif  }
>>>
>>> +#define amdgim_is_hwperf(adev) \
>>> +	((adev)->virt.gim_feature &
>>> AMDGIM_FEATURE_HW_PERF_SIMULATION)
>>> +
>>>    bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void 
>>> amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t 
>>> amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -
>>> 295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, 
>>> unsigned long obj_size,
>>>    					unsigned int key,
>>>    					unsigned int chksum);
>>>    void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
>>> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool 
>>> +lowest); uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, 
>>> +bool lowest);
>>>
>>>    #endif
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>>> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>>> index 73851eb..8dbad49 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
>>> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct 
>>> amdgpu_device *adev,
>>>    	xgpu_ai_mailbox_set_valid(adev, false);  }
>>>
>>> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, 
>>> +char *buf) {
>>> +        int r = 0;
>>> +        u32 req, val, size;
>>> +
>>> +        if (!amdgim_is_hwperf(adev) || buf == NULL)
>>> +                return -EBADRQC;
>>> +
>>> +        switch(type) {
>>> +        case PP_SCLK:
>>> +                req = IDH_IRQ_GET_PP_SCLK;
>>> +                break;
>>> +        case PP_MCLK:
>>> +                req = IDH_IRQ_GET_PP_MCLK;
>>> +                break;
>>> +        default:
>>> +                return -EBADRQC;
>>> +        }
>>> +
>>> +        mutex_lock(&adev->virt.dpm_mutex);
>>> +
>>> +        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
>>> +
>>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>>> +        if (!r && adev->fw_vram_usage.va != NULL) {
>>> +                val = RREG32_NO_KIQ(
>>> +                        SOC15_REG_OFFSET(NBIO, 0,
>>> +                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
>>> +                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
>>> +                                val), PAGE_SIZE);
>>> +
>>> +                if (size < PAGE_SIZE)
>>> +                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
>>> +                else
>>> +                        size = 0;
>>> +
>>> +                r = size;
>>> +                goto out;
>>> +        }
>>> +
>>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>>> +        if(r)
>>> +                pr_info("%s DPM request failed",
>>> +                        (type == PP_SCLK)? "SCLK" : "MCLK");
>>> +
>>> +out:
>>> +        mutex_unlock(&adev->virt.dpm_mutex);
>>> +        return r;
>>> +}
>>> +
>>> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
>>> +level) {
>>> +        int r = 0;
>>> +        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
>>> +
>>> +        if (!amdgim_is_hwperf(adev))
>>> +                return -EBADRQC;
>>> +
>>> +        mutex_lock(&adev->virt.dpm_mutex);
>>> +        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
>>> +
>>> +        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
>>> +        if (!r)
>>> +                goto out;
>>> +
>>> +        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
>>> +        if (!r)
>>> +                pr_info("DPM request failed");
>>> +        else
>>> +                pr_info("Mailbox is broken");
>>> +
>>> +out:
>>> +        mutex_unlock(&adev->virt.dpm_mutex);
>>> +        return r;
>>> +}
>>> +
>>>    static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
>>>    					enum idh_request req)
>>>    {
>>> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
>>>    	.reset_gpu = xgpu_ai_request_reset,
>>>    	.wait_reset = NULL,
>>>    	.trans_msg = xgpu_ai_mailbox_trans_msg,
>>> +	.get_pp_clk = xgpu_ai_get_pp_clk,
>>> +	.force_dpm_level = xgpu_ai_force_dpm_level,
>>>    };
>>> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>>> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>>> index b4a9cee..39d151b 100644
>>> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>>> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
>>> @@ -35,6 +35,10 @@ enum idh_request {
>>>    	IDH_REL_GPU_FINI_ACCESS,
>>>    	IDH_REQ_GPU_RESET_ACCESS,
>>>
>>> +	IDH_IRQ_FORCE_DPM_LEVEL = 10,
>>> +	IDH_IRQ_GET_PP_SCLK,
>>> +	IDH_IRQ_GET_PP_MCLK,
>>> +
>>>    	IDH_LOG_VF_ERROR       = 200,
>>>    };
>>>
>>> @@ -43,6 +47,8 @@ enum idh_event {
>>>    	IDH_READY_TO_ACCESS_GPU,
>>>    	IDH_FLR_NOTIFICATION,
>>>    	IDH_FLR_NOTIFICATION_CMPL,
>>> +	IDH_SUCCESS,
>>> +	IDH_FAIL,
>>>    	IDH_EVENT_MAX
>>>    };
>>>
>>> --
>>> 2.7.4
>>>
>>> _______________________________________________
>>> amd-gfx mailing list
>>> amd-gfx@lists.freedesktop.org
>>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization
       [not found] ` <1554823102-23667-1-git-send-email-yttao-5C7GfCeVMHo@public.gmane.org>
  2019-04-10  8:38   ` Tao, Yintian
  2019-04-10  9:58   ` Quan, Evan
@ 2019-04-10 20:54   ` Kuehling, Felix
  2 siblings, 0 replies; 11+ messages in thread
From: Kuehling, Felix @ 2019-04-10 20:54 UTC (permalink / raw)
  To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tao, Yintian

How does forcing DPM levels work in SRIOV? Can clocks switch fast enough to allow different VFs have different clocks? If not, can one VF override the clocks used by another VF? In that case, wouldn't that violate the isolation between VFs?

Regards,
  Felix

-----Original Message-----
From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of Yintian Tao
Sent: Tuesday, April 9, 2019 11:18
To: amd-gfx@lists.freedesktop.org
Cc: Tao, Yintian <Yintian.Tao@amd.com>
Subject: [PATCH] drm/amdgpu: support dpm level modification under virtualization

Under vega10 virtualization, smu ip block will not be added.
Therefore, we need to add pp clk query and force dpm level function at amdgpu_virt_ops to support the feature.

Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
Signed-off-by: Yintian Tao <yttao@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  3 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c     | 15 ++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 33 +++++++++++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   | 11 +++++
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c      | 78 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h      |  6 +++
 7 files changed, 147 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3ff8899..bb0fd5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->virt.vf_errors.lock);
 	hash_init(adev->mn_hash);
 	mutex_init(&adev->lock_reset);
+	mutex_init(&adev->virt.dpm_mutex);
 
 	amdgpu_device_check_arguments(adev);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6190495..1353955 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -727,6 +727,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		if (adev->pm.dpm_enabled) {
 			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
 			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+		} else if (amdgpu_sriov_vf(adev)) {
+			dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
+			dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
 		} else {
 			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
 			dev_info.max_memory_clock = adev->clock.default_mclk * 10; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 5540259..0162d1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -380,6 +380,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 		goto fail;
 	}
 
+        if (amdgpu_sriov_vf(adev)) {
+                if (amdgim_is_hwperf(adev) &&
+                    adev->virt.ops->force_dpm_level) {
+                        mutex_lock(&adev->pm.mutex);
+                        adev->virt.ops->force_dpm_level(adev, level);
+                        mutex_unlock(&adev->pm.mutex);
+                        return count;
+                } else
+                        return -EINVAL;
+        }
+
 	if (current_level == level)
 		return count;
 
@@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
+	if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+	    adev->virt.ops->get_pp_clk)
+		return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
 	if (is_support_sw_smu(adev))
 		return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
 	else if (adev->powerplay.pp_funcs->print_clock_levels)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 462a04e..ae4b2a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -375,4 +375,37 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
 	}
 }
 
+static uint32_t parse_clk(char *buf, bool min) {
+        char *ptr = buf;
+        uint32_t clk = 0;
+
+        do {
+                ptr = strchr(ptr, ':');
+                if (!ptr)
+                        break;
+                ptr+=2;
+                clk = simple_strtoul(ptr, NULL, 10);
+        } while (!min);
+
+        return clk * 100;
+}
+
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest) 
+{
+        char buf[512] = {0};
+
+        adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
+        return parse_clk(buf, lowest);
+}
+
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) 
+{
+        char buf[512] = {0};
+
+        adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+
+        return parse_clk(buf, lowest);
+}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 722deef..584947b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
 	int (*reset_gpu)(struct amdgpu_device *adev);
 	int (*wait_reset)(struct amdgpu_device *adev);
 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+	int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
+	int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
 };
 
 /*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
 	AMDGIM_FEATURE_GIM_LOAD_UCODES   = 0x2,
 	/* VRAM LOST by GIM */
 	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+	/* HW PERF SIM in GIM */
+	AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
 };
 
 struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct amdgpu_virt {
 	struct amdgpu_vf_error_buffer   vf_errors;
 	struct amdgpu_virt_fw_reserve	fw_reserve;
 	uint32_t gim_feature;
+	/* protect DPM events to GIM */
+	struct mutex                    dpm_mutex;
 };
 
 #define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)  #endif  }
 
+#define amdgim_is_hwperf(adev) \
+	((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);  void amdgpu_virt_init_setting(struct amdgpu_device *adev);  uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
 					unsigned int key,
 					unsigned int chksum);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); 
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
 
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 73851eb..8dbad49 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
 	xgpu_ai_mailbox_set_valid(adev, false);  }
 
+static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, 
+char *buf) {
+        int r = 0;
+        u32 req, val, size;
+
+        if (!amdgim_is_hwperf(adev) || buf == NULL)
+                return -EBADRQC;
+
+        switch(type) {
+        case PP_SCLK:
+                req = IDH_IRQ_GET_PP_SCLK;
+                break;
+        case PP_MCLK:
+                req = IDH_IRQ_GET_PP_MCLK;
+                break;
+        default:
+                return -EBADRQC;
+        }
+
+        mutex_lock(&adev->virt.dpm_mutex);
+
+        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+        if (!r && adev->fw_vram_usage.va != NULL) {
+                val = RREG32_NO_KIQ(
+                        SOC15_REG_OFFSET(NBIO, 0,
+                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
+                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
+                                val), PAGE_SIZE);
+
+                if (size < PAGE_SIZE)
+                        strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
+                else
+                        size = 0;
+
+                r = size;
+                goto out;
+        }
+
+        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+        if(r)
+                pr_info("%s DPM request failed",
+                        (type == PP_SCLK)? "SCLK" : "MCLK");
+
+out:
+        mutex_unlock(&adev->virt.dpm_mutex);
+        return r;
+}
+
+static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 
+level) {
+        int r = 0;
+        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
+
+        if (!amdgim_is_hwperf(adev))
+                return -EBADRQC;
+
+        mutex_lock(&adev->virt.dpm_mutex);
+        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
+
+        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+        if (!r)
+                goto out;
+
+        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+        if (!r)
+                pr_info("DPM request failed");
+        else
+                pr_info("Mailbox is broken");
+
+out:
+        mutex_unlock(&adev->virt.dpm_mutex);
+        return r;
+}
+
 static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
 					enum idh_request req)
 {
@@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
 	.reset_gpu = xgpu_ai_request_reset,
 	.wait_reset = NULL,
 	.trans_msg = xgpu_ai_mailbox_trans_msg,
+	.get_pp_clk = xgpu_ai_get_pp_clk,
+	.force_dpm_level = xgpu_ai_force_dpm_level,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index b4a9cee..39d151b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,6 +35,10 @@ enum idh_request {
 	IDH_REL_GPU_FINI_ACCESS,
 	IDH_REQ_GPU_RESET_ACCESS,
 
+	IDH_IRQ_FORCE_DPM_LEVEL = 10,
+	IDH_IRQ_GET_PP_SCLK,
+	IDH_IRQ_GET_PP_MCLK,
+
 	IDH_LOG_VF_ERROR       = 200,
 };
 
@@ -43,6 +47,8 @@ enum idh_event {
 	IDH_READY_TO_ACCESS_GPU,
 	IDH_FLR_NOTIFICATION,
 	IDH_FLR_NOTIFICATION_CMPL,
+	IDH_SUCCESS,
+	IDH_FAIL,
 	IDH_EVENT_MAX
 };
 
--
2.7.4

_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2019-04-10 20:54 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-04-09 15:18 [PATCH] drm/amdgpu: support dpm level modification under virtualization Yintian Tao
     [not found] ` <1554823102-23667-1-git-send-email-yttao-5C7GfCeVMHo@public.gmane.org>
2019-04-10  8:38   ` Tao, Yintian
     [not found]     ` <MN2PR12MB3039532056AA164C6BC60E94E52E0-rweVpJHSKTr/fpeVZQY9RQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2019-04-10  8:43       ` Koenig, Christian
     [not found]         ` <62b84af0-e0c0-452a-9d83-26e221d37a85-5C7GfCeVMHo@public.gmane.org>
2019-04-10  9:07           ` Tao, Yintian
2019-04-10  9:58   ` Quan, Evan
     [not found]     ` <MN2PR12MB3344FC85F11263060FDD478AE42E0-rweVpJHSKToDMgCC8P//OwdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2019-04-10 10:00       ` Tao, Yintian
2019-04-10 10:32       ` Christian König
     [not found]         ` <fd0591d3-18b0-d08c-b6d3-ecedae79f568-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2019-04-10 12:09           ` Tao, Yintian
     [not found]             ` <MN2PR12MB3039AFAF64BDB7BA4CD2F4E3E52E0-rweVpJHSKTr/fpeVZQY9RQdYzm3356FpvxpqHgZTriW3zl9H0oFU5g@public.gmane.org>
2019-04-10 12:23               ` Koenig, Christian
     [not found]                 ` <f45055be-c7bd-0912-4946-f9673725fa51-5C7GfCeVMHo@public.gmane.org>
2019-04-10 12:34                   ` Tao, Yintian
2019-04-10 20:54   ` Kuehling, Felix

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.