* [PATCH] drm/amdgpu: support dpm level modification under virtualization v3
@ 2019-04-10 14:25 Yintian Tao
[not found] ` <1554906332-10229-1-git-send-email-yttao-5C7GfCeVMHo@public.gmane.org>
0 siblings, 1 reply; 4+ messages in thread
From: Yintian Tao @ 2019-04-10 14:25 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Yintian Tao
Under vega10 virtualization, the smu ip block will not be added.
Therefore, we need to add a pp clk query and force dpm level function
at amdgpu_virt_ops to support the feature.
v2: add get_pp_clk existence check and use kzalloc to allocate buf
v3: return -ENOMEM for allocation failure and correct the coding style
Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
Signed-off-by: Yintian Tao <yttao@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 15 ++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 49 +++++++++++++++++++
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 11 +++++
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 78 ++++++++++++++++++++++++++++++
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 6 +++
7 files changed, 164 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3ff8899..bb0fd5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
+ mutex_init(&adev->virt.dpm_mutex);
amdgpu_device_check_arguments(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6190495..29ec28f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -727,6 +727,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (adev->pm.dpm_enabled) {
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk) {
+ dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 5540259..0162d1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -380,6 +380,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
goto fail;
}
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgim_is_hwperf(adev) &&
+ adev->virt.ops->force_dpm_level) {
+ mutex_lock(&adev->pm.mutex);
+ adev->virt.ops->force_dpm_level(adev, level);
+ mutex_unlock(&adev->pm.mutex);
+ return count;
+ } else
+ return -EINVAL;
+ }
+
if (current_level == level)
return count;
@@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk)
+ return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
if (is_support_sw_smu(adev))
return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 462a04e..7e7f9ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -375,4 +375,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
+static uint32_t parse_clk(char *buf, bool min)
+{
+ char *ptr = buf;
+ uint32_t clk = 0;
+
+ do {
+ ptr = strchr(ptr, ':');
+ if (!ptr)
+ break;
+ ptr+=2;
+ clk = simple_strtoul(ptr, NULL, 10);
+ } while (!min);
+
+ return clk * 100;
+}
+
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
+
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 722deef..584947b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+ int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
+ int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
/*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+ /* HW PERF SIM in GIM */
+ AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};
struct amd_sriov_msg_pf2vf_info_header {
@@ -252,6 +256,8 @@ struct amdgpu_virt {
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
+ /* protect DPM events to GIM */
+ struct mutex dpm_mutex;
};
#define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
#endif
}
+#define amdgim_is_hwperf(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
@@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 73851eb..8dbad49 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_ai_mailbox_set_valid(adev, false);
}
+static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
+{
+ int r = 0;
+ u32 req, val, size;
+
+ if (!amdgim_is_hwperf(adev) || buf == NULL)
+ return -EBADRQC;
+
+ switch(type) {
+ case PP_SCLK:
+ req = IDH_IRQ_GET_PP_SCLK;
+ break;
+ case PP_MCLK:
+ req = IDH_IRQ_GET_PP_MCLK;
+ break;
+ default:
+ return -EBADRQC;
+ }
+
+ mutex_lock(&adev->virt.dpm_mutex);
+
+ xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r && adev->fw_vram_usage.va != NULL) {
+ val = RREG32_NO_KIQ(
+ SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
+ size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
+ val), PAGE_SIZE);
+
+ if (size < PAGE_SIZE)
+ strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
+ else
+ size = 0;
+
+ r = size;
+ goto out;
+ }
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if(r)
+ pr_info("%s DPM request failed",
+ (type == PP_SCLK)? "SCLK" : "MCLK");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
+static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
+{
+ int r = 0;
+ u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
+
+ if (!amdgim_is_hwperf(adev))
+ return -EBADRQC;
+
+ mutex_lock(&adev->virt.dpm_mutex);
+ xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r)
+ goto out;
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if (!r)
+ pr_info("DPM request failed");
+ else
+ pr_info("Mailbox is broken");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
@@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
+ .get_pp_clk = xgpu_ai_get_pp_clk,
+ .force_dpm_level = xgpu_ai_force_dpm_level,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index b4a9cee..39d151b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,6 +35,10 @@ enum idh_request {
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
+ IDH_IRQ_FORCE_DPM_LEVEL = 10,
+ IDH_IRQ_GET_PP_SCLK,
+ IDH_IRQ_GET_PP_MCLK,
+
IDH_LOG_VF_ERROR = 200,
};
@@ -43,6 +47,8 @@ enum idh_event {
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
+ IDH_SUCCESS,
+ IDH_FAIL,
IDH_EVENT_MAX
};
--
2.7.4
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization v3
[not found] ` <1554906332-10229-1-git-send-email-yttao-5C7GfCeVMHo@public.gmane.org>
@ 2019-04-10 15:31 ` Alex Deucher
[not found] ` <CADnq5_P-P7XKwzF4Ggdwsb_tRTAkgoHEtJAVYf7JvrX2GRfCQw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-04-11 1:13 ` Quan, Evan
1 sibling, 1 reply; 4+ messages in thread
From: Alex Deucher @ 2019-04-10 15:31 UTC (permalink / raw)
To: Yintian Tao; +Cc: amd-gfx list
On Wed, Apr 10, 2019 at 10:25 AM Yintian Tao <yttao@amd.com> wrote:
>
> Under vega10 virtualization, the smu ip block will not be added.
> Therefore, we need to add a pp clk query and force dpm level function
> at amdgpu_virt_ops to support the feature.
>
> v2: add get_pp_clk existence check and use kzalloc to allocate buf
>
> v3: return -ENOMEM for allocation failure and correct the coding style
>
> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
> Signed-off-by: Yintian Tao <yttao@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
> drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++
> drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 15 ++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 49 +++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 11 +++++
> drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 78 ++++++++++++++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 6 +++
> 7 files changed, 164 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 3ff8899..bb0fd5a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
> mutex_init(&adev->virt.vf_errors.lock);
> hash_init(adev->mn_hash);
> mutex_init(&adev->lock_reset);
> + mutex_init(&adev->virt.dpm_mutex);
>
> amdgpu_device_check_arguments(adev);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 6190495..29ec28f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -727,6 +727,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
> if (adev->pm.dpm_enabled) {
> dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
> dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
> + } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
> + adev->virt.ops->get_pp_clk) {
> + dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
> + dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
> } else {
> dev_info.max_engine_clock = adev->clock.default_sclk * 10;
> dev_info.max_memory_clock = adev->clock.default_mclk * 10;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index 5540259..0162d1e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -380,6 +380,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
> goto fail;
> }
>
> + if (amdgpu_sriov_vf(adev)) {
> + if (amdgim_is_hwperf(adev) &&
> + adev->virt.ops->force_dpm_level) {
> + mutex_lock(&adev->pm.mutex);
> + adev->virt.ops->force_dpm_level(adev, level);
> + mutex_unlock(&adev->pm.mutex);
> + return count;
> + } else
> + return -EINVAL;
Coding style. If any clause has parens, all should. E.g., this should be:
} else {
return -EINVAL;
}
With that fixed, this patch is:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
> + }
> +
> if (current_level == level)
> return count;
>
> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
> struct drm_device *ddev = dev_get_drvdata(dev);
> struct amdgpu_device *adev = ddev->dev_private;
>
> + if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
> + adev->virt.ops->get_pp_clk)
> + return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> +
> if (is_support_sw_smu(adev))
> return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
> else if (adev->powerplay.pp_funcs->print_clock_levels)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 462a04e..7e7f9ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -375,4 +375,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
> }
> }
>
> +static uint32_t parse_clk(char *buf, bool min)
> +{
> + char *ptr = buf;
> + uint32_t clk = 0;
> +
> + do {
> + ptr = strchr(ptr, ':');
> + if (!ptr)
> + break;
> + ptr+=2;
> + clk = simple_strtoul(ptr, NULL, 10);
> + } while (!min);
> +
> + return clk * 100;
> +}
> +
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
> +{
> + char *buf = NULL;
> + uint32_t clk = 0;
> +
> + buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
> + if (!buf)
> + return -ENOMEM;
> +
> + adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> + clk = parse_clk(buf, lowest);
> +
> + kfree(buf);
> +
> + return clk;
> +}
> +
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
> +{
> + char *buf = NULL;
> + uint32_t clk = 0;
> +
> + buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
> + if (!buf)
> + return -ENOMEM;
> +
> + adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
> + clk = parse_clk(buf, lowest);
> +
> + kfree(buf);
> +
> + return clk;
> +}
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 722deef..584947b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
> int (*reset_gpu)(struct amdgpu_device *adev);
> int (*wait_reset)(struct amdgpu_device *adev);
> void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
> + int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
> + int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
> };
>
> /*
> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
> AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
> /* VRAM LOST by GIM */
> AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
> + /* HW PERF SIM in GIM */
> + AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
> };
>
> struct amd_sriov_msg_pf2vf_info_header {
> @@ -252,6 +256,8 @@ struct amdgpu_virt {
> struct amdgpu_vf_error_buffer vf_errors;
> struct amdgpu_virt_fw_reserve fw_reserve;
> uint32_t gim_feature;
> + /* protect DPM events to GIM */
> + struct mutex dpm_mutex;
> };
>
> #define amdgpu_sriov_enabled(adev) \
> @@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
> #endif
> }
>
> +#define amdgim_is_hwperf(adev) \
> + ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
> +
> bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
> void amdgpu_virt_init_setting(struct amdgpu_device *adev);
> uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
> @@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
> unsigned int key,
> unsigned int chksum);
> void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
>
> #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> index 73851eb..8dbad49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
> xgpu_ai_mailbox_set_valid(adev, false);
> }
>
> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
> +{
> + int r = 0;
> + u32 req, val, size;
> +
> + if (!amdgim_is_hwperf(adev) || buf == NULL)
> + return -EBADRQC;
> +
> + switch(type) {
> + case PP_SCLK:
> + req = IDH_IRQ_GET_PP_SCLK;
> + break;
> + case PP_MCLK:
> + req = IDH_IRQ_GET_PP_MCLK;
> + break;
> + default:
> + return -EBADRQC;
> + }
> +
> + mutex_lock(&adev->virt.dpm_mutex);
> +
> + xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
> +
> + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> + if (!r && adev->fw_vram_usage.va != NULL) {
> + val = RREG32_NO_KIQ(
> + SOC15_REG_OFFSET(NBIO, 0,
> + mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
> + size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
> + val), PAGE_SIZE);
> +
> + if (size < PAGE_SIZE)
> + strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
> + else
> + size = 0;
> +
> + r = size;
> + goto out;
> + }
> +
> + r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> + if(r)
> + pr_info("%s DPM request failed",
> + (type == PP_SCLK)? "SCLK" : "MCLK");
> +
> +out:
> + mutex_unlock(&adev->virt.dpm_mutex);
> + return r;
> +}
> +
> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
> +{
> + int r = 0;
> + u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
> +
> + if (!amdgim_is_hwperf(adev))
> + return -EBADRQC;
> +
> + mutex_lock(&adev->virt.dpm_mutex);
> + xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
> +
> + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> + if (!r)
> + goto out;
> +
> + r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> + if (!r)
> + pr_info("DPM request failed");
> + else
> + pr_info("Mailbox is broken");
> +
> +out:
> + mutex_unlock(&adev->virt.dpm_mutex);
> + return r;
> +}
> +
> static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
> enum idh_request req)
> {
> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
> .reset_gpu = xgpu_ai_request_reset,
> .wait_reset = NULL,
> .trans_msg = xgpu_ai_mailbox_trans_msg,
> + .get_pp_clk = xgpu_ai_get_pp_clk,
> + .force_dpm_level = xgpu_ai_force_dpm_level,
> };
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> index b4a9cee..39d151b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> @@ -35,6 +35,10 @@ enum idh_request {
> IDH_REL_GPU_FINI_ACCESS,
> IDH_REQ_GPU_RESET_ACCESS,
>
> + IDH_IRQ_FORCE_DPM_LEVEL = 10,
> + IDH_IRQ_GET_PP_SCLK,
> + IDH_IRQ_GET_PP_MCLK,
> +
> IDH_LOG_VF_ERROR = 200,
> };
>
> @@ -43,6 +47,8 @@ enum idh_event {
> IDH_READY_TO_ACCESS_GPU,
> IDH_FLR_NOTIFICATION,
> IDH_FLR_NOTIFICATION_CMPL,
> + IDH_SUCCESS,
> + IDH_FAIL,
> IDH_EVENT_MAX
> };
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 4+ messages in thread
* RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization v3
[not found] ` <CADnq5_P-P7XKwzF4Ggdwsb_tRTAkgoHEtJAVYf7JvrX2GRfCQw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
@ 2019-04-10 15:35 ` Tao, Yintian
0 siblings, 0 replies; 4+ messages in thread
From: Tao, Yintian @ 2019-04-10 15:35 UTC (permalink / raw)
To: Alex Deucher; +Cc: amd-gfx list
Hi Alex
Many thanks for your review.
Best Regards
Yintian Tao
-----Original Message-----
From: Alex Deucher <alexdeucher@gmail.com>
Sent: Wednesday, April 10, 2019 11:32 PM
To: Tao, Yintian <Yintian.Tao@amd.com>
Cc: amd-gfx list <amd-gfx@lists.freedesktop.org>
Subject: Re: [PATCH] drm/amdgpu: support dpm level modification under virtualization v3
On Wed, Apr 10, 2019 at 10:25 AM Yintian Tao <yttao@amd.com> wrote:
>
> Under vega10 virtualization, the smu ip block will not be added.
> Therefore, we need to add a pp clk query and force dpm level function
> at amdgpu_virt_ops to support the feature.
>
> v2: add get_pp_clk existence check and use kzalloc to allocate buf
>
> v3: return -ENOMEM for allocation failure and correct the coding style
>
> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
> Signed-off-by: Yintian Tao <yttao@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
> drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++
> drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 15 ++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 49 +++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 11 +++++
> drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 78 ++++++++++++++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 6 +++
> 7 files changed, 164 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 3ff8899..bb0fd5a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
> mutex_init(&adev->virt.vf_errors.lock);
> hash_init(adev->mn_hash);
> mutex_init(&adev->lock_reset);
> + mutex_init(&adev->virt.dpm_mutex);
>
> amdgpu_device_check_arguments(adev);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 6190495..29ec28f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -727,6 +727,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
> if (adev->pm.dpm_enabled) {
> dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
> dev_info.max_memory_clock =
> amdgpu_dpm_get_mclk(adev, false) * 10;
> + } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
> + adev->virt.ops->get_pp_clk) {
> + dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
> + dev_info.max_memory_clock =
> + amdgpu_virt_get_mclk(adev, false) * 10;
> } else {
> dev_info.max_engine_clock = adev->clock.default_sclk * 10;
> dev_info.max_memory_clock =
> adev->clock.default_mclk * 10; diff --git
> a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index 5540259..0162d1e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -380,6 +380,17 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
> goto fail;
> }
>
> + if (amdgpu_sriov_vf(adev)) {
> + if (amdgim_is_hwperf(adev) &&
> + adev->virt.ops->force_dpm_level) {
> + mutex_lock(&adev->pm.mutex);
> + adev->virt.ops->force_dpm_level(adev, level);
> + mutex_unlock(&adev->pm.mutex);
> + return count;
> + } else
> + return -EINVAL;
Coding style. If any clause has parens, all should. E.g., this should be:
} else {
return -EINVAL;
}
With that fixed, this patch is:
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
> + }
> +
> if (current_level == level)
> return count;
>
> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
> struct drm_device *ddev = dev_get_drvdata(dev);
> struct amdgpu_device *adev = ddev->dev_private;
>
> + if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
> + adev->virt.ops->get_pp_clk)
> + return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> +
> if (is_support_sw_smu(adev))
> return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
> else if (adev->powerplay.pp_funcs->print_clock_levels)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 462a04e..7e7f9ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -375,4 +375,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
> }
> }
>
> +static uint32_t parse_clk(char *buf, bool min) {
> + char *ptr = buf;
> + uint32_t clk = 0;
> +
> + do {
> + ptr = strchr(ptr, ':');
> + if (!ptr)
> + break;
> + ptr+=2;
> + clk = simple_strtoul(ptr, NULL, 10);
> + } while (!min);
> +
> + return clk * 100;
> +}
> +
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool
> +lowest) {
> + char *buf = NULL;
> + uint32_t clk = 0;
> +
> + buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
> + if (!buf)
> + return -ENOMEM;
> +
> + adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> + clk = parse_clk(buf, lowest);
> +
> + kfree(buf);
> +
> + return clk;
> +}
> +
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool
> +lowest) {
> + char *buf = NULL;
> + uint32_t clk = 0;
> +
> + buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
> + if (!buf)
> + return -ENOMEM;
> +
> + adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
> + clk = parse_clk(buf, lowest);
> +
> + kfree(buf);
> +
> + return clk;
> +}
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 722deef..584947b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
> int (*reset_gpu)(struct amdgpu_device *adev);
> int (*wait_reset)(struct amdgpu_device *adev);
> void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32
> data1, u32 data2, u32 data3);
> + int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
> + int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
> };
>
> /*
> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
> AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
> /* VRAM LOST by GIM */
> AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
> + /* HW PERF SIM in GIM */
> + AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
> };
>
> struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct
> amdgpu_virt {
> struct amdgpu_vf_error_buffer vf_errors;
> struct amdgpu_virt_fw_reserve fw_reserve;
> uint32_t gim_feature;
> + /* protect DPM events to GIM */
> + struct mutex dpm_mutex;
> };
>
> #define amdgpu_sriov_enabled(adev) \
> @@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
> #endif }
>
> +#define amdgim_is_hwperf(adev) \
> + ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
> +
> bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void
> amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t
> amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@
> -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
> unsigned int key,
> unsigned int chksum); void
> amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool
> +lowest); uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev,
> +bool lowest);
>
> #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> index 73851eb..8dbad49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
> xgpu_ai_mailbox_set_valid(adev, false); }
>
> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type,
> +char *buf) {
> + int r = 0;
> + u32 req, val, size;
> +
> + if (!amdgim_is_hwperf(adev) || buf == NULL)
> + return -EBADRQC;
> +
> + switch(type) {
> + case PP_SCLK:
> + req = IDH_IRQ_GET_PP_SCLK;
> + break;
> + case PP_MCLK:
> + req = IDH_IRQ_GET_PP_MCLK;
> + break;
> + default:
> + return -EBADRQC;
> + }
> +
> + mutex_lock(&adev->virt.dpm_mutex);
> +
> + xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
> +
> + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> + if (!r && adev->fw_vram_usage.va != NULL) {
> + val = RREG32_NO_KIQ(
> + SOC15_REG_OFFSET(NBIO, 0,
> + mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
> + size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
> + val), PAGE_SIZE);
> +
> + if (size < PAGE_SIZE)
> + strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
> + else
> + size = 0;
> +
> + r = size;
> + goto out;
> + }
> +
> + r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> + if(r)
> + pr_info("%s DPM request failed",
> + (type == PP_SCLK)? "SCLK" : "MCLK");
> +
> +out:
> + mutex_unlock(&adev->virt.dpm_mutex);
> + return r;
> +}
> +
> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
> +level) {
> + int r = 0;
> + u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
> +
> + if (!amdgim_is_hwperf(adev))
> + return -EBADRQC;
> +
> + mutex_lock(&adev->virt.dpm_mutex);
> + xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
> +
> + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> + if (!r)
> + goto out;
> +
> + r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> + if (!r)
> + pr_info("DPM request failed");
> + else
> + pr_info("Mailbox is broken");
> +
> +out:
> + mutex_unlock(&adev->virt.dpm_mutex);
> + return r;
> +}
> +
> static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
> enum idh_request req) { @@
> -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
> .reset_gpu = xgpu_ai_request_reset,
> .wait_reset = NULL,
> .trans_msg = xgpu_ai_mailbox_trans_msg,
> + .get_pp_clk = xgpu_ai_get_pp_clk,
> + .force_dpm_level = xgpu_ai_force_dpm_level,
> };
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> index b4a9cee..39d151b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> @@ -35,6 +35,10 @@ enum idh_request {
> IDH_REL_GPU_FINI_ACCESS,
> IDH_REQ_GPU_RESET_ACCESS,
>
> + IDH_IRQ_FORCE_DPM_LEVEL = 10,
> + IDH_IRQ_GET_PP_SCLK,
> + IDH_IRQ_GET_PP_MCLK,
> +
> IDH_LOG_VF_ERROR = 200,
> };
>
> @@ -43,6 +47,8 @@ enum idh_event {
> IDH_READY_TO_ACCESS_GPU,
> IDH_FLR_NOTIFICATION,
> IDH_FLR_NOTIFICATION_CMPL,
> + IDH_SUCCESS,
> + IDH_FAIL,
> IDH_EVENT_MAX
> };
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 4+ messages in thread
* RE: [PATCH] drm/amdgpu: support dpm level modification under virtualization v3
[not found] ` <1554906332-10229-1-git-send-email-yttao-5C7GfCeVMHo@public.gmane.org>
2019-04-10 15:31 ` Alex Deucher
@ 2019-04-11 1:13 ` Quan, Evan
1 sibling, 0 replies; 4+ messages in thread
From: Quan, Evan @ 2019-04-11 1:13 UTC (permalink / raw)
To: amd-gfx-PD4FTy7X32lNgt0PjOBp9y5qC8QIuHrW; +Cc: Tao, Yintian
Reviewed-by: Evan Quan <evan.quan@amd.com>
> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces@lists.freedesktop.org> On Behalf Of
> Yintian Tao
> Sent: Wednesday, April 10, 2019 10:26 PM
> To: amd-gfx@lists.freedesktop.org
> Cc: Tao, Yintian <Yintian.Tao@amd.com>
> Subject: [PATCH] drm/amdgpu: support dpm level modification under
> virtualization v3
>
> Under vega10 virtualization, smu ip block will not be added.
> Therefore, we need to add a pp clk query and force dpm level function at
> amdgpu_virt_ops to support the feature.
>
> v2: add get_pp_clk existence check and use kzalloc to allocate buf
>
> v3: return -ENOMEM for allocation failure and correct the coding style
>
> Change-Id: I713419c57b854082f6f739f1d32a055c7115e620
> Signed-off-by: Yintian Tao <yttao@amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
> drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 ++
> drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 15 ++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 49
> +++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 11 +++++
> drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 78
> ++++++++++++++++++++++++++++++
> drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 6 +++
> 7 files changed, 164 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index 3ff8899..bb0fd5a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2486,6 +2486,7 @@ int amdgpu_device_init(struct amdgpu_device
> *adev,
> mutex_init(&adev->virt.vf_errors.lock);
> hash_init(adev->mn_hash);
> mutex_init(&adev->lock_reset);
> + mutex_init(&adev->virt.dpm_mutex);
>
> amdgpu_device_check_arguments(adev);
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> index 6190495..29ec28f 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> @@ -727,6 +727,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev,
> void *data, struct drm_file
> if (adev->pm.dpm_enabled) {
> dev_info.max_engine_clock =
> amdgpu_dpm_get_sclk(adev, false) * 10;
> dev_info.max_memory_clock =
> amdgpu_dpm_get_mclk(adev, false) * 10;
> + } else if (amdgpu_sriov_vf(adev) &&
> amdgim_is_hwperf(adev) &&
> + adev->virt.ops->get_pp_clk) {
> + dev_info.max_engine_clock =
> amdgpu_virt_get_sclk(adev, false) * 10;
> + dev_info.max_memory_clock =
> amdgpu_virt_get_mclk(adev, false) * 10;
> } else {
> dev_info.max_engine_clock = adev-
> >clock.default_sclk * 10;
> dev_info.max_memory_clock = adev-
> >clock.default_mclk * 10; diff --git
> a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> index 5540259..0162d1e 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
> @@ -380,6 +380,17 @@ static ssize_t
> amdgpu_set_dpm_forced_performance_level(struct device *dev,
> goto fail;
> }
>
> + if (amdgpu_sriov_vf(adev)) {
> + if (amdgim_is_hwperf(adev) &&
> + adev->virt.ops->force_dpm_level) {
> + mutex_lock(&adev->pm.mutex);
> + adev->virt.ops->force_dpm_level(adev, level);
> + mutex_unlock(&adev->pm.mutex);
> + return count;
> + } else
> + return -EINVAL;
> + }
> +
> if (current_level == level)
> return count;
>
> @@ -843,6 +854,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct
> device *dev,
> struct drm_device *ddev = dev_get_drvdata(dev);
> struct amdgpu_device *adev = ddev->dev_private;
>
> + if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
> + adev->virt.ops->get_pp_clk)
> + return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> +
> if (is_support_sw_smu(adev))
> return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
> else if (adev->powerplay.pp_funcs->print_clock_levels)
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> index 462a04e..7e7f9ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
> @@ -375,4 +375,53 @@ void amdgpu_virt_init_data_exchange(struct
> amdgpu_device *adev)
> }
> }
>
> +static uint32_t parse_clk(char *buf, bool min) {
> + char *ptr = buf;
> + uint32_t clk = 0;
> +
> + do {
> + ptr = strchr(ptr, ':');
> + if (!ptr)
> + break;
> + ptr+=2;
> + clk = simple_strtoul(ptr, NULL, 10);
> + } while (!min);
> +
> + return clk * 100;
> +}
> +
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
> +{
> + char *buf = NULL;
> + uint32_t clk = 0;
> +
> + buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
> + if (!buf)
> + return -ENOMEM;
> +
> + adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
> + clk = parse_clk(buf, lowest);
> +
> + kfree(buf);
> +
> + return clk;
> +}
> +
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
> +{
> + char *buf = NULL;
> + uint32_t clk = 0;
> +
> + buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
> + if (!buf)
> + return -ENOMEM;
> +
> + adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
> + clk = parse_clk(buf, lowest);
> +
> + kfree(buf);
> +
> + return clk;
> +}
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> index 722deef..584947b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
> @@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
> int (*reset_gpu)(struct amdgpu_device *adev);
> int (*wait_reset)(struct amdgpu_device *adev);
> void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1,
> u32 data2, u32 data3);
> + int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
> + int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
> };
>
> /*
> @@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
> AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
> /* VRAM LOST by GIM */
> AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
> + /* HW PERF SIM in GIM */
> + AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
> };
>
> struct amd_sriov_msg_pf2vf_info_header { @@ -252,6 +256,8 @@ struct
> amdgpu_virt {
> struct amdgpu_vf_error_buffer vf_errors;
> struct amdgpu_virt_fw_reserve fw_reserve;
> uint32_t gim_feature;
> + /* protect DPM events to GIM */
> + struct mutex dpm_mutex;
> };
>
> #define amdgpu_sriov_enabled(adev) \
> @@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void) #endif }
>
> +#define amdgim_is_hwperf(adev) \
> + ((adev)->virt.gim_feature &
> AMDGIM_FEATURE_HW_PERF_SIMULATION)
> +
> bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void
> amdgpu_virt_init_setting(struct amdgpu_device *adev); uint32_t
> amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); @@ -
> 295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
> unsigned long obj_size,
> unsigned int key,
> unsigned int chksum);
> void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
> +uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
> +uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
>
> #endif
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> index 73851eb..8dbad49 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
> @@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct
> amdgpu_device *adev,
> xgpu_ai_mailbox_set_valid(adev, false); }
>
> +static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type,
> +char *buf) {
> + int r = 0;
> + u32 req, val, size;
> +
> + if (!amdgim_is_hwperf(adev) || buf == NULL)
> + return -EBADRQC;
> +
> + switch(type) {
> + case PP_SCLK:
> + req = IDH_IRQ_GET_PP_SCLK;
> + break;
> + case PP_MCLK:
> + req = IDH_IRQ_GET_PP_MCLK;
> + break;
> + default:
> + return -EBADRQC;
> + }
> +
> + mutex_lock(&adev->virt.dpm_mutex);
> +
> + xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
> +
> + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> + if (!r && adev->fw_vram_usage.va != NULL) {
> + val = RREG32_NO_KIQ(
> + SOC15_REG_OFFSET(NBIO, 0,
> + mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
> + size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
> + val), PAGE_SIZE);
> +
> + if (size < PAGE_SIZE)
> + strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
> + else
> + size = 0;
> +
> + r = size;
> + goto out;
> + }
> +
> + r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> + if(r)
> + pr_info("%s DPM request failed",
> + (type == PP_SCLK)? "SCLK" : "MCLK");
> +
> +out:
> + mutex_unlock(&adev->virt.dpm_mutex);
> + return r;
> +}
> +
> +static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32
> +level) {
> + int r = 0;
> + u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
> +
> + if (!amdgim_is_hwperf(adev))
> + return -EBADRQC;
> +
> + mutex_lock(&adev->virt.dpm_mutex);
> + xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
> +
> + r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
> + if (!r)
> + goto out;
> +
> + r = xgpu_ai_poll_msg(adev, IDH_FAIL);
> + if (!r)
> + pr_info("DPM request failed");
> + else
> + pr_info("Mailbox is broken");
> +
> +out:
> + mutex_unlock(&adev->virt.dpm_mutex);
> + return r;
> +}
> +
> static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
> enum idh_request req)
> {
> @@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
> .reset_gpu = xgpu_ai_request_reset,
> .wait_reset = NULL,
> .trans_msg = xgpu_ai_mailbox_trans_msg,
> + .get_pp_clk = xgpu_ai_get_pp_clk,
> + .force_dpm_level = xgpu_ai_force_dpm_level,
> };
> diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> index b4a9cee..39d151b 100644
> --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
> @@ -35,6 +35,10 @@ enum idh_request {
> IDH_REL_GPU_FINI_ACCESS,
> IDH_REQ_GPU_RESET_ACCESS,
>
> + IDH_IRQ_FORCE_DPM_LEVEL = 10,
> + IDH_IRQ_GET_PP_SCLK,
> + IDH_IRQ_GET_PP_MCLK,
> +
> IDH_LOG_VF_ERROR = 200,
> };
>
> @@ -43,6 +47,8 @@ enum idh_event {
> IDH_READY_TO_ACCESS_GPU,
> IDH_FLR_NOTIFICATION,
> IDH_FLR_NOTIFICATION_CMPL,
> + IDH_SUCCESS,
> + IDH_FAIL,
> IDH_EVENT_MAX
> };
>
> --
> 2.7.4
>
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
_______________________________________________
amd-gfx mailing list
amd-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/amd-gfx
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2019-04-11 1:13 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-04-10 14:25 [PATCH] drm/amdgpu: support dpm level modification under virtualization v3 Yintian Tao
[not found] ` <1554906332-10229-1-git-send-email-yttao-5C7GfCeVMHo@public.gmane.org>
2019-04-10 15:31 ` Alex Deucher
[not found] ` <CADnq5_P-P7XKwzF4Ggdwsb_tRTAkgoHEtJAVYf7JvrX2GRfCQw-JsoAwUIsXosN+BqQ9rBEUg@public.gmane.org>
2019-04-10 15:35 ` Tao, Yintian
2019-04-11 1:13 ` Quan, Evan
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.