path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2343-drm-amd-powerplay-add-function-print_clk_levels-for-.patch
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2343-drm-amd-powerplay-add-function-print_clk_levels-for-.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2343-drm-amd-powerplay-add-function-print_clk_levels-for-.patch  324
1 file changed, 324 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2343-drm-amd-powerplay-add-function-print_clk_levels-for-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2343-drm-amd-powerplay-add-function-print_clk_levels-for-.patch
new file mode 100644
index 00000000..65da531a
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2343-drm-amd-powerplay-add-function-print_clk_levels-for-.patch
@@ -0,0 +1,324 @@
+From 8361e249e390fdb6a3d8d87b3d1542c4a4501f01 Mon Sep 17 00:00:00 2001
+From: Kevin Wang <kevin1.wang@amd.com>
+Date: Thu, 18 Apr 2019 15:06:34 +0800
+Subject: [PATCH 2343/2940] drm/amd/powerplay: add function print_clk_levels
+ for navi10
+
+add the print_clk_levels sysfs interface for navi10.
+
+Signed-off-by: Kevin Wang <kevin1.wang@amd.com>
+Reviewed-by: Huang Rui <ray.huang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 24 +++++-----
+ .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 13 ++++--
+ drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 44 +++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 22 +++++-----
+ 4 files changed, 77 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 7be32a6a4b9f..ae9eeaf5d0e4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -726,10 +726,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
+ uint32_t size = 0;
+
+ if (is_support_sw_smu(adev)) {
+- size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf);
+- size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size);
+- size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size);
+- size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size);
++ size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
++ size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
++ size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
++ size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
+ return size;
+ } else if (adev->powerplay.pp_funcs->print_clock_levels) {
+ size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
+@@ -840,7 +840,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+ return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
+ if (is_support_sw_smu(adev))
+- return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
++ return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ else
+@@ -923,7 +923,7 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
+ return adev->virt.ops->get_pp_clk(adev,PP_MCLK,buf);
+
+ if (is_support_sw_smu(adev))
+- return smu_print_clk_levels(&adev->smu, PP_MCLK, buf);
++ return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ else
+@@ -966,7 +966,7 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (is_support_sw_smu(adev))
+- return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
++ return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
+ else
+@@ -1006,7 +1006,7 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (is_support_sw_smu(adev))
+- return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
++ return smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
+ else
+@@ -1046,7 +1046,7 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (is_support_sw_smu(adev))
+- return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
++ return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
+ else
+@@ -1086,7 +1086,7 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ if (is_support_sw_smu(adev))
+- return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
++ return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ else
+@@ -1127,7 +1127,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
+ uint32_t value = 0;
+
+ if (is_support_sw_smu(adev))
+- value = smu_get_od_percentage(&(adev->smu), OD_SCLK);
++ value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
+ else if (adev->powerplay.pp_funcs->get_sclk_od)
+ value = amdgpu_dpm_get_sclk_od(adev);
+
+@@ -1178,7 +1178,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
+ uint32_t value = 0;
+
+ if (is_support_sw_smu(adev))
+- value = smu_get_od_percentage(&(adev->smu), OD_MCLK);
++ value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
+ else if (adev->powerplay.pp_funcs->get_mclk_od)
+ value = amdgpu_dpm_get_mclk_od(adev);
+
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+index 12ab1ee53588..16d6f553ba7f 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+@@ -244,6 +244,13 @@ enum smu_clk_type
+ SMU_PIXCLK,
+ SMU_PHYCLK,
+ SMU_FCLK,
++ SMU_SCLK,
++ SMU_MCLK,
++ SMU_PCIE,
++ SMU_OD_SCLK,
++ SMU_OD_MCLK,
++ SMU_OD_VDDC_CURVE,
++ SMU_OD_RANGE,
+ SMU_CLK_COUNT,
+ };
+
+@@ -531,7 +538,7 @@ struct pptable_funcs {
+ int (*set_default_dpm_table)(struct smu_context *smu);
+ int (*set_power_state)(struct smu_context *smu);
+ int (*populate_umd_state_clk)(struct smu_context *smu);
+- int (*print_clk_levels)(struct smu_context *smu, enum pp_clock_type type, char *buf);
++ int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
+ int (*force_clk_levels)(struct smu_context *smu, enum pp_clock_type type, uint32_t mask);
+ int (*set_default_od8_settings)(struct smu_context *smu);
+ int (*update_specified_od8_value)(struct smu_context *smu,
+@@ -767,8 +774,8 @@ struct smu_funcs
+ ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
+ #define smu_get_current_clk_freq(smu, clk_id, value) \
+ ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+-#define smu_print_clk_levels(smu, type, buf) \
+- ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (type), (buf)) : 0)
++#define smu_print_clk_levels(smu, clk_type, buf) \
++ ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)
+ #define smu_force_clk_levels(smu, type, level) \
+ ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (type), (level)) : 0)
+ #define smu_get_od_percentage(smu, type) \
+diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+index 788bdf6dfac7..5863fa691a91 100644
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -105,8 +105,11 @@ static int navi10_message_map[SMU_MSG_MAX_COUNT] = {
+
+ static int navi10_clk_map[SMU_CLK_COUNT] = {
+ CLK_MAP(GFXCLK, PPCLK_GFXCLK),
++ CLK_MAP(SCLK, PPCLK_GFXCLK),
+ CLK_MAP(SOCCLK, PPCLK_SOCCLK),
++ CLK_MAP(FCLK, PPCLK_SOCCLK),
+ CLK_MAP(UCLK, PPCLK_UCLK),
++ CLK_MAP(MCLK, PPCLK_UCLK),
+ CLK_MAP(DCLK, PPCLK_DCLK),
+ CLK_MAP(VCLK, PPCLK_VCLK),
+ CLK_MAP(DCEFCLK, PPCLK_DCEFCLK),
+@@ -523,6 +526,46 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
+ return ret;
+ }
+
++static int navi10_print_clk_levels(struct smu_context *smu,
++ enum smu_clk_type clk_type, char *buf)
++{
++ int i, size = 0, ret = 0;
++ uint32_t cur_value = 0, value = 0, count = 0;
++
++ switch (clk_type) {
++ case SMU_GFXCLK:
++ case SMU_SCLK:
++ case SMU_SOCCLK:
++ case SMU_MCLK:
++ case SMU_UCLK:
++ case SMU_FCLK:
++ case SMU_DCEFCLK:
++ ret = smu_get_current_clk_freq(smu, clk_type, &cur_value);
++ if (ret)
++ return size;
++
++ size += sprintf(buf, "current clk: %uMhz\n", cur_value);
++
++ ret = smu_get_dpm_level_count(smu, clk_type, &count);
++ if (ret)
++ return size;
++
++ for (i = 0; i < count; i++) {
++ ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
++ if (ret)
++ return size;
++
++ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
++ cur_value == value ? "*" : "");
++ }
++ break;
++ default:
++ break;
++ }
++
++ return size;
++}
++
+ static const struct pptable_funcs navi10_ppt_funcs = {
+ .tables_init = navi10_tables_init,
+ .alloc_dpm_context = navi10_allocate_dpm_context,
+@@ -538,6 +581,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
+ .set_default_dpm_table = navi10_set_default_dpm_table,
+ .dpm_set_uvd_enable = navi10_dpm_set_uvd_enable,
+ .get_current_clk_freq_by_table = navi10_get_current_clk_freq_by_table,
++ .print_clk_levels = navi10_print_clk_levels,
+ };
+
+ void navi10_set_ppt_funcs(struct smu_context *smu)
+diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+index e9f0230fc274..a68801d05cc1 100644
+--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+@@ -892,7 +892,7 @@ static int vega20_get_clk_table(struct smu_context *smu,
+ }
+
+ static int vega20_print_clk_levels(struct smu_context *smu,
+- enum pp_clock_type type, char *buf)
++ enum smu_clk_type type, char *buf)
+ {
+ int i, now, size = 0;
+ int ret = 0;
+@@ -912,7 +912,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ dpm_table = smu_dpm->dpm_context;
+
+ switch (type) {
+- case PP_SCLK:
++ case SMU_SCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current gfx clk Failed!");
+@@ -933,7 +933,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ ? "*" : "");
+ break;
+
+- case PP_MCLK:
++ case SMU_MCLK:
+ ret = smu_get_current_clk_freq(smu, SMU_UCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current mclk Failed!");
+@@ -954,7 +954,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ ? "*" : "");
+ break;
+
+- case PP_SOCCLK:
++ case SMU_SOCCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_SOCCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current socclk Failed!");
+@@ -975,7 +975,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ ? "*" : "");
+ break;
+
+- case PP_FCLK:
++ case SMU_FCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_FCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current fclk Failed!");
+@@ -990,7 +990,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ ? "*" : "");
+ break;
+
+- case PP_DCEFCLK:
++ case SMU_DCEFCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_DCEFCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current dcefclk Failed!");
+@@ -1010,7 +1010,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+
+- case PP_PCIE:
++ case SMU_PCIE:
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+@@ -1035,7 +1035,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+ "*" : "");
+ break;
+
+- case OD_SCLK:
++ case SMU_OD_SCLK:
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
+@@ -1047,7 +1047,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+
+ break;
+
+- case OD_MCLK:
++ case SMU_OD_MCLK:
+ if (od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "1: %10uMhz\n",
+@@ -1056,7 +1056,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+
+ break;
+
+- case OD_VDDC_CURVE:
++ case SMU_OD_VDDC_CURVE:
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+@@ -1077,7 +1077,7 @@ static int vega20_print_clk_levels(struct smu_context *smu,
+
+ break;
+
+- case OD_RANGE:
++ case SMU_OD_RANGE:
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+--
+2.17.1
+
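For context, the smu_print_clk_levels() macro touched above uses the null-guarded
function-pointer dispatch found throughout amdgpu_smu.h: it calls the per-ASIC
ppt_funcs hook when one is registered and falls back to returning 0 otherwise.
Below is a minimal, self-contained C sketch of that dispatch pattern; the struct
layouts and the demo callback are simplified stand-ins, not the driver's actual
definitions.

    #include <stdio.h>

    enum smu_clk_type { SMU_SCLK, SMU_MCLK, SMU_CLK_COUNT };

    struct smu_context;  /* forward declaration */

    /* Simplified stand-in for the driver's per-ASIC function table. */
    struct pptable_funcs {
            int (*print_clk_levels)(struct smu_context *smu,
                                    enum smu_clk_type clk_type, char *buf);
    };

    struct smu_context {
            const struct pptable_funcs *ppt_funcs;
    };

    /* Same shape as the smu_print_clk_levels macro in amdgpu_smu.h:
     * dispatch to the hook if it is populated, otherwise return 0. */
    #define smu_print_clk_levels(smu, clk_type, buf) \
            ((smu)->ppt_funcs->print_clk_levels ? \
             (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)

    static int demo_print_clk_levels(struct smu_context *smu,
                                     enum smu_clk_type clk_type, char *buf)
    {
            (void)smu;
            (void)clk_type;
            /* Mimics the "index: frequencyMhz [*]" lines emitted by
             * navi10_print_clk_levels in the patch above. */
            return sprintf(buf, "0: 300Mhz\n1: 1755Mhz *\n");
    }

    int main(void)
    {
            char buf[128];
            static const struct pptable_funcs funcs = {
                    .print_clk_levels = demo_print_clk_levels,
            };
            struct smu_context smu = { .ppt_funcs = &funcs };

            int len = smu_print_clk_levels(&smu, SMU_SCLK, buf);
            printf("%.*s", len, buf);
            return 0;
    }

With this patch applied, reading a clock sysfs file such as pp_dpm_sclk on a
navi10 board goes through this path: navi10_print_clk_levels prints the current
clock, then one "index: frequencyMhz" line per DPM level, with "*" marking the
level that matches the current frequency.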