Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4266-drm-amd-powerplay-add-lock-protection-for-swSMU-APIs.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4266-drm-amd-powerplay-add-lock-protection-for-swSMU-APIs.patch  1829
1 file changed, 1829 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4266-drm-amd-powerplay-add-lock-protection-for-swSMU-APIs.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4266-drm-amd-powerplay-add-lock-protection-for-swSMU-APIs.patch
new file mode 100644
index 00000000..cbfdf0d1
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/4266-drm-amd-powerplay-add-lock-protection-for-swSMU-APIs.patch
@@ -0,0 +1,1829 @@
+From dc6cf14a58b1339db33016faef4de4f2efbb300a Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Wed, 16 Oct 2019 14:43:07 +0800
+Subject: [PATCH 4266/4736] drm/amd/powerplay: add lock protection for swSMU
+ APIs V2
+
+This is a quick and low risk fix. Those APIs which
+are exposed to other IPs or to support sysfs/hwmon
+interfaces or DAL will have lock protection. Meanwhile
+no lock protection is enforced for swSMU internal used
+APIs. Future optimization is needed.
+
+V2: strip the lock protection for all swSMU internal APIs
+
+Change-Id: I8392652c9da1574a85acd9b171f04380f3630852
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Feifei Xu <Feifei.Xu@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c       |   6 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h       |   6 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c        |  23 +-
+ .../amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c  |   6 +-
+ drivers/gpu/drm/amd/powerplay/amdgpu_smu.c    | 700 ++++++++++++++++--
+ drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  |   3 -
+ .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h    | 162 ++--
+ drivers/gpu/drm/amd/powerplay/navi10_ppt.c    |  15 +-
+ drivers/gpu/drm/amd/powerplay/renoir_ppt.c    |  14 +-
+ drivers/gpu/drm/amd/powerplay/smu_v11_0.c     |  22 +-
+ drivers/gpu/drm/amd/powerplay/smu_v12_0.c     |   3 -
+ drivers/gpu/drm/amd/powerplay/vega20_ppt.c    |  20 +-
+ 12 files changed, 781 insertions(+), 199 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+index 263265245e19..28d32725285b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+@@ -912,7 +912,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+ 	if (is_support_sw_smu(adev)) {
+ 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
+ 					     low ? &clk_freq : NULL,
+-					     !low ? &clk_freq : NULL);
++					     !low ? &clk_freq : NULL,
++					     true);
+ 		if (ret)
+ 			return 0;
+ 		return clk_freq * 100;
+@@ -930,7 +931,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+ 	if (is_support_sw_smu(adev)) {
+ 		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
+ 					     low ? &clk_freq : NULL,
+-					     !low ? &clk_freq : NULL);
++					     !low ? &clk_freq : NULL,
++					     true);
+ 		if (ret)
+ 			return 0;
+ 		return clk_freq * 100;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+index 1c5c0fd76dbf..2cfb677272af 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+@@ -298,12 +298,6 @@ enum amdgpu_pcie_gen {
+ #define amdgpu_dpm_get_current_power_state(adev) \
+ 		((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
+ 
+-#define amdgpu_smu_get_current_power_state(adev) \
+-		((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
+-
+-#define amdgpu_smu_set_power_state(adev) \
+-		((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
+-
+ #define amdgpu_dpm_get_pp_num_states(adev, data) \
+ 		((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 571d10de9eca..dd94467a3d5d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -158,7 +158,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
+ 
+ 	if (is_support_sw_smu(adev)) {
+ 		if (adev->smu.ppt_funcs->get_current_power_state)
+-			pm = amdgpu_smu_get_current_power_state(adev);
++			pm = smu_get_current_power_state(&adev->smu);
+ 		else
+ 			pm = adev->pm.dpm.user_state;
+ 	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
+@@ -904,7 +904,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+ 		return ret;
+ 
+ 	if (is_support_sw_smu(adev))
+-		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
++		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
+ 	else if (adev->powerplay.pp_funcs->force_clock_level)
+ 		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+ 
+@@ -951,7 +951,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
+ 		return ret;
+ 
+ 	if (is_support_sw_smu(adev))
+-		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
++		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
+ 	else if (adev->powerplay.pp_funcs->force_clock_level)
+ 		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+ 
+@@ -991,7 +991,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
+ 		return ret;
+ 
+ 	if (is_support_sw_smu(adev))
+-		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
++		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
+ 	else if (adev->powerplay.pp_funcs->force_clock_level)
+ 		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
+ 
+@@ -1031,7 +1031,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
+ 		return ret;
+ 
+ 	if (is_support_sw_smu(adev))
+-		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
++		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
+ 	else if (adev->powerplay.pp_funcs->force_clock_level)
+ 		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
+ 
+@@ -1071,7 +1071,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
+ 		return ret;
+ 
+ 	if (is_support_sw_smu(adev))
+-		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
++		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
+ 	else if (adev->powerplay.pp_funcs->force_clock_level)
+ 		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
+ 
+@@ -1111,7 +1111,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
+ 		return ret;
+ 
+ 	if (is_support_sw_smu(adev))
+-		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
++		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
+ 	else if (adev->powerplay.pp_funcs->force_clock_level)
+ 		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+ 
+@@ -1303,7 +1303,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
+ 	}
+ 	parameter[parameter_size] = profile_mode;
+ 	if (is_support_sw_smu(adev))
+-		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
++		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
+ 	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
+ 		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
+ 	if (!ret)
+@@ -2012,7 +2012,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
+ 	uint32_t limit = 0;
+ 
+ 	if (is_support_sw_smu(adev)) {
+-		smu_get_power_limit(&adev->smu, &limit, true);
++		smu_get_power_limit(&adev->smu, &limit, true, true);
+ 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
+@@ -2030,7 +2030,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
+ 	uint32_t limit = 0;
+ 
+ 	if (is_support_sw_smu(adev)) {
+-		smu_get_power_limit(&adev->smu, &limit, false);
++		smu_get_power_limit(&adev->smu, &limit, false, true);
+ 		return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ 	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ 		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
+@@ -3011,7 +3011,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+ 		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
+ 		smu_handle_task(&adev->smu,
+ 				smu_dpm->dpm_level,
+-				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
++				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
++				true);
+ 	} else {
+ 		if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ 			if (!amdgpu_device_has_dc_support(adev)) {
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+index 8a5eedb6a37a..c1b6abf2634c 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+@@ -866,7 +866,7 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
+ 	if (!smu->funcs->get_max_sustainable_clocks_by_dc)
+ 		return PP_SMU_RESULT_UNSUPPORTED;
+ 
+-	if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
++	if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
+ 		return PP_SMU_RESULT_OK;
+ 
+ 	return PP_SMU_RESULT_FAIL;
+@@ -885,7 +885,7 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
+ 	if (!smu->ppt_funcs->get_uclk_dpm_states)
+ 		return PP_SMU_RESULT_UNSUPPORTED;
+ 
+-	if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
++	if (!smu_get_uclk_dpm_states(smu,
+ 			clock_values_in_khz, num_states))
+ 		return PP_SMU_RESULT_OK;
+ 
+@@ -906,7 +906,7 @@ enum pp_smu_status pp_rn_get_dpm_clock_table(
+ 	if (!smu->ppt_funcs->get_dpm_clock_table)
+ 		return PP_SMU_RESULT_UNSUPPORTED;
+ 
+-	if (!smu->ppt_funcs->get_dpm_clock_table(smu, clock_table))
++	if (!smu_get_dpm_clock_table(smu, clock_table))
+ 		return PP_SMU_RESULT_OK;
+ 
+ 	return PP_SMU_RESULT_FAIL;
+diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+index fb5a55091292..d748ad9c2159 100644
+--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+@@ -67,6 +67,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
+ 	uint32_t sort_feature[SMU_FEATURE_COUNT];
+ 	uint64_t hw_feature_count = 0;
+ 
++	mutex_lock(&smu->mutex);
++
+ 	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ 	if (ret)
+ 		goto failed;
+@@ -92,6 +94,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
+ 	}
+ 
+ failed:
++	mutex_unlock(&smu->mutex);
++
+ 	return size;
+ }
+ 
+@@ -149,9 +153,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
+ 	uint64_t feature_2_disabled = 0;
+ 	uint64_t feature_enables = 0;
+ 
++	mutex_lock(&smu->mutex);
++
+ 	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ 	if (ret)
+-		return ret;
++		goto out;
+ 
+ 	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);
+ 
+@@ -161,14 +167,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
+ 	if (feature_2_enabled) {
+ 		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
+ 	if (feature_2_disabled) {
+ 		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
+ 
++out:
++	mutex_unlock(&smu->mutex);
++
+ 	return ret;
+ }
+ 
+@@ -254,7 +263,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ }
+ 
+ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+-			   uint32_t *min, uint32_t *max)
++			   uint32_t *min, uint32_t *max, bool lock_needed)
+ {
+ 	uint32_t clock_limit;
+ 	int ret = 0;
+@@ -262,6 +271,9 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ 	if (!min && !max)
+ 		return -EINVAL;
+ 
++	if (lock_needed)
++		mutex_lock(&smu->mutex);
++
+ 	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+ 		switch (clk_type) {
+ 		case SMU_MCLK:
+@@ -285,14 +297,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ 			*min = clock_limit / 100;
+ 		if (max)
+ 			*max = clock_limit / 100;
+-
+-		return 0;
++	} else {
++		/*
++		 * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
++		 * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
++		 */
++		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
+ 	}
+-	/*
+-	 * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the
+-	 * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs).
+-	 */
+-	ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
++
++	if (lock_needed)
++		mutex_unlock(&smu->mutex);
++
+ 	return ret;
+ }
+ 
+@@ -369,6 +384,8 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
+ {
+ 	int ret = 0;
+ 
++	mutex_lock(&smu->mutex);
++
+ 	switch (block_type) {
+ 	case AMD_IP_BLOCK_TYPE_UVD:
+ 		ret = smu_dpm_set_uvd_enable(smu, gate);
+@@ -386,13 +403,9 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
+ 		break;
+ 	}
+ 
+-	return ret;
+-}
++	mutex_unlock(&smu->mutex);
+ 
+-enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+-{
+-	/* not support power state */
+-	return POWER_STATE_TYPE_DEFAULT;
++	return ret;
+ }
+ 
+ int smu_get_power_num_states(struct smu_context *smu,
+@@ -520,16 +533,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
+ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
+ {
+ 	struct smu_table_context *smu_table = &smu->smu_table;
++	uint32_t powerplay_table_size;
+ 
+ 	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
+ 		return -EINVAL;
+ 
++	mutex_lock(&smu->mutex);
++
+ 	if (smu_table->hardcode_pptable)
+ 		*table = smu_table->hardcode_pptable;
+ 	else
+ 		*table = smu_table->power_play_table;
+ 
+-	return smu_table->power_play_table_size;
++	powerplay_table_size = smu_table->power_play_table_size;
++
++	mutex_unlock(&smu->mutex);
++
++	return powerplay_table_size;
+ }
+ 
+ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
+@@ -556,14 +576,11 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
+ 	memcpy(smu_table->hardcode_pptable, buf, size);
+ 	smu_table->power_play_table = smu_table->hardcode_pptable;
+ 	smu_table->power_play_table_size = size;
+-	mutex_unlock(&smu->mutex);
+ 
+ 	ret = smu_reset(smu);
+ 	if (ret)
+ 		pr_info("smu reset failed, ret = %d\n", ret);
+ 
+-	return ret;
+-
+ failed:
+ 	mutex_unlock(&smu->mutex);
+ 	return ret;
+@@ -726,11 +743,10 @@ static int smu_late_init(void *handle)
+ 	if (!smu->pm_enabled)
+ 		return 0;
+ 
+-	mutex_lock(&smu->mutex);
+ 	smu_handle_task(&adev->smu,
+ 			smu->smu_dpm.dpm_level,
+-			AMD_PP_TASK_COMPLETE_INIT);
+-	mutex_unlock(&smu->mutex);
++			AMD_PP_TASK_COMPLETE_INIT,
++			false);
+ 
+ 	return 0;
+ }
+@@ -1074,7 +1090,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
+ 		if (ret)
+ 			return ret;
+ 
+-		ret = smu_get_power_limit(smu, &smu->default_power_limit, true);
++		ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -1160,15 +1176,19 @@ static int smu_start_smc_engine(struct smu_context *smu)
+ 
+ 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ 		if (adev->asic_type < CHIP_NAVI10) {
+-			ret = smu_load_microcode(smu);
+-			if (ret)
+-				return ret;
++			if (smu->funcs->load_microcode) {
++				ret = smu->funcs->load_microcode(smu);
++				if (ret)
++					return ret;
++			}
+ 		}
+ 	}
+ 
+-	ret = smu_check_fw_status(smu);
+-	if (ret)
+-		pr_err("SMC is not ready\n");
++	if (smu->funcs->check_fw_status) {
++		ret = smu->funcs->check_fw_status(smu);
++		if (ret)
++			pr_err("SMC is not ready\n");
++	}
+ 
+ 	return ret;
+ }
+@@ -1335,8 +1355,6 @@ static int smu_resume(void *handle)
+ 
+ 	pr_info("SMU is resuming...\n");
+ 
+-	mutex_lock(&smu->mutex);
+-
+ 	ret = smu_start_smc_engine(smu);
+ 	if (ret) {
+ 		pr_err("SMU is not ready yet!\n");
+@@ -1356,13 +1374,11 @@ static int smu_resume(void *handle)
+ 
+ 	smu->disable_uclk_switch = 0;
+ 
+-	mutex_unlock(&smu->mutex);
+-
+ 	pr_info("SMU is resumed successfully!\n");
+ 
+ 	return 0;
++
+ failed:
+-	mutex_unlock(&smu->mutex);
+ 	return ret;
+ }
+ 
+@@ -1380,8 +1396,9 @@ int smu_display_configuration_change(struct smu_context *smu,
+ 
+ 	mutex_lock(&smu->mutex);
+ 
+-	smu_set_deep_sleep_dcefclk(smu,
+-			display_config->min_dcef_deep_sleep_set_clk / 100);
++	if (smu->funcs->set_deep_sleep_dcefclk)
++		smu->funcs->set_deep_sleep_dcefclk(smu,
++				display_config->min_dcef_deep_sleep_set_clk / 100);
+ 
+ 	for (index = 0; index < display_config->num_path_including_non_display; index++) {
+ 		if (display_config->displays[index].controller_id != 0)
+@@ -1559,9 +1576,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d
+ 						 &soc_mask);
+ 		if (ret)
+ 			return ret;
+-		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+-		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+-		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
++		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
++		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
++		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ 		break;
+ 	case AMD_DPM_FORCED_LEVEL_MANUAL:
+ 	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+@@ -1625,7 +1642,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ 		workload = smu->workload_setting[index];
+ 
+ 		if (smu->power_profile_mode != workload)
+-			smu_set_power_profile_mode(smu, &workload, 0);
++			smu_set_power_profile_mode(smu, &workload, 0, false);
+ 	}
+ 
+ 	return ret;
+@@ -1633,18 +1650,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ 
+ int smu_handle_task(struct smu_context *smu,
+ 		    enum amd_dpm_forced_level level,
+-		    enum amd_pp_task task_id)
++		    enum amd_pp_task task_id,
++		    bool lock_needed)
+ {
+ 	int ret = 0;
+ 
++	if (lock_needed)
++		mutex_lock(&smu->mutex);
++
+ 	switch (task_id) {
+ 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ 		ret = smu_pre_display_config_changed(smu);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 		ret = smu_set_cpu_power_state(smu);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 		ret = smu_adjust_power_state_dynamic(smu, level, false);
+ 		break;
+ 	case AMD_PP_TASK_COMPLETE_INIT:
+@@ -1655,6 +1676,10 @@ int smu_handle_task(struct smu_context *smu,
+ 		break;
+ 	}
+ 
++out:
++	if (lock_needed)
++		mutex_unlock(&smu->mutex);
++
+ 	return ret;
+ }
+ 
+@@ -1687,7 +1712,7 @@ int smu_switch_power_profile(struct smu_context *smu,
+ 	}
+ 
+ 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+-		smu_set_power_profile_mode(smu, &workload, 0);
++		smu_set_power_profile_mode(smu, &workload, 0, false);
+ 
+ 	mutex_unlock(&smu->mutex);
+ 
+@@ -1717,12 +1742,19 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
+ 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
+ 		return -EINVAL;
+ 
++	mutex_lock(&smu->mutex);
++
+ 	ret = smu_enable_umd_pstate(smu, &level);
+-	if (ret)
++	if (ret) {
++		mutex_unlock(&smu->mutex);
+ 		return ret;
++	}
+ 
+ 	ret = smu_handle_task(smu, level,
+-			      AMD_PP_TASK_READJUST_POWER_STATE);
++			      AMD_PP_TASK_READJUST_POWER_STATE,
++			      false);
++
++	mutex_unlock(&smu->mutex);
+ 
+ 	return ret;
+ }
+@@ -1740,7 +1772,8 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count)
+ 
+ int smu_force_clk_levels(struct smu_context *smu,
+ 			 enum smu_clk_type clk_type,
+-			 uint32_t mask)
++			 uint32_t mask,
++			 bool lock_needed)
+ {
+ 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ 	int ret = 0;
+@@ -1750,9 +1783,15 @@ int smu_force_clk_levels(struct smu_context *smu,
+ 		return -EINVAL;
+ 	}
+ 
++	if (lock_needed)
++		mutex_lock(&smu->mutex);
++
+ 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
+ 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
+ 
++	if (lock_needed)
++		mutex_unlock(&smu->mutex);
++
+ 	return ret;
+ }
+ 
+@@ -1770,6 +1809,8 @@ int smu_set_mp1_state(struct smu_context *smu,
+ 	if (!smu->pm_enabled)
+ 		return 0;
+ 
++	mutex_lock(&smu->mutex);
++
+ 	switch (mp1_state) {
+ 	case PP_MP1_STATE_SHUTDOWN:
+ 		msg = SMU_MSG_PrepareMp1ForShutdown;
+@@ -1782,17 +1823,22 @@ int smu_set_mp1_state(struct smu_context *smu,
+ 		break;
+ 	case PP_MP1_STATE_NONE:
+ 	default:
++		mutex_unlock(&smu->mutex);
+ 		return 0;
+ 	}
+ 
+ 	/* some asics may not support those messages */
+-	if (smu_msg_get_index(smu, msg) < 0)
++	if (smu_msg_get_index(smu, msg) < 0) {
++		mutex_unlock(&smu->mutex);
+ 		return 0;
++	}
+ 
+ 	ret = smu_send_smc_msg(smu, msg);
+ 	if (ret)
+ 		pr_err("[PrepareMp1] Failed!\n");
+ 
++	mutex_unlock(&smu->mutex);
++
+ 	return ret;
+ }
+ 
+@@ -1812,10 +1858,14 @@ int smu_set_df_cstate(struct smu_context *smu,
+ 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
+ 		return 0;
+ 
++	mutex_lock(&smu->mutex);
++
+ 	ret = smu->ppt_funcs->set_df_cstate(smu, state);
+ 	if (ret)
+ 		pr_err("[SetDfCstate] failed!\n");
+ 
++	mutex_unlock(&smu->mutex);
++
+ 	return ret;
+ }
+ 
+@@ -1843,6 +1893,8 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
+ 	struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS];
+ 	void *table = watermarks->cpu_addr;
+ 
++	mutex_lock(&smu->mutex);
++
+ 	if (!smu->disable_watermark &&
+ 	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ 	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+@@ -1851,6 +1903,8 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
+ 		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ 	}
+ 
++	mutex_unlock(&smu->mutex);
++
+ 	return ret;
+ }
+ 
+@@ -1890,3 +1944,549 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
+ 	.rev = 0,
+ 	.funcs = &smu_ip_funcs,
+ };
++
++int smu_load_microcode(struct smu_context *smu)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->load_microcode)
++		ret = smu->funcs->load_microcode(smu);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_check_fw_status(struct smu_context *smu)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->check_fw_status)
++		ret = smu->funcs->check_fw_status(smu);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->set_gfx_cgpg)
++		ret = smu->funcs->set_gfx_cgpg(smu, enabled);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->set_fan_speed_rpm)
++		ret = smu->funcs->set_fan_speed_rpm(smu, speed);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_power_limit(struct smu_context *smu,
++			uint32_t *limit,
++			bool def,
++			bool lock_needed)
++{
++	int ret = 0;
++
++	if (lock_needed)
++		mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_power_limit)
++		ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
++
++	if (lock_needed)
++		mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->set_power_limit)
++		ret = smu->funcs->set_power_limit(smu, limit);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->print_clk_levels)
++		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_od_percentage)
++		ret = smu->ppt_funcs->get_od_percentage(smu, type);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->set_od_percentage)
++		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_od_edit_dpm_table(struct smu_context *smu,
++			  enum PP_OD_DPM_TABLE_COMMAND type,
++			  long *input, uint32_t size)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->od_edit_dpm_table)
++		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_read_sensor(struct smu_context *smu,
++		    enum amd_pp_sensors sensor,
++		    void *data, uint32_t *size)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->read_sensor)
++		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_power_profile_mode)
++		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_power_profile_mode(struct smu_context *smu,
++			       long *param,
++			       uint32_t param_size,
++			       bool lock_needed)
++{
++	int ret = 0;
++
++	if (lock_needed)
++		mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->set_power_profile_mode)
++		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
++
++	if (lock_needed)
++		mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++
++int smu_get_fan_control_mode(struct smu_context *smu)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->get_fan_control_mode)
++		ret = smu->funcs->get_fan_control_mode(smu);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_fan_control_mode(struct smu_context *smu, int value)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->set_fan_control_mode)
++		ret = smu->funcs->set_fan_control_mode(smu, value);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_fan_speed_percent)
++		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->set_fan_speed_percent)
++		ret = smu->funcs->set_fan_speed_percent(smu, speed);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_fan_speed_rpm)
++		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->set_deep_sleep_dcefclk)
++		ret = smu->funcs->set_deep_sleep_dcefclk(smu, clk);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->set_active_display_count)
++		ret = smu->funcs->set_active_display_count(smu, count);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_clock_by_type(struct smu_context *smu,
++			  enum amd_pp_clock_type type,
++			  struct amd_pp_clocks *clocks)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->get_clock_by_type)
++		ret = smu->funcs->get_clock_by_type(smu, type, clocks);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_max_high_clocks(struct smu_context *smu,
++			    struct amd_pp_simple_clock_info *clocks)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->get_max_high_clocks)
++		ret = smu->funcs->get_max_high_clocks(smu, clocks);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_clock_by_type_with_latency(struct smu_context *smu,
++				       enum smu_clk_type clk_type,
++				       struct pp_clock_levels_with_latency *clocks)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_clock_by_type_with_latency)
++		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
++				       enum amd_pp_clock_type type,
++				       struct pp_clock_levels_with_voltage *clocks)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
++		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++
++int smu_display_clock_voltage_request(struct smu_context *smu,
++				      struct pp_display_clock_request *clock_req)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->display_clock_voltage_request)
++		ret = smu->funcs->display_clock_voltage_request(smu, clock_req);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++
++int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
++{
++	int ret = -EINVAL;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->display_disable_memory_clock_switch)
++		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_notify_smu_enable_pwe(struct smu_context *smu)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->notify_smu_enable_pwe)
++		ret = smu->funcs->notify_smu_enable_pwe(smu);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_xgmi_pstate(struct smu_context *smu,
++			uint32_t pstate)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->set_xgmi_pstate)
++		ret = smu->funcs->set_xgmi_pstate(smu, pstate);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_set_azalia_d3_pme(struct smu_context *smu)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->set_azalia_d3_pme)
++		ret = smu->funcs->set_azalia_d3_pme(smu);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++bool smu_baco_is_support(struct smu_context *smu)
++{
++	bool ret = false;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->baco_is_support)
++		ret = smu->funcs->baco_is_support(smu);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
++{
++	if (smu->funcs->baco_get_state)
++		return -EINVAL;
++
++	mutex_lock(&smu->mutex);
++	*state = smu->funcs->baco_get_state(smu);
++	mutex_unlock(&smu->mutex);
++
++	return 0;
++}
++
++int smu_baco_reset(struct smu_context *smu)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->baco_reset)
++		ret = smu->funcs->baco_reset(smu);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_mode2_reset(struct smu_context *smu)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->mode2_reset)
++		ret = smu->funcs->mode2_reset(smu);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
++					 struct pp_smu_nv_clock_table *max_clocks)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->funcs->get_max_sustainable_clocks_by_dc)
++		ret = smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++int smu_get_uclk_dpm_states(struct smu_context *smu,
++			    unsigned int *clock_values_in_khz,
++			    unsigned int *num_states)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_uclk_dpm_states)
++		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
++
++enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
++{
++	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_current_power_state)
++		pm_state = smu->ppt_funcs->get_current_power_state(smu);
++
++	mutex_unlock(&smu->mutex);
++
++	return pm_state;
++}
++
++int smu_get_dpm_clock_table(struct smu_context *smu,
++			    struct dpm_clocks *clock_table)
++{
++	int ret = 0;
++
++	mutex_lock(&smu->mutex);
++
++	if (smu->ppt_funcs->get_dpm_clock_table)
++		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
++
++	mutex_unlock(&smu->mutex);
++
++	return ret;
++}
+diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+index b33e451c7133..90b124dbdc14 100644
+--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+@@ -763,8 +763,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
+ 	uint32_t soft_min_level, soft_max_level;
+ 	int ret = 0;
+ 
+-	mutex_lock(&(smu->mutex));
+-
+ 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ 	soft_max_level = mask ? (fls(mask) - 1) : 0;
+ 
+@@ -883,7 +881,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
+ 		break;
+ 	}
+ 
+-	mutex_unlock(&(smu->mutex));
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+index bf13bf33ba0c..3a1245f369a2 100644
+--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
++++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+@@ -563,18 +563,17 @@ struct smu_funcs
+ 	((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
+ #define smu_fini_power(smu) \
+ 	((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
+-#define smu_load_microcode(smu) \
+-	((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
+-#define smu_check_fw_status(smu) \
+-	((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
++int smu_load_microcode(struct smu_context *smu);
++
++int smu_check_fw_status(struct smu_context *smu);
++
+ #define smu_setup_pptable(smu) \
+ 	((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0)
+ #define smu_powergate_sdma(smu, gate) \
+ 	((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0)
+ #define smu_powergate_vcn(smu, gate) \
+ 	((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0)
+-#define smu_set_gfx_cgpg(smu, enabled) \
+-	((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0)
++int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+ #define smu_get_vbios_bootup_values(smu) \
+ 	((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
+ #define smu_get_clk_info_from_vbios(smu) \
+@@ -605,8 +604,8 @@ struct smu_funcs
+ 	((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
+ #define smu_set_default_od_settings(smu, initialize) \
+ 	((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
+-#define smu_set_fan_speed_rpm(smu, speed) \
+-	((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
++int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
++
+ #define smu_send_smc_msg(smu, msg) \
+ 	((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
+ #define smu_send_smc_msg_with_param(smu, msg, param) \
+@@ -637,20 +636,22 @@ struct smu_funcs
+ 	((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+ #define smu_set_default_od8_settings(smu) \
+ 	((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+-#define smu_get_power_limit(smu, limit, def) \
+-	((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0)
+-#define smu_set_power_limit(smu, limit) \
+-	((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
++
++int smu_get_power_limit(struct smu_context *smu,
++			uint32_t *limit,
++			bool def,
++			bool lock_needed);
++
++int smu_set_power_limit(struct smu_context *smu, uint32_t limit);
+ #define smu_get_current_clk_freq(smu, clk_id, value) \
+ 	((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+-#define smu_print_clk_levels(smu, clk_type, buf) \
+-	((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0)
+-#define smu_get_od_percentage(smu, type) \
+-	((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
+-#define smu_set_od_percentage(smu, type, value) \
+-	((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0)
+-#define smu_od_edit_dpm_table(smu, type, input, size) \
+-	((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
++int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
++int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type);
++int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value);
++
++int smu_od_edit_dpm_table(struct smu_context *smu,
++			  enum PP_OD_DPM_TABLE_COMMAND type,
++			  long *input, uint32_t size);
+ #define smu_tables_init(smu, tab) \
+ 	((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0)
+ #define smu_set_thermal_fan_table(smu) \
+@@ -659,14 +660,18 @@ struct smu_funcs
+ 	((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0)
+ #define smu_stop_thermal_control(smu) \
+ 	((smu)->funcs->stop_thermal_control? (smu)->funcs->stop_thermal_control((smu)) : 0)
+-#define smu_read_sensor(smu, sensor, data, size) \
+-	((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
++
++int smu_read_sensor(struct smu_context *smu,
++		    enum amd_pp_sensors sensor,
++		    void *data, uint32_t *size);
+ #define smu_smc_read_sensor(smu, sensor, data, size) \
+ 	((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL)
+-#define smu_get_power_profile_mode(smu, buf) \
+-	((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0)
+-#define smu_set_power_profile_mode(smu, param, param_size) \
+-	((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
++int smu_get_power_profile_mode(struct smu_context *smu, char *buf);
++
++int smu_set_power_profile_mode(struct smu_context *smu,
++			       long *param,
++			       uint32_t param_size,
++			       bool lock_needed);
+ #define smu_pre_display_config_changed(smu) \
+ 	((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+ #define smu_display_config_changed(smu) \
+@@ -683,16 +688,11 @@ struct smu_funcs
+ 	((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+ #define smu_set_cpu_power_state(smu) \
+ 	((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+-#define smu_get_fan_control_mode(smu) \
+-	((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0)
+-#define smu_set_fan_control_mode(smu, value) \
+-	((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0)
+-#define smu_get_fan_speed_percent(smu, speed) \
+-	((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
+-#define smu_set_fan_speed_percent(smu, speed) \
+-	((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
+-#define smu_get_fan_speed_rpm(smu, speed) \
+-	((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
++int smu_get_fan_control_mode(struct smu_context *smu);
++int smu_set_fan_control_mode(struct smu_context *smu, int value);
++int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed);
++int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
++int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed);
+ 
+ #define smu_msg_get_index(smu, msg) \
+ 	((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+@@ -710,38 +710,44 @@ struct smu_funcs
+ 	((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0)
+ #define smu_get_allowed_feature_mask(smu, feature_mask, num) \
+ 	((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
+-#define smu_set_deep_sleep_dcefclk(smu, clk) \
+-	((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0)
+-#define smu_set_active_display_count(smu, count) \
+-	((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
++int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
++int smu_set_active_display_count(struct smu_context *smu, uint32_t count);
+ #define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+ 	((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+-#define smu_get_clock_by_type(smu, type, clocks) \
+-	((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
+-#define smu_get_max_high_clocks(smu, clocks) \
+-	((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
+-#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \
+-	((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0)
+-#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \
+-	((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
+-#define smu_display_clock_voltage_request(smu, clock_req) \
+-	((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
+-#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \
+-	((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL)
++
++int smu_get_clock_by_type(struct smu_context *smu,
++			  enum amd_pp_clock_type type,
++			  struct amd_pp_clocks *clocks);
++
++int smu_get_max_high_clocks(struct smu_context *smu,
++			    struct amd_pp_simple_clock_info *clocks);
++
++int smu_get_clock_by_type_with_latency(struct smu_context *smu,
++				       enum smu_clk_type clk_type,
++				       struct pp_clock_levels_with_latency *clocks);
++
++int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
++				       enum amd_pp_clock_type type,
++				       struct pp_clock_levels_with_voltage *clocks);
++
++int smu_display_clock_voltage_request(struct smu_context *smu,
++				      struct pp_display_clock_request *clock_req);
++int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
+ #define smu_get_dal_power_level(smu, clocks) \
+ 	((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
+ #define smu_get_perf_level(smu, designation, level) \
+ 	((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
+ #define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+ 	((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+-#define smu_notify_smu_enable_pwe(smu) \
+-	((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
++int smu_notify_smu_enable_pwe(struct smu_context *smu);
++
+ #define smu_dpm_set_uvd_enable(smu, enable) \
+ 	((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+ #define smu_dpm_set_vce_enable(smu, enable) \
+ 	((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+-#define smu_set_xgmi_pstate(smu, pstate) \
+-	((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
++
++int smu_set_xgmi_pstate(struct smu_context *smu,
++			uint32_t pstate);
+ #define smu_set_watermarks_table(smu, tab, clock_ranges) \
+ 	((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0)
+ #define smu_get_current_clk_freq_by_table(smu, clk_type, value) \
+@@ -752,22 +758,18 @@ struct smu_funcs
+ 	((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0)
+ #define smu_register_irq_handler(smu) \
+ 	((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
+-#define smu_set_azalia_d3_pme(smu) \
+-	((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
++
++int smu_set_azalia_d3_pme(struct smu_context *smu);
+ #define smu_get_dpm_ultimate_freq(smu, param, min, max) \
+ 	((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0)
+-#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
+-	((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
+-#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
+-	((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
+-#define smu_baco_is_support(smu) \
+-	((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false)
+-#define smu_baco_get_state(smu, state) \
+-	((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
+-#define smu_baco_reset(smu) \
+-	((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
+-#define smu_mode2_reset(smu) \
+-	((smu)->funcs->mode2_reset? (smu)->funcs->mode2_reset((smu)) : 0)
++
++bool smu_baco_is_support(struct smu_context *smu);
++
++int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
++
++int smu_baco_reset(struct smu_context *smu);
++
++int smu_mode2_reset(struct smu_context *smu);
+ #define smu_asic_set_performance_level(smu, level) \
+ 	((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL);
+ #define smu_dump_pptable(smu) \
+@@ -776,8 +778,6 @@ struct smu_funcs
+ 	((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL)
+ #define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \
+ 	((smu)->funcs->set_soft_freq_limited_range ? (smu)->funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL)
+-#define smu_get_dpm_clock_table(smu, clock_table) \
+-	((smu)->ppt_funcs->get_dpm_clock_table ? (smu)->ppt_funcs->get_dpm_clock_table((smu), (clock_table)) : -EINVAL)
+ 
+ #define smu_override_pcie_parameters(smu) \
+ 	((smu)->funcs->override_pcie_parameters ? (smu)->funcs->override_pcie_parameters((smu)) : 0)
+@@ -831,7 +831,8 @@ extern int smu_get_current_clocks(struct smu_context *smu,
+ extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
+ extern int smu_handle_task(struct smu_context *smu,
+ 			   enum amd_dpm_forced_level level,
+-			   enum amd_pp_task task_id);
++			   enum amd_pp_task task_id,
++			   bool lock_needed);
+ int smu_switch_power_profile(struct smu_context *smu,
+ 			     enum PP_SMC_POWER_PROFILE type,
+ 			     bool en);
+@@ -841,7 +842,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
+ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
+ 			    uint32_t *value);
+ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+-			   uint32_t *min, uint32_t *max);
++			   uint32_t *min, uint32_t *max, bool lock_needed);
+ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+ 			    uint32_t min, uint32_t max);
+ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
+@@ -856,10 +857,21 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf);
+ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask);
+ int smu_force_clk_levels(struct smu_context *smu,
+ 			 enum smu_clk_type clk_type,
+-			 uint32_t mask);
++			 uint32_t mask,
++			 bool lock_needed);
+ int smu_set_mp1_state(struct smu_context *smu,
+ 		      enum pp_mp1_state mp1_state);
+ int smu_set_df_cstate(struct smu_context *smu,
+ 		      enum pp_df_cstate state);
+ 
++int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
++					 struct pp_smu_nv_clock_table *max_clocks);
++
++int smu_get_uclk_dpm_states(struct smu_context *smu,
++			    unsigned int *clock_values_in_khz,
++			    unsigned int *num_states);
++
++int smu_get_dpm_clock_table(struct smu_context *smu,
++			    struct dpm_clocks *clock_table);
++
+ #endif
+diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+index b88aae9bb242..ead40b2840f9 100644
+--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+@@ -795,13 +795,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
+ 	int ret = 0;
+ 	uint32_t min_sclk_freq = 0, min_mclk_freq = 0;
+ 
+-	ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL);
++	ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
+ 	if (ret)
+ 		return ret;
+ 
+ 	smu->pstate_sclk = min_sclk_freq * 100;
+ 
+-	ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL);
++	ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -854,7 +854,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
+ 		return ret;
+ 
+ 	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+-		ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq);
++		ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
+ 		if (ret)
+ 			return ret;
+ 		ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
+@@ -904,7 +904,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ 		clk_type = clks[i];
+-		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
++		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -931,7 +931,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ 		clk_type = clks[i];
+-		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
++		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -1266,7 +1266,10 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu)
+ 	if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ 		clock_req.clock_type = amd_pp_dcef_clock;
+ 		clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
+-		if (!smu_display_clock_voltage_request(smu, &clock_req)) {
++
++		if (smu->funcs->display_clock_voltage_request)
++			ret = smu->funcs->display_clock_voltage_request(smu, &clock_req);
++		if (!ret) {
+ 			if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+ 				ret = smu_send_smc_msg_with_param(smu,
+ 						  SMU_MSG_SetMinDeepSleepDcefclk,
+diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+index 57930c9e22ff..0203da74b7d5 100644
+--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+@@ -194,7 +194,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
+ 	case SMU_SCLK:
+ 		/* retirve table returned paramters unit is MHz */
+ 		cur_value = metrics.ClockFrequency[CLOCK_GFXCLK];
+-		ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max);
++		ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false);
+ 		if (!ret) {
+ 			/* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
+ 			if (cur_value == max)
+@@ -251,7 +251,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
+ 	    !smu_dpm_ctx->dpm_current_power_state)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&(smu->mutex));
+ 	switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ 	case SMU_STATE_UI_LABEL_BATTERY:
+ 		pm_type = POWER_STATE_TYPE_BATTERY;
+@@ -269,7 +268,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context
+ 		pm_type = POWER_STATE_TYPE_DEFAULT;
+ 		break;
+ 	}
+-	mutex_unlock(&(smu->mutex));
+ 
+ 	return pm_type;
+ }
+@@ -314,7 +312,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
+ 
+ 	for (i = 0; i < ARRAY_SIZE(clks); i++) {
+ 		clk_type = clks[i];
+-		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
++		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -348,7 +346,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
+ 
+ 		clk_type = clk_feature_map[i].clk_type;
+ 
+-		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq);
++		ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -469,7 +467,7 @@ static int renoir_force_clk_levels(struct smu_context *smu,
+ 		return -EINVAL;
+ 	}
+ 
+-	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq);
++	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false);
+ 	if (ret)
+ 		return ret;
+ 	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+@@ -545,7 +543,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+ 	int ret = 0;
+ 	uint32_t sclk_freq = 0, uclk_freq = 0;
+ 
+-	ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq);
++	ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -553,7 +551,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq);
++	ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+index ac02bcd24da0..54f9d3dd837f 100644
+--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
++++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+@@ -792,8 +792,11 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
+ 	if (!table_context)
+ 		return -EINVAL;
+ 
+-	return smu_set_deep_sleep_dcefclk(smu,
+-			table_context->boot_values.dcefclk / 100);
++	if (smu->funcs->set_deep_sleep_dcefclk)
++		return smu->funcs->set_deep_sleep_dcefclk(smu,
++				table_context->boot_values.dcefclk / 100);
++
++	return 0;
+ }
+ 
+ static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
+@@ -1308,9 +1311,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
+ 	if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
+ 		return 0;
+ 
+-	mutex_lock(&smu->mutex);
+ 	ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
+-	mutex_unlock(&smu->mutex);
+ 
+ 	if(clk_select == SMU_UCLK)
+ 		smu->hard_min_uclk_req_from_dal = clk_freq;
+@@ -1333,12 +1334,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
+ 	case CHIP_NAVI12:
+ 		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ 			return 0;
+-		mutex_lock(&smu->mutex);
+ 		if (enable)
+ 			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+ 		else
+ 			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+-		mutex_unlock(&smu->mutex);
+ 		break;
+ 	default:
+ 		break;
+@@ -1454,10 +1453,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ 	if (!speed)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&(smu->mutex));
+ 	ret = smu_v11_0_auto_fan_control(smu, 0);
+ 	if (ret)
+-		goto set_fan_speed_rpm_failed;
++		return ret;
+ 
+ 	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
+ 	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+@@ -1468,8 +1466,6 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ 
+ 	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
+ 
+-set_fan_speed_rpm_failed:
+-	mutex_unlock(&(smu->mutex));
+ 	return ret;
+ }
+ 
+@@ -1480,11 +1476,9 @@ static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+ 				     uint32_t pstate)
+ {
+ 	int ret = 0;
+-	mutex_lock(&(smu->mutex));
+ 	ret = smu_send_smc_msg_with_param(smu,
+ 					  SMU_MSG_SetXgmiMode,
+ 					  pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
+-	mutex_unlock(&(smu->mutex));
+ 	return ret;
+ }
+ 
+@@ -1596,9 +1590,7 @@ static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
+ {
+ 	int ret = 0;
+ 
+-	mutex_lock(&smu->mutex);
+ 	ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
+-	mutex_unlock(&smu->mutex);
+ 
+ 	return ret;
+ }
+@@ -1695,7 +1687,6 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
+ 	int ret = 0, clk_id = 0;
+ 	uint32_t param = 0;
+ 
+-	mutex_lock(&smu->mutex);
+ 	clk_id = smu_clk_get_index(smu, clk_type);
+ 	if (clk_id < 0) {
+ 		ret = -EINVAL;
+@@ -1722,7 +1713,6 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
+ 	}
+ 
+ failed:
+-	mutex_unlock(&smu->mutex);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+index cac4269cf1d1..6b9eef20554b 100644
+--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
++++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+@@ -316,8 +316,6 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
+ 	int ret = 0;
+ 	uint32_t mclk_mask, soc_mask;
+ 
+-	mutex_lock(&smu->mutex);
+-
+ 	if (max) {
+ 		ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
+ 						 NULL,
+@@ -387,7 +385,6 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk
+ 		}
+ 	}
+ failed:
+-	mutex_unlock(&smu->mutex);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+index a76ffd58404e..c249df9256c7 100644
+--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
++++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+@@ -635,7 +635,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
+ 	    !smu_dpm_ctx->dpm_current_power_state)
+ 		return -EINVAL;
+ 
+-	mutex_lock(&(smu->mutex));
+ 	switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ 	case SMU_STATE_UI_LABEL_BATTERY:
+ 		pm_type = POWER_STATE_TYPE_BATTERY;
+@@ -653,7 +652,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
+ 		pm_type = POWER_STATE_TYPE_DEFAULT;
+ 		break;
+ 	}
+-	mutex_unlock(&(smu->mutex));
+ 
+ 	return pm_type;
+ }
+@@ -1277,8 +1275,6 @@ static int vega20_force_clk_levels(struct smu_context *smu,
+ 	uint32_t soft_min_level, soft_max_level, hard_min_level;
+ 	int ret = 0;
+ 
+-	mutex_lock(&(smu->mutex));
+-
+ 	soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ 	soft_max_level = mask ? (fls(mask) - 1) : 0;
+ 
+@@ -1431,7 +1427,6 @@ static int vega20_force_clk_levels(struct smu_context *smu,
+ 		break;
+ 	}
+ 
+-	mutex_unlock(&(smu->mutex));
+ 	return ret;
+ }
+ 
+@@ -1446,8 +1441,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
+ 
+ 	dpm_table = smu_dpm->dpm_context;
+ 
+-	mutex_lock(&smu->mutex);
+-
+ 	switch (clk_type) {
+ 	case SMU_GFXCLK:
+ 		single_dpm_table = &(dpm_table->gfx_table);
+@@ -1469,7 +1462,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
+ 		ret = -EINVAL;
+ 	}
+ 
+-	mutex_unlock(&smu->mutex);
+ 	return ret;
+ }
+ 
+@@ -2542,8 +2534,6 @@ static int vega20_set_od_percentage(struct smu_context *smu,
+ 	int feature_enabled;
+ 	PPCLK_e clk_id;
+ 
+-	mutex_lock(&(smu->mutex));
+-
+ 	dpm_table = smu_dpm->dpm_context;
+ 	golden_table = smu_dpm->golden_dpm_context;
+ 
+@@ -2593,11 +2583,10 @@ static int vega20_set_od_percentage(struct smu_context *smu,
+ 	}
+ 
+ 	ret = smu_handle_task(smu, smu_dpm->dpm_level,
+-			      AMD_PP_TASK_READJUST_POWER_STATE);
++			      AMD_PP_TASK_READJUST_POWER_STATE,
++			      false);
+ 
+ set_od_failed:
+-	mutex_unlock(&(smu->mutex));
+-
+ 	return ret;
+ }
+ 
+@@ -2822,10 +2811,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu,
+ 	}
+ 
+ 	if (type == PP_OD_COMMIT_DPM_TABLE) {
+-		mutex_lock(&(smu->mutex));
+ 		ret = smu_handle_task(smu, smu_dpm->dpm_level,
+-				      AMD_PP_TASK_READJUST_POWER_STATE);
+-		mutex_unlock(&(smu->mutex));
++				      AMD_PP_TASK_READJUST_POWER_STATE,
++				      false);
+ 	}
+ 
+ 	return ret;
+-- 
+2.17.1
+
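
The locking scheme the patch applies is a common one: exported entry points take smu->mutex themselves, while routines reachable both from outside and from code that already holds the lock grow a lock_needed flag so internal callers can skip re-acquiring it (kernel mutexes are not recursive, so taking the lock twice on the same path would deadlock). The standalone C sketch below illustrates that pattern outside the kernel; the pthread-based smu_context and its frequency fields are hypothetical stand-ins for the driver's actual types, not code from the patch.

/* Minimal sketch of the "lock_needed" pattern used by the patch above.
 * Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct smu_context {
	pthread_mutex_t mutex;          /* stands in for smu->mutex */
	uint32_t min_freq, max_freq;    /* hypothetical DPM state */
};

/* Dual-use API: external callers pass lock_needed=true so the function
 * serializes itself; internal callers already hold the mutex and pass
 * false to avoid recursive locking. */
static int smu_get_dpm_freq_range(struct smu_context *smu,
				  uint32_t *min, uint32_t *max,
				  bool lock_needed)
{
	if (lock_needed)
		pthread_mutex_lock(&smu->mutex);

	if (min)
		*min = smu->min_freq;
	if (max)
		*max = smu->max_freq;

	if (lock_needed)
		pthread_mutex_unlock(&smu->mutex);
	return 0;
}

/* Internal helper: only ever called with smu->mutex held, so it must
 * pass lock_needed=false. */
static int smu_adjust_power_state_locked(struct smu_context *smu)
{
	uint32_t max;
	return smu_get_dpm_freq_range(smu, NULL, &max, false);
}

/* Exported task handler: takes the lock exactly once around the whole
 * operation, mirroring smu_handle_task() in the patch. */
static int smu_handle_task(struct smu_context *smu)
{
	int ret;

	pthread_mutex_lock(&smu->mutex);
	ret = smu_adjust_power_state_locked(smu);
	pthread_mutex_unlock(&smu->mutex);
	return ret;
}

int main(void)
{
	struct smu_context smu = {
		.mutex = PTHREAD_MUTEX_INITIALIZER,
		.min_freq = 300, .max_freq = 1800,
	};
	uint32_t lo, hi;

	smu_get_dpm_freq_range(&smu, &lo, &hi, true); /* external path */
	smu_handle_task(&smu);                        /* internal path */
	printf("range: %u-%u MHz\n", lo, hi);
	return 0;
}

This also explains why the patch converts the header macros into real C wrappers: a macro cannot take the mutex around the callback, whereas the wrapper functions in amdgpu_smu.c give every externally visible call a single, consistent locking point.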