path: root/common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch')
-rw-r--r--   common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch   213
1 file changed, 213 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch
new file mode 100644
index 00000000..fda9ff8a
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4825-drm-amd-powerplay-apply-clocks-adjust-rules-on-power.patch
@@ -0,0 +1,213 @@
+From f63c81d3092beba4ed572b0d8c1ed188a1533d03 Mon Sep 17 00:00:00 2001
+From: Evan Quan <evan.quan@amd.com>
+Date: Mon, 11 Jun 2018 16:40:57 +0800
+Subject: [PATCH 4825/5725] drm/amd/powerplay: apply clocks adjust rules on
+ power state change
+
+This adds the apply_clocks_adjust_rules callback, which is used
+to validate the clock settings on a power state change.
+
+Signed-off-by: Evan Quan <evan.quan@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 162 +++++++++++++++++++++
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 2 +
+ 2 files changed, 164 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+index d0e7081..9125bc5 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+@@ -1951,6 +1951,166 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
+ return size;
+ }
+
++static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
++{
++ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
++ struct vega12_single_dpm_table *dpm_table;
++ bool vblank_too_short = false;
++ bool disable_mclk_switching;
++ uint32_t i, latency;
++
++ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
++ !hwmgr->display_config->multi_monitor_in_sync) ||
++ vblank_too_short;
++ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
++
++ /* gfxclk */
++ dpm_table = &(data->dpm_table.gfx_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* memclk */
++ dpm_table = &(data->dpm_table.mem_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* honour DAL's UCLK Hardmin */
++ if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
++ dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
++
++ /* Hardmin is dependent on displayconfig */
++ if (disable_mclk_switching) {
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
++ if (data->mclk_latency_table.entries[i].latency <= latency) {
++ if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
++ break;
++ }
++ }
++ }
++ }
++
++ if (hwmgr->display_config->nb_pstate_switch_disable)
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ /* vclk */
++ dpm_table = &(data->dpm_table.vclk_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* dclk */
++ dpm_table = &(data->dpm_table.dclk_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* socclk */
++ dpm_table = &(data->dpm_table.soc_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ /* eclk */
++ dpm_table = &(data->dpm_table.eclk_table);
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
++ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++
++ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
++ if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
++ }
++
++ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
++ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
++ }
++ }
++
++ return 0;
++}
++
+ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+ {
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+@@ -2203,6 +2363,8 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
+ .display_clock_voltage_request = vega12_display_clock_voltage_request,
+ .force_clock_level = vega12_force_clock_level,
+ .print_clock_levels = vega12_print_clock_levels,
++ .apply_clocks_adjust_rules =
++ vega12_apply_clocks_adjust_rules,
+ .display_config_changed = vega12_display_configuration_changed_task,
+ .powergate_uvd = vega12_power_gate_uvd,
+ .powergate_vce = vega12_power_gate_vce,
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+index e18c083..e17237c 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+@@ -443,6 +443,8 @@ struct vega12_hwmgr {
+ #define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3
+ #define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3
+ #define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2
++#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL 0x3
++#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL 0x3
+
+ int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
+
+--
+2.7.4
+
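Note: the following is a minimal, self-contained sketch (not the driver code) of the clamp pattern the patch applies to each clock domain in vega12_apply_clocks_adjust_rules(): default the soft and hard limits to the full DPM range, then pin both soft limits to a single level for the UMD-pstate and forced-peak cases. The struct layout, level count, and names (apply_adjust_rules, UMD_PSTATE_LEVEL) are hypothetical and exist only for illustration.

/* clamp_sketch.c - illustrative only; mirrors the per-domain logic above. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_LEVELS       4
#define UMD_PSTATE_LEVEL 3   /* hypothetical stand-in for VEGA12_UMD_PSTATE_*_LEVEL */

struct dpm_state {
	uint32_t soft_min_level, soft_max_level;
	uint32_t hard_min_level, hard_max_level;
};

struct dpm_table {
	uint32_t count;
	uint32_t levels[NUM_LEVELS];   /* clock values, lowest first */
	struct dpm_state state;
};

static void apply_adjust_rules(struct dpm_table *t, bool umd_pstate, bool force_peak)
{
	/* Default: allow the whole range, soft and hard. */
	t->state.soft_min_level = t->levels[0];
	t->state.soft_max_level = t->levels[t->count - 1];
	t->state.hard_min_level = t->levels[0];
	t->state.hard_max_level = t->levels[t->count - 1];

	if (!umd_pstate)
		return;

	/* UMD pstate: pin both soft limits to one intermediate level, if valid. */
	if (UMD_PSTATE_LEVEL < t->count) {
		t->state.soft_min_level = t->levels[UMD_PSTATE_LEVEL];
		t->state.soft_max_level = t->levels[UMD_PSTATE_LEVEL];
	}

	/* Forced peak profile: pin both soft limits to the highest level. */
	if (force_peak) {
		t->state.soft_min_level = t->levels[t->count - 1];
		t->state.soft_max_level = t->levels[t->count - 1];
	}
}

int main(void)
{
	struct dpm_table gfx = {
		.count = NUM_LEVELS,
		.levels = { 30000, 60000, 90000, 120000 },
	};

	apply_adjust_rules(&gfx, true, false);
	printf("soft range: %" PRIu32 "..%" PRIu32 "\n",
	       gfx.state.soft_min_level, gfx.state.soft_max_level);
	return 0;
}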