Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.14.71/4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch')
-rw-r--r-- common/recipes-kernel/linux/linux-yocto-4.14.71/4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch | 918
1 file changed, 918 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch
new file mode 100644
index 00000000..acea24b0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4753-drm-amd-display-redesign-dce-dcn-clock-voltage-updat.patch
@@ -0,0 +1,918 @@
+From fc5d96cda0c50f44feef553e75df07cea74a00ee Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 23 May 2018 16:21:54 -0400
+Subject: [PATCH 4753/5725] drm/amd/display: redesign dce/dcn clock voltage
+ update request
+
+The goal of this change is to move clock programming and voltage
+requests to a single function. As of this change only dce is affected.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 22 +-
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 30 +--
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 279 +++++++++++----------
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 6 +-
+ .../amd/display/dc/dce100/dce100_hw_sequencer.c | 49 +++-
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 150 ++---------
+ .../amd/display/dc/dce110/dce110_hw_sequencer.h | 4 +
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 9 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 +-
+ .../gpu/drm/amd/display/dc/inc/hw/display_clock.h | 21 +-
+ 10 files changed, 250 insertions(+), 322 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index d8a3165..2b70ac6 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -1145,10 +1145,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- /*if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
++ if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+- //BREAK_TO_DEBUGGER();
+- } else*/ if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
++ BREAK_TO_DEBUGGER();
++ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1158,10 +1158,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- /*if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
++ if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else*/ if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
++ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1172,10 +1172,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ break;
+
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+- /*if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
++ if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else*/ if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
++ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1189,10 +1189,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ {
+ unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+- /*if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
++ if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else */if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
++ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
+ vdd_level = dcn_bw_v_nom0p8;
+@@ -1204,10 +1204,10 @@ static unsigned int dcn_find_normalized_clock_vdd_Level(
+ break;
+
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+- /*if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
++ if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+- } else */if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
++ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index af9e0db..1a6a7c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1284,27 +1284,15 @@ static enum dc_status enable_link_dp(
+ max_link_rate = LINK_RATE_HIGH3;
+
+ if (link_settings.link_rate == max_link_rate) {
+- if (state->dis_clk->funcs->set_min_clocks_state) {
+- if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
+- state->dis_clk->funcs->set_min_clocks_state(
+- state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
+- } else {
+- uint32_t dp_phyclk_in_khz;
+- const struct dc_clocks clocks_value =
+- state->dis_clk->clks;
+-
+- /* 27mhz = 27000000hz= 27000khz */
+- dp_phyclk_in_khz = link_settings.link_rate * 27000;
+-
+- if (dp_phyclk_in_khz > clocks_value.phyclk_khz) {
+- state->dis_clk->funcs->apply_clock_voltage_request(
+- state->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+- dp_phyclk_in_khz,
+- false,
+- true);
+- }
+- }
++ struct dc_clocks clocks = state->bw.dcn.calc_clk;
++
++ /* dce/dcn compat, do not update dispclk */
++ clocks.dispclk_khz = 0;
++ /* 27mhz = 27000000hz= 27000khz */
++ clocks.phyclk_khz = link_settings.link_rate * 27000;
++
++ state->dis_clk->funcs->update_clocks(
++ state->dis_clk, &clocks, false);
+ }
+
+ dp_enable_link_phy(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index b749a20..d3bbac8 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -275,7 +275,7 @@ static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+ }
+ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct display_clock *clk,
+- struct state_dependent_clocks *req_clocks)
++ struct dc_clocks *req_clocks)
+ {
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int i;
+@@ -286,48 +286,25 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ * all required clocks
+ */
+ for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+- if (req_clocks->display_clk_khz >
++ if (req_clocks->dispclk_khz >
+ clk_dce->max_clks_by_state[i].display_clk_khz
+- || req_clocks->pixel_clk_khz >
++ || req_clocks->phyclk_khz >
+ clk_dce->max_clks_by_state[i].pixel_clk_khz)
+ break;
+
+ low_req_clk = i + 1;
+ if (low_req_clk > clk->max_clks_state) {
+- DC_LOG_WARNING("%s: clocks unsupported disp_clk %d pix_clk %d",
+- __func__,
+- req_clocks->display_clk_khz,
+- req_clocks->pixel_clk_khz);
+- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
++ /* set max clock state for high phyclock, invalid on exceeding display clock */
++ if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
++ < req_clocks->dispclk_khz)
++ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
++ else
++ low_req_clk = clk->max_clks_state;
+ }
+
+ return low_req_clk;
+ }
+
+-static bool dce_clock_set_min_clocks_state(
+- struct display_clock *clk,
+- enum dm_pp_clocks_state clocks_state)
+-{
+- struct dm_pp_power_level_change_request level_change_req = {
+- clocks_state };
+-
+- if (clocks_state > clk->max_clks_state) {
+- /*Requested state exceeds max supported state.*/
+- DC_LOG_WARNING("Requested state exceeds max supported state");
+- return false;
+- } else if (clocks_state == clk->cur_min_clks_state) {
+- /*if we're trying to set the same state, we can just return
+- * since nothing needs to be done*/
+- return true;
+- }
+-
+- /* get max clock state from PPLIB */
+- if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
+- clk->cur_min_clks_state = clocks_state;
+-
+- return true;
+-}
+-
+ static int dce_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+@@ -488,8 +465,6 @@ static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_dce->dfs_bypass_enabled = true;
+-
+- clk_dce->use_max_disp_clk = debug->max_disp_clk;
+ }
+
+ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+@@ -548,117 +523,160 @@ static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+ }
+ }
+
+-static bool dce_apply_clock_voltage_request(
+- struct display_clock *clk,
+- enum dm_pp_clock_type clocks_type,
+- int clocks_in_khz,
+- bool pre_mode_set,
+- bool update_dp_phyclk)
++static void dce12_update_clocks(struct display_clock *dccg,
++ struct dc_clocks *new_clocks,
++ bool safe_to_lower)
+ {
+- bool send_request = false;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+
+- switch (clocks_type) {
+- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- break;
+- default:
+- return false;
++ if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
++ || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
++ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+
+- clock_voltage_req.clk_type = clocks_type;
+- clock_voltage_req.clocks_in_khz = clocks_in_khz;
+-
+- /* to pplib */
+- if (pre_mode_set) {
+- switch (clocks_type) {
+- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- if (clocks_in_khz > clk->clks.dispclk_khz) {
+- clk->dispclk_notify_pplib_done = true;
+- send_request = true;
+- } else
+- clk->dispclk_notify_pplib_done = false;
+- /* no matter incrase or decrase clock, update current clock value */
+- clk->clks.dispclk_khz = clocks_in_khz;
+- break;
+- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- if (clocks_in_khz > clk->clks.phyclk_khz) {
+- clk->phyclk_notify_pplib_done = true;
+- send_request = true;
+- } else
+- clk->phyclk_notify_pplib_done = false;
+- /* no matter incrase or decrase clock, update current clock value */
+- clk->clks.phyclk_khz = clocks_in_khz;
+- break;
+- default:
+- ASSERT(0);
+- break;
+- }
++ if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
++ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
++ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+- } else {
+- switch (clocks_type) {
+- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+- if (!clk->dispclk_notify_pplib_done)
+- send_request = true;
+- clk->dispclk_notify_pplib_done = true;
+- break;
+- case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+- if (!clk->phyclk_notify_pplib_done)
+- send_request = true;
+- clk->phyclk_notify_pplib_done = true;
+- break;
+- default:
+- ASSERT(0);
+- break;
+- }
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+- if (send_request) {
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+- if (clk->ctx->dce_version >= DCN_VERSION_1_0
++}
++
++static void dcn_update_clocks(struct display_clock *dccg,
++ struct dc_clocks *new_clocks,
++ bool safe_to_lower)
++{
++ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ bool send_request_to_increase = false;
++ bool send_request_to_lower = false;
++
++ if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
++ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
++ || new_clocks->fclk_khz > dccg->clks.fclk_khz
++ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
++ send_request_to_increase = true;
++
++#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ if (send_request_to_increase
+ ) {
+- struct dc *core_dc = clk->ctx->dc;
+- /*use dcfclk request voltage*/
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz =
+- dcn_find_dcfclk_suits_all(core_dc, &clk->clks);
+- }
++ struct dc *core_dc = dccg->ctx->dc;
++
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ }
+ #endif
+- dm_pp_apply_clock_for_voltage_request(
+- clk->ctx, &clock_voltage_req);
++
++ if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
++ || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
++ /* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);*/
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ send_request_to_lower = true;
+ }
+- if (update_dp_phyclk && (clocks_in_khz >
+- clk->clks.phyclk_khz))
+- clk->clks.phyclk_khz = clocks_in_khz;
+
+- return true;
++ if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
++ || new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
++ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ send_request_to_lower = true;
++ }
++
++ if ((new_clocks->fclk_khz < dccg->clks.fclk_khz && safe_to_lower)
++ || new_clocks->fclk_khz > dccg->clks.fclk_khz) {
++ dccg->clks.phyclk_khz = new_clocks->fclk_khz;
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ send_request_to_lower = true;
++ }
++
++ if ((new_clocks->dcfclk_khz < dccg->clks.dcfclk_khz && safe_to_lower)
++ || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz) {
++ dccg->clks.phyclk_khz = new_clocks->dcfclk_khz;
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->dcfclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++#ifdef CONFIG_DRM_AMD_DC_DCN1_0
++ if (!send_request_to_increase && send_request_to_lower
++ ) {
++ struct dc *core_dc = dccg->ctx->dc;
++
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
++ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
++ }
++#endif
++}
++
++static void dce_update_clocks(struct display_clock *dccg,
++ struct dc_clocks *new_clocks,
++ bool safe_to_lower)
++{
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > dccg->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
++ dccg->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
++ || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
++ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
++ }
+ }
+
++static const struct display_clock_funcs dcn_funcs = {
++ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
++ .set_dispclk = dce112_set_clock,
++ .update_clocks = dcn_update_clocks
++};
+
+ static const struct display_clock_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
+- .apply_clock_voltage_request = dce_apply_clock_voltage_request,
+- .set_clock = dce112_set_clock
++ .set_dispclk = dce112_set_clock,
++ .update_clocks = dce12_update_clocks
+ };
+
+ static const struct display_clock_funcs dce112_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+- .get_required_clocks_state = dce_get_required_clocks_state,
+- .set_min_clocks_state = dce_clock_set_min_clocks_state,
+- .set_clock = dce112_set_clock
++ .set_dispclk = dce112_set_clock,
++ .update_clocks = dce_update_clocks
+ };
+
+ static const struct display_clock_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+- .get_required_clocks_state = dce_get_required_clocks_state,
+- .set_min_clocks_state = dce_clock_set_min_clocks_state,
+- .set_clock = dce_psr_set_clock
++ .set_dispclk = dce_psr_set_clock,
++ .update_clocks = dce_update_clocks
+ };
+
+ static const struct display_clock_funcs dce_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+- .get_required_clocks_state = dce_get_required_clocks_state,
+- .set_min_clocks_state = dce_clock_set_min_clocks_state,
+- .set_clock = dce_set_clock
++ .set_dispclk = dce_set_clock,
++ .update_clocks = dce_update_clocks
+ };
+
+ static void dce_disp_clk_construct(
+@@ -785,7 +803,6 @@ struct display_clock *dce112_disp_clk_create(
+ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+ {
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+- struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+@@ -801,15 +818,23 @@ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+
+ clk_dce->base.funcs = &dce120_funcs;
+
+- /* new in dce120 */
+- if (!ctx->dc->debug.disable_pplib_clock_request &&
+- dm_pp_get_clock_levels_by_type_with_voltage(
+- ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
+- && clk_level_info.num_levels)
+- clk_dce->max_displ_clk_in_khz =
+- clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
+- else
+- clk_dce->max_displ_clk_in_khz = 1133000;
++ return &clk_dce->base;
++}
++
++struct display_clock *dcn_disp_clk_create(struct dc_context *ctx)
++{
++ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
++
++ if (clk_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ /* TODO strip out useful stuff out of dce constructor */
++ dce_disp_clk_construct(
++ clk_dce, ctx, NULL, NULL, NULL);
++
++ clk_dce->base.funcs = &dcn_funcs;
+
+ return &clk_dce->base;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+index 0e717e0..f9b0020 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+@@ -91,7 +91,6 @@ struct dce_disp_clk {
+ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+ struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
+
+- bool use_max_disp_clk;
+ int dentist_vco_freq_khz;
+
+ /* Cache the status of DFS-bypass feature*/
+@@ -106,9 +105,6 @@ struct dce_disp_clk {
+ int dprefclk_ss_percentage;
+ /* DPREFCLK SS percentage Divider (100 or 1000) */
+ int dprefclk_ss_divider;
+-
+- /* max disp_clk from PPLIB for max validation display clock*/
+- int max_displ_clk_in_khz;
+ };
+
+
+@@ -132,6 +128,8 @@ struct display_clock *dce112_disp_clk_create(
+
+ struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+
++struct display_clock *dcn_disp_clk_create(struct dc_context *ctx);
++
+ void dce_disp_clk_destroy(struct display_clock **disp_clk);
+
+ #endif /* _DCE_CLOCKS_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+index 41f83ec..aabf7ca 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+@@ -125,17 +125,54 @@ static void dce100_pplib_apply_display_requirements(
+ dc->prev_display_config = *pp_display_cfg;
+ }
+
++/* unit: in_khz before mode set, get pixel clock from context. ASIC register
++ * may not be programmed yet
++ */
++static uint32_t get_max_pixel_clock_for_all_paths(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ uint32_t max_pix_clk = 0;
++ int i;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++ if (pipe_ctx->stream == NULL)
++ continue;
++
++ /* do not check under lay */
++ if (pipe_ctx->top_pipe)
++ continue;
++
++ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
++ max_pix_clk =
++ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
++ }
++
++ if (max_pix_clk == 0)
++ ASSERT(0);
++
++ return max_pix_clk;
++}
++
+ void dce100_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+ {
+- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
+- dc->res_pool->display_clock,
+- context->bw.dce.dispclk_khz * 115 / 100);
+- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
++ struct dc_clocks req_clks;
++
++ req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
++
++ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
++
++ dc->res_pool->display_clock->funcs->update_clocks(
++ dc->res_pool->display_clock,
++ &req_clks,
++ decrease_allowed);
++
+ dce100_pplib_apply_display_requirements(dc, context);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index e540172..73c03b7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -1637,7 +1637,7 @@ static void dce110_set_displaymarks(
+ }
+ }
+
+-static void set_safe_displaymarks(
++void dce110_set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+ {
+@@ -1737,23 +1737,15 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ }
+
+ /* unit: in_khz before mode set, get pixel clock from context. ASIC register
+- * may not be programmed yet.
+- * TODO: after mode set, pre_mode_set = false,
+- * may read PLL register to get pixel clock
++ * may not be programmed yet
+ */
+ static uint32_t get_max_pixel_clock_for_all_paths(
+ struct dc *dc,
+- struct dc_state *context,
+- bool pre_mode_set)
++ struct dc_state *context)
+ {
+ uint32_t max_pix_clk = 0;
+ int i;
+
+- if (!pre_mode_set) {
+- /* TODO: read ASIC register to get pixel clock */
+- ASSERT(0);
+- }
+-
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+@@ -1776,74 +1768,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
+ }
+
+ /*
+- * Find clock state based on clock requested. if clock value is 0, simply
+- * set clock state as requested without finding clock state by clock value
+- */
+-
+-static void apply_min_clocks(
+- struct dc *dc,
+- struct dc_state *context,
+- enum dm_pp_clocks_state *clocks_state,
+- bool pre_mode_set)
+-{
+- struct state_dependent_clocks req_clocks = {0};
+-
+- if (!pre_mode_set) {
+- /* set clock_state without verification */
+- if (context->dis_clk->funcs->set_min_clocks_state) {
+- context->dis_clk->funcs->set_min_clocks_state(
+- context->dis_clk, *clocks_state);
+- return;
+- }
+-
+- /* TODO: This is incorrect. Figure out how to fix. */
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+- context->dis_clk->clks.dispclk_khz,
+- pre_mode_set,
+- false);
+-
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+- context->dis_clk->clks.phyclk_khz,
+- pre_mode_set,
+- false);
+- return;
+- }
+-
+- /* get the required state based on state dependent clocks:
+- * display clock and pixel clock
+- */
+- req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
+-
+- req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
+- dc, context, true);
+-
+- if (context->dis_clk->funcs->get_required_clocks_state) {
+- *clocks_state = context->dis_clk->funcs->get_required_clocks_state(
+- context->dis_clk, &req_clocks);
+- context->dis_clk->funcs->set_min_clocks_state(
+- context->dis_clk, *clocks_state);
+- } else {
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+- req_clocks.display_clk_khz,
+- pre_mode_set,
+- false);
+-
+- context->dis_clk->funcs->apply_clock_voltage_request(
+- context->dis_clk,
+- DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+- req_clocks.pixel_clk_khz,
+- pre_mode_set,
+- false);
+- }
+-}
+-
+-/*
+ * Check if FBC can be enabled
+ */
+ static bool should_enable_fbc(struct dc *dc,
+@@ -2060,7 +1984,6 @@ enum dc_status dce110_apply_ctx_to_hw(
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status status;
+ int i;
+- enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+@@ -2094,54 +2017,10 @@ enum dc_status dce110_apply_ctx_to_hw(
+ PIPE_GATING_CONTROL_DISABLE);
+ }
+
+- set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+-
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
+- /*TODO: when pplib works*/
+- apply_min_clocks(dc, context, &clocks_state, true);
+-
+-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+- if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+- if (context->bw.dcn.calc_clk.fclk_khz
+- > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
+- struct dm_pp_clock_for_voltage_req clock;
+-
+- clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+- clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
+- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+- dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+- context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+- }
+- if (context->bw.dcn.calc_clk.dcfclk_khz
+- > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+- struct dm_pp_clock_for_voltage_req clock;
+-
+- clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
+- dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+- dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+- context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+- }
+- if (context->bw.dcn.calc_clk.dispclk_khz
+- > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
+- dc->res_pool->display_clock,
+- context->bw.dcn.calc_clk.dispclk_khz);
+- dc->current_state->bw.dcn.cur_clk.dispclk_khz =
+- context->bw.dcn.calc_clk.dispclk_khz;
+- context->bw.dcn.cur_clk.dispclk_khz =
+- context->bw.dcn.calc_clk.dispclk_khz;
+- }
+- } else
+-#endif
+- if (context->bw.dce.dispclk_khz
+- > dc->current_state->bw.dce.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
+- dc->res_pool->display_clock,
+- context->bw.dce.dispclk_khz * 115 / 100);
+- }
++ dc->hwss.set_bandwidth(dc, context, false);
+
+ dce110_setup_audio_dto(dc, context);
+
+@@ -2172,7 +2051,7 @@ enum dc_status dce110_apply_ctx_to_hw(
+ }
+
+ /* to save power */
+- apply_min_clocks(dc, context, &clocks_state, false);
++ dc->hwss.set_bandwidth(dc, context, true);
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
+
+@@ -2661,15 +2540,20 @@ void dce110_set_bandwidth(
+ struct dc_state *context,
+ bool decrease_allowed)
+ {
+- dce110_set_displaymarks(dc, context);
++ struct dc_clocks req_clks;
+
+- if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
+- dc->res_pool->display_clock,
+- context->bw.dce.dispclk_khz * 115 / 100);
+- dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
++ req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++ req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
++
++ if (decrease_allowed)
++ dce110_set_displaymarks(dc, context);
++ else
++ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
++ dc->res_pool->display_clock->funcs->update_clocks(
++ dc->res_pool->display_clock,
++ &req_clks,
++ decrease_allowed);
+ pplib_apply_display_requirements(dc, context);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+index 1782757..a226a3d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+@@ -60,6 +60,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
+
+ void dce110_power_down(struct dc *dc);
+
++void dce110_set_safe_displaymarks(
++ struct resource_context *res_ctx,
++ const struct resource_pool *pool);
++
+ void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 72d0b6f6..2fdec57f 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2439,7 +2439,7 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+ int dispclk_to_dpp_threshold = determine_dppclk_threshold(dc, context);
+
+ /* set disp clk to dpp clk threshold */
+- dc->res_pool->display_clock->funcs->set_clock(
++ dc->res_pool->display_clock->funcs->set_dispclk(
+ dc->res_pool->display_clock,
+ dispclk_to_dpp_threshold);
+
+@@ -2458,7 +2458,7 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+
+ /* If target clk not same as dppclk threshold, set to target clock */
+ if (dispclk_to_dpp_threshold != context->bw.dcn.calc_clk.dispclk_khz) {
+- dc->res_pool->display_clock->funcs->set_clock(
++ dc->res_pool->display_clock->funcs->set_dispclk(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ }
+@@ -2488,6 +2488,11 @@ static void dcn10_set_bandwidth(
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
++ dc->res_pool->display_clock->funcs->update_clocks(
++ dc->res_pool->display_clock,
++ &context->bw.dcn.calc_clk,
++ decrease_allowed);
++
+ if (should_set_clock(
+ decrease_allowed,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 704acc0..e548ce5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -1074,7 +1074,7 @@ static bool construct(
+ }
+ }
+
+- pool->base.display_clock = dce120_disp_clk_create(ctx);
++ pool->base.display_clock = dcn_disp_clk_create(ctx);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+index 6b9ca55..8ce106f 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+@@ -40,32 +40,19 @@ struct display_clock {
+ struct dc_context *ctx;
+ const struct display_clock_funcs *funcs;
+
+- bool dispclk_notify_pplib_done;
+- bool phyclk_notify_pplib_done;
+ enum dm_pp_clocks_state max_clks_state;
+ enum dm_pp_clocks_state cur_min_clks_state;
+ struct dc_clocks clks;
+ };
+
+ struct display_clock_funcs {
+- int (*set_clock)(struct display_clock *disp_clk,
++ void (*update_clocks)(struct display_clock *dccg,
++ struct dc_clocks *new_clocks,
++ bool safe_to_lower);
++ int (*set_dispclk)(struct display_clock *disp_clk,
+ int requested_clock_khz);
+
+- enum dm_pp_clocks_state (*get_required_clocks_state)(
+- struct display_clock *disp_clk,
+- struct state_dependent_clocks *req_clocks);
+-
+- bool (*set_min_clocks_state)(struct display_clock *disp_clk,
+- enum dm_pp_clocks_state dm_pp_clocks_state);
+-
+ int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
+-
+- bool (*apply_clock_voltage_request)(
+- struct display_clock *disp_clk,
+- enum dm_pp_clock_type clocks_type,
+- int clocks_in_khz,
+- bool pre_mode_set,
+- bool update_dp_phyclk);
+ };
+
+ #endif /* __DISPLAY_CLOCK_H__ */
+--
+2.7.4
+
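
The hunks above collapse the old apply_clock_voltage_request()/set_min_clocks_state() pair into a single update_clocks(dccg, new_clocks, safe_to_lower) hook, where a clock value may always be raised but may only be lowered once the caller declares it safe (note the two dc->hwss.set_bandwidth() calls in dce110_apply_ctx_to_hw: first with false, then with true after the new context is applied). The standalone C sketch below illustrates only that comparison pattern; the sketch_* names and printf notifications are invented for illustration and are not part of the DC code.

/*
 * Minimal sketch of the "safe_to_lower" pattern used by the new
 * update_clocks() implementations: a clock is always allowed to rise,
 * but is only lowered when the caller says it is safe (e.g. after
 * display marks have been set to safe values). Hypothetical names,
 * not the real DC types.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_clocks {
	int dispclk_khz;
	int phyclk_khz;
};

/* True when the cached value changes and a PPLib-style voltage
 * request would be sent for this clock. */
static bool should_update(int new_khz, int cur_khz, bool safe_to_lower)
{
	return (new_khz < cur_khz && safe_to_lower) || new_khz > cur_khz;
}

static void sketch_update_clocks(struct sketch_clocks *cur,
				 const struct sketch_clocks *req,
				 bool safe_to_lower)
{
	if (should_update(req->dispclk_khz, cur->dispclk_khz, safe_to_lower)) {
		cur->dispclk_khz = req->dispclk_khz;
		printf("dispclk -> %d kHz (would notify PPLib)\n",
		       cur->dispclk_khz);
	}
	if (should_update(req->phyclk_khz, cur->phyclk_khz, safe_to_lower)) {
		cur->phyclk_khz = req->phyclk_khz;
		printf("phyclk  -> %d kHz (would notify PPLib)\n",
		       cur->phyclk_khz);
	}
}

int main(void)
{
	struct sketch_clocks cur = { .dispclk_khz = 600000, .phyclk_khz = 270000 };
	struct sketch_clocks req = { .dispclk_khz = 400000, .phyclk_khz = 540000 };

	/* First pass (pre mode set): raises apply, lowering is deferred. */
	sketch_update_clocks(&cur, &req, false);
	/* Second pass (post mode set): lowering is now safe. */
	sketch_update_clocks(&cur, &req, true);
	return 0;
}

The same guard is why dce110_set_bandwidth in the patch programs safe display marks when lowering is not yet allowed and only switches to the real marks once decrease_allowed is true.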