aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3405-drm-amd-display-fixup-DPP-programming-sequence.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3405-drm-amd-display-fixup-DPP-programming-sequence.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3405-drm-amd-display-fixup-DPP-programming-sequence.patch395
1 files changed, 395 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3405-drm-amd-display-fixup-DPP-programming-sequence.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3405-drm-amd-display-fixup-DPP-programming-sequence.patch
new file mode 100644
index 00000000..3b4f39a7
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/3405-drm-amd-display-fixup-DPP-programming-sequence.patch
@@ -0,0 +1,395 @@
+From 7e39c65edcacc447951589bb95d33391f9c459a9 Mon Sep 17 00:00:00 2001
+From: Jun Lei <Jun.Lei@amd.com>
+Date: Mon, 15 Jul 2019 10:41:47 -0400
+Subject: [PATCH 3405/4256] drm/amd/display: fixup DPP programming sequence
+
+[why]
+DC does not correctly account for the fact that DPP DTO is double buffered while DPP ref is not.
+This means that when DPP ref clock is lowered when it's "safe to lower", the DPP blocks that need
+an increased divider will temporarily have actual DPP clock drop below minimum while DTO
+double buffering takes effect. This results in temporary underflow.
+
+[how]
+To fix this, DPP clock cannot be programmed atomically, but rather be broken up into the DTO and the
+ref. Each has a separate "safe to lower" logic. When doing "prepare" the ref and dividers may only increase.
+When doing "optimize", both may decrease. It is guaranteed that we won't exceed max DPP clock because
+we do not use dividers larger than 1.
+
+Signed-off-by: Jun Lei <Jun.Lei@amd.com>
+Reviewed-by: Eric Yang <eric.yang2@amd.com>
+Acked-by: Leo Li <sunpeng.li@amd.com>
+---
+ .../display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 133 +++++++++++++-----
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +
+ .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 3 +-
+ .../gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | 31 +++-
+ .../gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 2 +-
+ .../drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +-
+ .../gpu/drm/amd/display/dc/inc/core_types.h | 1 -
+ .../amd/display/dc/inc/hw/clk_mgr_internal.h | 10 +-
+ drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 3 +-
+ 9 files changed, 141 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+index 7ff0396956b3..24775ab81216 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+@@ -104,7 +104,6 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ {
+ int i;
+
+- clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ int dpp_inst, dppclk_khz;
+
+@@ -114,28 +113,75 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ clk_mgr->dccg->funcs->update_dpp_dto(
+- clk_mgr->dccg, dpp_inst, dppclk_khz);
++ clk_mgr->dccg, dpp_inst, dppclk_khz, false);
+ }
+ }
+
+-void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr)
++static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
+ {
+ int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+- * clk_mgr->dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz;
+- int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+- * clk_mgr->dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
++ * clk_mgr->dentist_vco_freq_khz / khz;
+
+ uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider);
+- uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
+
+- REG_UPDATE(DENTIST_DISPCLK_CNTL,
+- DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
+-// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100);
+ REG_UPDATE(DENTIST_DISPCLK_CNTL,
+ DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100);
+ }
+
++static void update_display_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz)
++{
++ int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
++ * clk_mgr->dentist_vco_freq_khz / khz;
++
++ uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider);
++
++ REG_UPDATE(DENTIST_DISPCLK_CNTL,
++ DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider);
++}
++
++static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
++{
++ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
++ struct dc *dc = clk_mgr_base->ctx->dc;
++ struct pp_smu_funcs_nv *pp_smu = NULL;
++ bool going_up = clk_mgr->base.clks.dispclk_khz < khz;
++
++ if (dc->res_pool->pp_smu)
++ pp_smu = &dc->res_pool->pp_smu->nv_funcs;
++
++ clk_mgr->base.clks.dispclk_khz = khz;
++
++ if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
++ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
++
++ update_display_clk(clk_mgr, khz);
++
++ if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
++ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
++}
++
++static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz)
++{
++ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
++ struct dc *dc = clk_mgr_base->ctx->dc;
++ struct pp_smu_funcs_nv *pp_smu = NULL;
++ bool going_up = clk_mgr->base.clks.dppclk_khz < khz;
++
++ if (dc->res_pool->pp_smu)
++ pp_smu = &dc->res_pool->pp_smu->nv_funcs;
++
++ clk_mgr->base.clks.dppclk_khz = khz;
++ clk_mgr->dccg->ref_dppclk = khz;
++
++ if (going_up && pp_smu && pp_smu->set_voltage_by_freq)
++ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
++
++ update_global_dpp_clk(clk_mgr, khz);
++
++ if (!going_up && pp_smu && pp_smu->set_voltage_by_freq)
++ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
++}
+
+ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+@@ -146,12 +192,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct pp_smu_funcs_nv *pp_smu = NULL;
+ int display_count;
+- bool update_dppclk = false;
+ bool update_dispclk = false;
+ bool enter_display_off = false;
+- bool dpp_clock_lowered = false;
+ struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
+ bool force_reset = false;
++ int i;
+
+ if (clk_mgr_base->clks.dispclk_khz == 0 ||
+ dc->debug.force_clock_mode & 0x1) {
+@@ -177,6 +222,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
+ pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PHYCLK, clk_mgr_base->clks.phyclk_khz / 1000);
+ }
+
++
+ if (dc->debug.force_min_dcfclk_mhz > 0)
+ new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
+ new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+@@ -202,10 +248,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
+
+ if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
++
+ clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+ if (pp_smu && pp_smu->set_pstate_handshake_support)
+ pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
+ }
++ clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
+
+ if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) {
+ clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
+@@ -213,35 +261,48 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
+ pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000);
+ }
+
+- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+- if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+- dpp_clock_lowered = true;
+- clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
++ if (dc->config.forced_clocks == false) {
++ // First update display clock
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz))
++ request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz);
+
+- if (pp_smu && pp_smu->set_voltage_by_freq)
+- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000);
++ // Updating DPP clock requires some more logic
++ if (!safe_to_lower) {
++ // For pre-programming, we need to make sure any DPP clock that will go up has to go up
+
+- update_dppclk = true;
+- }
++ // First raise the global reference if needed
++ if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz)
++ request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
+
+- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+- clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+- if (pp_smu && pp_smu->set_voltage_by_freq)
+- pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000);
++ // Then raise any dividers that need raising
++ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
++ int dpp_inst, dppclk_khz;
+
+- update_dispclk = true;
+- }
+- if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+- if (dpp_clock_lowered) {
+- // if clock is being lowered, increase DTO before lowering refclk
+- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
+- dcn20_update_clocks_update_dentist(clk_mgr);
++ if (!context->res_ctx.pipe_ctx[i].plane_state)
++ continue;
++
++ dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
++ dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
++
++ clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true);
++ }
+ } else {
+- // if clock is being raised, increase refclk before lowering DTO
+- if (update_dppclk || update_dispclk)
+- dcn20_update_clocks_update_dentist(clk_mgr);
+- if (update_dppclk)
+- dcn20_update_clocks_update_dpp_dto(clk_mgr, context);
++ // For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs
++
++ if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz)
++ request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz);
++
++ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
++ int dpp_inst, dppclk_khz;
++
++ if (!context->res_ctx.pipe_ctx[i].plane_state)
++ continue;
++
++ dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
++ dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
++
++ clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false);
++ }
+ }
+ }
+ if (update_dispclk &&
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index 51653834dab6..409d9a02f613 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -1612,6 +1612,9 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
+ for (i = 0; i < surface_count; i++)
+ updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+
++ if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0)
++ dc->optimized_required = true;
++
+ return type;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 84980d4f324d..627684213461 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2153,7 +2153,8 @@ void update_dchubp_dpp(
+ dc->res_pool->dccg->funcs->update_dpp_dto(
+ dc->res_pool->dccg,
+ dpp->inst,
+- pipe_ctx->plane_res.bw.dppclk_khz);
++ pipe_ctx->plane_res.bw.dppclk_khz,
++ false);
+ else
+ dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
+ dc->clk_mgr->clks.dispclk_khz / 2 :
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+index 23362dd4b6d3..f9b99f8cfc31 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+@@ -42,12 +42,16 @@
+ #define DC_LOGGER \
+ dccg->ctx->logger
+
+-void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
++void dccg2_update_dpp_dto(struct dccg *dccg,
++ int dpp_inst,
++ int req_dppclk,
++ bool reduce_divider_only)
+ {
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
++ int current_phase, current_modulo;
+
+ ASSERT(req_dppclk <= ref_dppclk);
+ /* need to clamp to 8 bits */
+@@ -59,9 +63,28 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+ if (req_dppclk > ref_dppclk)
+ req_dppclk = ref_dppclk;
+ }
+- REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+- DPPCLK0_DTO_PHASE, req_dppclk,
+- DPPCLK0_DTO_MODULO, ref_dppclk);
++
++ REG_GET_2(DPPCLK_DTO_PARAM[dpp_inst],
++ DPPCLK0_DTO_PHASE, &current_phase,
++ DPPCLK0_DTO_MODULO, &current_modulo);
++
++ if (reduce_divider_only) {
++ // requested phase/modulo greater than current
++ if (req_dppclk * current_modulo >= current_phase * ref_dppclk) {
++ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
++ DPPCLK0_DTO_PHASE, req_dppclk,
++ DPPCLK0_DTO_MODULO, ref_dppclk);
++ } else {
++ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
++ DPPCLK0_DTO_PHASE, current_phase,
++ DPPCLK0_DTO_MODULO, current_modulo);
++ }
++ } else {
++ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
++ DPPCLK0_DTO_PHASE, req_dppclk,
++ DPPCLK0_DTO_MODULO, ref_dppclk);
++ }
++
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ } else {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+index 2205cb0204e7..74a074a873cd 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+@@ -97,7 +97,7 @@ struct dcn_dccg {
+ const struct dccg_mask *dccg_mask;
+ };
+
+-void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
++void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk, bool reduce_divider_only);
+
+ void dccg2_get_dccg_ref_freq(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index 291846cc4f21..a26541bafc75 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -2480,7 +2480,7 @@ void dcn20_calculate_dlg_params(
+ context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
+- context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
++ context->bw_ctx.bw.dcn.clk.fclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support =
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
+ != dm_dram_clock_change_unsupported;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index a148ffde8b12..1d66c4b09612 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -228,7 +228,6 @@ struct resource_pool {
+
+ struct dcn_fe_bandwidth {
+ int dppclk_khz;
+-
+ };
+
+ struct stream_resource {
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+index 4b5505fa980c..9b6c885c0bba 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+@@ -279,8 +279,14 @@ static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_cl
+
+ static inline bool should_update_pstate_support(bool safe_to_lower, bool calc_support, bool cur_support)
+ {
+- // Whenever we are transitioning pstate support, we always want to notify prior to committing state
+- return (calc_support != cur_support) ? !safe_to_lower : false;
++ if (cur_support != calc_support) {
++ if (calc_support == true && safe_to_lower)
++ return true;
++ else if (calc_support == false && !safe_to_lower)
++ return true;
++ }
++
++ return false;
+ }
+
+ int clk_mgr_helper_get_active_display_cnt(
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+index 05ee5295d2c1..d8e744f366e5 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+@@ -38,7 +38,8 @@ struct dccg {
+ struct dccg_funcs {
+ void (*update_dpp_dto)(struct dccg *dccg,
+ int dpp_inst,
+- int req_dppclk);
++ int req_dppclk,
++ bool reduce_divider_only);
+ void (*get_dccg_ref_freq)(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz);
+--
+2.17.1
+