path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch  3638
1 file changed, 3638 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch
new file mode 100644
index 00000000..1dc90f77
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/5591-drm-amd-display-rename-dccg-to-clk_mgr.patch
@@ -0,0 +1,3638 @@
+From 699ae1c6ed5317a39cc550294bf67b9bb636c25e Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Fri, 28 Sep 2018 07:46:42 -0400
+Subject: [PATCH 5591/5725] drm/amd/display: rename dccg to clk_mgr
+
+In preparation for adding the actual dccg block since the
+current implementation of dccg is more of a clock manager
+than a hw block
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Charlene Liu <Charlene.Liu@amd.com>
+Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/Makefile | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 876 +++++++++++++++++++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h | 165 ++++
+ drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c | 876 ---------------------
+ drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h | 165 ----
+ .../amd/display/dc/dce100/dce100_hw_sequencer.c | 4 +-
+ .../drm/amd/display/dc/dce100/dce100_resource.c | 16 +-
+ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 4 +-
+ .../drm/amd/display/dc/dce110/dce110_resource.c | 16 +-
+ .../drm/amd/display/dc/dce112/dce112_resource.c | 16 +-
+ .../drm/amd/display/dc/dce120/dce120_resource.c | 10 +-
+ .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 24 +-
+ drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c | 360 +++++++++
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h | 37 +
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c | 361 ---------
+ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h | 37 -
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 18 +-
+ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 28 +-
+ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 6 +-
+ drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2 +-
+ drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 47 ++
+ drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 47 --
+ 24 files changed, 1560 insertions(+), 1561 deletions(-)
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+ create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h
+ create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+ delete mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 69bb5b0..4cfc20d 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -2054,7 +2054,7 @@ void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+ {
+- dst_ctx->dccg = dc->res_pool->dccg;
++ dst_ctx->dccg = dc->res_pool->clk_mgr;
+ }
+
+ enum dc_status dc_validate_global_state(
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+index f4ce7f5..6d7b64a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
+@@ -28,7 +28,7 @@
+
+ DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+ dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
+-dce_dccg.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
++dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
+ dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
+
+ AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+new file mode 100644
+index 0000000..02ddc94
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+@@ -0,0 +1,876 @@
++/*
++ * Copyright 2012-16 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "dce_clk_mgr.h"
++
++#include "reg_helper.h"
++#include "dmcu.h"
++#include "core_types.h"
++#include "dal_asic_id.h"
++
++#define TO_DCE_CLK_MGR(clocks)\
++ container_of(clocks, struct dce_clk_mgr, base)
++
++#define REG(reg) \
++ (clk_mgr_dce->regs->reg)
++
++#undef FN
++#define FN(reg_name, field_name) \
++ clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
++
++#define CTX \
++ clk_mgr_dce->base.ctx
++#define DC_LOGGER \
++ clk_mgr->ctx->logger
++
++/* Max clock values for each state indexed by "enum clocks_state": */
++static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
++/* ClocksStateInvalid - should not be used */
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/* ClocksStateLow */
++{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
++/* ClocksStateNominal */
++{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
++/* ClocksStatePerformance */
++{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
++
++static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
++/*ClocksStateInvalid - should not be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
++{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
++/*ClocksStateLow*/
++{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
++/*ClocksStateNominal*/
++{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
++/*ClocksStatePerformance*/
++{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
++
++static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
++/*ClocksStateInvalid - should not be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
++{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
++/*ClocksStateLow*/
++{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
++/*ClocksStateNominal*/
++{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
++/*ClocksStatePerformance*/
++{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
++
++static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
++/*ClocksStateInvalid - should not be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
++{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
++/*ClocksStateLow*/
++{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
++/*ClocksStateNominal*/
++{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
++/*ClocksStatePerformance*/
++{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
++
++static int dentist_get_divider_from_did(int did)
++{
++ if (did < DENTIST_BASE_DID_1)
++ did = DENTIST_BASE_DID_1;
++ if (did > DENTIST_MAX_DID)
++ did = DENTIST_MAX_DID;
++
++ if (did < DENTIST_BASE_DID_2) {
++ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
++ * (did - DENTIST_BASE_DID_1);
++ } else if (did < DENTIST_BASE_DID_3) {
++ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
++ * (did - DENTIST_BASE_DID_2);
++ } else {
++ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
++ * (did - DENTIST_BASE_DID_3);
++ }
++}
++
++/* SW will adjust DP REF Clock average value for all purposes
++ * (DP DTO / DP Audio DTO and DP GTC)
++ if clock is spread for all cases:
++ -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
++ calculations for DS_INCR/DS_MODULO (this is planned to be default case)
++ -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
++ calculations (not planned to be used, but average clock should still
++ be valid)
++ -if SS enabled on DP Ref clock and HW de-spreading disabled
++ (should not be case with CIK) then SW should program all rates
++ generated according to average value (case as with previous ASICs)
++ */
++static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
++{
++ if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
++ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
++ dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
++ clk_mgr_dce->dprefclk_ss_divider), 200);
++ struct fixed31_32 adj_dp_ref_clk_khz;
++
++ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
++ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
++ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
++ }
++ return dp_ref_clk_khz;
++}
++
++static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ int dprefclk_wdivider;
++ int dprefclk_src_sel;
++ int dp_ref_clk_khz = 600000;
++ int target_div;
++
++ /* ASSERT DP Reference Clock source is from DFS*/
++ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
++ ASSERT(dprefclk_src_sel == 0);
++
++ /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
++ * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
++ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
++
++ /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
++ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
++
++ /* Calculate the current DFS clock, in kHz.*/
++ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
++ * clk_mgr_dce->dentist_vco_freq_khz) / target_div;
++
++ return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
++}
++
++int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++
++ return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
++}
++
++/* unit: in_khz before mode set, get pixel clock from context. ASIC register
++ * may not be programmed yet
++ */
++static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
++{
++ uint32_t max_pix_clk = 0;
++ int i;
++
++ for (i = 0; i < MAX_PIPES; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++
++ if (pipe_ctx->stream == NULL)
++ continue;
++
++ /* do not check under lay */
++ if (pipe_ctx->top_pipe)
++ continue;
++
++ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
++ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
++
++ /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS
++ * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
++ */
++ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
++ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
++ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
++ }
++
++ return max_pix_clk;
++}
++
++static enum dm_pp_clocks_state dce_get_required_clocks_state(
++ struct clk_mgr *clk_mgr,
++ struct dc_state *context)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ int i;
++ enum dm_pp_clocks_state low_req_clk;
++ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
++
++ /* Iterate from highest supported to lowest valid state, and update
++ * lowest RequiredState with the lowest state that satisfies
++ * all required clocks
++ */
++ for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
++ if (context->bw.dce.dispclk_khz >
++ clk_mgr_dce->max_clks_by_state[i].display_clk_khz
++ || max_pix_clk >
++ clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
++ break;
++
++ low_req_clk = i + 1;
++ if (low_req_clk > clk_mgr_dce->max_clks_state) {
++ /* set max clock state for high phyclock, invalid on exceeding display clock */
++ if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
++ < context->bw.dce.dispclk_khz)
++ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
++ else
++ low_req_clk = clk_mgr_dce->max_clks_state;
++ }
++
++ return low_req_clk;
++}
++
++static int dce_set_clock(
++ struct clk_mgr *clk_mgr,
++ int requested_clk_khz)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
++ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
++ int actual_clock = requested_clk_khz;
++ struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
++
++ /* Make sure requested clock isn't lower than minimum threshold*/
++ if (requested_clk_khz > 0)
++ requested_clk_khz = max(requested_clk_khz,
++ clk_mgr_dce->dentist_vco_freq_khz / 64);
++
++ /* Prepare to program display clock*/
++ pxl_clk_params.target_pixel_clock = requested_clk_khz;
++ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
++
++ if (clk_mgr_dce->dfs_bypass_active)
++ pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
++
++ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
++
++ if (clk_mgr_dce->dfs_bypass_active) {
++ /* Cache the fixed display clock*/
++ clk_mgr_dce->dfs_bypass_disp_clk =
++ pxl_clk_params.dfs_bypass_display_clock;
++ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
++ }
++
++ /* from power down, we need mark the clock state as ClocksStateNominal
++ * from HWReset, so when resume we will call pplib voltage regulator.*/
++ if (requested_clk_khz == 0)
++ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++
++ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
++
++ return actual_clock;
++}
++
++int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct bp_set_dce_clock_parameters dce_clk_params;
++ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
++ struct dc *core_dc = clk_mgr->ctx->dc;
++ struct dmcu *dmcu = core_dc->res_pool->dmcu;
++ int actual_clock = requested_clk_khz;
++ /* Prepare to program display clock*/
++ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
++
++ /* Make sure requested clock isn't lower than minimum threshold*/
++ if (requested_clk_khz > 0)
++ requested_clk_khz = max(requested_clk_khz,
++ clk_mgr_dce->dentist_vco_freq_khz / 62);
++
++ dce_clk_params.target_clock_frequency = requested_clk_khz;
++ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
++ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
++
++ bp->funcs->set_dce_clock(bp, &dce_clk_params);
++ actual_clock = dce_clk_params.target_clock_frequency;
++
++ /* from power down, we need mark the clock state as ClocksStateNominal
++ * from HWReset, so when resume we will call pplib voltage regulator.*/
++ if (requested_clk_khz == 0)
++ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++
++ /*Program DP ref Clock*/
++ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
++ dce_clk_params.target_clock_frequency = 0;
++ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
++ if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))
++ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
++ (dce_clk_params.pll_id ==
++ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
++ else
++ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
++
++ bp->funcs->set_dce_clock(bp, &dce_clk_params);
++
++ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
++ if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
++ dmcu->funcs->set_psr_wait_loop(dmcu,
++ actual_clock / 1000 / 7);
++ }
++
++ clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
++ return actual_clock;
++}
++
++static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
++{
++ struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
++ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
++ struct integrated_info info = { { { 0 } } };
++ struct dc_firmware_info fw_info = { { 0 } };
++ int i;
++
++ if (bp->integrated_info)
++ info = *bp->integrated_info;
++
++ clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
++ if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
++ bp->funcs->get_firmware_info(bp, &fw_info);
++ clk_mgr_dce->dentist_vco_freq_khz =
++ fw_info.smu_gpu_pll_output_freq;
++ if (clk_mgr_dce->dentist_vco_freq_khz == 0)
++ clk_mgr_dce->dentist_vco_freq_khz = 3600000;
++ }
++
++ /*update the maximum display clock for each power state*/
++ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
++ enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
++
++ switch (i) {
++ case 0:
++ clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
++ break;
++
++ case 1:
++ clk_state = DM_PP_CLOCKS_STATE_LOW;
++ break;
++
++ case 2:
++ clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ break;
++
++ case 3:
++ clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
++ break;
++
++ default:
++ clk_state = DM_PP_CLOCKS_STATE_INVALID;
++ break;
++ }
++
++ /*Do not allow bad VBIOS/SBIOS to override with invalid values,
++ * check for > 100MHz*/
++ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
++ clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
++ info.disp_clk_voltage[i].max_supported_clk;
++ }
++
++ if (!debug->disable_dfs_bypass && bp->integrated_info)
++ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
++ clk_mgr_dce->dfs_bypass_enabled = true;
++}
++
++void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
++{
++ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
++ int ss_info_num = bp->funcs->get_ss_entry_number(
++ bp, AS_SIGNAL_TYPE_GPU_PLL);
++
++ if (ss_info_num) {
++ struct spread_spectrum_info info = { { 0 } };
++ enum bp_result result = bp->funcs->get_spread_spectrum_info(
++ bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
++
++ /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
++ * even if SS not enabled and in that case
++ * SSInfo.spreadSpectrumPercentage !=0 would be sign
++ * that SS is enabled
++ */
++ if (result == BP_RESULT_OK &&
++ info.spread_spectrum_percentage != 0) {
++ clk_mgr_dce->ss_on_dprefclk = true;
++ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
++
++ if (info.type.CENTER_MODE == 0) {
++ /* TODO: Currently for DP Reference clock we
++ * need only SS percentage for
++ * downspread */
++ clk_mgr_dce->dprefclk_ss_percentage =
++ info.spread_spectrum_percentage;
++ }
++
++ return;
++ }
++
++ result = bp->funcs->get_spread_spectrum_info(
++ bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
++
++ /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
++ * even if SS not enabled and in that case
++ * SSInfo.spreadSpectrumPercentage !=0 would be sign
++ * that SS is enabled
++ */
++ if (result == BP_RESULT_OK &&
++ info.spread_spectrum_percentage != 0) {
++ clk_mgr_dce->ss_on_dprefclk = true;
++ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
++
++ if (info.type.CENTER_MODE == 0) {
++ /* Currently for DP Reference clock we
++ * need only SS percentage for
++ * downspread */
++ clk_mgr_dce->dprefclk_ss_percentage =
++ info.spread_spectrum_percentage;
++ }
++ }
++ }
++}
++
++void dce110_fill_display_configs(
++ const struct dc_state *context,
++ struct dm_pp_display_configuration *pp_display_cfg)
++{
++ int j;
++ int num_cfgs = 0;
++
++ for (j = 0; j < context->stream_count; j++) {
++ int k;
++
++ const struct dc_stream_state *stream = context->streams[j];
++ struct dm_pp_single_disp_config *cfg =
++ &pp_display_cfg->disp_configs[num_cfgs];
++ const struct pipe_ctx *pipe_ctx = NULL;
++
++ for (k = 0; k < MAX_PIPES; k++)
++ if (stream == context->res_ctx.pipe_ctx[k].stream) {
++ pipe_ctx = &context->res_ctx.pipe_ctx[k];
++ break;
++ }
++
++ ASSERT(pipe_ctx != NULL);
++
++ /* only notify active stream */
++ if (stream->dpms_off)
++ continue;
++
++ num_cfgs++;
++ cfg->signal = pipe_ctx->stream->signal;
++ cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
++ cfg->src_height = stream->src.height;
++ cfg->src_width = stream->src.width;
++ cfg->ddi_channel_mapping =
++ stream->sink->link->ddi_channel_mapping.raw;
++ cfg->transmitter =
++ stream->sink->link->link_enc->transmitter;
++ cfg->link_settings.lane_count =
++ stream->sink->link->cur_link_settings.lane_count;
++ cfg->link_settings.link_rate =
++ stream->sink->link->cur_link_settings.link_rate;
++ cfg->link_settings.link_spread =
++ stream->sink->link->cur_link_settings.link_spread;
++ cfg->sym_clock = stream->phy_pix_clk;
++ /* Round v_refresh*/
++ cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
++ cfg->v_refresh /= stream->timing.h_total;
++ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
++ / stream->timing.v_total;
++ }
++
++ pp_display_cfg->display_count = num_cfgs;
++}
++
++static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
++{
++ uint8_t j;
++ uint32_t min_vertical_blank_time = -1;
++
++ for (j = 0; j < context->stream_count; j++) {
++ struct dc_stream_state *stream = context->streams[j];
++ uint32_t vertical_blank_in_pixels = 0;
++ uint32_t vertical_blank_time = 0;
++
++ vertical_blank_in_pixels = stream->timing.h_total *
++ (stream->timing.v_total
++ - stream->timing.v_addressable);
++
++ vertical_blank_time = vertical_blank_in_pixels
++ * 1000 / stream->timing.pix_clk_khz;
++
++ if (min_vertical_blank_time > vertical_blank_time)
++ min_vertical_blank_time = vertical_blank_time;
++ }
++
++ return min_vertical_blank_time;
++}
++
++static int determine_sclk_from_bounding_box(
++ const struct dc *dc,
++ int required_sclk)
++{
++ int i;
++
++ /*
++ * Some asics do not give us sclk levels, so we just report the actual
++ * required sclk
++ */
++ if (dc->sclk_lvls.num_levels == 0)
++ return required_sclk;
++
++ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
++ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
++ return dc->sclk_lvls.clocks_in_khz[i];
++ }
++ /*
++ * even maximum level could not satisfy requirement, this
++ * is unexpected at this stage, should have been caught at
++ * validation time
++ */
++ ASSERT(0);
++ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
++}
++
++static void dce_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
++
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static void dce11_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->all_displays_in_sync =
++ context->bw.dce.all_displays_in_sync;
++ pp_display_cfg->nb_pstate_switch_disable =
++ context->bw.dce.nbp_state_change_enable == false;
++ pp_display_cfg->cpu_cc6_disable =
++ context->bw.dce.cpuc_state_change_enable == false;
++ pp_display_cfg->cpu_pstate_disable =
++ context->bw.dce.cpup_state_change_enable == false;
++ pp_display_cfg->cpu_pstate_separation_time =
++ context->bw.dce.blackout_recovery_time_us;
++
++ pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
++ / MEMORY_TYPE_MULTIPLIER_CZ;
++
++ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
++ dc,
++ context->bw.dce.sclk_khz);
++
++ pp_display_cfg->min_engine_clock_deep_sleep_khz
++ = context->bw.dce.sclk_deep_sleep_khz;
++
++ pp_display_cfg->avail_mclk_switch_time_us =
++ dce110_get_min_vblank_time_us(context);
++ /* TODO: dce11.2*/
++ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
++
++ pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
++
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ /* TODO: is this still applicable?*/
++ if (pp_display_cfg->display_count == 1) {
++ const struct dc_crtc_timing *timing =
++ &context->streams[0]->timing;
++
++ pp_display_cfg->crtc_index =
++ pp_display_cfg->disp_configs[0].pipe_idx;
++ pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
++ }
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static void dce_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct dm_pp_power_level_change_request level_change_req;
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++
++ /*TODO: W/A for dal3 linux, investigate why this works */
++ if (!clk_mgr_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++
++ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
++ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
++ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
++}
++
++static void dce11_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
++ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
++ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++}
++
++static void dce112_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct dm_pp_power_level_change_request level_change_req;
++
++ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
++ /* get max clock state from PPLIB */
++ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
++ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
++ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
++ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
++ }
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
++ context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
++ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++ }
++ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++}
++
++static void dce12_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
++ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
++ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
++
++ /*TODO: W/A for dal3 linux, investigate why this works */
++ if (!clk_mgr_dce->dfs_bypass_active)
++ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
++
++ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
++ clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
++ context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
++ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
++ }
++
++ if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
++ clock_voltage_req.clocks_in_khz = max_pix_clk;
++ clk_mgr->clks.phyclk_khz = max_pix_clk;
++
++ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
++ }
++ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
++
++ context->bw.dce.dispclk_khz = unpatched_disp_clk;
++}
++
++static const struct clk_mgr_funcs dce120_funcs = {
++ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
++ .update_clocks = dce12_update_clocks
++};
++
++static const struct clk_mgr_funcs dce112_funcs = {
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
++ .update_clocks = dce112_update_clocks
++};
++
++static const struct clk_mgr_funcs dce110_funcs = {
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
++ .update_clocks = dce11_update_clocks,
++};
++
++static const struct clk_mgr_funcs dce_funcs = {
++ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
++ .update_clocks = dce_update_clocks
++};
++
++static void dce_clk_mgr_construct(
++ struct dce_clk_mgr *clk_mgr_dce,
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask)
++{
++ struct clk_mgr *base = &clk_mgr_dce->base;
++ struct dm_pp_static_clock_info static_clk_info = {0};
++
++ base->ctx = ctx;
++ base->funcs = &dce_funcs;
++
++ clk_mgr_dce->regs = regs;
++ clk_mgr_dce->clk_mgr_shift = clk_shift;
++ clk_mgr_dce->clk_mgr_mask = clk_mask;
++
++ clk_mgr_dce->dfs_bypass_disp_clk = 0;
++
++ clk_mgr_dce->dprefclk_ss_percentage = 0;
++ clk_mgr_dce->dprefclk_ss_divider = 1000;
++ clk_mgr_dce->ss_on_dprefclk = false;
++
++
++ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
++ clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
++ else
++ clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
++ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
++
++ dce_clock_read_integrated_info(clk_mgr_dce);
++ dce_clock_read_ss_info(clk_mgr_dce);
++}
++
++struct clk_mgr *dce_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask)
++{
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(clk_mgr_dce->max_clks_by_state,
++ dce80_max_clks_by_state,
++ sizeof(dce80_max_clks_by_state));
++
++ dce_clk_mgr_construct(
++ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
++
++ return &clk_mgr_dce->base;
++}
++
++struct clk_mgr *dce110_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask)
++{
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(clk_mgr_dce->max_clks_by_state,
++ dce110_max_clks_by_state,
++ sizeof(dce110_max_clks_by_state));
++
++ dce_clk_mgr_construct(
++ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
++
++ clk_mgr_dce->base.funcs = &dce110_funcs;
++
++ return &clk_mgr_dce->base;
++}
++
++struct clk_mgr *dce112_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask)
++{
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(clk_mgr_dce->max_clks_by_state,
++ dce112_max_clks_by_state,
++ sizeof(dce112_max_clks_by_state));
++
++ dce_clk_mgr_construct(
++ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
++
++ clk_mgr_dce->base.funcs = &dce112_funcs;
++
++ return &clk_mgr_dce->base;
++}
++
++struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
++{
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ memcpy(clk_mgr_dce->max_clks_by_state,
++ dce120_max_clks_by_state,
++ sizeof(dce120_max_clks_by_state));
++
++ dce_clk_mgr_construct(
++ clk_mgr_dce, ctx, NULL, NULL, NULL);
++
++ clk_mgr_dce->dprefclk_khz = 600000;
++ clk_mgr_dce->base.funcs = &dce120_funcs;
++
++ return &clk_mgr_dce->base;
++}
++
++void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
++{
++ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);
++
++ kfree(clk_mgr_dce);
++ *clk_mgr = NULL;
++}
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+new file mode 100644
+index 0000000..2668d56
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
+@@ -0,0 +1,165 @@
++/*
++ * Copyright 2012-16 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++
++#ifndef _DCE_CLK_MGR_H_
++#define _DCE_CLK_MGR_H_
++
++#include "../inc/hw/clk_mgr.h"
++
++#define MEMORY_TYPE_MULTIPLIER_CZ 4
++
++#define CLK_COMMON_REG_LIST_DCE_BASE() \
++ .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
++ .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
++
++#define CLK_COMMON_REG_LIST_DCN_BASE() \
++ SR(DENTIST_DISPCLK_CNTL)
++
++#define CLK_SF(reg_name, field_name, post_fix)\
++ .field_name = reg_name ## __ ## field_name ## post_fix
++
++#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
++ CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
++
++#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
++ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
++
++#define CLK_REG_FIELD_LIST(type) \
++ type DPREFCLK_SRC_SEL; \
++ type DENTIST_DPREFCLK_WDIVIDER; \
++ type DENTIST_DISPCLK_WDIVIDER; \
++ type DENTIST_DISPCLK_CHG_DONE;
++
++struct clk_mgr_shift {
++ CLK_REG_FIELD_LIST(uint8_t)
++};
++
++struct clk_mgr_mask {
++ CLK_REG_FIELD_LIST(uint32_t)
++};
++
++struct clk_mgr_registers {
++ uint32_t DPREFCLK_CNTL;
++ uint32_t DENTIST_DISPCLK_CNTL;
++};
++
++struct state_dependent_clocks {
++ int display_clk_khz;
++ int pixel_clk_khz;
++};
++
++struct dce_clk_mgr {
++ struct clk_mgr base;
++ const struct clk_mgr_registers *regs;
++ const struct clk_mgr_shift *clk_mgr_shift;
++ const struct clk_mgr_mask *clk_mgr_mask;
++
++ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
++
++ int dentist_vco_freq_khz;
++
++ /* Cache the status of DFS-bypass feature*/
++ bool dfs_bypass_enabled;
++ /* True if the DFS-bypass feature is enabled and active. */
++ bool dfs_bypass_active;
++ /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
++ * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
++ int dfs_bypass_disp_clk;
++
++ /* Flag for Enabled SS on DPREFCLK */
++ bool ss_on_dprefclk;
++ /* DPREFCLK SS percentage (if down-spread enabled) */
++ int dprefclk_ss_percentage;
++ /* DPREFCLK SS percentage Divider (100 or 1000) */
++ int dprefclk_ss_divider;
++ int dprefclk_khz;
++
++ enum dm_pp_clocks_state max_clks_state;
++ enum dm_pp_clocks_state cur_min_clks_state;
++};
++
++/* Starting DID for each range */
++enum dentist_base_divider_id {
++ DENTIST_BASE_DID_1 = 0x08,
++ DENTIST_BASE_DID_2 = 0x40,
++ DENTIST_BASE_DID_3 = 0x60,
++ DENTIST_BASE_DID_4 = 0x7e,
++ DENTIST_MAX_DID = 0x7f
++};
++
++/* Starting point and step size for each divider range.*/
++enum dentist_divider_range {
++ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
++ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
++ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
++ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
++ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
++ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
++ DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
++ DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
++ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
++};
++
++static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
++{
++ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
++}
++
++void dce_clock_read_ss_info(struct dce_clk_mgr *dccg_dce);
++
++int dce12_get_dp_ref_freq_khz(struct clk_mgr *dccg);
++
++void dce110_fill_display_configs(
++ const struct dc_state *context,
++ struct dm_pp_display_configuration *pp_display_cfg);
++
++int dce112_set_clock(struct clk_mgr *dccg, int requested_clk_khz);
++
++struct clk_mgr *dce_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask);
++
++struct clk_mgr *dce110_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask);
++
++struct clk_mgr *dce112_clk_mgr_create(
++ struct dc_context *ctx,
++ const struct clk_mgr_registers *regs,
++ const struct clk_mgr_shift *clk_shift,
++ const struct clk_mgr_mask *clk_mask);
++
++struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
++
++void dce_clk_mgr_destroy(struct clk_mgr **dccg);
++
++#endif /* _DCE_CLK_MGR_H_ */
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c
+deleted file mode 100644
+index 97c143b..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.c
++++ /dev/null
+@@ -1,876 +0,0 @@
+-/*
+- * Copyright 2012-16 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#include "dce_dccg.h"
+-
+-#include "reg_helper.h"
+-#include "dmcu.h"
+-#include "core_types.h"
+-#include "dal_asic_id.h"
+-
+-#define TO_DCE_DCCG(clocks)\
+- container_of(clocks, struct dce_dccg, base)
+-
+-#define REG(reg) \
+- (dccg_dce->regs->reg)
+-
+-#undef FN
+-#define FN(reg_name, field_name) \
+- dccg_dce->dccg_shift->field_name, dccg_dce->dccg_mask->field_name
+-
+-#define CTX \
+- dccg_dce->base.ctx
+-#define DC_LOGGER \
+- dccg->ctx->logger
+-
+-/* Max clock values for each state indexed by "enum clocks_state": */
+-static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+-/* ClocksStateInvalid - should not be used */
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/* ClocksStateLow */
+-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+-/* ClocksStateNominal */
+-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+-/* ClocksStatePerformance */
+-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+-
+-static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+-/*ClocksStateInvalid - should not be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+-/*ClocksStateLow*/
+-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+-/*ClocksStateNominal*/
+-{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+-/*ClocksStatePerformance*/
+-{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+-
+-static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+-/*ClocksStateInvalid - should not be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+-{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+-/*ClocksStateLow*/
+-{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+-/*ClocksStateNominal*/
+-{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+-/*ClocksStatePerformance*/
+-{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+-
+-static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+-/*ClocksStateInvalid - should not be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
+-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+-/*ClocksStateLow*/
+-{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+-/*ClocksStateNominal*/
+-{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+-/*ClocksStatePerformance*/
+-{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+-
+-static int dentist_get_divider_from_did(int did)
+-{
+- if (did < DENTIST_BASE_DID_1)
+- did = DENTIST_BASE_DID_1;
+- if (did > DENTIST_MAX_DID)
+- did = DENTIST_MAX_DID;
+-
+- if (did < DENTIST_BASE_DID_2) {
+- return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+- * (did - DENTIST_BASE_DID_1);
+- } else if (did < DENTIST_BASE_DID_3) {
+- return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+- * (did - DENTIST_BASE_DID_2);
+- } else {
+- return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+- * (did - DENTIST_BASE_DID_3);
+- }
+-}
+-
+-/* SW will adjust DP REF Clock average value for all purposes
+- * (DP DTO / DP Audio DTO and DP GTC)
+- if clock is spread for all cases:
+- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
+- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
+- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
+- calculations (not planned to be used, but average clock should still
+- be valid)
+- -if SS enabled on DP Ref clock and HW de-spreading disabled
+- (should not be case with CIK) then SW should program all rates
+- generated according to average value (case as with previous ASICs)
+- */
+-static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *dccg_dce, int dp_ref_clk_khz)
+-{
+- if (dccg_dce->ss_on_dprefclk && dccg_dce->dprefclk_ss_divider != 0) {
+- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+- dc_fixpt_from_fraction(dccg_dce->dprefclk_ss_percentage,
+- dccg_dce->dprefclk_ss_divider), 200);
+- struct fixed31_32 adj_dp_ref_clk_khz;
+-
+- ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+- adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+- }
+- return dp_ref_clk_khz;
+-}
+-
+-static int dce_get_dp_ref_freq_khz(struct dccg *dccg)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- int dprefclk_wdivider;
+- int dprefclk_src_sel;
+- int dp_ref_clk_khz = 600000;
+- int target_div;
+-
+- /* ASSERT DP Reference Clock source is from DFS*/
+- REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+- ASSERT(dprefclk_src_sel == 0);
+-
+- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
+- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+-
+- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
+- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+-
+- /* Calculate the current DFS clock, in kHz.*/
+- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+- * dccg_dce->dentist_vco_freq_khz) / target_div;
+-
+- return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dp_ref_clk_khz);
+-}
+-
+-int dce12_get_dp_ref_freq_khz(struct dccg *dccg)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+-
+- return dccg_adjust_dp_ref_freq_for_ss(dccg_dce, dccg_dce->dprefclk_khz);
+-}
+-
+-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
+- * may not be programmed yet
+- */
+-static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
+-{
+- uint32_t max_pix_clk = 0;
+- int i;
+-
+- for (i = 0; i < MAX_PIPES; i++) {
+- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+-
+- if (pipe_ctx->stream == NULL)
+- continue;
+-
+- /* do not check under lay */
+- if (pipe_ctx->top_pipe)
+- continue;
+-
+- if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+- max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+-
+- /* raise clock state for HBR3/2 if required. Confirmed with HW DCE/DPCS
+- * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
+- */
+- if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+- pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
+- max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
+- }
+-
+- return max_pix_clk;
+-}
+-
+-static enum dm_pp_clocks_state dce_get_required_clocks_state(
+- struct dccg *dccg,
+- struct dc_state *context)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- int i;
+- enum dm_pp_clocks_state low_req_clk;
+- int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+-
+- /* Iterate from highest supported to lowest valid state, and update
+- * lowest RequiredState with the lowest state that satisfies
+- * all required clocks
+- */
+- for (i = dccg_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+- if (context->bw.dce.dispclk_khz >
+- dccg_dce->max_clks_by_state[i].display_clk_khz
+- || max_pix_clk >
+- dccg_dce->max_clks_by_state[i].pixel_clk_khz)
+- break;
+-
+- low_req_clk = i + 1;
+- if (low_req_clk > dccg_dce->max_clks_state) {
+- /* set max clock state for high phyclock, invalid on exceeding display clock */
+- if (dccg_dce->max_clks_by_state[dccg_dce->max_clks_state].display_clk_khz
+- < context->bw.dce.dispclk_khz)
+- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+- else
+- low_req_clk = dccg_dce->max_clks_state;
+- }
+-
+- return low_req_clk;
+-}
+-
+-static int dce_set_clock(
+- struct dccg *dccg,
+- int requested_clk_khz)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+- struct dc_bios *bp = dccg->ctx->dc_bios;
+- int actual_clock = requested_clk_khz;
+- struct dmcu *dmcu = dccg_dce->base.ctx->dc->res_pool->dmcu;
+-
+- /* Make sure requested clock isn't lower than minimum threshold*/
+- if (requested_clk_khz > 0)
+- requested_clk_khz = max(requested_clk_khz,
+- dccg_dce->dentist_vco_freq_khz / 64);
+-
+- /* Prepare to program display clock*/
+- pxl_clk_params.target_pixel_clock = requested_clk_khz;
+- pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+-
+- if (dccg_dce->dfs_bypass_active)
+- pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+-
+- bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+-
+- if (dccg_dce->dfs_bypass_active) {
+- /* Cache the fixed display clock*/
+- dccg_dce->dfs_bypass_disp_clk =
+- pxl_clk_params.dfs_bypass_display_clock;
+- actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+- }
+-
+- /* from power down, we need mark the clock state as ClocksStateNominal
+- * from HWReset, so when resume we will call pplib voltage regulator.*/
+- if (requested_clk_khz == 0)
+- dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+-
+- dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+-
+- return actual_clock;
+-}
+-
+-int dce112_set_clock(struct dccg *dccg, int requested_clk_khz)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct bp_set_dce_clock_parameters dce_clk_params;
+- struct dc_bios *bp = dccg->ctx->dc_bios;
+- struct dc *core_dc = dccg->ctx->dc;
+- struct dmcu *dmcu = core_dc->res_pool->dmcu;
+- int actual_clock = requested_clk_khz;
+- /* Prepare to program display clock*/
+- memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+-
+- /* Make sure requested clock isn't lower than minimum threshold*/
+- if (requested_clk_khz > 0)
+- requested_clk_khz = max(requested_clk_khz,
+- dccg_dce->dentist_vco_freq_khz / 62);
+-
+- dce_clk_params.target_clock_frequency = requested_clk_khz;
+- dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+- dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+-
+- bp->funcs->set_dce_clock(bp, &dce_clk_params);
+- actual_clock = dce_clk_params.target_clock_frequency;
+-
+- /* from power down, we need mark the clock state as ClocksStateNominal
+- * from HWReset, so when resume we will call pplib voltage regulator.*/
+- if (requested_clk_khz == 0)
+- dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+-
+- /*Program DP ref Clock*/
+- /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+- dce_clk_params.target_clock_frequency = 0;
+- dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+- if (!ASICREV_IS_VEGA20_P(dccg->ctx->asic_id.hw_internal_rev))
+- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+- (dce_clk_params.pll_id ==
+- CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+- else
+- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+-
+- bp->funcs->set_dce_clock(bp, &dce_clk_params);
+-
+- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+- if (dccg_dce->dfs_bypass_disp_clk != actual_clock)
+- dmcu->funcs->set_psr_wait_loop(dmcu,
+- actual_clock / 1000 / 7);
+- }
+-
+- dccg_dce->dfs_bypass_disp_clk = actual_clock;
+- return actual_clock;
+-}
+-
+-static void dce_clock_read_integrated_info(struct dce_dccg *dccg_dce)
+-{
+- struct dc_debug_options *debug = &dccg_dce->base.ctx->dc->debug;
+- struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
+- struct integrated_info info = { { { 0 } } };
+- struct dc_firmware_info fw_info = { { 0 } };
+- int i;
+-
+- if (bp->integrated_info)
+- info = *bp->integrated_info;
+-
+- dccg_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0) {
+- bp->funcs->get_firmware_info(bp, &fw_info);
+- dccg_dce->dentist_vco_freq_khz =
+- fw_info.smu_gpu_pll_output_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0)
+- dccg_dce->dentist_vco_freq_khz = 3600000;
+- }
+-
+- /*update the maximum display clock for each power state*/
+- for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+- enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+-
+- switch (i) {
+- case 0:
+- clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+- break;
+-
+- case 1:
+- clk_state = DM_PP_CLOCKS_STATE_LOW;
+- break;
+-
+- case 2:
+- clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+- break;
+-
+- case 3:
+- clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+- break;
+-
+- default:
+- clk_state = DM_PP_CLOCKS_STATE_INVALID;
+- break;
+- }
+-
+- /*Do not allow bad VBIOS/SBIOS to override with invalid values,
+- * check for > 100MHz*/
+- if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+- dccg_dce->max_clks_by_state[clk_state].display_clk_khz =
+- info.disp_clk_voltage[i].max_supported_clk;
+- }
+-
+- if (!debug->disable_dfs_bypass && bp->integrated_info)
+- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+- dccg_dce->dfs_bypass_enabled = true;
+-}
+-
+-void dce_clock_read_ss_info(struct dce_dccg *dccg_dce)
+-{
+- struct dc_bios *bp = dccg_dce->base.ctx->dc_bios;
+- int ss_info_num = bp->funcs->get_ss_entry_number(
+- bp, AS_SIGNAL_TYPE_GPU_PLL);
+-
+- if (ss_info_num) {
+- struct spread_spectrum_info info = { { 0 } };
+- enum bp_result result = bp->funcs->get_spread_spectrum_info(
+- bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+-
+- /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
+- * even if SS not enabled and in that case
+- * SSInfo.spreadSpectrumPercentage !=0 would be sign
+- * that SS is enabled
+- */
+- if (result == BP_RESULT_OK &&
+- info.spread_spectrum_percentage != 0) {
+- dccg_dce->ss_on_dprefclk = true;
+- dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+-
+- if (info.type.CENTER_MODE == 0) {
+- /* TODO: Currently for DP Reference clock we
+- * need only SS percentage for
+- * downspread */
+- dccg_dce->dprefclk_ss_percentage =
+- info.spread_spectrum_percentage;
+- }
+-
+- return;
+- }
+-
+- result = bp->funcs->get_spread_spectrum_info(
+- bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+-
+- /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
+- * even if SS not enabled and in that case
+- * SSInfo.spreadSpectrumPercentage !=0 would be sign
+- * that SS is enabled
+- */
+- if (result == BP_RESULT_OK &&
+- info.spread_spectrum_percentage != 0) {
+- dccg_dce->ss_on_dprefclk = true;
+- dccg_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+-
+- if (info.type.CENTER_MODE == 0) {
+- /* Currently for DP Reference clock we
+- * need only SS percentage for
+- * downspread */
+- dccg_dce->dprefclk_ss_percentage =
+- info.spread_spectrum_percentage;
+- }
+- }
+- }
+-}
+-
+-void dce110_fill_display_configs(
+- const struct dc_state *context,
+- struct dm_pp_display_configuration *pp_display_cfg)
+-{
+- int j;
+- int num_cfgs = 0;
+-
+- for (j = 0; j < context->stream_count; j++) {
+- int k;
+-
+- const struct dc_stream_state *stream = context->streams[j];
+- struct dm_pp_single_disp_config *cfg =
+- &pp_display_cfg->disp_configs[num_cfgs];
+- const struct pipe_ctx *pipe_ctx = NULL;
+-
+- for (k = 0; k < MAX_PIPES; k++)
+- if (stream == context->res_ctx.pipe_ctx[k].stream) {
+- pipe_ctx = &context->res_ctx.pipe_ctx[k];
+- break;
+- }
+-
+- ASSERT(pipe_ctx != NULL);
+-
+- /* only notify active stream */
+- if (stream->dpms_off)
+- continue;
+-
+- num_cfgs++;
+- cfg->signal = pipe_ctx->stream->signal;
+- cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+- cfg->src_height = stream->src.height;
+- cfg->src_width = stream->src.width;
+- cfg->ddi_channel_mapping =
+- stream->sink->link->ddi_channel_mapping.raw;
+- cfg->transmitter =
+- stream->sink->link->link_enc->transmitter;
+- cfg->link_settings.lane_count =
+- stream->sink->link->cur_link_settings.lane_count;
+- cfg->link_settings.link_rate =
+- stream->sink->link->cur_link_settings.link_rate;
+- cfg->link_settings.link_spread =
+- stream->sink->link->cur_link_settings.link_spread;
+- cfg->sym_clock = stream->phy_pix_clk;
+- /* Round v_refresh*/
+- cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+- cfg->v_refresh /= stream->timing.h_total;
+- cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+- / stream->timing.v_total;
+- }
+-
+- pp_display_cfg->display_count = num_cfgs;
+-}
+-
+-static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+-{
+- uint8_t j;
+- uint32_t min_vertical_blank_time = -1;
+-
+- for (j = 0; j < context->stream_count; j++) {
+- struct dc_stream_state *stream = context->streams[j];
+- uint32_t vertical_blank_in_pixels = 0;
+- uint32_t vertical_blank_time = 0;
+-
+- vertical_blank_in_pixels = stream->timing.h_total *
+- (stream->timing.v_total
+- - stream->timing.v_addressable);
+-
+- vertical_blank_time = vertical_blank_in_pixels
+- * 1000 / stream->timing.pix_clk_khz;
+-
+- if (min_vertical_blank_time > vertical_blank_time)
+- min_vertical_blank_time = vertical_blank_time;
+- }
+-
+- return min_vertical_blank_time;
+-}
+-
+-static int determine_sclk_from_bounding_box(
+- const struct dc *dc,
+- int required_sclk)
+-{
+- int i;
+-
+- /*
+- * Some asics do not give us sclk levels, so we just report the actual
+- * required sclk
+- */
+- if (dc->sclk_lvls.num_levels == 0)
+- return required_sclk;
+-
+- for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+- if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+- return dc->sclk_lvls.clocks_in_khz[i];
+- }
+- /*
+- * even maximum level could not satisfy requirement, this
+- * is unexpected at this stage, should have been caught at
+- * validation time
+- */
+- ASSERT(0);
+- return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+-}
+-
+-static void dce_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+-
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-}
+-
+-static void dce11_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->all_displays_in_sync =
+- context->bw.dce.all_displays_in_sync;
+- pp_display_cfg->nb_pstate_switch_disable =
+- context->bw.dce.nbp_state_change_enable == false;
+- pp_display_cfg->cpu_cc6_disable =
+- context->bw.dce.cpuc_state_change_enable == false;
+- pp_display_cfg->cpu_pstate_disable =
+- context->bw.dce.cpup_state_change_enable == false;
+- pp_display_cfg->cpu_pstate_separation_time =
+- context->bw.dce.blackout_recovery_time_us;
+-
+- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+- / MEMORY_TYPE_MULTIPLIER_CZ;
+-
+- pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+- dc,
+- context->bw.dce.sclk_khz);
+-
+- pp_display_cfg->min_engine_clock_deep_sleep_khz
+- = context->bw.dce.sclk_deep_sleep_khz;
+-
+- pp_display_cfg->avail_mclk_switch_time_us =
+- dce110_get_min_vblank_time_us(context);
+- /* TODO: dce11.2*/
+- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+-
+- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+-
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- /* TODO: is this still applicable?*/
+- if (pp_display_cfg->display_count == 1) {
+- const struct dc_crtc_timing *timing =
+- &context->streams[0]->timing;
+-
+- pp_display_cfg->crtc_index =
+- pp_display_cfg->disp_configs[0].pipe_idx;
+- pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
+- }
+-
+- if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-}
+-
+-static void dce_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_power_level_change_request level_change_req;
+- int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+-
+- /*TODO: W/A for dal3 linux, investigate why this works */
+- if (!dccg_dce->dfs_bypass_active)
+- context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+-
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+- /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg_dce->cur_min_clks_state = level_change_req.power_level;
+- }
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
+- dce_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-
+- context->bw.dce.dispclk_khz = unpatched_disp_clk;
+-}
+-
+-static void dce11_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_power_level_change_request level_change_req;
+-
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+- /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg_dce->cur_min_clks_state = level_change_req.power_level;
+- }
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- context->bw.dce.dispclk_khz = dce_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
+- dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-}
+-
+-static void dce112_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_power_level_change_request level_change_req;
+-
+- level_change_req.power_level = dce_get_required_clocks_state(dccg, context);
+- /* get max clock state from PPLIB */
+- if ((level_change_req.power_level < dccg_dce->cur_min_clks_state && safe_to_lower)
+- || level_change_req.power_level > dccg_dce->cur_min_clks_state) {
+- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
+- dccg_dce->cur_min_clks_state = level_change_req.power_level;
+- }
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+- }
+- dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-}
+-
+-static void dce12_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(dccg);
+- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+- int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+- int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+-
+- /*TODO: W/A for dal3 linux, investigate why this works */
+- if (!dccg_dce->dfs_bypass_active)
+- context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+-
+- if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, dccg->clks.dispclk_khz)) {
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+- clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
+- context->bw.dce.dispclk_khz = dce112_set_clock(dccg, context->bw.dce.dispclk_khz);
+- dccg->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+-
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- }
+-
+- if (should_set_clock(safe_to_lower, max_pix_clk, dccg->clks.phyclk_khz)) {
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+- clock_voltage_req.clocks_in_khz = max_pix_clk;
+- dccg->clks.phyclk_khz = max_pix_clk;
+-
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- }
+- dce11_pplib_apply_display_requirements(dccg->ctx->dc, context);
+-
+- context->bw.dce.dispclk_khz = unpatched_disp_clk;
+-}
+-
+-static const struct dccg_funcs dce120_funcs = {
+- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+- .update_clocks = dce12_update_clocks
+-};
+-
+-static const struct dccg_funcs dce112_funcs = {
+- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .update_clocks = dce112_update_clocks
+-};
+-
+-static const struct dccg_funcs dce110_funcs = {
+- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .update_clocks = dce11_update_clocks,
+-};
+-
+-static const struct dccg_funcs dce_funcs = {
+- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+- .update_clocks = dce_update_clocks
+-};
+-
+-static void dce_dccg_construct(
+- struct dce_dccg *dccg_dce,
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dccg *base = &dccg_dce->base;
+- struct dm_pp_static_clock_info static_clk_info = {0};
+-
+- base->ctx = ctx;
+- base->funcs = &dce_funcs;
+-
+- dccg_dce->regs = regs;
+- dccg_dce->dccg_shift = clk_shift;
+- dccg_dce->dccg_mask = clk_mask;
+-
+- dccg_dce->dfs_bypass_disp_clk = 0;
+-
+- dccg_dce->dprefclk_ss_percentage = 0;
+- dccg_dce->dprefclk_ss_divider = 1000;
+- dccg_dce->ss_on_dprefclk = false;
+-
+-
+- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+- dccg_dce->max_clks_state = static_clk_info.max_clocks_state;
+- else
+- dccg_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+- dccg_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+-
+- dce_clock_read_integrated_info(dccg_dce);
+- dce_clock_read_ss_info(dccg_dce);
+-}
+-
+-struct dccg *dce_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce80_max_clks_by_state,
+- sizeof(dce80_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, regs, clk_shift, clk_mask);
+-
+- return &dccg_dce->base;
+-}
+-
+-struct dccg *dce110_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce110_max_clks_by_state,
+- sizeof(dce110_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, regs, clk_shift, clk_mask);
+-
+- dccg_dce->base.funcs = &dce110_funcs;
+-
+- return &dccg_dce->base;
+-}
+-
+-struct dccg *dce112_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce112_max_clks_by_state,
+- sizeof(dce112_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, regs, clk_shift, clk_mask);
+-
+- dccg_dce->base.funcs = &dce112_funcs;
+-
+- return &dccg_dce->base;
+-}
+-
+-struct dccg *dce120_dccg_create(struct dc_context *ctx)
+-{
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- memcpy(dccg_dce->max_clks_by_state,
+- dce120_max_clks_by_state,
+- sizeof(dce120_max_clks_by_state));
+-
+- dce_dccg_construct(
+- dccg_dce, ctx, NULL, NULL, NULL);
+-
+- dccg_dce->dprefclk_khz = 600000;
+- dccg_dce->base.funcs = &dce120_funcs;
+-
+- return &dccg_dce->base;
+-}
+-
+-void dce_dccg_destroy(struct dccg **dccg)
+-{
+- struct dce_dccg *dccg_dce = TO_DCE_DCCG(*dccg);
+-
+- kfree(dccg_dce);
+- *dccg = NULL;
+-}
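
For reference, the rounded refresh-rate math in dce110_fill_display_configs() above can be checked with a small standalone sketch; compute_v_refresh_hz is an illustrative name used only here, not a driver function.

#include <stdint.h>

/* Illustrative restatement of the v_refresh rounding in
 * dce110_fill_display_configs(): pix_clk is in kHz, so multiply by 1000
 * to get Hz, divide by h_total, then divide by v_total rounding to the
 * nearest integer (add half of v_total before the final division). */
static uint32_t compute_v_refresh_hz(uint32_t pix_clk_khz,
				     uint32_t h_total,
				     uint32_t v_total)
{
	uint32_t v_refresh = pix_clk_khz * 1000 / h_total;

	return (v_refresh + v_total / 2) / v_total;
}

For example, a 148500 kHz pixel clock with h_total 2200 and v_total 1125 gives 67500 / 1125 rounded, i.e. 60 Hz, matching the intent of the /* Round v_refresh */ block above.
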
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h
+deleted file mode 100644
+index 786d963..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dccg.h
++++ /dev/null
+@@ -1,165 +0,0 @@
+-/*
+- * Copyright 2012-16 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-
+-#ifndef _DCE_DCCG_H_
+-#define _DCE_DCCG_H_
+-
+-#include "dccg.h"
+-
+-#define MEMORY_TYPE_MULTIPLIER_CZ 4
+-
+-#define CLK_COMMON_REG_LIST_DCE_BASE() \
+- .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+- .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+-
+-#define CLK_COMMON_REG_LIST_DCN_BASE() \
+- SR(DENTIST_DISPCLK_CNTL)
+-
+-#define CLK_SF(reg_name, field_name, post_fix)\
+- .field_name = reg_name ## __ ## field_name ## post_fix
+-
+-#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+- CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+-
+-#define CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh) \
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, mask_sh),\
+- CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+-
+-#define CLK_REG_FIELD_LIST(type) \
+- type DPREFCLK_SRC_SEL; \
+- type DENTIST_DPREFCLK_WDIVIDER; \
+- type DENTIST_DISPCLK_WDIVIDER; \
+- type DENTIST_DISPCLK_CHG_DONE;
+-
+-struct dccg_shift {
+- CLK_REG_FIELD_LIST(uint8_t)
+-};
+-
+-struct dccg_mask {
+- CLK_REG_FIELD_LIST(uint32_t)
+-};
+-
+-struct dccg_registers {
+- uint32_t DPREFCLK_CNTL;
+- uint32_t DENTIST_DISPCLK_CNTL;
+-};
+-
+-struct state_dependent_clocks {
+- int display_clk_khz;
+- int pixel_clk_khz;
+-};
+-
+-struct dce_dccg {
+- struct dccg base;
+- const struct dccg_registers *regs;
+- const struct dccg_shift *dccg_shift;
+- const struct dccg_mask *dccg_mask;
+-
+- struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+-
+- int dentist_vco_freq_khz;
+-
+- /* Cache the status of DFS-bypass feature*/
+- bool dfs_bypass_enabled;
+- /* True if the DFS-bypass feature is enabled and active. */
+- bool dfs_bypass_active;
+- /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+- * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
+- int dfs_bypass_disp_clk;
+-
+- /* Flag for Enabled SS on DPREFCLK */
+- bool ss_on_dprefclk;
+- /* DPREFCLK SS percentage (if down-spread enabled) */
+- int dprefclk_ss_percentage;
+- /* DPREFCLK SS percentage Divider (100 or 1000) */
+- int dprefclk_ss_divider;
+- int dprefclk_khz;
+-
+- enum dm_pp_clocks_state max_clks_state;
+- enum dm_pp_clocks_state cur_min_clks_state;
+-};
+-
+-/* Starting DID for each range */
+-enum dentist_base_divider_id {
+- DENTIST_BASE_DID_1 = 0x08,
+- DENTIST_BASE_DID_2 = 0x40,
+- DENTIST_BASE_DID_3 = 0x60,
+- DENTIST_BASE_DID_4 = 0x7e,
+- DENTIST_MAX_DID = 0x7f
+-};
+-
+-/* Starting point and step size for each divider range.*/
+-enum dentist_divider_range {
+- DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
+- DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
+- DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
+- DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
+- DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+- DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+- DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+- DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
+- DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+-};
+-
+-static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+-{
+- return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+-}
+-
+-void dce_clock_read_ss_info(struct dce_dccg *dccg_dce);
+-
+-int dce12_get_dp_ref_freq_khz(struct dccg *dccg);
+-
+-void dce110_fill_display_configs(
+- const struct dc_state *context,
+- struct dm_pp_display_configuration *pp_display_cfg);
+-
+-int dce112_set_clock(struct dccg *dccg, int requested_clk_khz);
+-
+-struct dccg *dce_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask);
+-
+-struct dccg *dce110_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask);
+-
+-struct dccg *dce112_dccg_create(
+- struct dc_context *ctx,
+- const struct dccg_registers *regs,
+- const struct dccg_shift *clk_shift,
+- const struct dccg_mask *clk_mask);
+-
+-struct dccg *dce120_dccg_create(struct dc_context *ctx);
+-
+-void dce_dccg_destroy(struct dccg **dccg);
+-
+-#endif /* _DCE_DCCG_H_ */
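
The should_set_clock() helper removed above (and re-introduced under dce_clk_mgr.h) encodes the "raise immediately, lower only when safe" rule used throughout the update_clocks paths; a minimal standalone sketch, restating the same predicate outside the driver, shows the three interesting cases.

#include <stdbool.h>
#include <stdio.h>

/* Same predicate as the inline helper above: a clock may be raised at any
 * time, but may only be lowered once safe_to_lower is set. */
static bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	return (safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk;
}

int main(void)
{
	/* Raising 300 MHz -> 600 MHz is applied immediately. */
	printf("%d\n", should_set_clock(false, 600000, 300000)); /* 1 */
	/* Lowering 600 MHz -> 300 MHz is deferred until safe_to_lower... */
	printf("%d\n", should_set_clock(false, 300000, 600000)); /* 0 */
	/* ...and applied once the new state is committed. */
	printf("%d\n", should_set_clock(true, 300000, 600000));  /* 1 */
	return 0;
}
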
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+index 5055026..bc50a8e 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+@@ -111,8 +111,8 @@ void dce100_prepare_bandwidth(
+ {
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+- dc->res_pool->dccg->funcs->update_clocks(
+- dc->res_pool->dccg,
++ dc->res_pool->clk_mgr->funcs->update_clocks(
++ dc->res_pool->clk_mgr,
+ context,
+ false);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+index 36015f7..6ae51a5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+@@ -22,7 +22,6 @@
+ * Authors: AMD
+ *
+ */
+-#include "../dce/dce_dccg.h"
+ #include "dm_services.h"
+
+ #include "link_encoder.h"
+@@ -37,6 +36,7 @@
+ #include "dce/dce_link_encoder.h"
+ #include "dce/dce_stream_encoder.h"
+
++#include "dce/dce_clk_mgr.h"
+ #include "dce/dce_mem_input.h"
+ #include "dce/dce_ipp.h"
+ #include "dce/dce_transform.h"
+@@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+-static const struct dccg_registers disp_clk_regs = {
++static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dccg_shift disp_clk_shift = {
++static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dccg_mask disp_clk_mask = {
++static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+@@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+@@ -907,11 +907,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dce_dccg_create(ctx,
++ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+index e58a34e..d8b0533 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+@@ -2354,7 +2354,7 @@ void dce110_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+ {
+- struct dccg *dccg = dc->res_pool->dccg;
++ struct clk_mgr *dccg = dc->res_pool->clk_mgr;
+
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+@@ -2368,7 +2368,7 @@ void dce110_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+ {
+- struct dccg *dccg = dc->res_pool->dccg;
++ struct clk_mgr *dccg = dc->res_pool->clk_mgr;
+
+ dce110_set_displaymarks(dc, context);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+index d78b064..c5714eb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+@@ -31,7 +31,7 @@
+ #include "resource.h"
+ #include "dce110/dce110_resource.h"
+
+-#include "../dce/dce_dccg.h"
++#include "dce/dce_clk_mgr.h"
+ #include "include/irq_service_interface.h"
+ #include "dce/dce_audio.h"
+ #include "dce110/dce110_timing_generator.h"
+@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
+ #define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+-static const struct dccg_registers disp_clk_regs = {
++static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dccg_shift disp_clk_shift = {
++static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dccg_mask disp_clk_mask = {
++static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+@@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+@@ -1256,11 +1256,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dce110_dccg_create(ctx,
++ pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+index e73b139..969d4e7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+@@ -23,7 +23,6 @@
+ *
+ */
+
+-#include "../dce/dce_dccg.h"
+ #include "dm_services.h"
+
+ #include "link_encoder.h"
+@@ -36,6 +35,7 @@
+
+ #include "irq/dce110/irq_service_dce110.h"
+
++#include "dce/dce_clk_mgr.h"
+ #include "dce/dce_mem_input.h"
+ #include "dce/dce_transform.h"
+ #include "dce/dce_link_encoder.h"
+@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+-static const struct dccg_registers disp_clk_regs = {
++static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dccg_shift disp_clk_shift = {
++static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dccg_mask disp_clk_mask = {
++static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+@@ -750,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+@@ -1199,11 +1199,11 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dce112_dccg_create(ctx,
++ pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+index a69e89f..f126966 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+@@ -32,7 +32,6 @@
+ #include "include/irq_service_interface.h"
+ #include "dce120_resource.h"
+
+-#include "../dce/dce_dccg.h"
+ #include "dce112/dce112_resource.h"
+
+ #include "dce110/dce110_resource.h"
+@@ -48,6 +47,7 @@
+ #include "dce120/dce120_hw_sequencer.h"
+ #include "dce/dce_transform.h"
+
++#include "dce/dce_clk_mgr.h"
+ #include "dce/dce_audio.h"
+ #include "dce/dce_link_encoder.h"
+ #include "dce/dce_stream_encoder.h"
+@@ -574,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+ }
+
+ static void read_dce_straps(
+@@ -975,8 +975,8 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dce120_dccg_create(ctx);
+- if (pool->base.dccg == NULL) {
++ pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto dccg_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+index 1fccb52..6d40b3d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+@@ -23,7 +23,6 @@
+ *
+ */
+
+-#include "../dce/dce_dccg.h"
+ #include "dce/dce_8_0_d.h"
+ #include "dce/dce_8_0_sh_mask.h"
+
+@@ -38,6 +37,7 @@
+ #include "dce110/dce110_timing_generator.h"
+ #include "dce110/dce110_resource.h"
+ #include "dce80/dce80_timing_generator.h"
++#include "dce/dce_clk_mgr.h"
+ #include "dce/dce_mem_input.h"
+ #include "dce/dce_link_encoder.h"
+ #include "dce/dce_stream_encoder.h"
+@@ -155,15 +155,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+-static const struct dccg_registers disp_clk_regs = {
++static const struct clk_mgr_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+ };
+
+-static const struct dccg_shift disp_clk_shift = {
++static const struct clk_mgr_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+ };
+
+-static const struct dccg_mask disp_clk_mask = {
++static const struct clk_mgr_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+ };
+
+@@ -779,8 +779,8 @@ static void destruct(struct dce110_resource_pool *pool)
+ }
+ }
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+@@ -917,11 +917,11 @@ static bool dce80_construct(
+ }
+ }
+
+- pool->base.dccg = dce_dccg_create(ctx,
++ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -1122,11 +1122,11 @@ static bool dce81_construct(
+ }
+ }
+
+- pool->base.dccg = dce_dccg_create(ctx,
++ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+@@ -1323,11 +1323,11 @@ static bool dce83_construct(
+ }
+ }
+
+- pool->base.dccg = dce_dccg_create(ctx,
++ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+- if (pool->base.dccg == NULL) {
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+index e13ab66..55f293c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+@@ -24,7 +24,7 @@
+
+ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
+- dcn10_hubp.o dcn10_mpc.o dcn10_dccg.o \
++ dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \
+ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
+ dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+new file mode 100644
+index 0000000..6f329d1
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+@@ -0,0 +1,360 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#include "dcn10_clk_mgr.h"
++
++#include "reg_helper.h"
++#include "core_types.h"
++
++#define TO_DCE_CLK_MGR(clocks)\
++ container_of(clocks, struct dce_clk_mgr, base)
++
++#define REG(reg) \
++ (clk_mgr_dce->regs->reg)
++
++#undef FN
++#define FN(reg_name, field_name) \
++ clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
++
++#define CTX \
++ clk_mgr_dce->base.ctx
++#define DC_LOGGER \
++ clk_mgr->ctx->logger
++
++void dcn1_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
++
++ pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
++ pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
++ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
++ pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
++ dce110_fill_display_configs(context, pp_display_cfg);
++
++ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
++ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
++}
++
++static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
++{
++ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
++ bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
++ int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
++ bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
++
++ /* increase clock, looking for div is 0 for current, request div is 1*/
++ if (dispclk_increase) {
++ /* already divided by 2, no need to reach target clk with 2 steps*/
++ if (cur_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ /* request disp clk is lower than maximum supported dpp clk,
++ * no need to reach target clk with two steps.
++ */
++ if (new_clocks->dispclk_khz <= disp_clk_threshold)
++ return new_clocks->dispclk_khz;
++
++ /* target dpp clk not request divided by 2, still within threshold */
++ if (!request_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ } else {
++ /* decrease clock, looking for current dppclk divided by 2,
++ * request dppclk not divided by 2.
++ */
++
++ /* current dpp clk not divided by 2, no need to ramp*/
++ if (!cur_dpp_div)
++ return new_clocks->dispclk_khz;
++
++ /* current disp clk is lower than current maximum dpp clk,
++ * no need to ramp
++ */
++ if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
++ return new_clocks->dispclk_khz;
++
++ /* request dpp clk need to be divided by 2 */
++ if (request_dpp_div)
++ return new_clocks->dispclk_khz;
++ }
++
++ return disp_clk_threshold;
++}
++
++static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
++{
++ struct dc *dc = clk_mgr->ctx->dc;
++ int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
++ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
++ int i;
++
++ /* set disp clk to dpp clk threshold */
++ dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
++
++ /* update request dpp clk division option */
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
++
++ if (!pipe_ctx->plane_state)
++ continue;
++
++ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
++ pipe_ctx->plane_res.dpp,
++ request_dpp_div,
++ true);
++ }
++
++ /* If target clk not same as dppclk threshold, set to target clock */
++ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
++ dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);
++
++ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
++ clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
++ clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
++}
++
++static int get_active_display_cnt(
++ struct dc *dc,
++ struct dc_state *context)
++{
++ int i, display_count;
++
++ display_count = 0;
++ for (i = 0; i < context->stream_count; i++) {
++ const struct dc_stream_state *stream = context->streams[i];
++
++ /*
++ * Only notify active stream or virtual stream.
++ * Need to notify virtual stream to work around
++ * headless case. HPD does not fire when system is in
++ * S0i2.
++ */
++ if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
++ display_count++;
++ }
++
++ return display_count;
++}
++
++static void notify_deep_sleep_dcfclk_to_smu(
++ struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
++{
++ int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz
++ /*
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ * So just return.
++ */
++ if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
++ return;
++
++ min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
++ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
++}
++
++static void notify_hard_min_dcfclk_to_smu(
++ struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
++{
++ int min_dcf_clk_mhz; //minimum required DCF clock in mhz
++
++ /*
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ * So just return.
++ */
++ if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
++ return;
++
++ min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
++
++ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
++}
++
++static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower)
++{
++ struct dc *dc = clk_mgr->ctx->dc;
++ struct dc_clocks *new_clocks = &context->bw.dcn.clk;
++ struct pp_smu_display_requirement_rv *smu_req_cur =
++ &dc->res_pool->pp_smu_req;
++ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
++ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
++ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
++ bool send_request_to_increase = false;
++ bool send_request_to_lower = false;
++ int display_count;
++
++ bool enter_display_off = false;
++
++ display_count = get_active_display_cnt(dc, context);
++
++ if (display_count == 0)
++ enter_display_off = true;
++
++ if (enter_display_off == safe_to_lower) {
++ /*
++ * Notify SMU active displays
++ * if function pointer not set up, this message is
++ * sent as part of pplib_apply_display_requirements.
++ */
++ if (pp_smu->set_display_count)
++ pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
++ else
++ smu_req.display_count = display_count;
++
++ }
++
++ if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
++ || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
++ || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
++ || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
++ send_request_to_increase = true;
++
++ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
++ clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ // F Clock
++ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
++ clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
++ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
++ smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
++
++ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
++ send_request_to_lower = true;
++ }
++
++ //DCF Clock
++ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
++ clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
++ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ if (should_set_clock(safe_to_lower,
++ new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
++ clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
++ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
++
++ send_request_to_lower = true;
++ }
++
++ /* make sure dcf clk is before dpp clk to
++ * make sure we have enough voltage to run dpp clk
++ */
++ if (send_request_to_increase) {
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
++
++ notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++
++ notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
++ dcn1_pplib_apply_display_requirements(dc, context);
++ }
++
++ /* dcn1 dppclk is tied to dispclk */
++ /* program dispclk on = as a w/a for sleep resume clock ramping issues */
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
++ || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
++ dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
++ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
++
++ send_request_to_lower = true;
++ }
++
++ if (!send_request_to_increase && send_request_to_lower) {
++ /*use dcfclk to request voltage*/
++ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
++
++ notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
++
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++
++ notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
++ dcn1_pplib_apply_display_requirements(dc, context);
++ }
++
++
++ *smu_req_cur = smu_req;
++}
++
++static const struct clk_mgr_funcs dcn1_funcs = {
++ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
++ .update_clocks = dcn1_update_clocks
++};
++
++struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
++{
++ struct dc_debug_options *debug = &ctx->dc->debug;
++ struct dc_bios *bp = ctx->dc_bios;
++ struct dc_firmware_info fw_info = { { 0 } };
++ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
++
++ if (clk_mgr_dce == NULL) {
++ BREAK_TO_DEBUGGER();
++ return NULL;
++ }
++
++ clk_mgr_dce->base.ctx = ctx;
++ clk_mgr_dce->base.funcs = &dcn1_funcs;
++
++ clk_mgr_dce->dfs_bypass_disp_clk = 0;
++
++ clk_mgr_dce->dprefclk_ss_percentage = 0;
++ clk_mgr_dce->dprefclk_ss_divider = 1000;
++ clk_mgr_dce->ss_on_dprefclk = false;
++
++ clk_mgr_dce->dprefclk_khz = 600000;
++ if (bp->integrated_info)
++ clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
++ if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
++ bp->funcs->get_firmware_info(bp, &fw_info);
++ clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
++ if (clk_mgr_dce->dentist_vco_freq_khz == 0)
++ clk_mgr_dce->dentist_vco_freq_khz = 3600000;
++ }
++
++ if (!debug->disable_dfs_bypass && bp->integrated_info)
++ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
++ clk_mgr_dce->dfs_bypass_enabled = true;
++
++ dce_clock_read_ss_info(clk_mgr_dce);
++
++ return &clk_mgr_dce->base;
++}
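
The two-step dispclk ramp added above in dcn1_determine_dppclk_threshold() and dcn1_ramp_up_dispclk_with_dpp() only inserts an intermediate clock when the DPP divide-by-2 setting has to flip while crossing max_supported_dppclk_khz; the sketch below mirrors that decision tree under an illustrative name (pick_intermediate_dispclk) and is not driver code.

#include <stdbool.h>

/* Illustrative restatement of dcn1_determine_dppclk_threshold(): return the
 * intermediate dispclk to program first, or the target dispclk when a single
 * step is enough. */
static int pick_intermediate_dispclk(int cur_dispclk_khz, int cur_dppclk_khz,
				     int new_dispclk_khz, int new_dppclk_khz,
				     int max_supported_dppclk_khz)
{
	bool request_dpp_div = new_dispclk_khz > new_dppclk_khz;
	bool cur_dpp_div = cur_dispclk_khz > cur_dppclk_khz;
	bool dispclk_increase = new_dispclk_khz > cur_dispclk_khz;
	int threshold = max_supported_dppclk_khz;

	if (dispclk_increase) {
		/* One step is enough if the divider is already engaged, the
		 * target stays under the threshold, or the target does not
		 * need the divider at all. */
		if (cur_dpp_div || new_dispclk_khz <= threshold || !request_dpp_div)
			return new_dispclk_khz;
	} else {
		/* One step is enough if the divider is not engaged now, the
		 * current clock is already under the threshold, or the target
		 * keeps the divider engaged. */
		if (!cur_dpp_div || cur_dispclk_khz <= threshold || request_dpp_div)
			return new_dispclk_khz;
	}

	return threshold;
}

As in the driver code above, the caller first programs the returned intermediate clock, updates the per-pipe dpp_dppclk_control divide option, and only then programs the final dispclk when the two values differ.
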
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+new file mode 100644
+index 0000000..9dbaf65
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+@@ -0,0 +1,37 @@
++/*
++ * Copyright 2018 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DCN10_CLK_MGR_H__
++#define __DCN10_CLK_MGR_H__
++
++#include "../dce/dce_clk_mgr.h"
++
++void dcn1_pplib_apply_display_requirements(
++ struct dc *dc,
++ struct dc_state *context);
++
++struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
++
++#endif //__DCN10_CLK_MGR_H__
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
+deleted file mode 100644
+index 5ffc367..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.c
++++ /dev/null
+@@ -1,361 +0,0 @@
+-/*
+- * Copyright 2018 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#include "dcn10_dccg.h"
+-
+-#include "reg_helper.h"
+-#include "core_types.h"
+-
+-#define TO_DCE_DCCG(clocks)\
+- container_of(clocks, struct dce_dccg, base)
+-
+-#define REG(reg) \
+- (dccg_dce->regs->reg)
+-
+-#undef FN
+-#define FN(reg_name, field_name) \
+- dccg_dce->dccg_shift->field_name, dccg_dce->dccg_mask->field_name
+-
+-#define CTX \
+- dccg_dce->base.ctx
+-#define DC_LOGGER \
+- dccg->ctx->logger
+-
+-void dcn1_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+-
+- pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+- pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
+- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+- dce110_fill_display_configs(context, pp_display_cfg);
+-
+- if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+-}
+-
+-static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
+-{
+- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+- bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
+- int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+- bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
+-
+- /* increase clock, looking for div is 0 for current, request div is 1*/
+- if (dispclk_increase) {
+- /* already divided by 2, no need to reach target clk with 2 steps*/
+- if (cur_dpp_div)
+- return new_clocks->dispclk_khz;
+-
+- /* request disp clk is lower than maximum supported dpp clk,
+- * no need to reach target clk with two steps.
+- */
+- if (new_clocks->dispclk_khz <= disp_clk_threshold)
+- return new_clocks->dispclk_khz;
+-
+- /* target dpp clk not request divided by 2, still within threshold */
+- if (!request_dpp_div)
+- return new_clocks->dispclk_khz;
+-
+- } else {
+- /* decrease clock, looking for current dppclk divided by 2,
+- * request dppclk not divided by 2.
+- */
+-
+- /* current dpp clk not divided by 2, no need to ramp*/
+- if (!cur_dpp_div)
+- return new_clocks->dispclk_khz;
+-
+- /* current disp clk is lower than current maximum dpp clk,
+- * no need to ramp
+- */
+- if (dccg->clks.dispclk_khz <= disp_clk_threshold)
+- return new_clocks->dispclk_khz;
+-
+- /* request dpp clk need to be divided by 2 */
+- if (request_dpp_div)
+- return new_clocks->dispclk_khz;
+- }
+-
+- return disp_clk_threshold;
+-}
+-
+-static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
+-{
+- struct dc *dc = dccg->ctx->dc;
+- int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
+- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+- int i;
+-
+- /* set disp clk to dpp clk threshold */
+- dce112_set_clock(dccg, dispclk_to_dpp_threshold);
+-
+- /* update request dpp clk division option */
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+-
+- if (!pipe_ctx->plane_state)
+- continue;
+-
+- pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+- pipe_ctx->plane_res.dpp,
+- request_dpp_div,
+- true);
+- }
+-
+- /* If target clk not same as dppclk threshold, set to target clock */
+- if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+- dce112_set_clock(dccg, new_clocks->dispclk_khz);
+-
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+- dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
+- dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+-}
+-
+-static int get_active_display_cnt(
+- struct dc *dc,
+- struct dc_state *context)
+-{
+- int i, display_count;
+-
+- display_count = 0;
+- for (i = 0; i < context->stream_count; i++) {
+- const struct dc_stream_state *stream = context->streams[i];
+-
+- /*
+- * Only notify active stream or virtual stream.
+- * Need to notify virtual stream to work around
+- * headless case. HPD does not fire when system is in
+- * S0i2.
+- */
+- if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+- display_count++;
+- }
+-
+- return display_count;
+-}
+-
+-static void notify_deep_sleep_dcfclk_to_smu(
+- struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
+-{
+- int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in mhz
+- /*
+- * if function pointer not set up, this message is
+- * sent as part of pplib_apply_display_requirements.
+- * So just return.
+- */
+- if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
+- return;
+-
+- min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
+- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
+-}
+-
+-static void notify_hard_min_dcfclk_to_smu(
+- struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
+-{
+- int min_dcf_clk_mhz; //minimum required DCF clock in mhz
+-
+- /*
+- * if function pointer not set up, this message is
+- * sent as part of pplib_apply_display_requirements.
+- * So just return.
+- */
+- if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
+- return;
+-
+- min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
+-
+- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
+-}
+-
+-static void dcn1_update_clocks(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower)
+-{
+- struct dc *dc = dccg->ctx->dc;
+- struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+- struct pp_smu_display_requirement_rv *smu_req_cur =
+- &dc->res_pool->pp_smu_req;
+- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+- bool send_request_to_increase = false;
+- bool send_request_to_lower = false;
+- int display_count;
+-
+- bool enter_display_off = false;
+-
+- display_count = get_active_display_cnt(dc, context);
+-
+- if (display_count == 0)
+- enter_display_off = true;
+-
+- if (enter_display_off == safe_to_lower) {
+- /*
+- * Notify SMU active displays
+- * if function pointer not set up, this message is
+- * sent as part of pplib_apply_display_requirements.
+- */
+- if (pp_smu->set_display_count)
+- pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
+- else
+- smu_req.display_count = display_count;
+-
+- }
+-
+- if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
+- || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
+- || new_clocks->fclk_khz > dccg->clks.fclk_khz
+- || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
+- send_request_to_increase = true;
+-
+- if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+- dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- // F Clock
+- if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+- dccg->clks.fclk_khz = new_clocks->fclk_khz;
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+- clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
+- smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+-
+- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+- send_request_to_lower = true;
+- }
+-
+- //DCF Clock
+- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+- dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+- smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- if (should_set_clock(safe_to_lower,
+- new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
+- dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+- smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- /* make sure dcf clk is before dpp clk to
+- * make sure we have enough voltage to run dpp clk
+- */
+- if (send_request_to_increase) {
+- /*use dcfclk to request voltage*/
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+-
+- notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+-
+- if (pp_smu->set_display_requirement)
+- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+-
+- notify_deep_sleep_dcfclk_to_smu(pp_smu, dccg->clks.dcfclk_deep_sleep_khz);
+- dcn1_pplib_apply_display_requirements(dc, context);
+- }
+-
+- /* dcn1 dppclk is tied to dispclk */
+- /* program dispclk on = as a w/a for sleep resume clock ramping issues */
+- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
+- || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
+- dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
+- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+-
+- send_request_to_lower = true;
+- }
+-
+- if (!send_request_to_increase && send_request_to_lower) {
+- /*use dcfclk to request voltage*/
+- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+-
+- notify_hard_min_dcfclk_to_smu(pp_smu, clock_voltage_req.clocks_in_khz);
+-
+- if (pp_smu->set_display_requirement)
+- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+-
+- notify_deep_sleep_dcfclk_to_smu(pp_smu, dccg->clks.dcfclk_deep_sleep_khz);
+- dcn1_pplib_apply_display_requirements(dc, context);
+- }
+-
+-
+- *smu_req_cur = smu_req;
+-}
+-
+-static const struct dccg_funcs dcn1_funcs = {
+- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+- .update_clocks = dcn1_update_clocks
+-};
+-
+-struct dccg *dcn1_dccg_create(struct dc_context *ctx)
+-{
+- struct dc_debug_options *debug = &ctx->dc->debug;
+- struct dc_bios *bp = ctx->dc_bios;
+- struct dc_firmware_info fw_info = { { 0 } };
+- struct dce_dccg *dccg_dce = kzalloc(sizeof(*dccg_dce), GFP_KERNEL);
+-
+- if (dccg_dce == NULL) {
+- BREAK_TO_DEBUGGER();
+- return NULL;
+- }
+-
+- dccg_dce->base.ctx = ctx;
+- dccg_dce->base.funcs = &dcn1_funcs;
+-
+- dccg_dce->dfs_bypass_disp_clk = 0;
+-
+- dccg_dce->dprefclk_ss_percentage = 0;
+- dccg_dce->dprefclk_ss_divider = 1000;
+- dccg_dce->ss_on_dprefclk = false;
+-
+- dccg_dce->dprefclk_khz = 600000;
+- if (bp->integrated_info)
+- dccg_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0) {
+- bp->funcs->get_firmware_info(bp, &fw_info);
+- dccg_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+- if (dccg_dce->dentist_vco_freq_khz == 0)
+- dccg_dce->dentist_vco_freq_khz = 3600000;
+- }
+-
+- if (!debug->disable_dfs_bypass && bp->integrated_info)
+- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+- dccg_dce->dfs_bypass_enabled = true;
+-
+- dce_clock_read_ss_info(dccg_dce);
+-
+- return &dccg_dce->base;
+-}
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h
+deleted file mode 100644
+index 7f3dd84..0000000
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dccg.h
++++ /dev/null
+@@ -1,37 +0,0 @@
+-/*
+- * Copyright 2018 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#ifndef __DCN10_DCCG_H__
+-#define __DCN10_DCCG_H__
+-
+-#include "../dce/dce_dccg.h"
+-
+-void dcn1_pplib_apply_display_requirements(
+- struct dc *dc,
+- struct dc_state *context);
+-
+-struct dccg *dcn1_dccg_create(struct dc_context *ctx);
+-
+-#endif //__DCN10_DCCG_H__
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index d1f8c8e..5c4a4f6 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1126,7 +1126,7 @@ static void dcn10_init_hw(struct dc *dc)
+
+ enable_power_gating_plane(dc->hwseq, true);
+
+- memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
++ memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
+ }
+
+ static void reset_hw_ctx_wrap(
+@@ -2052,16 +2052,16 @@ void update_dchubp_dpp(
+ */
+ if (plane_state->update_flags.bits.full_update) {
+ bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+- dc->res_pool->dccg->clks.dispclk_khz / 2;
++ dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
+
+ dpp->funcs->dpp_dppclk_control(
+ dpp,
+ should_divided_by_2,
+ true);
+
+- dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
+- dc->res_pool->dccg->clks.dispclk_khz / 2 :
+- dc->res_pool->dccg->clks.dispclk_khz;
++ dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
++ dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
++ dc->res_pool->clk_mgr->clks.dispclk_khz;
+ }
+
+ /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
+@@ -2369,8 +2369,8 @@ static void dcn10_prepare_bandwidth(
+ if (context->stream_count == 0)
+ context->bw.dcn.clk.phyclk_khz = 0;
+
+- dc->res_pool->dccg->funcs->update_clocks(
+- dc->res_pool->dccg,
++ dc->res_pool->clk_mgr->funcs->update_clocks(
++ dc->res_pool->clk_mgr,
+ context,
+ false);
+ }
+@@ -2398,8 +2398,8 @@ static void dcn10_optimize_bandwidth(
+ if (context->stream_count == 0)
+ context->bw.dcn.clk.phyclk_khz = 0;
+
+- dc->res_pool->dccg->funcs->update_clocks(
+- dc->res_pool->dccg,
++ dc->res_pool->clk_mgr->funcs->update_clocks(
++ dc->res_pool->clk_mgr,
+ context,
+ true);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 3d9118e..acb917d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -28,23 +28,23 @@
+
+ #include "resource.h"
+ #include "include/irq_service_interface.h"
+-#include "dcn10/dcn10_resource.h"
++#include "dcn10_resource.h"
+
+-#include "dcn10/dcn10_ipp.h"
+-#include "dcn10/dcn10_mpc.h"
++#include "dcn10_ipp.h"
++#include "dcn10_mpc.h"
+ #include "irq/dcn10/irq_service_dcn10.h"
+-#include "dcn10/dcn10_dpp.h"
++#include "dcn10_dpp.h"
+ #include "dcn10_optc.h"
+-#include "dcn10/dcn10_hw_sequencer.h"
++#include "dcn10_hw_sequencer.h"
+ #include "dce110/dce110_hw_sequencer.h"
+-#include "dcn10/dcn10_opp.h"
+-#include "dcn10/dcn10_link_encoder.h"
+-#include "dcn10/dcn10_stream_encoder.h"
+-#include "dcn10/dcn10_dccg.h"
++#include "dcn10_opp.h"
++#include "dcn10_link_encoder.h"
++#include "dcn10_stream_encoder.h"
++#include "dcn10_clk_mgr.h"
+ #include "dce/dce_clock_source.h"
+ #include "dce/dce_audio.h"
+ #include "dce/dce_hwseq.h"
+-#include "../virtual/virtual_stream_encoder.h"
++#include "virtual/virtual_stream_encoder.h"
+ #include "dce110/dce110_resource.h"
+ #include "dce112/dce112_resource.h"
+ #include "dcn10_hubp.h"
+@@ -950,8 +950,8 @@ static void destruct(struct dcn10_resource_pool *pool)
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+- if (pool->base.dccg != NULL)
+- dce_dccg_destroy(&pool->base.dccg);
++ if (pool->base.clk_mgr != NULL)
++ dce_clk_mgr_destroy(&pool->base.clk_mgr);
+
+ kfree(pool->base.pp_smu);
+ }
+@@ -1277,8 +1277,8 @@ static bool construct(
+ }
+ }
+
+- pool->base.dccg = dcn1_dccg_create(ctx);
+- if (pool->base.dccg == NULL) {
++ pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
++ if (pool->base.clk_mgr == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto fail;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index d4eaf7f..4ef56ea 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+
+ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+ /********** DAL Core*********************/
+-#include "hw/dccg.h"
++#include "hw/clk_mgr.h"
+ #include "transform.h"
+ #include "dpp.h"
+
+@@ -168,7 +168,7 @@ struct resource_pool {
+ unsigned int audio_count;
+ struct audio_support audio_support;
+
+- struct dccg *dccg;
++ struct clk_mgr *clk_mgr;
+ struct irq_service *irqs;
+
+ struct abm *abm;
+@@ -287,7 +287,7 @@ struct dc_state {
+ struct dcn_bw_internal_vars dcn_bw_vars;
+ #endif
+
+- struct dccg *dccg;
++ struct clk_mgr *dccg;
+
+ struct kref refcount;
+ };
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+index ac9b4906..ece954a 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+@@ -32,7 +32,7 @@
+
+ #include "bw_fixed.h"
+ #include "../dml/display_mode_lib.h"
+-#include "hw/dccg.h"
++#include "hw/clk_mgr.h"
+
+ struct dc;
+ struct dc_state;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+new file mode 100644
+index 0000000..23a4b18
+--- /dev/null
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+@@ -0,0 +1,47 @@
++/*
++ * Copyright 2012-16 Advanced Micro Devices, Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: AMD
++ *
++ */
++
++#ifndef __DAL_CLK_MGR_H__
++#define __DAL_CLK_MGR_H__
++
++#include "dm_services_types.h"
++#include "dc.h"
++
++struct clk_mgr {
++ struct dc_context *ctx;
++ const struct clk_mgr_funcs *funcs;
++
++ struct dc_clocks clks;
++};
++
++struct clk_mgr_funcs {
++ void (*update_clocks)(struct clk_mgr *clk_mgr,
++ struct dc_state *context,
++ bool safe_to_lower);
++
++ int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
++};
++
++#endif /* __DAL_CLK_MGR_H__ */
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+deleted file mode 100644
+index 6fd923d..0000000
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
++++ /dev/null
+@@ -1,47 +0,0 @@
+-/*
+- * Copyright 2012-16 Advanced Micro Devices, Inc.
+- *
+- * Permission is hereby granted, free of charge, to any person obtaining a
+- * copy of this software and associated documentation files (the "Software"),
+- * to deal in the Software without restriction, including without limitation
+- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+- * and/or sell copies of the Software, and to permit persons to whom the
+- * Software is furnished to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included in
+- * all copies or substantial portions of the Software.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+- * OTHER DEALINGS IN THE SOFTWARE.
+- *
+- * Authors: AMD
+- *
+- */
+-
+-#ifndef __DAL_DCCG_H__
+-#define __DAL_DCCG_H__
+-
+-#include "dm_services_types.h"
+-#include "dc.h"
+-
+-struct dccg {
+- struct dc_context *ctx;
+- const struct dccg_funcs *funcs;
+-
+- struct dc_clocks clks;
+-};
+-
+-struct dccg_funcs {
+- void (*update_clocks)(struct dccg *dccg,
+- struct dc_state *context,
+- bool safe_to_lower);
+-
+- int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
+-};
+-
+-#endif /* __DAL_DCCG_H__ */
+--
+2.7.4
+