path: root/common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch  284
1 file changed, 284 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch
new file mode 100644
index 00000000..c81d35a3
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4755-drm-amd-display-move-clock-programming-from-set_band.patch
@@ -0,0 +1,284 @@
+From c4e54f7f507c22e27215324c31b676fbaaa6fb63 Mon Sep 17 00:00:00 2001
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Date: Wed, 23 May 2018 17:52:04 -0400
+Subject: [PATCH 4755/5725] drm/amd/display: move clock programming from
+ set_bandwidth to dccg
+
+This change moves dcn clock programming (with the exception of dispclk)
+into dccg. This should have no functional effect.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 57 +++++++++++++-------
+ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 62 +++++-----------------
+ 3 files changed, 51 insertions(+), 70 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index 2b70ac6..9acdd9d 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -997,7 +997,7 @@ bool dcn_validate_bandwidth(
+ }
+
+ context->bw.dcn.calc_clk.dppclk_khz = context->bw.dcn.calc_clk.dispclk_khz / v->dispclk_dppclk_ratio;
+-
++ context->bw.dcn.calc_clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
+ switch (v->voltage_level) {
+ case 0:
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz =
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+index 890a3ec..93e6063 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+@@ -523,14 +523,18 @@ static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
+ }
+ }
+
++static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
++{
++ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
++}
++
+ static void dce12_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+ {
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+
+- if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
+- || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+@@ -539,8 +543,7 @@ static void dce12_update_clocks(struct dccg *dccg,
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+
+- if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
+- || new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+@@ -553,6 +556,11 @@ static void dcn_update_clocks(struct dccg *dccg,
+ struct dc_clocks *new_clocks,
+ bool safe_to_lower)
+ {
++ struct dc *dc = dccg->ctx->dc;
++ struct pp_smu_display_requirement_rv *smu_req_cur =
++ &dc->res_pool->pp_smu_req;
++ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
++ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ bool send_request_to_increase = false;
+ bool send_request_to_lower = false;
+@@ -566,17 +574,14 @@ static void dcn_update_clocks(struct dccg *dccg,
+ #ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (send_request_to_increase
+ ) {
+- struct dc *core_dc = dccg->ctx->dc;
+-
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+ #endif
+
+- if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
+- || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
+ /* TODO: ramp up - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);*/
+@@ -586,8 +591,7 @@ static void dcn_update_clocks(struct dccg *dccg,
+ send_request_to_lower = true;
+ }
+
+- if ((new_clocks->phyclk_khz < dccg->clks.phyclk_khz && safe_to_lower)
+- || new_clocks->phyclk_khz > dccg->clks.phyclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
+@@ -596,36 +600,50 @@ static void dcn_update_clocks(struct dccg *dccg,
+ send_request_to_lower = true;
+ }
+
+- if ((new_clocks->fclk_khz < dccg->clks.fclk_khz && safe_to_lower)
+- || new_clocks->fclk_khz > dccg->clks.fclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->fclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
++ smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ send_request_to_lower = true;
+ }
+
+- if ((new_clocks->dcfclk_khz < dccg->clks.dcfclk_khz && safe_to_lower)
+- || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
+ dccg->clks.phyclk_khz = new_clocks->dcfclk_khz;
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz = new_clocks->dcfclk_khz;
++ smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
+
+ send_request_to_lower = true;
+ }
+
++ if (should_set_clock(safe_to_lower,
++ new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
++ dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
++ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
++ }
++
+ #ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (!send_request_to_increase && send_request_to_lower
+ ) {
+- struct dc *core_dc = dccg->ctx->dc;
+-
+ /*use dcfclk to request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(core_dc, new_clocks);
++ clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+ dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
+ }
+ #endif
++
++ if (new_clocks->phyclk_khz)
++ smu_req.display_count = 1;
++ else
++ smu_req.display_count = 0;
++
++ if (pp_smu->set_display_requirement)
++ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
++
++ *smu_req_cur = smu_req;
+ }
+
+ static void dce_update_clocks(struct dccg *dccg,
+@@ -642,8 +660,7 @@ static void dce_update_clocks(struct dccg *dccg,
+ dccg->cur_min_clks_state = level_change_req.power_level;
+ }
+
+- if ((new_clocks->dispclk_khz < dccg->clks.dispclk_khz && safe_to_lower)
+- || new_clocks->dispclk_khz > dccg->clks.dispclk_khz) {
++ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 65e4189..66ecb86 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2168,11 +2168,11 @@ static void dcn10_pplib_apply_display_requirements(
+ {
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+- pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
+- pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
+- pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+- pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
++ pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
++ pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
++ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
++ pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+@@ -2376,11 +2376,6 @@ static void dcn10_apply_ctx_for_surface(
+ */
+ }
+
+-static inline bool should_set_clock(bool decrease_allowed, int calc_clk, int cur_clk)
+-{
+- return ((decrease_allowed && calc_clk < cur_clk) || calc_clk > cur_clk);
+-}
+-
+ static int determine_dppclk_threshold(struct dc *dc, struct dc_state *context)
+ {
+ bool request_dpp_div = context->bw.dcn.calc_clk.dispclk_khz >
+@@ -2471,16 +2466,16 @@ static void ramp_up_dispclk_with_dpp(struct dc *dc, struct dc_state *context)
+ context->bw.dcn.calc_clk.max_supported_dppclk_khz;
+ }
+
++static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
++{
++ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
++}
++
+ static void dcn10_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+ {
+- struct pp_smu_display_requirement_rv *smu_req_cur =
+- &dc->res_pool->pp_smu_req;
+- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+-
+ if (dc->debug.sanity_checks) {
+ dcn10_verify_allow_pstate_change_high(dc);
+ }
+@@ -2488,45 +2483,14 @@ static void dcn10_set_bandwidth(
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
++ if (context->stream_count == 0)
++ context->bw.dcn.calc_clk.phyclk_khz = 0;
++
+ dc->res_pool->dccg->funcs->update_clocks(
+ dc->res_pool->dccg,
+ &context->bw.dcn.calc_clk,
+ decrease_allowed);
+
+- if (should_set_clock(
+- decrease_allowed,
+- context->bw.dcn.calc_clk.dcfclk_khz,
+- dc->current_state->bw.dcn.cur_clk.dcfclk_khz)) {
+- context->bw.dcn.cur_clk.dcfclk_khz =
+- context->bw.dcn.calc_clk.dcfclk_khz;
+- smu_req.hard_min_dcefclk_khz =
+- context->bw.dcn.calc_clk.dcfclk_khz;
+- }
+-
+- if (should_set_clock(
+- decrease_allowed,
+- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+- dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz)) {
+- context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
+- context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+- }
+-
+- if (should_set_clock(
+- decrease_allowed,
+- context->bw.dcn.calc_clk.fclk_khz,
+- dc->current_state->bw.dcn.cur_clk.fclk_khz)) {
+- context->bw.dcn.cur_clk.fclk_khz =
+- context->bw.dcn.calc_clk.fclk_khz;
+- smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
+- }
+-
+- smu_req.display_count = context->stream_count;
+-
+- if (pp_smu->set_display_requirement)
+- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+-
+- *smu_req_cur = smu_req;
+-
+ /* make sure dcf clk is before dpp clk to
+ * make sure we have enough voltage to run dpp clk
+ */
+--
+2.7.4
+
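
Note on the patch above: the core of the change is the small should_set_clock() predicate and the relocation of the pp_smu display-requirement bookkeeping from dcn10_set_bandwidth() into dcn_update_clocks(). The sketch below is a minimal, stand-alone illustration of that predicate only. The toy_clks struct, toy_update_clocks() and main() are hypothetical stand-ins invented for this example (the real driver operates on struct dc_clocks held inside struct dccg); only should_set_clock() itself is taken verbatim from the patch.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, reduced stand-in for the driver's struct dc_clocks. */
struct toy_clks {
	int dispclk_khz;
	int fclk_khz;
};

/* Verbatim from the patch: lower a clock only when the caller says it is
 * safe to do so, but always allow raising it. */
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
}

/* Toy analogue of dcn_update_clocks(): each per-clock branch shares the same
 * rule, which is what lets the real patch collapse the open-coded
 * comparisons and assemble the SMU request in one place. */
static void toy_update_clocks(struct toy_clks *cur, const struct toy_clks *req,
			      bool safe_to_lower)
{
	if (should_set_clock(safe_to_lower, req->dispclk_khz, cur->dispclk_khz)) {
		/* In the driver this is where set_dispclk() and the
		 * dm_pp_apply_clock_for_voltage_request() call happen. */
		cur->dispclk_khz = req->dispclk_khz;
	}

	if (should_set_clock(safe_to_lower, req->fclk_khz, cur->fclk_khz))
		cur->fclk_khz = req->fclk_khz;
}

int main(void)
{
	struct toy_clks cur = { .dispclk_khz = 600000, .fclk_khz = 800000 };
	struct toy_clks req = { .dispclk_khz = 400000, .fclk_khz = 900000 };

	/* Not safe to lower: fclk is raised, dispclk is left alone. */
	toy_update_clocks(&cur, &req, false);
	printf("dispclk=%d fclk=%d\n", cur.dispclk_khz, cur.fclk_khz);

	/* Safe to lower: dispclk may now drop to the requested value. */
	toy_update_clocks(&cur, &req, true);
	printf("dispclk=%d fclk=%d\n", cur.dispclk_khz, cur.fclk_khz);
	return 0;
}

The design point of the refactor is that once every clock follows this single lower-only-when-safe / always-raise rule, the SMU display requirement (hard_min_fclk_khz, hard_min_dcefclk_khz, min_deep_sleep_dcefclk_mhz, display_count) can be filled in alongside the clock updates in dcn_update_clocks() rather than being duplicated in dcn10_set_bandwidth().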