path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1008-drm-amd-display-Clean-up-flattening-core_dc-to-dc.patch
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1008-drm-amd-display-Clean-up-flattening-core_dc-to-dc.patch')
-rw-r--r--   meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1008-drm-amd-display-Clean-up-flattening-core_dc-to-dc.patch   1092
1 files changed, 1092 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1008-drm-amd-display-Clean-up-flattening-core_dc-to-dc.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1008-drm-amd-display-Clean-up-flattening-core_dc-to-dc.patch
new file mode 100644
index 00000000..46c29164
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1008-drm-amd-display-Clean-up-flattening-core_dc-to-dc.patch
@@ -0,0 +1,1092 @@
+From 935949b43c7022ae3dab40965ae546ce185fe775 Mon Sep 17 00:00:00 2001
+From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Date: Wed, 23 Aug 2017 15:44:42 -0400
+Subject: [PATCH 1008/4131] drm/amd/display: Clean up flattening core_dc to dc
+
+Clean up some code related to flattening core_dc commit
+(Remove redundant dc = dc, which was the result of removing
+DC_TO_CORE() macro)
+
+Change-Id: Ib2e3bba1a8eca27c6229d6bac8387e3fdb91de9f
+Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
+Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
+---
+ drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 12 +-
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 335 +++++++++------------
+ .../drm/amd/display/modules/freesync/freesync.c | 19 +-
+ 3 files changed, 164 insertions(+), 202 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+index afd403c..5e5766a 100644
+--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
++++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+@@ -983,15 +983,14 @@ bool dcn_validate_bandwidth(
+ if (v->voltage_level == 0 &&
+ (dc->debug.sr_exit_time_dpm0_ns
+ || dc->debug.sr_enter_plus_exit_time_dpm0_ns)) {
+- struct dc *dc_core = dc;
+
+ if (dc->debug.sr_enter_plus_exit_time_dpm0_ns)
+ v->sr_enter_plus_exit_time =
+ dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
+ if (dc->debug.sr_exit_time_dpm0_ns)
+ v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
+- dc_core->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
+- dc_core->dml.soc.sr_exit_time_us = v->sr_exit_time;
++ dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
++ dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
+ mode_support_and_system_configuration(v);
+ }
+
+@@ -1114,11 +1113,10 @@ bool dcn_validate_bandwidth(
+ }
+
+ if (v->voltage_level == 0) {
+- struct dc *dc_core = dc;
+
+- dc_core->dml.soc.sr_enter_plus_exit_time_us =
+- dc_core->dcn_soc->sr_enter_plus_exit_time;
+- dc_core->dml.soc.sr_exit_time_us = dc_core->dcn_soc->sr_exit_time;
++ dc->dml.soc.sr_enter_plus_exit_time_us =
++ dc->dcn_soc->sr_enter_plus_exit_time;
++ dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index e5bafe2..1f0e519 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -155,20 +155,19 @@ static bool stream_adjust_vmin_vmax(struct dc *dc,
+ int vmin, int vmax)
+ {
+ /* TODO: Support multiple streams */
+- struct dc *core_dc = dc;
+ struct dc_stream_state *stream = streams[0];
+ int i = 0;
+ bool ret = false;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+- struct pipe_ctx *pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
++ struct pipe_ctx *pipe = &dc->current_context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+- core_dc->hwss.set_drr(&pipe, 1, vmin, vmax);
++ dc->hwss.set_drr(&pipe, 1, vmin, vmax);
+
+ /* build and update the info frame */
+ resource_build_info_frame(pipe);
+- core_dc->hwss.update_info_frame(pipe);
++ dc->hwss.update_info_frame(pipe);
+
+ ret = true;
+ }
+@@ -181,7 +180,6 @@ static bool stream_get_crtc_position(struct dc *dc,
+ unsigned int *v_pos, unsigned int *nom_v_pos)
+ {
+ /* TODO: Support multiple streams */
+- struct dc *core_dc = dc;
+ struct dc_stream_state *stream = streams[0];
+ int i = 0;
+ bool ret = false;
+@@ -189,10 +187,10 @@ static bool stream_get_crtc_position(struct dc *dc,
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe =
+- &core_dc->current_context->res_ctx.pipe_ctx[i];
++ &dc->current_context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+- core_dc->hwss.get_position(&pipe, 1, &position);
++ dc->hwss.get_position(&pipe, 1, &position);
+
+ *v_pos = position.vertical_count;
+ *nom_v_pos = position.nominal_vcount;
+@@ -204,15 +202,14 @@ static bool stream_get_crtc_position(struct dc *dc,
+
+ static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
+ {
+- struct dc *core_dc = dc;
+ int i = 0;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+- if (core_dc->current_context->res_ctx.pipe_ctx[i].stream == stream) {
+- pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
+- core_dc->hwss.program_gamut_remap(pipes);
++ if (dc->current_context->res_ctx.pipe_ctx[i].stream == stream) {
++ pipes = &dc->current_context->res_ctx.pipe_ctx[i];
++ dc->hwss.program_gamut_remap(pipes);
+ ret = true;
+ }
+ }
+@@ -222,17 +219,16 @@ static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
+
+ static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
+ {
+- struct dc *core_dc = dc;
+ int i = 0;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+- if (core_dc->current_context->res_ctx.pipe_ctx[i].stream
++ if (dc->current_context->res_ctx.pipe_ctx[i].stream
+ == stream) {
+
+- pipes = &core_dc->current_context->res_ctx.pipe_ctx[i];
+- core_dc->hwss.program_csc_matrix(pipes,
++ pipes = &dc->current_context->res_ctx.pipe_ctx[i];
++ dc->hwss.program_csc_matrix(pipes,
+ stream->output_color_space,
+ stream->csc_color_matrix.matrix);
+ ret = true;
+@@ -247,7 +243,6 @@ static void set_static_screen_events(struct dc *dc,
+ int num_streams,
+ const struct dc_static_screen_events *events)
+ {
+- struct dc *core_dc = dc;
+ int i = 0;
+ int j = 0;
+ struct pipe_ctx *pipes_affected[MAX_PIPES];
+@@ -257,45 +252,44 @@ static void set_static_screen_events(struct dc *dc,
+ struct dc_stream_state *stream = streams[i];
+
+ for (j = 0; j < MAX_PIPES; j++) {
+- if (core_dc->current_context->res_ctx.pipe_ctx[j].stream
++ if (dc->current_context->res_ctx.pipe_ctx[j].stream
+ == stream) {
+ pipes_affected[num_pipes_affected++] =
+- &core_dc->current_context->res_ctx.pipe_ctx[j];
++ &dc->current_context->res_ctx.pipe_ctx[j];
+ }
+ }
+ }
+
+- core_dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
++ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+ }
+
+ static void set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+ {
+- struct dc *core_dc = dc;
++
+ int i;
+
+- for (i = 0; i < core_dc->link_count; i++) {
+- if (core_dc->links[i] == link)
++ for (i = 0; i < dc->link_count; i++) {
++ if (dc->links[i] == link)
+ break;
+ }
+
+- if (i >= core_dc->link_count)
++ if (i >= dc->link_count)
+ ASSERT_CRITICAL(false);
+
+- dc_link_dp_set_drive_settings(core_dc->links[i], lt_settings);
++ dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+ }
+
+ static void perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+ {
+- struct dc *core_dc = dc;
+ int i;
+
+- for (i = 0; i < core_dc->link_count; i++)
++ for (i = 0; i < dc->link_count; i++)
+ dc_link_dp_perform_link_training(
+- core_dc->links[i],
++ dc->links[i],
+ link_setting,
+ skip_video_pattern);
+ }
+@@ -371,44 +365,44 @@ void set_dither_option(struct dc_stream_state *stream,
+ opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
+ }
+
+-static void allocate_dc_stream_funcs(struct dc *core_dc)
++static void allocate_dc_stream_funcs(struct dc *dc)
+ {
+- if (core_dc->hwss.set_drr != NULL) {
+- core_dc->stream_funcs.adjust_vmin_vmax =
++ if (dc->hwss.set_drr != NULL) {
++ dc->stream_funcs.adjust_vmin_vmax =
+ stream_adjust_vmin_vmax;
+ }
+
+- core_dc->stream_funcs.set_static_screen_events =
++ dc->stream_funcs.set_static_screen_events =
+ set_static_screen_events;
+
+- core_dc->stream_funcs.get_crtc_position =
++ dc->stream_funcs.get_crtc_position =
+ stream_get_crtc_position;
+
+- core_dc->stream_funcs.set_gamut_remap =
++ dc->stream_funcs.set_gamut_remap =
+ set_gamut_remap;
+
+- core_dc->stream_funcs.program_csc_matrix =
++ dc->stream_funcs.program_csc_matrix =
+ program_csc_matrix;
+
+- core_dc->stream_funcs.set_dither_option =
++ dc->stream_funcs.set_dither_option =
+ set_dither_option;
+
+- core_dc->link_funcs.set_drive_settings =
++ dc->link_funcs.set_drive_settings =
+ set_drive_settings;
+
+- core_dc->link_funcs.perform_link_training =
++ dc->link_funcs.perform_link_training =
+ perform_link_training;
+
+- core_dc->link_funcs.set_preferred_link_settings =
++ dc->link_funcs.set_preferred_link_settings =
+ set_preferred_link_settings;
+
+- core_dc->link_funcs.enable_hpd =
++ dc->link_funcs.enable_hpd =
+ enable_hpd;
+
+- core_dc->link_funcs.disable_hpd =
++ dc->link_funcs.disable_hpd =
+ disable_hpd;
+
+- core_dc->link_funcs.set_test_pattern =
++ dc->link_funcs.set_test_pattern =
+ set_test_pattern;
+ }
+
+@@ -624,41 +618,41 @@ void ProgramPixelDurationV(unsigned int pixelClockInKHz )
+
+ struct dc *dc_create(const struct dc_init_data *init_params)
+ {
+- struct dc *core_dc = dm_alloc(sizeof(*core_dc));
++ struct dc *dc = dm_alloc(sizeof(*dc));
+ unsigned int full_pipe_count;
+
+- if (NULL == core_dc)
++ if (NULL == dc)
+ goto alloc_fail;
+
+- if (false == construct(core_dc, init_params))
++ if (false == construct(dc, init_params))
+ goto construct_fail;
+
+ /*TODO: separate HW and SW initialization*/
+- core_dc->hwss.init_hw(core_dc);
++ dc->hwss.init_hw(dc);
+
+- full_pipe_count = core_dc->res_pool->pipe_count;
+- if (core_dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
++ full_pipe_count = dc->res_pool->pipe_count;
++ if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+ full_pipe_count--;
+- core_dc->caps.max_streams = min(
++ dc->caps.max_streams = min(
+ full_pipe_count,
+- core_dc->res_pool->stream_enc_count);
++ dc->res_pool->stream_enc_count);
+
+- core_dc->caps.max_links = core_dc->link_count;
+- core_dc->caps.max_audios = core_dc->res_pool->audio_count;
++ dc->caps.max_links = dc->link_count;
++ dc->caps.max_audios = dc->res_pool->audio_count;
+
+- core_dc->config = init_params->flags;
++ dc->config = init_params->flags;
+
+- dm_logger_write(core_dc->ctx->logger, LOG_DC,
++ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Display Core initialized\n");
+
+
+ /* TODO: missing feature to be enabled */
+- core_dc->debug.disable_dfs_bypass = true;
++ dc->debug.disable_dfs_bypass = true;
+
+- return core_dc;
++ return dc;
+
+ construct_fail:
+- dm_free(core_dc);
++ dm_free(dc);
+
+ alloc_fail:
+ return NULL;
+@@ -666,9 +660,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
+
+ void dc_destroy(struct dc **dc)
+ {
+- struct dc *core_dc = *dc;
+- destruct(core_dc);
+- dm_free(core_dc);
++ destruct(*dc);
++ dm_free(*dc);
+ *dc = NULL;
+ }
+
+@@ -676,7 +669,6 @@ bool dc_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *stream)
+ {
+- struct dc *core_dc = dc;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ struct validate_context *context;
+
+@@ -689,14 +681,14 @@ bool dc_validate_guaranteed(
+
+ atomic_inc(&context->ref_count);
+
+- result = core_dc->res_pool->funcs->validate_guaranteed(
+- core_dc, stream, context);
++ result = dc->res_pool->funcs->validate_guaranteed(
++ dc, stream, context);
+
+ dc_release_validate_context(context);
+
+ context_alloc_fail:
+ if (result != DC_OK) {
+- dm_logger_write(core_dc->ctx->logger, LOG_WARNING,
++ dm_logger_write(dc->ctx->logger, LOG_WARNING,
+ "%s:guaranteed validation failed, dc_status:%d\n",
+ __func__,
+ result);
+@@ -706,12 +698,12 @@ bool dc_validate_guaranteed(
+ }
+
+ static void program_timing_sync(
+- struct dc *core_dc,
++ struct dc *dc,
+ struct validate_context *ctx)
+ {
+ int i, j;
+ int group_index = 0;
+- int pipe_count = core_dc->res_pool->pipe_count;
++ int pipe_count = dc->res_pool->pipe_count;
+ struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+@@ -772,8 +764,8 @@ static void program_timing_sync(
+ }
+
+ if (group_size > 1) {
+- core_dc->hwss.enable_timing_synchronization(
+- core_dc, group_index, group_size, pipe_set);
++ dc->hwss.enable_timing_synchronization(
++ dc, group_index, group_size, pipe_set);
+ group_index++;
+ }
+ }
+@@ -805,17 +797,16 @@ bool dc_enable_stereo(
+ bool ret = true;
+ int i, j;
+ struct pipe_ctx *pipe;
+- struct dc *core_dc = dc;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context != NULL)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+- pipe = &core_dc->current_context->res_ctx.pipe_ctx[i];
++ pipe = &dc->current_context->res_ctx.pipe_ctx[i];
+ for (j = 0 ; pipe && j < stream_count; j++) {
+ if (streams[j] && streams[j] == pipe->stream &&
+- core_dc->hwss.setup_stereo)
+- core_dc->hwss.setup_stereo(pipe, core_dc);
++ dc->hwss.setup_stereo)
++ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+
+@@ -829,8 +820,7 @@ bool dc_enable_stereo(
+ */
+ static bool dc_commit_context_no_check(struct dc *dc, struct validate_context *context)
+ {
+- struct dc *core_dc = dc;
+- struct dc_bios *dcb = core_dc->ctx->dc_bios;
++ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ struct pipe_ctx *pipe;
+ int i, j, k, l;
+@@ -840,22 +830,22 @@ static bool dc_commit_context_no_check(struct dc *dc, struct validate_context *c
+ dc_streams[i] = context->streams[i];
+
+ if (!dcb->funcs->is_accelerated_mode(dcb))
+- core_dc->hwss.enable_accelerated_mode(core_dc);
++ dc->hwss.enable_accelerated_mode(dc);
+
+- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+- core_dc->hwss.wait_for_mpcc_disconnect(core_dc, core_dc->res_pool, pipe);
++ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+ }
+- result = core_dc->hwss.apply_ctx_to_hw(core_dc, context);
++ result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+- program_timing_sync(core_dc, context);
++ program_timing_sync(dc, context);
+
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_sink *sink = context->streams[i]->sink;
+
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+- core_dc->hwss.apply_ctx_for_surface(
+- core_dc, context->streams[i],
++ dc->hwss.apply_ctx_for_surface(
++ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
+
+@@ -869,8 +859,8 @@ static bool dc_commit_context_no_check(struct dc *dc, struct validate_context *c
+ for (l = 0 ; pipe && l < context->stream_count; l++) {
+ if (context->streams[l] &&
+ context->streams[l] == pipe->stream &&
+- core_dc->hwss.setup_stereo)
+- core_dc->hwss.setup_stereo(pipe, core_dc);
++ dc->hwss.setup_stereo)
++ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+ }
+@@ -885,11 +875,11 @@ static bool dc_commit_context_no_check(struct dc *dc, struct validate_context *c
+
+ dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+
+- dc_release_validate_context(core_dc->current_context);
++ dc_release_validate_context(dc->current_context);
+
+- core_dc->current_context = context;
++ dc->current_context = context;
+
+- dc_retain_validate_context(core_dc->current_context);
++ dc_retain_validate_context(dc->current_context);
+
+ return (result == DC_OK);
+ }
+@@ -897,20 +887,19 @@ static bool dc_commit_context_no_check(struct dc *dc, struct validate_context *c
+ bool dc_commit_context(struct dc *dc, struct validate_context *context)
+ {
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+- struct dc *core_dc = dc;
+ int i;
+
+- if (false == context_changed(core_dc, context))
++ if (false == context_changed(dc, context))
+ return DC_OK;
+
+- dm_logger_write(core_dc->ctx->logger, LOG_DC, "%s: %d streams\n",
++ dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
+ __func__, context->stream_count);
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ dc_stream_log(stream,
+- core_dc->ctx->logger,
++ dc->ctx->logger,
+ LOG_DC);
+ }
+
+@@ -923,21 +912,20 @@ bool dc_commit_context(struct dc *dc, struct validate_context *context)
+ bool dc_post_update_surfaces_to_stream(struct dc *dc)
+ {
+ int i;
+- struct dc *core_dc = dc;
+- struct validate_context *context = core_dc->current_context;
++ struct validate_context *context = dc->current_context;
+
+ post_surface_trace(dc);
+
+- for (i = 0; i < core_dc->res_pool->pipe_count; i++)
++ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL
+ || context->res_ctx.pipe_ctx[i].plane_state == NULL)
+- core_dc->hwss.power_down_front_end(core_dc, i);
++ dc->hwss.power_down_front_end(dc, i);
+
+ /* 3rd param should be true, temp w/a for RV*/
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+- core_dc->hwss.set_bandwidth(core_dc, context, core_dc->ctx->dce_version < DCN_VERSION_1_0);
++ dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
+ #else
+- core_dc->hwss.set_bandwidth(core_dc, context, true);
++ dc->hwss.set_bandwidth(dc, context, true);
+ #endif
+ return true;
+ }
+@@ -1175,7 +1163,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status)
+ {
+- struct dc *core_dc = dc;
+ int i;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+@@ -1187,7 +1174,7 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
+
+ for (i = 0 ; i < surface_count; i++) {
+ enum surface_update_type type =
+- det_surface_update(core_dc, &updates[i], i);
++ det_surface_update(dc, &updates[i], i);
+
+ if (type == UPDATE_TYPE_FULL)
+ return type;
+@@ -1221,12 +1208,11 @@ void dc_update_planes_and_stream(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+ {
+- struct dc *core_dc = dc;
+ struct validate_context *context;
+ int i, j;
+ enum surface_update_type update_type;
+ const struct dc_stream_status *stream_status;
+- struct dc_context *dc_ctx = core_dc->ctx;
++ struct dc_context *dc_ctx = dc->ctx;
+
+ stream_status = dc_stream_get_status(stream);
+
+@@ -1240,7 +1226,7 @@ void dc_update_planes_and_stream(struct dc *dc,
+ ASSERT(0);
+ }
+ #endif
+- context = core_dc->current_context;
++ context = dc->current_context;
+
+ /* update current stream with the new updates */
+ if (stream_update) {
+@@ -1296,7 +1282,7 @@ void dc_update_planes_and_stream(struct dc *dc,
+ atomic_inc(&context->ref_count);
+
+ dc_resource_validate_ctx_copy_construct(
+- core_dc->current_context, context);
++ dc->current_context, context);
+
+ /*remove old surfaces from context */
+ if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+@@ -1358,7 +1344,7 @@ void dc_update_planes_and_stream(struct dc *dc,
+ }
+
+ if (update_type >= UPDATE_TYPE_MED) {
+- for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
++ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+@@ -1396,31 +1382,31 @@ void dc_update_planes_and_stream(struct dc *dc,
+ }
+
+ if (update_type == UPDATE_TYPE_FULL) {
+- if (!core_dc->res_pool->funcs->validate_bandwidth(core_dc, context)) {
++ if (!dc->res_pool->funcs->validate_bandwidth(dc, context)) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ } else {
+- core_dc->hwss.set_bandwidth(core_dc, context, false);
++ dc->hwss.set_bandwidth(dc, context, false);
+ context_clock_trace(dc, context);
+ }
+ }
+
+ if (update_type > UPDATE_TYPE_FAST) {
+- for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
++ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+- core_dc->hwss.wait_for_mpcc_disconnect(core_dc, core_dc->res_pool, pipe_ctx);
++ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
+ }
+ }
+
+ if (surface_count == 0)
+- core_dc->hwss.apply_ctx_for_surface(core_dc, stream, surface_count, context);
++ dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
+
+ /* Lock pipes for provided surfaces, or all active if full update*/
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+- for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
++ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
+@@ -1428,8 +1414,8 @@ void dc_update_planes_and_stream(struct dc *dc,
+ if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+ continue;
+
+- core_dc->hwss.pipe_control_lock(
+- core_dc,
++ dc->hwss.pipe_control_lock(
++ dc,
+ pipe_ctx,
+ true);
+ }
+@@ -1438,9 +1424,9 @@ void dc_update_planes_and_stream(struct dc *dc,
+ }
+
+ /* Full fe update*/
+- for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
++ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+- struct pipe_ctx *cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
++ struct pipe_ctx *cur_pipe_ctx = &dc->current_context->res_ctx.pipe_ctx[j];
+ bool is_new_pipe_surface = cur_pipe_ctx->plane_state != pipe_ctx->plane_state;
+ struct dc_cursor_position position = { 0 };
+
+@@ -1451,18 +1437,18 @@ void dc_update_planes_and_stream(struct dc *dc,
+ if (!pipe_ctx->top_pipe && pipe_ctx->stream) {
+ struct dc_stream_status *stream_status = stream_get_status(context, pipe_ctx->stream);
+
+- core_dc->hwss.apply_ctx_for_surface(
+- core_dc, pipe_ctx->stream, stream_status->plane_count, context);
++ dc->hwss.apply_ctx_for_surface(
++ dc, pipe_ctx->stream, stream_status->plane_count, context);
+ }
+
+ /* TODO: this is a hack w/a for switching from mpo to pipe split */
+ dc_stream_set_cursor_position(pipe_ctx->stream, &position);
+
+ if (is_new_pipe_surface) {
+- core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
+- core_dc->hwss.set_input_transfer_func(
++ dc->hwss.update_plane_addr(dc, pipe_ctx);
++ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+- core_dc->hwss.set_output_transfer_func(
++ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+ }
+@@ -1475,40 +1461,40 @@ void dc_update_planes_and_stream(struct dc *dc,
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ if (update_type == UPDATE_TYPE_MED)
+- core_dc->hwss.apply_ctx_for_surface(
+- core_dc, stream, surface_count, context);
++ dc->hwss.apply_ctx_for_surface(
++ dc, stream, surface_count, context);
+
+- for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
++ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ if (srf_updates[i].flip_addr)
+- core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
++ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ if (srf_updates[i].in_transfer_func)
+- core_dc->hwss.set_input_transfer_func(
++ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+
+ if (stream_update != NULL &&
+ stream_update->out_transfer_func != NULL) {
+- core_dc->hwss.set_output_transfer_func(
++ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+
+ if (srf_updates[i].hdr_static_metadata) {
+ resource_build_info_frame(pipe_ctx);
+- core_dc->hwss.update_info_frame(pipe_ctx);
++ dc->hwss.update_info_frame(pipe_ctx);
+ }
+ }
+ }
+
+ /* Unlock pipes */
+- for (i = core_dc->res_pool->pipe_count - 1; i >= 0; i--) {
++ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ for (j = 0; j < surface_count; j++) {
+@@ -1518,8 +1504,8 @@ void dc_update_planes_and_stream(struct dc *dc,
+ if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+ continue;
+
+- core_dc->hwss.pipe_control_lock(
+- core_dc,
++ dc->hwss.pipe_control_lock(
++ dc,
+ pipe_ctx,
+ false);
+
+@@ -1527,7 +1513,7 @@ void dc_update_planes_and_stream(struct dc *dc,
+ }
+ }
+
+- if (core_dc->current_context != context) {
++ if (dc->current_context != context) {
+
+ /* Since memory free requires elevated IRQL, an interrupt
+ * request is generated by mem free. If this happens
+@@ -1537,9 +1523,9 @@ void dc_update_planes_and_stream(struct dc *dc,
+ * then free the old context.
+ */
+
+- struct validate_context *old = core_dc->current_context;
++ struct validate_context *old = dc->current_context;
+
+- core_dc->current_context = context;
++ dc->current_context = context;
+ dc_release_validate_context(old);
+
+ }
+@@ -1554,29 +1540,25 @@ void dc_update_planes_and_stream(struct dc *dc,
+
+ uint8_t dc_get_current_stream_count(struct dc *dc)
+ {
+- struct dc *core_dc = dc;
+- return core_dc->current_context->stream_count;
++ return dc->current_context->stream_count;
+ }
+
+ struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
+ {
+- struct dc *core_dc = dc;
+- if (i < core_dc->current_context->stream_count)
+- return core_dc->current_context->streams[i];
++ if (i < dc->current_context->stream_count)
++ return dc->current_context->streams[i];
+ return NULL;
+ }
+
+ struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+ {
+- struct dc *core_dc = dc;
+- return core_dc->links[link_index];
++ return dc->links[link_index];
+ }
+
+ struct dwbc *dc_get_dwb_at_pipe(struct dc *dc, uint32_t pipe)
+ {
+- struct dc *core_dc = dc;
+ if ((pipe >= dwb_pipe0) && (pipe < dwb_pipe_max_num)) {
+- return core_dc->res_pool->dwbc[(int)pipe];
++ return dc->res_pool->dwbc[(int)pipe];
+ } else {
+ return NULL;
+ }
+@@ -1585,21 +1567,18 @@ struct dwbc *dc_get_dwb_at_pipe(struct dc *dc, uint32_t pipe)
+ const struct graphics_object_id dc_get_link_id_at_index(
+ struct dc *dc, uint32_t link_index)
+ {
+- struct dc *core_dc = dc;
+- return core_dc->links[link_index]->link_id;
++ return dc->links[link_index]->link_id;
+ }
+
+ enum dc_irq_source dc_get_hpd_irq_source_at_index(
+ struct dc *dc, uint32_t link_index)
+ {
+- struct dc *core_dc = dc;
+- return core_dc->links[link_index]->irq_source_hpd;
++ return dc->links[link_index]->irq_source_hpd;
+ }
+
+ const struct audio **dc_get_audios(struct dc *dc)
+ {
+- struct dc *core_dc = dc;
+- return (const struct audio **)core_dc->res_pool->audios;
++ return (const struct audio **)dc->res_pool->audios;
+ }
+
+ void dc_flip_plane_addrs(
+@@ -1633,25 +1612,21 @@ enum dc_irq_source dc_interrupt_to_irq_source(
+ uint32_t src_id,
+ uint32_t ext_id)
+ {
+- struct dc *core_dc = dc;
+- return dal_irq_service_to_irq_source(core_dc->res_pool->irqs, src_id, ext_id);
++ return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
+ }
+
+ void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+ {
+- struct dc *core_dc;
+
+ if (dc == NULL)
+ return;
+- core_dc = dc;
+
+- dal_irq_service_set(core_dc->res_pool->irqs, src, enable);
++ dal_irq_service_set(dc->res_pool->irqs, src, enable);
+ }
+
+ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
+ {
+- struct dc *core_dc = dc;
+- dal_irq_service_ack(core_dc->res_pool->irqs, src);
++ dal_irq_service_ack(dc->res_pool->irqs, src);
+ }
+
+ void dc_set_power_state(
+@@ -1659,7 +1634,6 @@ void dc_set_power_state(
+ enum dc_acpi_cm_power_state power_state,
+ enum dc_video_power_state video_power_state)
+ {
+- struct dc *core_dc = dc;
+ atomic_t ref_count;
+
+ core_dc->previous_power_state = core_dc->current_power_state;
+@@ -1667,13 +1641,13 @@ void dc_set_power_state(
+
+ switch (power_state) {
+ case DC_ACPI_CM_POWER_STATE_D0:
+- core_dc->hwss.init_hw(core_dc);
++ dc->hwss.init_hw(dc);
+ break;
+ default:
+ /* NULL means "reset/release all DC streams" */
+ dc_commit_streams(dc, NULL, 0);
+
+- core_dc->hwss.power_down(core_dc);
++ dc->hwss.power_down(dc);
+
+ /* Zero out the current context so that on resume we start with
+ * clean state, and dc hw programming optimizations will not
+@@ -1681,11 +1655,11 @@ void dc_set_power_state(
+ */
+
+ /* Preserve refcount */
+- ref_count = core_dc->current_context->ref_count;
+- dc_resource_validate_ctx_destruct(core_dc->current_context);
+- memset(core_dc->current_context, 0,
+- sizeof(*core_dc->current_context));
+- core_dc->current_context->ref_count = ref_count;
++ ref_count = dc->current_context->ref_count;
++ dc_resource_validate_ctx_destruct(dc->current_context);
++ memset(dc->current_context, 0,
++ sizeof(*dc->current_context));
++ dc->current_context->ref_count = ref_count;
+
+ break;
+ }
+@@ -1694,12 +1668,11 @@ void dc_set_power_state(
+
+ void dc_resume(struct dc *dc)
+ {
+- struct dc *core_dc = dc;
+
+ uint32_t i;
+
+- for (i = 0; i < core_dc->link_count; i++)
+- core_link_resume(core_dc->links[i]);
++ for (i = 0; i < dc->link_count; i++)
++ core_link_resume(dc->links[i]);
+ }
+
+ bool dc_read_aux_dpcd(
+@@ -1709,9 +1682,8 @@ bool dc_read_aux_dpcd(
+ uint8_t *data,
+ uint32_t size)
+ {
+- struct dc *core_dc = dc;
+
+- struct dc_link *link = core_dc->links[link_index];
++ struct dc_link *link = dc->links[link_index];
+ enum ddc_result r = dal_ddc_service_read_dpcd_data(
+ link->ddc,
+ false,
+@@ -1729,8 +1701,7 @@ bool dc_write_aux_dpcd(
+ const uint8_t *data,
+ uint32_t size)
+ {
+- struct dc *core_dc = dc;
+- struct dc_link *link = core_dc->links[link_index];
++ struct dc_link *link = dc->links[link_index];
+
+ enum ddc_result r = dal_ddc_service_write_dpcd_data(
+ link->ddc,
+@@ -1750,9 +1721,8 @@ bool dc_read_aux_i2c(
+ uint8_t *data,
+ uint32_t size)
+ {
+- struct dc *core_dc = dc;
+
+- struct dc_link *link = core_dc->links[link_index];
++ struct dc_link *link = dc->links[link_index];
+ enum ddc_result r = dal_ddc_service_read_dpcd_data(
+ link->ddc,
+ true,
+@@ -1771,8 +1741,7 @@ bool dc_write_aux_i2c(
+ const uint8_t *data,
+ uint32_t size)
+ {
+- struct dc *core_dc = dc;
+- struct dc_link *link = core_dc->links[link_index];
++ struct dc_link *link = dc->links[link_index];
+
+ enum ddc_result r = dal_ddc_service_write_dpcd_data(
+ link->ddc,
+@@ -1793,9 +1762,8 @@ bool dc_query_ddc_data(
+ uint8_t *read_buf,
+ uint32_t read_size) {
+
+- struct dc *core_dc = dc;
+
+- struct dc_link *link = core_dc->links[link_index];
++ struct dc_link *link = dc->links[link_index];
+
+ bool result = dal_ddc_service_query_ddc_data(
+ link->ddc,
+@@ -1813,9 +1781,8 @@ bool dc_submit_i2c(
+ uint32_t link_index,
+ struct i2c_command *cmd)
+ {
+- struct dc *core_dc = dc;
+
+- struct dc_link *link = core_dc->links[link_index];
++ struct dc_link *link = dc->links[link_index];
+ struct ddc_service *ddc = link->ddc;
+
+ return dal_i2caux_submit_i2c_command(
+@@ -1932,12 +1899,11 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+ bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
+ {
+ int i;
+- struct dc *core_dc = dc;
+ struct mem_input *mi = NULL;
+
+- for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+- if (core_dc->res_pool->mis[i] != NULL) {
+- mi = core_dc->res_pool->mis[i];
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ if (dc->res_pool->mis[i] != NULL) {
++ mi = dc->res_pool->mis[i];
+ break;
+ }
+ }
+@@ -1946,10 +1912,10 @@ bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
+ return false;
+ }
+
+- if (core_dc->hwss.update_dchub)
+- core_dc->hwss.update_dchub(core_dc->hwseq, dh_data);
++ if (dc->hwss.update_dchub)
++ dc->hwss.update_dchub(dc->hwseq, dh_data);
+ else
+- ASSERT(core_dc->hwss.update_dchub);
++ ASSERT(dc->hwss.update_dchub);
+
+
+ return true;
+@@ -1958,9 +1924,8 @@ bool dc_init_dchub(struct dc *dc, struct dchub_init_data *dh_data)
+
+ void dc_log_hw_state(struct dc *dc)
+ {
+- struct dc *core_dc = dc;
+
+- if (core_dc->hwss.log_hw_state)
+- core_dc->hwss.log_hw_state(core_dc);
++ if (dc->hwss.log_hw_state)
++ dc->hwss.log_hw_state(dc);
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+index f49203b..52350d0 100644
+--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
++++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+@@ -145,7 +145,6 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
+ struct core_freesync *core_freesync =
+ dm_alloc(sizeof(struct core_freesync));
+
+- struct dc *core_dc = dc;
+
+ struct persistent_data_flag flag;
+
+@@ -176,19 +175,19 @@ struct mod_freesync *mod_freesync_create(struct dc *dc)
+ /* Create initial module folder in registry for freesync enable data */
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+- dm_write_persistent_data(core_dc->ctx, NULL, FREESYNC_REGISTRY_NAME,
++ dm_write_persistent_data(dc->ctx, NULL, FREESYNC_REGISTRY_NAME,
+ NULL, NULL, 0, &flag);
+ flag.save_per_edid = false;
+ flag.save_per_link = false;
+
+- if (dm_read_persistent_data(core_dc->ctx, NULL, NULL,
++ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.drr_internal_supported =
+ (data & 1) ? false : true;
+ }
+
+- if (dm_read_persistent_data(core_dc->ctx, NULL, NULL,
++ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_NO_STATIC_FOR_EXTERNAL_DP_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.drr_external_supported =
+@@ -245,7 +244,7 @@ static unsigned int map_index_from_stream(struct core_freesync *core_freesync,
+ bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream, struct mod_freesync_caps *caps)
+ {
+- struct dc *core_dc = NULL;
++ struct dc *dc = NULL;
+ struct core_freesync *core_freesync = NULL;
+ int persistent_freesync_enable = 0;
+ struct persistent_data_flag flag;
+@@ -256,7 +255,7 @@ bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+- core_dc = core_freesync->dc;
++ dc = core_freesync->dc;
+
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+@@ -287,7 +286,7 @@ bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
+ static_ramp.ramp_is_active = false;
+
+ /* get persistent data from registry */
+- if (dm_read_persistent_data(core_dc->ctx, stream->sink,
++ if (dm_read_persistent_data(dc->ctx, stream->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable", &persistent_freesync_enable,
+ sizeof(int), &flag)) {
+@@ -970,14 +969,14 @@ bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
+ unsigned int stream_index, map_index;
+ int persistent_data = 0;
+ struct persistent_data_flag flag;
+- struct dc *core_dc = NULL;
++ struct dc *dc = NULL;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+- core_dc = core_freesync->dc;
++ dc = core_freesync->dc;
+
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+@@ -1001,7 +1000,7 @@ bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
+ enable_for_video)
+ persistent_data = persistent_data | 4;
+
+- dm_write_persistent_data(core_dc->ctx,
++ dm_write_persistent_data(dc->ctx,
+ streams[stream_index]->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable",
+--
+2.7.4
+
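
The cleanup carried by this patch follows one mechanical pattern: drop the local `struct dc *core_dc = dc;` alias left behind by the removed DC_TO_CORE() macro and use the `dc` parameter directly. A minimal sketch of the before/after shape, using hypothetical struct fields and function names (not the real dc definitions):

/* Illustrative sketch only -- hypothetical fields and helpers. */
struct dc_caps { int max_streams; };
struct dc { struct dc_caps caps; };

/* Before: redundant alias, a leftover of the DC_TO_CORE() removal. */
static int get_max_streams_old(struct dc *dc)
{
	struct dc *core_dc = dc;	/* alias adds nothing once core_dc == dc */

	return core_dc->caps.max_streams;
}

/* After: the pattern applied throughout this patch -- use dc directly. */
static int get_max_streams_new(struct dc *dc)
{
	return dc->caps.max_streams;
}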