Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/core')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c          | 152
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c  |  39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c   |  40
3 files changed, 160 insertions(+), 71 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 188e51600070..fff95e6b46c7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -763,6 +763,29 @@ static bool disable_all_writeback_pipes_for_stream(
return true;
}
+void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+{
+ int i = 0;
+
+ /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
+ if (dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, lock);
+ else {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ // Copied conditions that were previously in dce110_apply_ctx_for_surface
+ if (stream == pipe_ctx->stream) {
+ if (!pipe_ctx->top_pipe &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
+ break;
+ }
+ }
+ }
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -788,11 +811,20 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
- if (dc->hwss.apply_ctx_for_surface)
+
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
}
- if (dc->hwss.program_front_end_for_ctx)
- dc->hwss.program_front_end_for_ctx(dc, dangling_context);
}
current_ctx = dc->current_state;
@@ -803,11 +835,10 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
- int count = 0;
- struct pipe_ctx *pipe;
PERF_TRACE();
for (i = 0; i < MAX_PIPES; i++) {
- pipe = &context->res_ctx.pipe_ctx[i];
+ int count = 0;
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->plane_state)
continue;
@@ -1212,16 +1243,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mode_changed)
continue;
-
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context); /* use new pipe config in new context */
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
}
+ }
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1240,19 +1274,27 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
- if (dc->hwss.program_front_end_for_ctx)
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context);
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
/*
* enable stereo
@@ -1320,10 +1362,24 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
-bool dc_is_hw_initialized(struct dc *dc)
+static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
- struct dc_bios *dcb = dc->ctx->dc_bios;
- return dcb->funcs->is_accelerated_mode(dcb);
+ int i;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state)
+ continue;
+
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (pipe->plane_state->status.is_flip_pending)
+ return true;
+ }
+ return false;
}
bool dc_post_update_surfaces_to_stream(struct dc *dc)
@@ -1336,6 +1392,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
+ if (is_flip_pending_in_pipes(dc, context))
+ return true;
+
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
@@ -1736,14 +1795,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->wb_update)
su_flags->bits.wb_update = 1;
+
+ if (stream_update->dsc_config)
+ su_flags->bits.dsc_changed = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
if (stream_update->output_csc_transform || stream_update->output_color_space)
su_flags->bits.out_csc = 1;
-
- if (stream_update->dsc_config)
- overall_type = UPDATE_TYPE_FULL;
}
for (i = 0 ; i < surface_count; i++) {
@@ -1778,8 +1838,11 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
if (type == UPDATE_TYPE_FULL) {
- if (stream_update)
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
@@ -2095,18 +2158,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
}
- if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
- dp_update_dsc_config(pipe_ctx);
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
- }
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
- if (stream_update->dpms_off) {
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (stream_update->dsc_config)
+ dp_update_dsc_config(pipe_ctx);
+ if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx);
/* for dpms, keep acquired resources*/
@@ -2120,8 +2179,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
core_link_enable_stream(dc->current_state, pipe_ctx);
}
-
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -2177,6 +2234,27 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context);
}
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream &&
+ pipe_ctx->stream == stream) {
+ top_pipe_to_program = pipe_ctx;
+ }
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, true);
+ else
+ /* Lock the top pipe while updating plane addrs, since freesync requires
+ * plane addr update event triggers to be synchronized.
+ * top_pipe_to_program is expected to never be NULL
+ */
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+
+
// Stream updates
if (stream_update)
commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@@ -2191,6 +2269,12 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+
+ dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@@ -2226,8 +2310,6 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->stream == stream) {
struct dc_stream_status *stream_status = NULL;
- top_pipe_to_program = pipe_ctx;
-
if (!pipe_ctx->plane_state)
continue;
@@ -2272,12 +2354,6 @@ static void commit_planes_for_stream(struct dc *dc,
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
- /* Lock the top pipe while updating plane addrs, since freesync requires
- * plane addr update event triggers to be synchronized.
- * top_pipe_to_program is expected to never be NULL
- */
- dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
-
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2319,9 +2395,15 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}
+ }
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
- }
+
+ if (update_type != UPDATE_TYPE_FAST)
+ dc->hwss.post_unlock_program_front_end(dc, context);
// Fire manual trigger only when bottom plane is flipped
for (j = 0; j < dc->res_pool->pipe_count; j++) {
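
Note on the dc.c changes above: pipe locking is pulled out of the per-surface hardware hooks and into the callers. Every full update now follows the same lock -> program -> unlock -> post_unlock_program_front_end sequence, using the new apply_ctx_interdependent_lock() helper on the apply_ctx_for_surface path and interdependent_update_lock() directly on the program_front_end_for_ctx path. A minimal sketch of that sequence for one stream follows; the wrapper name program_stream_full_update is hypothetical, while the hwss hooks are the ones touched by this diff.

        /*
         * Illustrative sketch only, not part of the patch: the full-update
         * flow dc.c now follows for a single stream on the legacy
         * apply_ctx_for_surface path.
         */
        static void program_stream_full_update(struct dc *dc,
                                               struct dc_state *context,
                                               struct dc_stream_state *stream,
                                               int plane_count)
        {
                if (!dc->hwss.apply_ctx_for_surface)
                        return;

                /* Lock the affected pipes (falls back to pipe_control_lock on DCE). */
                apply_ctx_interdependent_lock(dc, context, stream, true);

                /* Program the front end while the pipes are held locked. */
                dc->hwss.apply_ctx_for_surface(dc, stream, plane_count, context);

                /* Unlock first, then run the work that must happen unlocked. */
                apply_ctx_interdependent_lock(dc, context, stream, false);
                dc->hwss.post_unlock_program_front_end(dc, context);
        }
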
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index fd9e69634c50..fbcd979438e2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -220,6 +220,30 @@ static enum dpcd_training_patterns
return dpcd_tr_pattern;
}
+static uint8_t dc_dp_initialize_scrambling_data_symbols(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern)
+{
+ uint8_t disable_scrabled_data_symbols = 0;
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ disable_scrabled_data_symbols = 1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ disable_scrabled_data_symbols = 0;
+ break;
+ default:
+ ASSERT(0);
+ DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+ return disable_scrabled_data_symbols;
+}
+
static inline bool is_repeater(struct dc_link *link, uint32_t offset)
{
return (!link->is_lttpr_mode_transparent && offset != 0);
@@ -252,6 +276,9 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
+ dc_dp_initialize_scrambling_data_symbols(link, pattern);
+
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
@@ -2888,6 +2915,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ link->dc->hwss.blank_stream(pipe_ctx);
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
break;
}
@@ -2904,6 +2937,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_link_reallocate_mst_payload(link);
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+ }
+
status = false;
if (out_link_loss)
*out_link_loss = true;
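
Note on the dc_link_dp.c changes above: the DPCD training-pattern write now also programs SCRAMBLING_DISABLE (set for training patterns 1-3, cleared for pattern 4), and the HPD RX IRQ handler blanks every stream driven by the link before retraining, then unblanks them with the previously used link settings. A minimal sketch of that blank/retrain/unblank sequence, assuming the wrapper name and the prev_link_settings parameter are placeholders; blank_stream/unblank_stream are the hwss hooks used in the diff.

        /*
         * Illustrative sketch only, not part of the patch.
         */
        static void retrain_link_blanked(struct dc_link *link,
                                         struct dc_link_settings *prev_link_settings)
        {
                struct pipe_ctx *pipe_ctx;
                int i;

                /* Blank every enabled stream that drives this link. */
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
                                link->dc->hwss.blank_stream(pipe_ctx);
                }

                /* ... link retraining / MST payload reallocation happens here ... */

                /* Unblank with the link settings in use before the retrain. */
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
                                link->dc->hwss.unblank_stream(pipe_ctx, prev_link_settings);
                }
        }
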
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 6ddbb00ed37a..4f0e7203dba4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}
-static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
-{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- unsigned int vupdate_line;
- unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
- struct dc_stream_state *stream = pipe_ctx->stream;
- unsigned int us_per_line;
-
- if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
- ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
-
- vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
- return;
-
- if (vpos >= vupdate_line)
- return;
-
- us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
- lines_to_vupdate = vupdate_line - vpos;
- us_to_vupdate = lines_to_vupdate * us_per_line;
-
- /* 70 us is a conservative estimate of cursor update time*/
- if (us_to_vupdate < 70)
- udelay(us_to_vupdate);
- }
-#endif
-}
/**
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
@@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
-
- delay_cursor_until_vupdate(pipe_ctx, dc);
- dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
}
if (pipe_to_program)
- dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
return true;
}
@@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
-
- delay_cursor_until_vupdate(pipe_ctx, dc);
- dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_position(pipe_ctx);
}
if (pipe_to_program)
- dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
return true;
}
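
Note on the dc_stream.c changes above: cursor programming drops the Raven-specific delay_cursor_until_vupdate() workaround and the pipe_control_lock() pairing in favour of a dedicated cursor_lock hwss hook. A minimal sketch of the resulting pattern; the wrapper name is hypothetical.

        /*
         * Illustrative sketch only, not part of the patch: the cursor_lock
         * hook now brackets cursor programming instead of pipe_control_lock.
         */
        static void program_cursor_locked(struct dc *dc, struct pipe_ctx *pipe_ctx)
        {
                dc->hwss.cursor_lock(dc, pipe_ctx, true);   /* take cursor-specific lock */
                dc->hwss.set_cursor_attribute(pipe_ctx);    /* or set_cursor_position() */
                dc->hwss.cursor_lock(dc, pipe_ctx, false);  /* release */
        }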