about summary refs log tree commit diff stats
path: root/common/recipes-kernel/linux/linux-yocto-4.19.8/1760-drm-amd-display-Clean-up-locking-in-dcn-_apply_ctx_f.patch
diff options
context:
space:
mode:
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.19.8/1760-drm-amd-display-Clean-up-locking-in-dcn-_apply_ctx_f.patch')
-rw-r--r-- common/recipes-kernel/linux/linux-yocto-4.19.8/1760-drm-amd-display-Clean-up-locking-in-dcn-_apply_ctx_f.patch 162
1 files changed, 162 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.19.8/1760-drm-amd-display-Clean-up-locking-in-dcn-_apply_ctx_f.patch b/common/recipes-kernel/linux/linux-yocto-4.19.8/1760-drm-amd-display-Clean-up-locking-in-dcn-_apply_ctx_f.patch
new file mode 100644
index 00000000..38a67bb0
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.19.8/1760-drm-amd-display-Clean-up-locking-in-dcn-_apply_ctx_f.patch
@@ -0,0 +1,162 @@
+From b670a131d3aad8ae0a1e2b732cef66c450b19ed5 Mon Sep 17 00:00:00 2001
+From: Leo Li <sunpeng.li@amd.com>
+Date: Wed, 20 Mar 2019 09:52:14 -0400
+Subject: [PATCH 1760/2940] drm/amd/display: Clean up locking in
+ dcn*_apply_ctx_for_surface()
+
+[Why]
+
+dcn*_disable_plane() doesn't unlock the pipe anymore, making the extra
+lock unnecessary.
+
+In addition - during full plane updates - all necessary pipes should be
+locked/unlocked together when modifying hubp to avoid tearing in
+pipesplit setups.
+
+[How]
+
+Remove redundant locks, and add function to lock all pipes. If an
+interdependent pipe update is required, lock down all pipes. Otherwise,
+lock only the top pipe for the updated pipe tree.
+
+Signed-off-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
+---
+ .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 66 +++++++++++++------
+ .../amd/display/dc/dcn10/dcn10_hw_sequencer.h | 4 ++
+ 2 files changed, 49 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index de788298e6a5..dcb133433b17 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -2328,6 +2328,7 @@ static void dcn10_apply_ctx_for_surface(
+ int i;
+ struct timing_generator *tg;
+ bool removed_pipe[4] = { false };
++ bool interdependent_update = false;
+ struct pipe_ctx *top_pipe_to_program =
+ find_top_pipe_for_stream(dc, context, stream);
+ DC_LOGGER_INIT(dc->ctx->logger);
+@@ -2337,7 +2338,13 @@ static void dcn10_apply_ctx_for_surface(
+
+ tg = top_pipe_to_program->stream_res.tg;
+
+- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
++ interdependent_update = top_pipe_to_program->plane_state &&
++ top_pipe_to_program->plane_state->update_flags.bits.full_update;
++
++ if (interdependent_update)
++ lock_all_pipes(dc, context, true);
++ else
++ dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
+
+ if (num_planes == 0) {
+ /* OTG blank before remove all front end */
+@@ -2357,15 +2364,9 @@ static void dcn10_apply_ctx_for_surface(
+ */
+ if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
+ if (old_pipe_ctx->stream_res.tg == tg &&
+- old_pipe_ctx->plane_res.hubp &&
+- old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
++ old_pipe_ctx->plane_res.hubp &&
++ old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
+ dcn10_disable_plane(dc, old_pipe_ctx);
+- /*
+- * power down fe will unlock when calling reset, need
+- * to lock it back here. Messy, need rework.
+- */
+- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+- }
+ }
+
+ if ((!pipe_ctx->plane_state ||
+@@ -2384,29 +2385,25 @@ static void dcn10_apply_ctx_for_surface(
+ if (num_planes > 0)
+ program_all_pipe_in_tree(dc, top_pipe_to_program, context);
+
+- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+-
+- if (top_pipe_to_program->plane_state &&
+- top_pipe_to_program->plane_state->update_flags.bits.full_update)
++ if (interdependent_update)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+- tg = pipe_ctx->stream_res.tg;
+ /* Skip inactive pipes and ones already updated */
+- if (!pipe_ctx->stream || pipe_ctx->stream == stream
+- || !pipe_ctx->plane_state
+- || !tg->funcs->is_tg_enabled(tg))
++ if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
++ !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
+ continue;
+
+- tg->funcs->lock(tg);
+-
+ pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
+ pipe_ctx->plane_res.hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+-
+- tg->funcs->unlock(tg);
+ }
+
++ if (interdependent_update)
++ lock_all_pipes(dc, context, false);
++ else
++ dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
++
+ if (num_planes == 0)
+ false_optc_underflow_wa(dc, stream, tg);
+
+@@ -2813,6 +2810,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
+ return vertical_line_start;
+ }
+
++void lock_all_pipes(struct dc *dc,
++ struct dc_state *context,
++ bool lock)
++{
++ struct pipe_ctx *pipe_ctx;
++ struct timing_generator *tg;
++ int i;
++
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ pipe_ctx = &context->res_ctx.pipe_ctx[i];
++ tg = pipe_ctx->stream_res.tg;
++ /*
++ * Only lock the top pipe's tg to prevent redundant
++ * (un)locking. Also skip if pipe is disabled.
++ */
++ if (pipe_ctx->top_pipe ||
++ !pipe_ctx->stream || !pipe_ctx->plane_state ||
++ !tg->funcs->is_tg_enabled(tg))
++ continue;
++
++ if (lock)
++ tg->funcs->lock(tg);
++ else
++ tg->funcs->unlock(tg);
++ }
++}
++
+ static void calc_vupdate_position(
+ struct pipe_ctx *pipe_ctx,
+ uint32_t *start_line,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+index 6d66084df55f..4b3b27a5d23b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+@@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
+
+ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+
++void lock_all_pipes(struct dc *dc,
++ struct dc_state *context,
++ bool lock);
++
+ #endif /* __DC_HWSS_DCN10_H__ */
+--
+2.17.1
+