From b670a131d3aad8ae0a1e2b732cef66c450b19ed5 Mon Sep 17 00:00:00 2001
From: Leo Li <sunpeng.li@amd.com>
Date: Wed, 20 Mar 2019 09:52:14 -0400
Subject: [PATCH 1760/2940] drm/amd/display: Clean up locking in
 dcn*_apply_ctx_for_surface()

[Why]

dcn*_disable_plane() doesn't unlock the pipe anymore, making the extra
lock unnecessary.

In addition, during full plane updates, all necessary pipes should be
locked and unlocked together when modifying hubp, to avoid tearing in
pipe-split setups.

[How]

Remove the redundant locks, and add a function to lock all pipes. If an
interdependent pipe update is required, lock down all pipes. Otherwise,
lock only the top pipe of the updated pipe tree.
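
Condensed from the diff below, the resulting flow in
dcn10_apply_ctx_for_surface() is, in essence:

	interdependent_update = top_pipe_to_program->plane_state &&
		top_pipe_to_program->plane_state->update_flags.bits.full_update;

	if (interdependent_update)
		lock_all_pipes(dc, context, true); /* lock every enabled pipe tree */
	else
		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);

	/* ... remove/program planes, hubp_setup_interdependent() ... */

	if (interdependent_update)
		lock_all_pipes(dc, context, false);
	else
		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);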

Signed-off-by: Leo Li <sunpeng.li@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
---
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 66 +++++++++++++------
 .../amd/display/dc/dcn10/dcn10_hw_sequencer.h |  4 ++
 2 files changed, 49 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index de788298e6a5..dcb133433b17 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2328,6 +2328,7 @@ static void dcn10_apply_ctx_for_surface(
 	int i;
 	struct timing_generator *tg;
 	bool removed_pipe[4] = { false };
+	bool interdependent_update = false;
 	struct pipe_ctx *top_pipe_to_program =
 			find_top_pipe_for_stream(dc, context, stream);
 	DC_LOGGER_INIT(dc->ctx->logger);
@@ -2337,7 +2338,13 @@ static void dcn10_apply_ctx_for_surface(
 
 	tg = top_pipe_to_program->stream_res.tg;
 
-	dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
+	interdependent_update = top_pipe_to_program->plane_state &&
+		top_pipe_to_program->plane_state->update_flags.bits.full_update;
+
+	if (interdependent_update)
+		lock_all_pipes(dc, context, true);
+	else
+		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
 
 	if (num_planes == 0) {
 		/* OTG blank before remove all front end */
@@ -2357,15 +2364,9 @@ static void dcn10_apply_ctx_for_surface(
 		 */
 		if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
 			if (old_pipe_ctx->stream_res.tg == tg &&
-				old_pipe_ctx->plane_res.hubp &&
-				old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
+			    old_pipe_ctx->plane_res.hubp &&
+			    old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
 				dcn10_disable_plane(dc, old_pipe_ctx);
-				/*
-				 * power down fe will unlock when calling reset, need
-				 * to lock it back here. Messy, need rework.
-				 */
-				pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
-			}
 		}
 
 		if ((!pipe_ctx->plane_state ||
@@ -2384,29 +2385,25 @@ static void dcn10_apply_ctx_for_surface(
 	if (num_planes > 0)
 		program_all_pipe_in_tree(dc, top_pipe_to_program, context);
 
-	dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
-
-	if (top_pipe_to_program->plane_state &&
-			top_pipe_to_program->plane_state->update_flags.bits.full_update)
+	if (interdependent_update)
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-			tg = pipe_ctx->stream_res.tg;
 			/* Skip inactive pipes and ones already updated */
-			if (!pipe_ctx->stream || pipe_ctx->stream == stream
-					|| !pipe_ctx->plane_state
-					|| !tg->funcs->is_tg_enabled(tg))
+			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
+			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
 				continue;
 
-			tg->funcs->lock(tg);
-
 			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
 				pipe_ctx->plane_res.hubp,
 				&pipe_ctx->dlg_regs,
 				&pipe_ctx->ttu_regs);
-
-			tg->funcs->unlock(tg);
 		}
 
+	if (interdependent_update)
+		lock_all_pipes(dc, context, false);
+	else
+		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+
 	if (num_planes == 0)
 		false_optc_underflow_wa(dc, stream, tg);
 
@@ -2813,6 +2810,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
 	return vertical_line_start;
 }
 
+void lock_all_pipes(struct dc *dc,
+	struct dc_state *context,
+	bool lock)
+{
+	struct pipe_ctx *pipe_ctx;
+	struct timing_generator *tg;
+	int i;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		tg = pipe_ctx->stream_res.tg;
+		/*
+		 * Only lock the top pipe's tg to prevent redundant
+		 * (un)locking. Also skip if pipe is disabled.
+		 */
+		if (pipe_ctx->top_pipe ||
+		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
+		    !tg->funcs->is_tg_enabled(tg))
+			continue;
+
+		if (lock)
+			tg->funcs->lock(tg);
+		else
+			tg->funcs->unlock(tg);
+	}
+}
+
 static void calc_vupdate_position(
 		struct pipe_ctx *pipe_ctx,
 		uint32_t *start_line,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 6d66084df55f..4b3b27a5d23b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
 
 int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
 
+void lock_all_pipes(struct dc *dc,
+	struct dc_state *context,
+	bool lock);
+
 #endif /* __DC_HWSS_DCN10_H__ */
-- 
2.17.1