path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/0528-drm-amd-display-do-full-fe-reprogram-on-full-update.patch
From 990cf1c87162b32799733dd4278270650e1c227e Mon Sep 17 00:00:00 2001
From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Date: Tue, 13 Jun 2017 17:14:51 -0400
Subject: [PATCH 0528/4131] drm/amd/display: do full fe reprogram on full
 update

Fixes MPO entry in 2x4k scenario on raven

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
Acked-by: Harry Wentland <Harry.Wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/display/dc/core/dc.c | 81 +++++++++++++++++++++-----------
 1 file changed, 54 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 3481ef1..a4cc6ea 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1401,15 +1401,18 @@ void dc_update_surfaces_and_stream(struct dc *dc,
 	if (!surface_count)  /* reset */
 		core_dc->hwss.apply_ctx_for_surface(core_dc, NULL, context);
 
-	/* Lock pipes for provided surfaces */
+	/* Lock pipes for provided surfaces, or all active if full update*/
 	for (i = 0; i < surface_count; i++) {
 		struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);
 
 		for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
 
-			if (pipe_ctx->surface != surface)
+			if (update_type != UPDATE_TYPE_FULL && pipe_ctx->surface != surface)
+				continue;
+			if (!pipe_ctx->surface || pipe_ctx->top_pipe)
 				continue;
+
 			if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
 				core_dc->hwss.pipe_control_lock(
 						core_dc,
@@ -1417,22 +1420,49 @@ void dc_update_surfaces_and_stream(struct dc *dc,
 						true);
 			}
 		}
+		if (update_type == UPDATE_TYPE_FULL)
+			break;
 	}
 
+	/* Full fe update*/
+	for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+		struct pipe_ctx *cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
+		bool is_new_pipe_surface = cur_pipe_ctx->surface != pipe_ctx->surface;
+		struct dc_cursor_position position = { 0 };
+
+		if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->surface)
+			continue;
+
+		if (!pipe_ctx->top_pipe)
+			core_dc->hwss.apply_ctx_for_surface(
+					core_dc, pipe_ctx->surface, context);
+
+		/* TODO: this is a hack w/a for switching from mpo to pipe split */
+		dc_stream_set_cursor_position(&pipe_ctx->stream->public, &position);
+
+		if (is_new_pipe_surface) {
+			core_dc->hwss.update_plane_addr(core_dc, pipe_ctx);
+			core_dc->hwss.set_input_transfer_func(
+					pipe_ctx, pipe_ctx->surface);
+			core_dc->hwss.set_output_transfer_func(
+					pipe_ctx, pipe_ctx->stream);
+		}
+	}
+
+	if (update_type > UPDATE_TYPE_FAST)
+		context_timing_trace(dc, &context->res_ctx);
+
 	/* Perform requested Updates */
 	for (i = 0; i < surface_count; i++) {
 		struct core_surface *surface = DC_SURFACE_TO_CORE(srf_updates[i].surface);
 
-		if (update_type >= UPDATE_TYPE_MED) {
-				core_dc->hwss.apply_ctx_for_surface(
-						core_dc, surface, context);
-				context_timing_trace(dc, &context->res_ctx);
-		}
+		if (update_type == UPDATE_TYPE_MED)
+			core_dc->hwss.apply_ctx_for_surface(
+					core_dc, surface, context);
 
 		for (j = 0; j < core_dc->res_pool->pipe_count; j++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-			struct pipe_ctx *cur_pipe_ctx;
-			bool is_new_pipe_surface = true;
 
 			if (pipe_ctx->surface != surface)
 				continue;
@@ -1443,19 +1473,12 @@ void dc_update_surfaces_and_stream(struct dc *dc,
 			if (update_type == UPDATE_TYPE_FAST)
 				continue;
 
-			cur_pipe_ctx = &core_dc->current_context->res_ctx.pipe_ctx[j];
-			if (cur_pipe_ctx->surface == pipe_ctx->surface)
-				is_new_pipe_surface = false;
-
-			if (is_new_pipe_surface ||
-					srf_updates[i].in_transfer_func)
+			if (srf_updates[i].in_transfer_func)
 				core_dc->hwss.set_input_transfer_func(
 						pipe_ctx, pipe_ctx->surface);
 
-			if (is_new_pipe_surface ||
-				(stream_update != NULL &&
-					stream_update->out_transfer_func !=
-							NULL)) {
+			if (stream_update != NULL &&
+					stream_update->out_transfer_func != NULL) {
 				core_dc->hwss.set_output_transfer_func(
 						pipe_ctx, pipe_ctx->stream);
 			}
@@ -1472,15 +1495,19 @@ void dc_update_surfaces_and_stream(struct dc *dc,
 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 
 		for (j = 0; j < surface_count; j++) {
-			if (srf_updates[j].surface == &pipe_ctx->surface->public) {
-				if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
-					core_dc->hwss.pipe_control_lock(
-							core_dc,
-							pipe_ctx,
-							false);
-				}
-				break;
+			if (update_type != UPDATE_TYPE_FULL &&
+					srf_updates[j].surface != &pipe_ctx->surface->public)
+				continue;
+			if (!pipe_ctx->surface || pipe_ctx->top_pipe)
+				continue;
+
+			if (!pipe_ctx->tg->funcs->is_blanked(pipe_ctx->tg)) {
+				core_dc->hwss.pipe_control_lock(
+						core_dc,
+						pipe_ctx,
+						false);
 			}
+			break;
 		}
 	}
 
-- 
2.7.4