path: root/common/recipes-kernel/linux/linux-yocto-4.14.71/5350-drm-amd-display-Build-stream-update-and-plane-update.patch
From 01ee85a2b1c0c9a2caaeeac730662ddb9c2dfee8 Mon Sep 17 00:00:00 2001
From: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Date: Mon, 20 Aug 2018 13:32:07 -0400
Subject: [PATCH 5350/5725] drm/amd/display: Build stream update and plane
 updates in dm

[Why]
We currently lock modeset by setting a boolean in dm. We want to lock
based on what DC tells us.

[How]
Build stream_update and plane updates based on what changed. Then we
call dc_check_update_surfaces_for_stream() to get the update type.
We lock only if the update type is not fast.

Change-Id: I70dbee91970830632be8abb960ae47b3adf9ab8d
Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Reviewed-by: Harry Wentland <Harry.Wentland@amd.com>
Acked-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
---
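A minimal standalone sketch of the locking policy this patch introduces
(illustrative only, not part of the commit): the enum ordering and the
"lock only when the overall update type is above fast" rule mirror the
diff below, while main() and the sample per-stream values are assumptions
made up for the example.

  #include <stdio.h>

  /* Mirrors DC's surface_update_type ordering: FAST < MED < FULL. */
  enum surface_update_type {
          UPDATE_TYPE_FAST,
          UPDATE_TYPE_MED,
          UPDATE_TYPE_FULL,
  };

  int main(void)
  {
          /* Hypothetical per-stream results, as would be produced by
           * dm_determine_update_type_for_commit(). */
          enum surface_update_type per_stream[] = {
                  UPDATE_TYPE_FAST, UPDATE_TYPE_MED, UPDATE_TYPE_FAST,
          };
          enum surface_update_type overall = UPDATE_TYPE_FAST;
          unsigned int i;

          /* The overall type is the maximum across all streams. */
          for (i = 0; i < sizeof(per_stream) / sizeof(per_stream[0]); i++)
                  if (overall < per_stream[i])
                          overall = per_stream[i];

          /* Anything above FAST requires the global lock, as in
           * amdgpu_dm_atomic_check() below. */
          printf("overall=%d -> %s\n", overall,
                 overall > UPDATE_TYPE_FAST ? "take global lock"
                                            : "fast path, no lock");
          return 0;
  }
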
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 116 +++++++++++++++++++++-
 1 file changed, 114 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 8eb935e..8132f40 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5441,6 +5441,100 @@ static int dm_update_planes_state(struct dc *dc,
 
 	return ret;
 }
+enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state)
+{
+
+
+	int i, j, num_plane;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
+	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
+	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+	struct drm_plane *plane;
+
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *new_crtc_state, *old_crtc_state;
+	struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state;
+	struct dc_stream_status *status = NULL;
+
+	struct dc_surface_update *updates = kzalloc(MAX_SURFACES * sizeof(struct dc_surface_update), GFP_KERNEL);
+	struct dc_plane_state *surface = kzalloc(MAX_SURFACES * sizeof(struct dc_plane_state), GFP_KERNEL);
+	struct dc_stream_update stream_update;
+	enum surface_update_type update_type = UPDATE_TYPE_FAST;
+
+
+	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+		old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
+		num_plane = 0;
+
+		if (new_dm_crtc_state->stream) {
+
+			for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, j) {
+				new_plane_crtc = new_plane_state->crtc;
+				old_plane_crtc = old_plane_state->crtc;
+				new_dm_plane_state = to_dm_plane_state(new_plane_state);
+				old_dm_plane_state = to_dm_plane_state(old_plane_state);
+
+				if (plane->type == DRM_PLANE_TYPE_CURSOR)
+					continue;
+
+				if (!state->allow_modeset)
+					continue;
+
+				if (crtc == new_plane_crtc) {
+					updates[num_plane].surface = &surface[num_plane];
+
+					if (new_crtc_state->mode_changed) {
+						updates[num_plane].surface->src_rect =
+									new_dm_plane_state->dc_state->src_rect;
+						updates[num_plane].surface->dst_rect =
+									new_dm_plane_state->dc_state->dst_rect;
+						updates[num_plane].surface->rotation =
+									new_dm_plane_state->dc_state->rotation;
+						updates[num_plane].surface->in_transfer_func =
+									new_dm_plane_state->dc_state->in_transfer_func;
+						stream_update.dst = new_dm_crtc_state->stream->dst;
+						stream_update.src = new_dm_crtc_state->stream->src;
+					}
+
+					if (new_crtc_state->color_mgmt_changed) {
+						updates[num_plane].gamma =
+								new_dm_plane_state->dc_state->gamma_correction;
+						updates[num_plane].in_transfer_func =
+								new_dm_plane_state->dc_state->in_transfer_func;
+						stream_update.gamut_remap =
+								&new_dm_crtc_state->stream->gamut_remap_matrix;
+						stream_update.out_transfer_func =
+								new_dm_crtc_state->stream->out_transfer_func;
+					}
+
+					num_plane++;
+				}
+			}
+
+			if (num_plane > 0) {
+				status = dc_stream_get_status(new_dm_crtc_state->stream);
+				update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
+										  &stream_update, status);
+
+				if (update_type > UPDATE_TYPE_MED) {
+					update_type = UPDATE_TYPE_FULL;
+					goto ret;
+				}
+			}
+
+		} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
+			update_type = UPDATE_TYPE_FULL;
+			goto ret;
+		}
+	}
+
+ret:
+	kfree(updates);
+	kfree(surface);
+
+	return update_type;
+}
 
 static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
@@ -5452,6 +5546,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
         struct drm_connector_state *old_con_state, *new_con_state;
         struct drm_crtc *crtc;
         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+	enum surface_update_type update_type = UPDATE_TYPE_FAST;
+	enum surface_update_type overall_update_type = UPDATE_TYPE_FAST;
         int ret, i;
 
         /*
@@ -5537,7 +5633,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
                         continue;
 
-                lock_and_validation_needed = true;
+		overall_update_type = UPDATE_TYPE_FULL;
+		lock_and_validation_needed = true;
         }
 
         /*
@@ -5549,8 +5646,23 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
          * will wait for completion of any outstanding flip using DRMs
          * synchronization events.
          */
+	update_type = dm_determine_update_type_for_commit(dc, state);
+
+	if (overall_update_type < update_type)
+		overall_update_type = update_type;
+
+	/*
+	 * lock_and_validation_needed was an old way to determine if we need to set
+	 * the global lock. Leaving it in to check if we broke any corner cases
+	 * lock_and_validation_needed true = UPDATE_TYPE_FULL or UPDATE_TYPE_MED
+	 * lock_and_validation_needed false = UPDATE_TYPE_FAST
+	 */
+	if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
+		WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
+	else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
+		WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
 
-        if (lock_and_validation_needed) {
+        if (overall_update_type > UPDATE_TYPE_FAST) {
 
                 ret = do_aquire_global_lock(dev, state);
                 if (ret)
-- 
2.7.4