From 7672bc8c3f6320597af8462b18c9c6e530bb3590 Mon Sep 17 00:00:00 2001
From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Date: Mon, 11 Mar 2019 09:38:35 -0400
Subject: [PATCH 1607/2940] drm/amd/display: Fix plane address updates for
 video surface formats

[Why]
For new DC planes the correct plane address fields are filled based
on whether the plane has a graphics or a video format.

However, when we perform stream and plane updates using DC we only ever
fill in the graphics format fields. This causes corruption and hangs
when using video surface formats like NV12 for planes.

[How]
Use the same logic everywhere we update dc_plane_address - always
fill in the correct fields based on the surface format type.

There are 3 places this is done:

- Atomic check, during DC plane creation
- Atomic commit, during plane prepare_fb
- Atomic commit tail, during amdgpu_dm_commit_planes

We use the fill_plane_tiling_attributes() helper in all 3 locations, and it
already needs the address to update the DCC attributes, so the surface
address update logic can be moved into this helper.
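
For reference, the NV12 address math that ends up centralized in the
helper can be sketched as below. This is an illustrative, standalone
user-space snippet rather than kernel code: the base address and the
surface dimensions are made up, and plain shifts stand in for
lower_32_bits()/upper_32_bits().

  #include <stdint.h>
  #include <stdio.h>

  /* Mirror ALIGN(fb->width, 64): the luma pitch is aligned to 64 pixels. */
  static uint64_t align64(uint64_t x)
  {
          return (x + 63) & ~63ULL;
  }

  int main(void)
  {
          /* Hypothetical NV12 surface; values chosen only for illustration. */
          uint64_t base = 0x0000000123400000ULL;
          uint64_t width = 1920, height = 1080;

          uint64_t awidth = align64(width);
          uint64_t luma_addr = base;
          /* The chroma plane follows the luma plane in memory. */
          uint64_t chroma_addr = base + awidth * height;

          printf("luma:   low 0x%08x high 0x%08x\n",
                 (uint32_t)luma_addr, (uint32_t)(luma_addr >> 32));
          printf("chroma: low 0x%08x high 0x%08x\n",
                 (uint32_t)chroma_addr, (uint32_t)(chroma_addr >> 32));
          return 0;
  }

Running it prints the low/high halves that the helper writes into the
luma_addr and chroma_addr fields of the video_progressive address.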

Change-Id: I757b0110997d02ba24b8e77bf6286f0bd9f707de
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Reviewed-by: Leo Li <sunpeng.li@amd.com>
---
 .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 57 +++++++++----------
 1 file changed, 26 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index fc04cbc719a5..5922eedc7a98 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2476,6 +2476,27 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev,
 
 	memset(tiling_info, 0, sizeof(*tiling_info));
 	memset(dcc, 0, sizeof(*dcc));
+	memset(address, 0, sizeof(*address));
+
+	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+		address->type = PLN_ADDR_TYPE_GRAPHICS;
+		address->grph.addr.low_part = lower_32_bits(afb->address);
+		address->grph.addr.high_part = upper_32_bits(afb->address);
+	} else {
+		const struct drm_framebuffer *fb = &afb->base;
+		uint64_t awidth = ALIGN(fb->width, 64);
+		uint64_t chroma_addr = afb->address + awidth * fb->height;
+
+		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+		address->video_progressive.luma_addr.low_part =
+			lower_32_bits(afb->address);
+		address->video_progressive.luma_addr.high_part =
+			upper_32_bits(afb->address);
+		address->video_progressive.chroma_addr.low_part =
+			lower_32_bits(chroma_addr);
+		address->video_progressive.chroma_addr.high_part =
+			upper_32_bits(chroma_addr);
+	}
 
 	/* Fill GFX8 params */
 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
@@ -2590,7 +2611,6 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
 	memset(&plane_state->address, 0, sizeof(plane_state->address));
 
 	if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
-		plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
 		plane_state->plane_size.grph.surface_size.x = 0;
 		plane_state->plane_size.grph.surface_size.y = 0;
 		plane_state->plane_size.grph.surface_size.width = fb->width;
@@ -2602,7 +2622,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
 
 	} else {
 		awidth = ALIGN(fb->width, 64);
-		plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+
 		plane_state->plane_size.video.luma_size.x = 0;
 		plane_state->plane_size.video.luma_size.y = 0;
 		plane_state->plane_size.video.luma_size.width = awidth;
@@ -3812,10 +3832,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 	struct drm_gem_object *obj;
 	struct amdgpu_device *adev;
 	struct amdgpu_bo *rbo;
-	uint64_t chroma_addr = 0;
 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
-	uint64_t tiling_flags, dcc_address;
-	unsigned int awidth;
+	uint64_t tiling_flags;
 	uint32_t domain;
 	int r;
 
@@ -3868,29 +3886,9 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
 
-		if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
-			plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
-			plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
-
-			dcc_address =
-				get_dcc_address(afb->address, tiling_flags);
-			plane_state->address.grph.meta_addr.low_part =
-				lower_32_bits(dcc_address);
-			plane_state->address.grph.meta_addr.high_part =
-				upper_32_bits(dcc_address);
-		} else {
-			awidth = ALIGN(new_state->fb->width, 64);
-			plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
-			plane_state->address.video_progressive.luma_addr.low_part
-							= lower_32_bits(afb->address);
-			plane_state->address.video_progressive.luma_addr.high_part
-							= upper_32_bits(afb->address);
-			chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
-			plane_state->address.video_progressive.chroma_addr.low_part
-							= lower_32_bits(chroma_addr);
-			plane_state->address.video_progressive.chroma_addr.high_part
-							= upper_32_bits(chroma_addr);
-		}
+		fill_plane_tiling_attributes(
+			adev, afb, plane_state, &plane_state->tiling_info,
+			&plane_state->dcc, &plane_state->address, tiling_flags);
 	}
 
 	return 0;
@@ -4962,9 +4960,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 
 		amdgpu_bo_unreserve(abo);
 
-		bundle->flip_addrs[planes_count].address.grph.addr.low_part = lower_32_bits(afb->address);
-		bundle->flip_addrs[planes_count].address.grph.addr.high_part = upper_32_bits(afb->address);
-
 		fill_plane_tiling_attributes(dm->adev, afb, dc_plane,
 			&bundle->plane_infos[planes_count].tiling_info,
 			&bundle->plane_infos[planes_count].dcc,
-- 
2.17.1