From 324b2b32fef0f7032c728cfe6709e978220e2d2e Mon Sep 17 00:00:00 2001
From: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
Date: Wed, 9 Jan 2019 19:31:08 +0530
Subject: [PATCH 4431/5725] drm/amdgpu/vg20:Restruct uvd.idle_work to support
 multiple instance (v2)

Vega20's dual-UVD hardware needs two idle_work items; restructure the code to
support multiple instances.

v2: squash in indentation fix

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Chaudhary Amit Kumar <chaudharyamit.kumar@amd.com>
---
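As a side note (not part of the patch itself): below is a minimal sketch of the
per-instance delayed-work pattern this change relies on, under the assumption of
hypothetical demo_* names that are purely illustrative and not taken from the
driver. Each instance owns a wrapper holding its own struct delayed_work plus
its instance index, and the handler recovers that wrapper from the work_struct
pointer. The sketch uses to_delayed_work()/container_of(), whereas the patch
casts the work pointer directly, which is equivalent only because idle_work is
the wrapper's first member.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_NUM_INST 2	/* e.g. a part with two UVD instances */

/* Wrapper mirroring the patch: per-instance work item plus its index. */
struct demo_delayed_work {
	struct delayed_work idle_work;
	unsigned int ip_instance;
};

static struct demo_delayed_work demo_works[DEMO_NUM_INST];

static void demo_idle_work_handler(struct work_struct *work)
{
	/* Recover the owning wrapper from the embedded work_struct. */
	struct demo_delayed_work *dw =
		container_of(to_delayed_work(work), struct demo_delayed_work,
			     idle_work);

	pr_info("idle work ran for instance %u\n", dw->ip_instance);
}

static int __init demo_init(void)
{
	unsigned int i;

	/* One INIT_DELAYED_WORK per instance, as the patch does per UVD inst. */
	for (i = 0; i < DEMO_NUM_INST; i++) {
		demo_works[i].ip_instance = i;
		INIT_DELAYED_WORK(&demo_works[i].idle_work,
				  demo_idle_work_handler);
		schedule_delayed_work(&demo_works[i].idle_work,
				      msecs_to_jiffies(1000));
	}
	return 0;
}

static void __exit demo_exit(void)
{
	unsigned int i;

	for (i = 0; i < DEMO_NUM_INST; i++)
		cancel_delayed_work_sync(&demo_works[i].idle_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The container_of()/to_delayed_work() form keeps working even if idle_work is
not the first member of the wrapper, which is why it is the more common idiom
in the kernel tree.
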
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 17 +++++++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h |  7 ++++++-
 2 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c9ed917..3c040f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -129,8 +129,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	unsigned version_major, version_minor, family_id;
 	int i, j, r;
 
-	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
-
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_BONAIRE:
@@ -237,6 +235,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+		adev->uvd.inst[j].delayed_work.ip_instance = j;
+		INIT_DELAYED_WORK(&adev->uvd.inst[j].delayed_work.idle_work, amdgpu_uvd_idle_work_handler);
 
 		r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
 					    AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
@@ -317,7 +317,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
                 if (adev->uvd.inst[j].vcpu_bo == NULL)
                         continue;
 
-		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
+		cancel_delayed_work_sync(&adev->uvd.inst[j].delayed_work.idle_work);
 
                 /* only valid for physical mode */
                 if (adev->asic_type < CHIP_POLARIS10) {
@@ -1144,9 +1144,10 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
+	struct amdgpu_delayed_work *my_work = (struct amdgpu_delayed_work *)work;
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
-	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring);
+		container_of(work, struct amdgpu_device, uvd.inst[my_work->ip_instance].delayed_work.idle_work.work);
+	unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst[my_work->ip_instance].ring);
 
 	if (fences == 0) {
 		if (adev->pm.dpm_enabled) {
@@ -1160,7 +1161,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 							       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.inst[my_work->ip_instance].delayed_work.idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
 
@@ -1172,7 +1173,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst[ring->me].delayed_work.idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
@@ -1189,7 +1190,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.inst[ring->me].delayed_work.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index b1579fb..7801eb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -37,6 +37,11 @@
 	(AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
 			       8) - AMDGPU_UVD_FIRMWARE_OFFSET)
 
+struct amdgpu_delayed_work{
+	struct delayed_work idle_work;
+	unsigned ip_instance;
+};
+
 struct amdgpu_uvd_inst {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
@@ -44,12 +49,12 @@ struct amdgpu_uvd_inst {
 	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
-	struct delayed_work	idle_work;
 	struct amdgpu_ring	ring;
 	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
 	struct drm_sched_entity entity;
 	struct drm_sched_entity entity_enc;
+	struct amdgpu_delayed_work	delayed_work;
 	uint32_t                srbm_soft_reset;
 };
 
-- 
2.7.4