path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3481-drm-amdgpu-restructure-amdgpu_vmid_grab.patch
From b6a2432384c5c177d07e1ca9f7b89911ab5f786e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Wed, 31 Jan 2018 14:24:45 +0100
Subject: [PATCH 3481/4131] drm/amdgpu: restructure amdgpu_vmid_grab
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Now that we have the different cases for grabbing a VMID in separate
functions, restructure the top level function to only have one place
where VMIDs are assigned to jobs.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 89 ++++++++++++++++-----------------
 1 file changed, 42 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index f57a6b8..e4b72d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -267,33 +267,34 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 				     struct amdgpu_ring *ring,
 				     struct amdgpu_sync *sync,
 				     struct dma_fence *fence,
-				     struct amdgpu_job *job)
+				     struct amdgpu_job *job,
+				     struct amdgpu_vmid **id)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
 	uint64_t fence_context = adev->fence_context + ring->idx;
-	struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
-	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct dma_fence *updates = sync->last_vm_update;
 	bool needs_flush = vm->use_cpu_for_update;
 	int r = 0;
 
-	if (updates && id->flushed_updates &&
-	    updates->context == id->flushed_updates->context &&
-	    !dma_fence_is_later(updates, id->flushed_updates))
+	*id = vm->reserved_vmid[vmhub];
+	if (updates && (*id)->flushed_updates &&
+	    updates->context == (*id)->flushed_updates->context &&
+	    !dma_fence_is_later(updates, (*id)->flushed_updates))
 	    updates = NULL;
 
-	if (id->owner != vm->entity.fence_context ||
-	    job->vm_pd_addr != id->pd_gpu_addr ||
-	    updates || !id->last_flush ||
-	    (id->last_flush->context != fence_context &&
-	     !dma_fence_is_signaled(id->last_flush))) {
+	if ((*id)->owner != vm->entity.fence_context ||
+	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
+	    updates || !(*id)->last_flush ||
+	    ((*id)->last_flush->context != fence_context &&
+	     !dma_fence_is_signaled((*id)->last_flush))) {
 		struct dma_fence *tmp;
 
 		/* to prevent one context starved by another context */
-		id->pd_gpu_addr = 0;
-		tmp = amdgpu_sync_peek_fence(&id->active, ring);
+		(*id)->pd_gpu_addr = 0;
+		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
 		if (tmp) {
+			*id = NULL;
 			r = amdgpu_sync_fence(adev, sync, tmp, false);
 			return r;
 		}
@@ -303,24 +304,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	/* Good we can use this VMID. Remember this submission as
 	* user of the VMID.
 	*/
-	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+	r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
 	if (r)
 		return r;
 
 	if (updates) {
-		dma_fence_put(id->flushed_updates);
-		id->flushed_updates = dma_fence_get(updates);
+		dma_fence_put((*id)->flushed_updates);
+		(*id)->flushed_updates = dma_fence_get(updates);
 	}
-	id->pd_gpu_addr = job->vm_pd_addr;
-	id->owner = vm->entity.fence_context;
 	job->vm_needs_flush = needs_flush;
-	if (needs_flush) {
-		dma_fence_put(id->last_flush);
-		id->last_flush = NULL;
-	}
-	job->vmid = id - id_mgr->ids;
-	job->pasid = vm->pasid;
-	trace_amdgpu_vm_grab_id(vm, ring, job);
 	return 0;
 }
 
@@ -415,7 +407,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-	struct dma_fence *updates = sync->last_vm_update;
 	struct amdgpu_vmid *id, *idle;
 	int r = 0;
 
@@ -425,37 +416,41 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		goto error;
 
 	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
-		mutex_unlock(&id_mgr->lock);
-		return r;
-	}
+		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
+		if (r || !id)
+			goto error;
+	} else {
+		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
+		if (r)
+			goto error;
 
-	r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
-	if (r)
-		goto error;
+		if (!id) {
+			struct dma_fence *updates = sync->last_vm_update;
 
-	if (!id) {
-		/* Still no ID to use? Then use the idle one found earlier */
-		id = idle;
+			/* Still no ID to use? Then use the idle one found earlier */
+			id = idle;
 
-		/* Remember this submission as user of the VMID */
-		r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
-		if (r)
-			goto error;
+			/* Remember this submission as user of the VMID */
+			r = amdgpu_sync_fence(ring->adev, &id->active,
+					      fence, false);
+			if (r)
+				goto error;
 
-		id->pd_gpu_addr = job->vm_pd_addr;
-		dma_fence_put(id->flushed_updates);
-		id->flushed_updates = dma_fence_get(updates);
-		id->owner = vm->entity.fence_context;
-		job->vm_needs_flush = true;
+			dma_fence_put(id->flushed_updates);
+			id->flushed_updates = dma_fence_get(updates);
+			job->vm_needs_flush = true;
+		}
+
+		list_move_tail(&id->list, &id_mgr->ids_lru);
 	}
 
+	id->pd_gpu_addr = job->vm_pd_addr;
+	id->owner = vm->entity.fence_context;
+
 	if (job->vm_needs_flush) {
 		dma_fence_put(id->last_flush);
 		id->last_flush = NULL;
 	}
-	list_move_tail(&id->list, &id_mgr->ids_lru);
-
 	job->vmid = id - id_mgr->ids;
 	job->pasid = vm->pasid;
 	trace_amdgpu_vm_grab_id(vm, ring, job);
-- 
2.7.4
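
Editor's note: for reference, below is a minimal, self-contained C sketch of the control flow this patch gives amdgpu_vmid_grab(). The reduced structs and the helpers grab_reserved()/grab_used() are illustrative stand-ins invented for this sketch, not the real amdgpu definitions; it only shows how the reserved and non-reserved paths now converge on a single block where the VMID is assigned to the job.

#include <stdbool.h>
#include <stddef.h>

struct vmid { unsigned long pd_gpu_addr; unsigned long owner; };
struct job  { unsigned long vm_pd_addr; unsigned int vmid; bool vm_needs_flush; };
struct vm   { unsigned long fence_context; struct vmid *reserved; };

/* Stand-ins for amdgpu_vmid_grab_reserved()/amdgpu_vmid_grab_used(): each either
 * hands back an id through the out-parameter or leaves it NULL. */
static int grab_reserved(struct vm *vm, struct vmid **id) { *id = vm->reserved; return 0; }
static int grab_used(struct vm *vm, struct vmid **id) { *id = NULL; return 0; }

static int vmid_grab(struct vm *vm, struct job *job, struct vmid *ids, struct vmid *idle)
{
	struct vmid *id = NULL;
	int r;

	if (vm->reserved) {
		/* Reserved path: a NULL id means "sync on the active fence and retry". */
		r = grab_reserved(vm, &id);
		if (r || !id)
			return r;
	} else {
		r = grab_used(vm, &id);
		if (r)
			return r;
		if (!id) {
			/* Nothing reusable: fall back to the idle id found earlier. */
			id = idle;
			job->vm_needs_flush = true;
		}
	}

	/* The single place where a VMID is assigned to the job. */
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->fence_context;
	job->vmid = (unsigned int)(id - ids);
	return 0;
}

In the real driver the same convergence point also drops id->last_flush when a flush is needed, records job->pasid, and emits the trace event, as visible in the last hunk above.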