path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3479-drm-amdgpu-cleanup-and-simplify-amdgpu_vmid_grab_res.patch
From e2fb674fe4c509809a52d5b71779b2a58034f5c5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Wed, 31 Jan 2018 11:56:53 +0100
Subject: [PATCH 3479/4131] drm/amdgpu: cleanup and simplify
 amdgpu_vmid_grab_reserved
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Drop the "_locked" from the name, cleanup and simplify the logic a bit.
Add missing comments.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 58 ++++++++++++++++++++-------------
 1 file changed, 35 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index ac24e4d..bf513ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -252,12 +252,22 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 	return 0;
 }
 
-/* idr_mgr->lock must be held */
-static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
-					    struct amdgpu_ring *ring,
-					    struct amdgpu_sync *sync,
-					    struct dma_fence *fence,
-					    struct amdgpu_job *job)
+/**
+ * amdgpu_vm_grab_reserved - try to assign reserved VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ * @job: job who wants to use the VMID
+ *
+ * Try to assign a reserved VMID.
+ */
+static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+				     struct amdgpu_ring *ring,
+				     struct amdgpu_sync *sync,
+				     struct dma_fence *fence,
+				     struct amdgpu_job *job)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -265,18 +275,21 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 	struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct dma_fence *updates = sync->last_vm_update;
-	int r = 0;
-	struct dma_fence *flushed, *tmp;
 	bool needs_flush = vm->use_cpu_for_update;
+	int r = 0;
+
+	if (updates && id->flushed_updates &&
+	    updates->context == id->flushed_updates->context &&
+	    !dma_fence_is_later(updates, id->flushed_updates))
+	    updates = NULL;
+
+	if (id->owner != vm->entity.fence_context ||
+	    job->vm_pd_addr != id->pd_gpu_addr ||
+	    updates || !id->last_flush ||
+	    (id->last_flush->context != fence_context &&
+	     !dma_fence_is_signaled(id->last_flush))) {
+		struct dma_fence *tmp;
 
-	flushed  = id->flushed_updates;
-	if ((id->owner != vm->entity.fence_context) ||
-	    (job->vm_pd_addr != id->pd_gpu_addr) ||
-	    (updates && (!flushed || updates->context != flushed->context ||
-			dma_fence_is_later(updates, flushed))) ||
-	    (!id->last_flush || (id->last_flush->context != fence_context &&
-				 !dma_fence_is_signaled(id->last_flush)))) {
-		needs_flush = true;
 		/* to prevent one context starved by another context */
 		id->pd_gpu_addr = 0;
 		tmp = amdgpu_sync_peek_fence(&id->active, ring);
@@ -284,6 +297,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 			r = amdgpu_sync_fence(adev, sync, tmp, false);
 			return r;
 		}
+		needs_flush = true;
 	}
 
 	/* Good we can use this VMID. Remember this submission as
@@ -291,10 +305,9 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 	*/
 	r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
 	if (r)
-		goto out;
+		return r;
 
-	if (updates && (!flushed || updates->context != flushed->context ||
-			dma_fence_is_later(updates, flushed))) {
+	if (updates) {
 		dma_fence_put(id->flushed_updates);
 		id->flushed_updates = dma_fence_get(updates);
 	}
@@ -308,8 +321,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
 	job->vmid = id - id_mgr->ids;
 	job->pasid = vm->pasid;
 	trace_amdgpu_vm_grab_id(vm, ring, job);
-out:
-	return r;
+	return 0;
 }
 
 /**
@@ -319,6 +331,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
  * @ring: ring we want to submit job to
  * @sync: sync object where we add dependencies
  * @fence: fence protecting ID from reuse
+ * @job: job who wants to use the VMID
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
@@ -340,8 +353,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		goto error;
 
 	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
-						     fence, job);
+		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
 		mutex_unlock(&id_mgr->lock);
 		return r;
 	}
-- 
2.7.4
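
As a reference for readers of this recipe, below is a small, self-contained C sketch of the reuse check that the patch consolidates into a single condition in amdgpu_vmid_grab_reserved. The struct fields and helpers are simplified stand-ins modelled on the hunks above (owner, pd_gpu_addr, last_flush, flushed_updates, dma_fence_is_later); it is not driver code and is not part of the patch.

/*
 * Illustrative model of the reuse check consolidated by this patch.
 * The types and the fence_is_later() helper are simplified stand-ins
 * for the amdgpu internals shown in the hunks above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fence { uint64_t context; uint64_t seqno; bool signaled; };

struct vmid_model {
	uint64_t owner;                 /* models id->owner */
	uint64_t pd_gpu_addr;           /* models id->pd_gpu_addr */
	struct fence *last_flush;       /* models id->last_flush */
	struct fence *flushed_updates;  /* models id->flushed_updates */
};

static bool fence_is_later(const struct fence *a, const struct fence *b)
{
	/* stand-in for dma_fence_is_later(): higher seqno means later */
	return a->seqno > b->seqno;
}

/*
 * Returns true when the reserved VMID cannot simply be reused and a
 * flush (or waiting on the ID's active fences) is needed, mirroring
 * the single "if" built by the patch.
 */
static bool needs_flush(const struct vmid_model *id,
			uint64_t fence_context, uint64_t vm_pd_addr,
			const struct fence *updates)
{
	/* updates already covered by the last flushed update? drop them */
	if (updates && id->flushed_updates &&
	    updates->context == id->flushed_updates->context &&
	    !fence_is_later(updates, id->flushed_updates))
		updates = NULL;

	return id->owner != fence_context ||
	       vm_pd_addr != id->pd_gpu_addr ||
	       updates || !id->last_flush ||
	       (id->last_flush->context != fence_context &&
		!id->last_flush->signaled);
}

int main(void)
{
	struct fence flush = { .context = 1, .seqno = 5, .signaled = true };
	struct vmid_model id = {
		.owner = 1, .pd_gpu_addr = 0x1000,
		.last_flush = &flush, .flushed_updates = &flush,
	};

	/* same owner, same page directory, no newer updates: reusable */
	printf("needs flush: %d\n", needs_flush(&id, 1, 0x1000, NULL));
	return 0;
}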