aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3479-drm-amdgpu-cleanup-and-simplify-amdgpu_vmid_grab_res.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3479-drm-amdgpu-cleanup-and-simplify-amdgpu_vmid_grab_res.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3479-drm-amdgpu-cleanup-and-simplify-amdgpu_vmid_grab_res.patch136
1 files changed, 136 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3479-drm-amdgpu-cleanup-and-simplify-amdgpu_vmid_grab_res.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3479-drm-amdgpu-cleanup-and-simplify-amdgpu_vmid_grab_res.patch
new file mode 100644
index 00000000..fd8d76f1
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3479-drm-amdgpu-cleanup-and-simplify-amdgpu_vmid_grab_res.patch
@@ -0,0 +1,136 @@
+From e2fb674fe4c509809a52d5b71779b2a58034f5c5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 31 Jan 2018 11:56:53 +0100
+Subject: [PATCH 3479/4131] drm/amdgpu: cleanup and simplify
+ amdgpu_vmid_grab_reserved
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Drop the "_locked" from the name, cleanup and simplify the logic a bit.
+Add missing comments.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 58 ++++++++++++++++++++-------------
+ 1 file changed, 35 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+index ac24e4d..bf513ea 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+@@ -252,12 +252,22 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
+ return 0;
+ }
+
+-/* idr_mgr->lock must be held */
+-static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+- struct amdgpu_ring *ring,
+- struct amdgpu_sync *sync,
+- struct dma_fence *fence,
+- struct amdgpu_job *job)
++/**
+ * amdgpu_vmid_grab_reserved - try to assign reserved VMID
++ *
++ * @vm: vm to allocate id for
++ * @ring: ring we want to submit job to
++ * @sync: sync object where we add dependencies
++ * @fence: fence protecting ID from reuse
++ * @job: job who wants to use the VMID
++ *
++ * Try to assign a reserved VMID.
++ */
++static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
++ struct amdgpu_ring *ring,
++ struct amdgpu_sync *sync,
++ struct dma_fence *fence,
++ struct amdgpu_job *job)
+ {
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+@@ -265,18 +275,21 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+ struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct dma_fence *updates = sync->last_vm_update;
+- int r = 0;
+- struct dma_fence *flushed, *tmp;
+ bool needs_flush = vm->use_cpu_for_update;
++ int r = 0;
++
++ if (updates && id->flushed_updates &&
++ updates->context == id->flushed_updates->context &&
++ !dma_fence_is_later(updates, id->flushed_updates))
++ updates = NULL;
++
++ if (id->owner != vm->entity.fence_context ||
++ job->vm_pd_addr != id->pd_gpu_addr ||
++ updates || !id->last_flush ||
++ (id->last_flush->context != fence_context &&
++ !dma_fence_is_signaled(id->last_flush))) {
++ struct dma_fence *tmp;
+
+- flushed = id->flushed_updates;
+- if ((id->owner != vm->entity.fence_context) ||
+- (job->vm_pd_addr != id->pd_gpu_addr) ||
+- (updates && (!flushed || updates->context != flushed->context ||
+- dma_fence_is_later(updates, flushed))) ||
+- (!id->last_flush || (id->last_flush->context != fence_context &&
+- !dma_fence_is_signaled(id->last_flush)))) {
+- needs_flush = true;
+ /* to prevent one context starved by another context */
+ id->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&id->active, ring);
+@@ -284,6 +297,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+ r = amdgpu_sync_fence(adev, sync, tmp, false);
+ return r;
+ }
++ needs_flush = true;
+ }
+
+ /* Good we can use this VMID. Remember this submission as
+@@ -291,10 +305,9 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+ */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+ if (r)
+- goto out;
++ return r;
+
+- if (updates && (!flushed || updates->context != flushed->context ||
+- dma_fence_is_later(updates, flushed))) {
++ if (updates) {
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ }
+@@ -308,8 +321,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+ job->vmid = id - id_mgr->ids;
+ job->pasid = vm->pasid;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+-out:
+- return r;
++ return 0;
+ }
+
+ /**
+@@ -319,6 +331,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
++ * @job: job who wants to use the VMID
+ *
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ */
+@@ -340,8 +353,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ goto error;
+
+ if (vm->reserved_vmid[vmhub]) {
+- r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
+- fence, job);
++ r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
+ mutex_unlock(&id_mgr->lock);
+ return r;
+ }
+--
+2.7.4
+