about summary refs log tree commit diff stats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3481-drm-amdgpu-restructure-amdgpu_vmid_grab.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3481-drm-amdgpu-restructure-amdgpu_vmid_grab.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3481-drm-amdgpu-restructure-amdgpu_vmid_grab.patch  173
1 file changed, 173 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3481-drm-amdgpu-restructure-amdgpu_vmid_grab.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3481-drm-amdgpu-restructure-amdgpu_vmid_grab.patch
new file mode 100644
index 00000000..bbec4cf3
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3481-drm-amdgpu-restructure-amdgpu_vmid_grab.patch
@@ -0,0 +1,173 @@
+From b6a2432384c5c177d07e1ca9f7b89911ab5f786e Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 31 Jan 2018 14:24:45 +0100
+Subject: [PATCH 3481/4131] drm/amdgpu: restructure amdgpu_vmid_grab
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Now that we have the different cases for grabbing a VMID in separate
+functions, restructure the top level function to only have one place
+where VMIDs are assigned to jobs.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 89 ++++++++++++++++-----------------
+ 1 file changed, 42 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+index f57a6b8..e4b72d1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+@@ -267,33 +267,34 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+ struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync,
+ struct dma_fence *fence,
+- struct amdgpu_job *job)
++ struct amdgpu_job *job,
++ struct amdgpu_vmid **id)
+ {
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ uint64_t fence_context = adev->fence_context + ring->idx;
+- struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
+- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct dma_fence *updates = sync->last_vm_update;
+ bool needs_flush = vm->use_cpu_for_update;
+ int r = 0;
+
+- if (updates && id->flushed_updates &&
+- updates->context == id->flushed_updates->context &&
+- !dma_fence_is_later(updates, id->flushed_updates))
++ *id = vm->reserved_vmid[vmhub];
++ if (updates && (*id)->flushed_updates &&
++ updates->context == (*id)->flushed_updates->context &&
++ !dma_fence_is_later(updates, (*id)->flushed_updates))
+ updates = NULL;
+
+- if (id->owner != vm->entity.fence_context ||
+- job->vm_pd_addr != id->pd_gpu_addr ||
+- updates || !id->last_flush ||
+- (id->last_flush->context != fence_context &&
+- !dma_fence_is_signaled(id->last_flush))) {
++ if ((*id)->owner != vm->entity.fence_context ||
++ job->vm_pd_addr != (*id)->pd_gpu_addr ||
++ updates || !(*id)->last_flush ||
++ ((*id)->last_flush->context != fence_context &&
++ !dma_fence_is_signaled((*id)->last_flush))) {
+ struct dma_fence *tmp;
+
+ /* to prevent one context starved by another context */
+- id->pd_gpu_addr = 0;
+- tmp = amdgpu_sync_peek_fence(&id->active, ring);
++ (*id)->pd_gpu_addr = 0;
++ tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+ if (tmp) {
++ *id = NULL;
+ r = amdgpu_sync_fence(adev, sync, tmp, false);
+ return r;
+ }
+@@ -303,24 +304,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+ /* Good we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
+- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
++ r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+ if (r)
+ return r;
+
+ if (updates) {
+- dma_fence_put(id->flushed_updates);
+- id->flushed_updates = dma_fence_get(updates);
++ dma_fence_put((*id)->flushed_updates);
++ (*id)->flushed_updates = dma_fence_get(updates);
+ }
+- id->pd_gpu_addr = job->vm_pd_addr;
+- id->owner = vm->entity.fence_context;
+ job->vm_needs_flush = needs_flush;
+- if (needs_flush) {
+- dma_fence_put(id->last_flush);
+- id->last_flush = NULL;
+- }
+- job->vmid = id - id_mgr->ids;
+- job->pasid = vm->pasid;
+- trace_amdgpu_vm_grab_id(vm, ring, job);
+ return 0;
+ }
+
+@@ -415,7 +407,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+- struct dma_fence *updates = sync->last_vm_update;
+ struct amdgpu_vmid *id, *idle;
+ int r = 0;
+
+@@ -425,37 +416,41 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ goto error;
+
+ if (vm->reserved_vmid[vmhub]) {
+- r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job);
+- mutex_unlock(&id_mgr->lock);
+- return r;
+- }
++ r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
++ if (r || !id)
++ goto error;
++ } else {
++ r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
++ if (r)
++ goto error;
+
+- r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
+- if (r)
+- goto error;
++ if (!id) {
++ struct dma_fence *updates = sync->last_vm_update;
+
+- if (!id) {
+- /* Still no ID to use? Then use the idle one found earlier */
+- id = idle;
++ /* Still no ID to use? Then use the idle one found earlier */
++ id = idle;
+
+- /* Remember this submission as user of the VMID */
+- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+- if (r)
+- goto error;
++ /* Remember this submission as user of the VMID */
++ r = amdgpu_sync_fence(ring->adev, &id->active,
++ fence, false);
++ if (r)
++ goto error;
+
+- id->pd_gpu_addr = job->vm_pd_addr;
+- dma_fence_put(id->flushed_updates);
+- id->flushed_updates = dma_fence_get(updates);
+- id->owner = vm->entity.fence_context;
+- job->vm_needs_flush = true;
++ dma_fence_put(id->flushed_updates);
++ id->flushed_updates = dma_fence_get(updates);
++ job->vm_needs_flush = true;
++ }
++
++ list_move_tail(&id->list, &id_mgr->ids_lru);
+ }
+
++ id->pd_gpu_addr = job->vm_pd_addr;
++ id->owner = vm->entity.fence_context;
++
+ if (job->vm_needs_flush) {
+ dma_fence_put(id->last_flush);
+ id->last_flush = NULL;
+ }
+- list_move_tail(&id->list, &id_mgr->ids_lru);
+-
+ job->vmid = id - id_mgr->ids;
+ job->pasid = vm->pasid;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+--
+2.7.4
+