Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3476-drm-amdgpu-split-finding-idle-VMID-into-separate-fun.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3476-drm-amdgpu-split-finding-idle-VMID-into-separate-fun.patch  158
1 file changed, 158 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3476-drm-amdgpu-split-finding-idle-VMID-into-separate-fun.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3476-drm-amdgpu-split-finding-idle-VMID-into-separate-fun.patch
new file mode 100644
index 00000000..5500b36e
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3476-drm-amdgpu-split-finding-idle-VMID-into-separate-fun.patch
@@ -0,0 +1,158 @@
+From ed31273c178a08229640c7793cf307b4ae2ce4a0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 31 Jan 2018 11:10:19 +0100
+Subject: [PATCH 3476/4131] drm/amdgpu: split finding idle VMID into separate
+ function
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+No functional change, but makes it easier to maintain the code.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 116 +++++++++++++++++++-------------
+ 1 file changed, 69 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+index da2b051..0fd6706 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+@@ -186,6 +186,72 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ atomic_read(&adev->gpu_reset_counter);
+ }
+
++/**
++ * amdgpu_vmid_grab_idle - grab idle VMID
++ *
++ * @vm: vm to allocate id for
++ * @ring: ring we want to submit job to
++ * @sync: sync object where we add dependencies
++ * @idle: resulting idle VMID
++ *
++ * Try to find an idle VMID; if none is idle, add a fence to wait on to the
++ * sync object. Returns -ENOMEM when we are out of memory.
++ */
++static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
++ struct amdgpu_ring *ring,
++ struct amdgpu_sync *sync,
++ struct amdgpu_vmid **idle)
++{
++ struct amdgpu_device *adev = ring->adev;
++ unsigned vmhub = ring->funcs->vmhub;
++ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
++ struct dma_fence **fences;
++ unsigned i;
++ int r;
++
++ fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
++ if (!fences)
++ return -ENOMEM;
++
++ /* Check if we have an idle VMID */
++ i = 0;
++ list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
++ fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
++ if (!fences[i])
++ break;
++ ++i;
++ }
++
++	/* If we can't find an idle VMID to use, wait till one becomes available */
++ if (&(*idle)->list == &id_mgr->ids_lru) {
++ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
++ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
++ struct dma_fence_array *array;
++ unsigned j;
++
++ *idle = NULL;
++ for (j = 0; j < i; ++j)
++ dma_fence_get(fences[j]);
++
++ array = dma_fence_array_create(i, fences, fence_context,
++ seqno, true);
++ if (!array) {
++ for (j = 0; j < i; ++j)
++ dma_fence_put(fences[j]);
++ kfree(fences);
++ return -ENOMEM;
++ }
++
++ r = amdgpu_sync_fence(adev, sync, &array->base, false);
++ dma_fence_put(&array->base);
++ return r;
++
++ }
++ kfree(fences);
++
++ return 0;
++}
++
+ /* idr_mgr->lock must be held */
+ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+ struct amdgpu_ring *ring,
+@@ -267,56 +333,12 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ uint64_t fence_context = adev->fence_context + ring->idx;
+ struct dma_fence *updates = sync->last_vm_update;
+ struct amdgpu_vmid *id, *idle;
+- struct dma_fence **fences;
+- unsigned i;
+ int r = 0;
+
+ mutex_lock(&id_mgr->lock);
+- fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
+- if (!fences) {
+- mutex_unlock(&id_mgr->lock);
+- return -ENOMEM;
+- }
+- /* Check if we have an idle VMID */
+- i = 0;
+- list_for_each_entry(idle, &id_mgr->ids_lru, list) {
+- fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+- if (!fences[i])
+- break;
+- ++i;
+- }
+-
+- /* If we can't find a idle VMID to use, wait till one becomes available */
+- if (&idle->list == &id_mgr->ids_lru) {
+- u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+- unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+- struct dma_fence_array *array;
+- unsigned j;
+-
+- for (j = 0; j < i; ++j)
+- dma_fence_get(fences[j]);
+-
+- array = dma_fence_array_create(i, fences, fence_context,
+- seqno, true);
+- if (!array) {
+- for (j = 0; j < i; ++j)
+- dma_fence_put(fences[j]);
+- kfree(fences);
+- r = -ENOMEM;
+- goto error;
+- }
+-
+-
+- r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
+- dma_fence_put(&array->base);
+- if (r)
+- goto error;
+-
+- mutex_unlock(&id_mgr->lock);
+- return 0;
+-
+- }
+- kfree(fences);
++ r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
++ if (r || !idle)
++ goto error;
+
+ if (vm->reserved_vmid[vmhub]) {
+ r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync,
+--
+2.7.4
+
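The interesting part of this refactoring is the contract of the new helper: amdgpu_vmid_grab_idle() returns 0 with *idle == NULL when no VMID is free (after queueing a wait fence on the sync object), so the caller collapses "hard error" and "must retry later" into the single check r || !idle. Below is a minimal user-space sketch of that contract; all names in it (struct vmid, vmid_grab_idle, NUM_IDS) are invented for illustration and are not the amdgpu API.

/*
 * Minimal user-space sketch of the helper's contract. All names here
 * (struct vmid, vmid_grab_idle, NUM_IDS) are illustrative assumptions,
 * not the amdgpu API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NUM_IDS 4

struct vmid {
	int  id;
	bool busy;	/* stands in for a pending fence on the real VMID */
};

/*
 * Walk the (LRU-ordered) IDs and hand back the first idle one through
 * *idle. Like amdgpu_vmid_grab_idle(), returning 0 with *idle == NULL
 * means "none idle right now; the caller has to wait and retry".
 */
static int vmid_grab_idle(struct vmid *ids, size_t n, struct vmid **idle)
{
	size_t i;

	for (i = 0; i < n; ++i) {
		if (!ids[i].busy) {
			*idle = &ids[i];
			return 0;
		}
	}

	/* None idle: the real helper queues a fence array to wait on here. */
	*idle = NULL;
	return 0;
}

int main(void)
{
	struct vmid ids[NUM_IDS] = {
		{ 0, true }, { 1, true }, { 2, false }, { 3, true },
	};
	struct vmid *idle;
	int r;

	/* Same shape as the patched caller: one check covers both an error
	 * and "nothing idle". */
	r = vmid_grab_idle(ids, NUM_IDS, &idle);
	if (r || !idle) {
		fprintf(stderr, "no idle VMID, would wait and retry\n");
		return 1;
	}

	printf("grabbed idle VMID %d\n", idle->id);
	return 0;
}

Returning success together with a NULL result keeps the caller simple: amdgpu_vmid_grab() holds id_mgr->lock across the helper and leaves through one error path whether the helper failed with -ENOMEM or merely found nothing idle.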