path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1455-drm-amdgpu-Restore-queues-after-VMPT-update-fences-s.patch
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1455-drm-amdgpu-Restore-queues-after-VMPT-update-fences-s.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1455-drm-amdgpu-Restore-queues-after-VMPT-update-fences-s.patch  73
1 file changed, 73 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1455-drm-amdgpu-Restore-queues-after-VMPT-update-fences-s.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1455-drm-amdgpu-Restore-queues-after-VMPT-update-fences-s.patch
new file mode 100644
index 00000000..f5202a7f
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1455-drm-amdgpu-Restore-queues-after-VMPT-update-fences-s.patch
@@ -0,0 +1,73 @@
+From 979866b47262abccb19397d7bcedb1a5dfce9d86 Mon Sep 17 00:00:00 2001
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+Date: Tue, 21 Jun 2016 09:41:04 -0400
+Subject: [PATCH 1455/4131] drm/amdgpu: Restore queues after VMPT update fences
+ signalled
+
+unreserve_bo_and_vms waits for the BO fences to signal.
+kgd2kfd->restore_mm must be called after that to make sure the
+queues don't get restarted before the page table update is
+complete.
+
+This fixes intermittent VM faults after userptr evictions seen
+without the HW scheduler.
+
+Change-Id: I0aa7dab32828f759e35f33da6fce8c38ccc5fee7
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 25 ++++++++++++++++--------
+ 1 file changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index f80206e5..72d1256 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1613,7 +1613,7 @@ int amdgpu_amdkfd_gpuvm_restore_mem(struct kgd_mem *mem, struct mm_struct *mm)
+ 
+ 		if (unlikely(!have_pages)) {
+ 			entry->is_mapped = false;
+-			goto resume_kfd;
++			continue;
+ 		}
+ 
+ 		r = map_bo_to_gpuvm(adev, mem->data2.bo, entry->bo_va, domain);
+@@ -1623,11 +1623,23 @@ int amdgpu_amdkfd_gpuvm_restore_mem(struct kgd_mem *mem, struct mm_struct *mm)
+ 			if (ret == 0)
+ 				ret = r;
+ 		}
++	}
++
++	if (have_pages)
++		unreserve_bo_and_vms(&ctx, true);
++
++	/* Resume queues after unreserving the BOs and most
++	 * importantly, waiting for the BO fences to guarantee that
++	 * the page table updates have completed.
++	 */
++	list_for_each_entry(entry, &mem->data2.bo_va_list, bo_list) {
++		struct amdgpu_device *adev;
++
++		if (!entry->is_mapped)
++			continue;
++
++		adev = (struct amdgpu_device *)entry->kgd_dev;
+ 
+-		/* Resume queues even if restore failed. Worst case
+-		 * the app will get a GPUVM fault. That's better than
+-		 * hanging the queues indefinitely. */
+-resume_kfd:
+ 		r = kgd2kfd->resume_mm(adev->kfd, mm);
+ 		if (ret != 0) {
+ 			pr_err("Failed to resume KFD\n");
+@@ -1636,8 +1648,5 @@ int amdgpu_amdkfd_gpuvm_restore_mem(struct kgd_mem *mem, struct mm_struct *mm)
+ 		}
+ 	}
+ 
+-	if (have_pages)
+-		unreserve_bo_and_vms(&ctx, true);
+-
+ 	return ret;
+ }
+--
+2.7.4
+
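
For readers who want the control flow at a glance without the surrounding kernel context, below is a minimal standalone C sketch of the ordering this patch enforces: map the BOs for every GPU first, wait once for the page-table update fences while unreserving (unreserve_bo_and_vms() in the driver), and only then resume the queues (kgd2kfd->resume_mm() in the driver). Every type and helper in the sketch (restore_entry, map_entry, wait_fences_and_unreserve, resume_queues) is a hypothetical stand-in, not an amdgpu or KFD symbol.

/*
 * Standalone sketch of the ordering enforced by this patch.
 * All names here are hypothetical stand-ins, not amdgpu/KFD APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct restore_entry {
	const char *name;
	bool is_mapped;		/* set once the GPUVM mapping succeeded */
};

/* Stand-in for map_bo_to_gpuvm(): update the page tables, 0 on success. */
static int map_entry(struct restore_entry *e, bool have_pages)
{
	if (!have_pages) {
		e->is_mapped = false;	/* skip this entry, keep going */
		return 0;
	}
	e->is_mapped = true;
	return 0;
}

/* Stand-in for unreserve_bo_and_vms(): waits for the update fences. */
static void wait_fences_and_unreserve(void)
{
	printf("page-table update fences signalled, BOs unreserved\n");
}

/* Stand-in for kgd2kfd->resume_mm(): restart the process's queues. */
static int resume_queues(const struct restore_entry *e)
{
	printf("resuming queues on %s\n", e->name);
	return 0;
}

int main(void)
{
	struct restore_entry entries[] = { { "gpu0" }, { "gpu1" } };
	const int n = 2;
	bool have_pages = true;
	int i, r, ret = 0;

	/* Pass 1: map every entry; remember failures but keep going. */
	for (i = 0; i < n; i++) {
		r = map_entry(&entries[i], have_pages);
		if (r && ret == 0)
			ret = r;
	}

	/* Single fence wait: no queue may restart before this point. */
	if (have_pages)
		wait_fences_and_unreserve();

	/* Pass 2: only now is it safe to resume the queues. */
	for (i = 0; i < n; i++) {
		if (!entries[i].is_mapped)
			continue;
		r = resume_queues(&entries[i]);
		if (r && ret == 0)
			ret = r;
	}
	return ret;
}

Splitting the work into two passes is what lets one fence wait cover every mapping before any queue restarts; the old goto-based flow resumed queues inside the mapping loop, before the fence wait at the end of the function was guaranteed to have happened.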