path: root/meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0758-drm-amdgpu-reserve-unreserve-objects-out-of-map-unma.patch
Diffstat (limited to 'meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0758-drm-amdgpu-reserve-unreserve-objects-out-of-map-unma.patch')
-rw-r--r--  meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0758-drm-amdgpu-reserve-unreserve-objects-out-of-map-unma.patch  186
1 file changed, 186 insertions, 0 deletions
diff --git a/meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0758-drm-amdgpu-reserve-unreserve-objects-out-of-map-unma.patch b/meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0758-drm-amdgpu-reserve-unreserve-objects-out-of-map-unma.patch
new file mode 100644
index 00000000..d9c6335c
--- /dev/null
+++ b/meta-amdfalconx86/recipes-kernel/linux/linux-yocto/0758-drm-amdgpu-reserve-unreserve-objects-out-of-map-unma.patch
@@ -0,0 +1,186 @@
+From 768345f71061d5f6a3ad927fe4ee28d2876ad327 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <David1.Zhou@amd.com>
+Date: Fri, 13 Nov 2015 14:18:38 +0800
+Subject: [PATCH 0758/1050] drm/amdgpu: reserve/unreserve objects out of
+ map/unmap operations
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Change-Id: Id6514f2fb6e002437fdbe99353d5d35f4ac736c7
+Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 21 ++++++++++++++++++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 29 +++++++----------------------
+ 2 files changed, 25 insertions(+), 25 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+index 16dca46..00c5b58 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+@@ -515,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_bo *rbo;
+ struct amdgpu_bo_va *bo_va;
++ struct ttm_validate_buffer tv, tv_pd;
++ struct ww_acquire_ctx ticket;
++ struct list_head list, duplicates;
+ uint32_t invalid_flags, va_flags = 0;
+ int r = 0;
+
+@@ -552,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+ return -ENOENT;
+ mutex_lock(&fpriv->vm.mutex);
+ rbo = gem_to_amdgpu_bo(gobj);
+- r = amdgpu_bo_reserve(rbo, false);
++ INIT_LIST_HEAD(&list);
++ INIT_LIST_HEAD(&duplicates);
++ tv.bo = &rbo->tbo;
++ tv.shared = true;
++ list_add(&tv.head, &list);
++
++ if (args->operation == AMDGPU_VA_OP_MAP) {
++ tv_pd.bo = &fpriv->vm.page_directory->tbo;
++ tv_pd.shared = true;
++ list_add(&tv_pd.head, &list);
++ }
++ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+ if (r) {
+ mutex_unlock(&fpriv->vm.mutex);
+ drm_gem_object_unreference_unlocked(gobj);
+@@ -561,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+
+ bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
+ if (!bo_va) {
+- amdgpu_bo_unreserve(rbo);
++ ttm_eu_backoff_reservation(&ticket, &list);
++ drm_gem_object_unreference_unlocked(gobj);
+ mutex_unlock(&fpriv->vm.mutex);
+ return -ENOENT;
+ }
+@@ -584,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+ default:
+ break;
+ }
+-
++ ttm_eu_backoff_reservation(&ticket, &list);
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
+ amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
+ mutex_unlock(&fpriv->vm.mutex);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index e6dc19b..159ce54 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -985,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ * Add a mapping of the BO at the specefied addr into the VM.
+ * Returns 0 for success, error for failure.
+ *
+- * Object has to be reserved and gets unreserved by this function!
++ * Object has to be reserved and unreserved outside!
+ */
+ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+@@ -1001,23 +1001,18 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+
+ /* validate the parameters */
+ if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
+- size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
+- amdgpu_bo_unreserve(bo_va->bo);
++ size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+- }
+
+ /* make sure object fit at this offset */
+ eaddr = saddr + size;
+- if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
+- amdgpu_bo_unreserve(bo_va->bo);
++ if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
+ return -EINVAL;
+- }
+
+ last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
+ if (last_pfn > adev->vm_manager.max_pfn) {
+ dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ last_pfn, adev->vm_manager.max_pfn);
+- amdgpu_bo_unreserve(bo_va->bo);
+ return -EINVAL;
+ }
+
+@@ -1034,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
+ "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
+ tmp->it.start, tmp->it.last + 1);
+- amdgpu_bo_unreserve(bo_va->bo);
+ r = -EINVAL;
+ goto error;
+ }
+
+ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping) {
+- amdgpu_bo_unreserve(bo_va->bo);
+ r = -ENOMEM;
+ goto error;
+ }
+@@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ if (eaddr > vm->max_pde_used)
+ vm->max_pde_used = eaddr;
+
+- amdgpu_bo_unreserve(bo_va->bo);
+-
+ /* walk over the address space and allocate the page tables */
+ for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
+ struct reservation_object *resv = vm->page_directory->tbo.resv;
+@@ -1077,18 +1068,15 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ if (vm->page_tables[pt_idx].bo)
+ continue;
+
+- ww_mutex_lock(&resv->lock, NULL);
+ r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
+ AMDGPU_GPU_PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ NULL, resv, &pt);
+- if (r) {
+- ww_mutex_unlock(&resv->lock);
++ if (r)
+ goto error_free;
+- }
++
+ r = amdgpu_vm_clear_bo(adev, pt);
+- ww_mutex_unlock(&resv->lock);
+ if (r) {
+ amdgpu_bo_unref(&pt);
+ goto error_free;
+@@ -1122,7 +1110,7 @@ error:
+ * Remove a mapping of the BO at the specefied addr from the VM.
+ * Returns 0 for success, error for failure.
+ *
+- * Object has to be reserved and gets unreserved by this function!
++ * Object has to be reserved and unreserved outside!
+ */
+ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+@@ -1147,10 +1135,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ break;
+ }
+
+- if (&mapping->list == &bo_va->invalids) {
+- amdgpu_bo_unreserve(bo_va->bo);
++ if (&mapping->list == &bo_va->invalids)
+ return -ENOENT;
+- }
+ }
+
+ list_del(&mapping->list);
+@@ -1163,7 +1149,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+ list_add(&mapping->list, &vm->freed);
+ else
+ kfree(mapping);
+- amdgpu_bo_unreserve(bo_va->bo);
+
+ return 0;
+ }
+--
+1.9.1
+
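
For reference, the locking pattern this patch switches amdgpu_gem_va_ioctl() to can be condensed as: build a ttm_validate_buffer list containing the GEM BO (and, for map operations, the VM page directory), reserve the whole set under one ww_acquire_ctx with ttm_eu_reserve_buffers(), perform the map/unmap, then release everything with ttm_eu_backoff_reservation(). The sketch below is a hypothetical helper (amdgpu_va_reserve_sketch is not a real kernel symbol) that mirrors the calls and struct fields used by the kernel version this patch targets; it only builds against the matching in-tree headers, so treat it as an illustration of the pattern rather than a drop-in implementation.

/*
 * Hypothetical condensation of the reservation pattern introduced by this
 * patch.  Not a real amdgpu function; it compiles only against the in-tree
 * headers of the kernel version the patch is based on.
 */
#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu.h"

static int amdgpu_va_reserve_sketch(struct amdgpu_fpriv *fpriv,
				    struct amdgpu_bo *rbo, bool is_map)
{
	struct ttm_validate_buffer tv, tv_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	/* The BO being mapped/unmapped is always part of the set. */
	tv.bo = &rbo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	/*
	 * Map operations may allocate page table BOs against the page
	 * directory's reservation object, so the page directory has to be
	 * reserved up front as well.
	 */
	if (is_map) {
		tv_pd.bo = &fpriv->vm.page_directory->tbo;
		tv_pd.shared = true;
		list_add(&tv_pd.head, &list);
	}

	/* One interruptible reservation for the whole set. */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		return r;

	/* ... amdgpu_vm_bo_map()/amdgpu_vm_bo_unmap() would run here ... */

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}

Reserving the page directory together with the target BO is what allows the hunks in amdgpu_vm_bo_map() to drop the explicit ww_mutex_lock()/ww_mutex_unlock() on page_directory->tbo.resv: by the time the page tables are created, that reservation object is already held via the execbuf-util ticket taken in the ioctl.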