Diffstat (limited to 'common/recipes-kernel/linux/files/0415-drm-amdgpu-use-kernel-fence-for-last_pt_update.patch')
-rw-r--r-- common/recipes-kernel/linux/files/0415-drm-amdgpu-use-kernel-fence-for-last_pt_update.patch | 132
1 file changed, 132 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/files/0415-drm-amdgpu-use-kernel-fence-for-last_pt_update.patch b/common/recipes-kernel/linux/files/0415-drm-amdgpu-use-kernel-fence-for-last_pt_update.patch
new file mode 100644
index 00000000..af539d14
--- /dev/null
+++ b/common/recipes-kernel/linux/files/0415-drm-amdgpu-use-kernel-fence-for-last_pt_update.patch
@@ -0,0 +1,132 @@
+From bb1e38a4bead5025ecca90544f0f733f59996b13 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <david1.zhou@amd.com>
+Date: Mon, 3 Aug 2015 18:19:38 +0800
+Subject: [PATCH 0415/1050] drm/amdgpu: use kernel fence for last_pt_update
+
+Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Jammy Zhou <jammy.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 24 ++++++++++++------------
+ 3 files changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 5b8e1ae..371ff08 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -539,7 +539,7 @@ struct amdgpu_bo_va_mapping {
+ struct amdgpu_bo_va {
+ /* protected by bo being reserved */
+ struct list_head bo_list;
+- struct amdgpu_fence *last_pt_update;
++ struct fence *last_pt_update;
+ unsigned ref_count;
+
+ /* protected by vm mutex and spinlock */
+@@ -1241,7 +1241,7 @@ union amdgpu_sched_job_param {
+ struct amdgpu_vm *vm;
+ uint64_t start;
+ uint64_t last;
+- struct amdgpu_fence **fence;
++ struct fence **fence;
+
+ } vm_mapping;
+ struct {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index fe81b46..aee5911 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -581,7 +581,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
+ if (r)
+ return r;
+
+- f = &bo_va->last_pt_update->base;
++ f = bo_va->last_pt_update;
+ r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
+ if (r)
+ return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 8745d4c..d90254f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -737,7 +737,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
+ */
+ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
+ uint64_t start, uint64_t end,
+- struct amdgpu_fence *fence)
++ struct fence *fence)
+ {
+ unsigned i;
+
+@@ -745,20 +745,20 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
+ end >>= amdgpu_vm_block_size;
+
+ for (i = start; i <= end; ++i)
+- amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true);
++ amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
+ }
+
+ static int amdgpu_vm_bo_update_mapping_run_job(
+ struct amdgpu_cs_parser *sched_job)
+ {
+- struct amdgpu_fence **fence = sched_job->job_param.vm_mapping.fence;
++ struct fence **fence = sched_job->job_param.vm_mapping.fence;
+ amdgpu_vm_fence_pts(sched_job->job_param.vm_mapping.vm,
+ sched_job->job_param.vm_mapping.start,
+ sched_job->job_param.vm_mapping.last + 1,
+- sched_job->ibs[sched_job->num_ibs -1].fence);
++ &sched_job->ibs[sched_job->num_ibs -1].fence->base);
+ if (fence) {
+- amdgpu_fence_unref(fence);
+- *fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs -1].fence);
++ fence_put(*fence);
++ *fence = fence_get(&sched_job->ibs[sched_job->num_ibs -1].fence->base);
+ }
+ return 0;
+ }
+@@ -781,7 +781,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t addr, uint32_t gtt_flags,
+- struct amdgpu_fence **fence)
++ struct fence **fence)
+ {
+ struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+ unsigned nptes, ncmds, ndw;
+@@ -902,10 +902,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+ }
+
+ amdgpu_vm_fence_pts(vm, mapping->it.start,
+- mapping->it.last + 1, ib->fence);
++ mapping->it.last + 1, &ib->fence->base);
+ if (fence) {
+- amdgpu_fence_unref(fence);
+- *fence = amdgpu_fence_ref(ib->fence);
++ fence_put(*fence);
++ *fence = fence_get(&ib->fence->base);
+ }
+
+ amdgpu_ib_free(adev, ib);
+@@ -1038,7 +1038,7 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
+ spin_unlock(&vm->status_lock);
+
+ if (bo_va)
+- r = amdgpu_sync_fence(adev, sync, &bo_va->last_pt_update->base);
++ r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
+
+ return r;
+ }
+@@ -1318,7 +1318,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+ kfree(mapping);
+ }
+
+- amdgpu_fence_unref(&bo_va->last_pt_update);
++ fence_put(bo_va->last_pt_update);
+ kfree(bo_va);
+
+ mutex_unlock(&vm->mutex);
+--
+1.9.1
+
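The patch above replaces the driver-private struct amdgpu_fence in bo_va->last_pt_update with the kernel's generic struct fence, so references are taken and dropped with fence_get()/fence_put() and amdgpu_fence values are passed through their embedded ->base member. A minimal sketch of that ref-counting pattern follows; the helper update_last_pt_fence() is hypothetical and only illustrates the pattern, it is not part of the patch.

    /* Sketch only: swap the cached page-table-update fence for a new one.
     * fence_get()/fence_put() are the kernel's generic fence refcount
     * helpers the patch switches to; the amdgpu_fence is handed over
     * through its embedded struct fence (->base), as the patch does.
     */
    static void update_last_pt_fence(struct fence **last_pt_update,
                                     struct amdgpu_fence *new_fence)
    {
            fence_put(*last_pt_update);                    /* drop old reference (NULL-safe) */
            *last_pt_update = fence_get(&new_fence->base); /* hold the new fence */
    }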