Diffstat (limited to 'common/recipes-kernel/linux/files/0302-drm-amdgpu-use-per-VM-entity-for-page-table-updates-.patch')
-rw-r--r--  common/recipes-kernel/linux/files/0302-drm-amdgpu-use-per-VM-entity-for-page-table-updates-.patch  228
1 file changed, 228 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/files/0302-drm-amdgpu-use-per-VM-entity-for-page-table-updates-.patch b/common/recipes-kernel/linux/files/0302-drm-amdgpu-use-per-VM-entity-for-page-table-updates-.patch
new file mode 100644
index 00000000..6d64ac8d
--- /dev/null
+++ b/common/recipes-kernel/linux/files/0302-drm-amdgpu-use-per-VM-entity-for-page-table-updates-.patch
@@ -0,0 +1,228 @@
+From 5d7d7a1ff3f0f07db6cfc158a5896d321ce26776 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Mon, 1 Feb 2016 12:53:58 +0100
+Subject: [PATCH 0302/1110] drm/amdgpu: use per VM entity for page table
+ updates (v2)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Updates from different VMs can be processed independently.
+
+v2: agd: rebase on upstream
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 ++++-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 8 ++++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 50 ++++++++++++++++++++++-----------
+ 5 files changed, 47 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index c882c7c..08e771d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -758,7 +758,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
+
+ void amdgpu_job_free(struct amdgpu_job *job);
+ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+- void *owner, struct fence **f);
++ struct amd_sched_entity *entity, void *owner,
++ struct fence **f);
+
+ struct amdgpu_ring {
+ struct amdgpu_device *adev;
+@@ -876,6 +877,9 @@ struct amdgpu_vm {
+
+ /* protecting freed */
+ spinlock_t freed_lock;
++
++ /* Scheduler entity for page table updates */
++ struct amd_sched_entity entity;
+ };
+
+ struct amdgpu_vm_manager_id {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index fda8ebc..a16c43f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -80,13 +80,17 @@ void amdgpu_job_free(struct amdgpu_job *job)
+ }
+
+ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
+- void *owner, struct fence **f)
++ struct amd_sched_entity *entity, void *owner,
++ struct fence **f)
+ {
+ struct amdgpu_device *adev = job->adev;
++
++ if (!entity)
++ entity = &adev->kernel_ctx.rings[ring->idx].entity;
+
+ job->ring = ring;
+ job->base.sched = &ring->sched;
+- job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
++ job->base.s_entity = entity;
+ job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+ if (!job->base.s_fence)
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 8b11edc..4acfddf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -900,7 +900,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+
+ amdgpu_job_free(job);
+ } else {
+- r = amdgpu_job_submit(job, ring,
++ r = amdgpu_job_submit(job, ring, NULL,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err_free;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+index 5564a46..4239083 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+@@ -508,7 +508,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+
+ amdgpu_job_free(job);
+ } else {
+- r = amdgpu_job_submit(job, ring,
++ r = amdgpu_job_submit(job, ring, NULL,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ if (r)
+ goto err;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 8877f15..b99afc3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -329,6 +329,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
+ * need to reserve bo first before calling it.
+ */
+ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
++ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo)
+ {
+ struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+@@ -357,7 +358,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+
+ WARN_ON(job->ibs[0].length_dw > 64);
+- r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
++ r = amdgpu_job_submit(job, ring, &vm->entity,
++ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
+
+@@ -479,7 +481,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+ amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM);
+ WARN_ON(ib->length_dw > ndw);
+- r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
++ r = amdgpu_job_submit(job, ring, &vm->entity,
++ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error_free;
+
+@@ -735,7 +738,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
+
+ amdgpu_ring_pad_ib(ring, ib);
+ WARN_ON(ib->length_dw > ndw);
+- r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &f);
++ r = amdgpu_job_submit(job, ring, &vm->entity,
++ AMDGPU_FENCE_OWNER_VM, &f);
+ if (r)
+ goto error_free;
+
+@@ -1110,7 +1114,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+ */
+ pt->parent = amdgpu_bo_ref(vm->page_directory);
+
+- r = amdgpu_vm_clear_bo(adev, pt);
++ r = amdgpu_vm_clear_bo(adev, vm, pt);
+ if (r) {
+ amdgpu_bo_unref(&pt);
+ goto error_free;
+@@ -1271,9 +1275,11 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+ */
+ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ {
++ struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+ const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
+ AMDGPU_VM_PTE_COUNT * 8);
+ unsigned pd_size, pd_entries;
++ struct amd_sched_rq *rq;
+ int i, r;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+@@ -1297,6 +1303,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ return -ENOMEM;
+ }
+
++ /* create scheduler entity for page table updates */
++ rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
++ r = amd_sched_entity_init(&ring->sched, &vm->entity,
++ rq, amdgpu_sched_jobs);
++ if (r)
++ return r;
++
+ vm->page_directory_fence = NULL;
+
+ r = amdgpu_bo_create(adev, pd_size, align, true,
+@@ -1304,22 +1317,24 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
+ NULL, NULL, &vm->page_directory);
+ if (r)
+- return r;
++ goto error_free_sched_entity;
++
+ r = amdgpu_bo_reserve(vm->page_directory, false);
+- if (r) {
+- amdgpu_bo_unref(&vm->page_directory);
+- vm->page_directory = NULL;
+- return r;
+- }
+-	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
++	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
+ amdgpu_bo_unreserve(vm->page_directory);
+- if (r) {
+- amdgpu_bo_unref(&vm->page_directory);
+- vm->page_directory = NULL;
+- return r;
+- }
++ if (r)
++ goto error_free_page_directory;
+
+ return 0;
++
++error_free_page_directory:
++ amdgpu_bo_unref(&vm->page_directory);
++ vm->page_directory = NULL;
++
++error_free_sched_entity:
++ amd_sched_entity_fini(&ring->sched, &vm->entity);
++
++ return r;
+ }
+
+ /**
+@@ -1333,9 +1348,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ */
+ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ {
++ struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
+ struct amdgpu_bo_va_mapping *mapping, *tmp;
+ int i;
+
++ amd_sched_entity_fini(&ring->sched, &vm->entity);
++
+ if (!RB_EMPTY_ROOT(&vm->va)) {
+ dev_err(adev->dev, "still active bo inside vm\n");
+ }
+--
+2.7.4
+
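In brief, the API change this patch carries: amdgpu_job_submit() gains a
struct amd_sched_entity * parameter. Passing NULL keeps the old behaviour,
falling back to the ring's kernel entity, while the VM code passes
&vm->entity so page table updates from different VMs are queued on separate
scheduler entities. A condensed sketch of the two call styles, using only
names that appear in the hunks above:

	/* generic submission (UVD/VCE paths): no dedicated entity, so
	 * the scheduler falls back to the per-ring kernel entity */
	r = amdgpu_job_submit(job, ring, NULL,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &f);

	/* VM page table update: goes through the VM's own entity, so
	 * different VMs no longer serialize behind one shared entity */
	r = amdgpu_job_submit(job, ring, &vm->entity,
			      AMDGPU_FENCE_OWNER_VM, &fence);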
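Why a per-VM entity makes the updates independent: in the amd_sched
scheduler of this era each entity owns a private job FIFO, and the run
queue selects among runnable entities, so submission order is only
enforced within an entity, never across entities. A simplified conceptual
sketch, not the actual scheduler source (the job_queue kfifo field matches
the contemporary gpu_scheduler.h; the selection loop is paraphrased):

	/* each entity carries its own FIFO of scheduled jobs */
	struct amd_sched_entity {
		struct kfifo	job_queue;	/* per-entity ordering */
		/* ... */
	};

	/* scheduler thread, paraphrased: pick any runnable entity from
	 * the run queue, then pop that entity's next job. Two VMs with
	 * separate entities thus have their page table jobs interleaved
	 * instead of queued behind a single shared kernel entity. */
	entity = amd_sched_select_entity(sched);
	job = amd_sched_entity_pop_job(entity);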