aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3540-drm-amdgpu-cleanup-VCN-IB-generation.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3540-drm-amdgpu-cleanup-VCN-IB-generation.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3540-drm-amdgpu-cleanup-VCN-IB-generation.patch186
1 files changed, 186 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3540-drm-amdgpu-cleanup-VCN-IB-generation.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3540-drm-amdgpu-cleanup-VCN-IB-generation.patch
new file mode 100644
index 00000000..5555efa3
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/3540-drm-amdgpu-cleanup-VCN-IB-generation.patch
@@ -0,0 +1,186 @@
+From 61d07627f27ccfdf35543c66aa04217cd17b27c6 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <ckoenig.leichtzumerken@gmail.com>
+Date: Wed, 7 Feb 2018 20:48:22 +0100
+Subject: [PATCH 3540/4131] drm/amdgpu: cleanup VCN IB generation
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Start to use amdgpu_bo_create_reserved v2.
+
+v2:
+Fix missing pointer init to NULL.
+Remove extra new lines.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Tested-and-Reviewed-by: Leo Liu <leo.liu@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 88 ++++++++-------------------------
+ 1 file changed, 20 insertions(+), 68 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+index 61ff044..5953fdd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+@@ -274,33 +274,17 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
+ return r;
+ }
+
+-static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
+- bool direct, struct dma_fence **fence)
++static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
++ struct amdgpu_bo *bo, bool direct,
++ struct dma_fence **fence)
+ {
+- struct ttm_validate_buffer tv;
+- struct ww_acquire_ctx ticket;
+- struct list_head head;
++ struct amdgpu_device *adev = ring->adev;
++ struct dma_fence *f = NULL;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+- struct dma_fence *f = NULL;
+- struct amdgpu_device *adev = ring->adev;
+ uint64_t addr;
+ int i, r;
+
+- memset(&tv, 0, sizeof(tv));
+- tv.bo = &bo->tbo;
+-
+- INIT_LIST_HEAD(&head);
+- list_add(&tv.head, &head);
+-
+- r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
+- if (r)
+- return r;
+-
+- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+- if (r)
+- goto err;
+-
+ r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ if (r)
+ goto err;
+@@ -333,11 +317,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
+ goto err_free;
+ }
+
+- ttm_eu_fence_buffer_objects(&ticket, &head, f);
++ amdgpu_bo_fence(bo, f, false);
++ amdgpu_bo_unreserve(bo);
++ amdgpu_bo_unref(&bo);
+
+ if (fence)
+ *fence = dma_fence_get(f);
+- amdgpu_bo_unref(&bo);
+ dma_fence_put(f);
+
+ return 0;
+@@ -346,7 +331,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
+ amdgpu_job_free(job);
+
+ err:
+- ttm_eu_backoff_reservation(&ticket, &head);
++ amdgpu_bo_unreserve(bo);
++ amdgpu_bo_unref(&bo);
+ return r;
+ }
+
+@@ -354,30 +340,16 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ struct dma_fence **fence)
+ {
+ struct amdgpu_device *adev = ring->adev;
+- struct amdgpu_bo *bo;
++ struct amdgpu_bo *bo = NULL;
+ uint32_t *msg;
+ int r, i;
+
+- r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+- NULL, NULL, &bo);
+- if (r)
+- return r;
+-
+- r = amdgpu_bo_reserve(bo, false);
+- if (r) {
+- amdgpu_bo_unref(&bo);
+- return r;
+- }
++ r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &bo, NULL, (void **)&msg);
+
+- r = amdgpu_bo_kmap(bo, (void **)&msg);
+- if (r) {
+- amdgpu_bo_unreserve(bo);
+- amdgpu_bo_unref(&bo);
++ if (r)
+ return r;
+- }
+
+ msg[0] = cpu_to_le32(0x00000028);
+ msg[1] = cpu_to_le32(0x00000038);
+@@ -396,9 +368,6 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
+ for (i = 14; i < 1024; ++i)
+ msg[i] = cpu_to_le32(0x0);
+
+- amdgpu_bo_kunmap(bo);
+- amdgpu_bo_unreserve(bo);
+-
+ return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
+ }
+
+@@ -406,30 +375,16 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ bool direct, struct dma_fence **fence)
+ {
+ struct amdgpu_device *adev = ring->adev;
+- struct amdgpu_bo *bo;
++ struct amdgpu_bo *bo = NULL;
+ uint32_t *msg;
+ int r, i;
+
+- r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+- NULL, NULL, &bo);
+- if (r)
+- return r;
+-
+- r = amdgpu_bo_reserve(bo, false);
+- if (r) {
+- amdgpu_bo_unref(&bo);
+- return r;
+- }
++ r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &bo, NULL, (void **)&msg);
+
+- r = amdgpu_bo_kmap(bo, (void **)&msg);
+- if (r) {
+- amdgpu_bo_unreserve(bo);
+- amdgpu_bo_unref(&bo);
++ if (r)
+ return r;
+- }
+
+ msg[0] = cpu_to_le32(0x00000028);
+ msg[1] = cpu_to_le32(0x00000018);
+@@ -440,9 +395,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
+ for (i = 6; i < 1024; ++i)
+ msg[i] = cpu_to_le32(0x0);
+
+- amdgpu_bo_kunmap(bo);
+- amdgpu_bo_unreserve(bo);
+-
+ return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
+ }
+
+--
+2.7.4
+