From ef9eab00a57485ee1bd2440a04ca76086847fc58 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?=
Date: Wed, 7 Feb 2018 20:48:21 +0100
Subject: [PATCH 3539/4131] drm/amdgpu: fix and cleanup UVD IB generation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We didn't sync the BO after validating it. Also start to use
amdgpu_bo_create_reserved to simplify things.

Change-Id: I4956f054cd23736b605ab058acacb078207a53cb
Signed-off-by: Christian König
Signed-off-by: Andrey Grodzovsky
Signed-off-by: Alex Deucher
Reviewed-by: Leo Liu
Signed-off-by: Kalyan Alle
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 109 ++++++++++++--------------------
 1 file changed, 40 insertions(+), 69 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index fa33d04..2019e12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -953,36 +953,26 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 			       bool direct, struct dma_fence **fence)
 {
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head head;
+	struct amdgpu_device *adev = ring->adev;
+	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct dma_fence *f = NULL;
-	struct amdgpu_device *adev = ring->adev;
-	uint64_t addr;
 	uint32_t data[4];
-	int i, r;
-
-	memset(&tv, 0, sizeof(tv));
-	tv.bo = &bo->tbo;
-
-	INIT_LIST_HEAD(&head);
-	list_add(&tv.head, &head);
-
-	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
-	if (r)
-		return r;
+	uint64_t addr;
+	long r;
+	int i;
+
+	amdgpu_bo_kunmap(bo);
+	amdgpu_bo_unpin(bo);
 
 	if (!ring->adev->uvd.address_64_bit) {
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
 		amdgpu_uvd_force_into_uvd_segment(bo);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		if (r)
+			goto err;
 	}
-
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	if (r)
-		goto err;
-
+
 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 	if (r)
 		goto err;
@@ -1014,6 +1004,14 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
+		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+							true, false,
+							msecs_to_jiffies(10));
+		if (r == 0)
+			r = -ETIMEDOUT;
+		if (r < 0)
+			goto err_free;
+
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
 		job->fence = dma_fence_get(f);
 		if (r)
@@ -1021,17 +1019,23 @@
 
 		amdgpu_job_free(job);
 	} else {
+		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
+		if (r)
+			goto err_free;
+
 		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err_free;
 	}
 
-	ttm_eu_fence_buffer_objects(&ticket, &head, f);
+	amdgpu_bo_fence(bo, f, false);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 
 	if (fence)
 		*fence = dma_fence_get(f);
-	amdgpu_bo_unref(&bo);
 	dma_fence_put(f);
 
 	return 0;
@@ -1040,7 +1044,8 @@
 	amdgpu_job_free(job);
 
 err:
-	ttm_eu_backoff_reservation(&ticket, &head);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }
 
@@ -1051,30 +1056,16 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 			      struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, &bo);
-	if (r)
-		return r;
-
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
+	if (r)
 		return r;
-	}
 
 	/* stitch together an UVD create msg */
 	msg[0] = cpu_to_le32(0x00000de4);
@@ -1091,9 +1082,6 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = 11; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_uvd_send_msg(ring, bo, true, fence);
 }
 
@@ -1101,30 +1089,16 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 			       bool direct, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, &bo);
-	if (r)
-		return r;
-
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
+	if (r)
 		return r;
-	}
 
 	/* stitch together an UVD destroy msg */
 	msg[0] = cpu_to_le32(0x00000de4);
@@ -1134,9 +1108,6 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = 4; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
 }
-- 
2.7.4
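
A note on the helper for readers new to it: amdgpu_bo_create_reserved() bundles
amdgpu_bo_create(), amdgpu_bo_reserve() and amdgpu_bo_kmap() into one call and
unwinds any partial state itself on failure, which is why both message
allocations above collapse to a single error check. Below is a minimal sketch
of the resulting pattern, assuming the amdgpu API as it appears in this tree;
the wrapper name uvd_alloc_msg_bo is hypothetical, made up for illustration
only.

	/* Allocate the 1024-byte UVD message BO in VRAM, page aligned.
	 * On success the BO comes back reserved and CPU mapped through
	 * *msg; on failure amdgpu_bo_create_reserved() has already
	 * cleaned up, so the caller only checks the return code.
	 * NULL for the gpu_addr argument matches the call sites in the
	 * diff: the callers do not need the GPU address at creation
	 * time. */
	static int uvd_alloc_msg_bo(struct amdgpu_device *adev,
				    struct amdgpu_bo **bo, uint32_t **msg)
	{
		return amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
						 AMDGPU_GEM_DOMAIN_VRAM,
						 bo, NULL, (void **)msg);
	}

amdgpu_uvd_send_msg() then takes over the still-reserved BO and drops the
reservation and the reference itself on both the success and error paths, so
neither caller unmaps or unreserves the BO anymore.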