From e814a90845b6824221c26465f71ef475c45e3a9e Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Date: Tue, 10 Oct 2017 16:50:17 -0400
Subject: [PATCH 2000/4131] drm/amdgpu: Move old fence waiting before
 reservation lock is acquired v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Helps avoid deadlocks during GPU reset.
Added a mutex to amdgpu_ctx to preserve the order of fences on a ring.

v2: Put the waiting logic in a separate function in amdgpu_ctx.c

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>

Conflicts:
	drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

Change-Id: I56f1b6d60eefd53417a7123e017184f45689cbd2
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  4 ++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  | 10 ++++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 30 ++++++++++++++++++++++++------
 3 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 606cf9e..da8003e 100755
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -762,6 +762,7 @@ struct amdgpu_ctx {
 	bool			preamble_presented;
 	enum amd_sched_priority init_priority;
 	enum amd_sched_priority override_priority;
+	struct mutex		lock;
 };
 
 struct amdgpu_ctx_mgr {
@@ -784,9 +785,12 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 		     struct drm_file *filp);
 
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
+
 /*
  * file private structure
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5133b7a..8a936a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -89,6 +89,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		ret = -EINVAL;
 		goto free_chunk;
 	}
+
+	mutex_lock(&p->ctx->lock);
 
 	/* get chunks */
 	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
@@ -774,8 +776,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 
 	dma_fence_put(parser->fence);
 
-	if (parser->ctx)
+	if (parser->ctx) {
+		mutex_unlock(&parser->ctx->lock);
 		amdgpu_ctx_put(parser->ctx);
+	}
 	if (parser->bo_list)
 		amdgpu_bo_list_put(parser->bo_list);
 
@@ -936,9 +940,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 
 			r = amdgpu_ring_parse_cs(ring, p, j);
 			if (r)
 				return r;
-		}
-
 		j++;
 	}
 
@@ -1026,7 +1028,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 	    parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
 		return -EINVAL;
 
-	return 0;
+	return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
 }
 
 static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0ec5985..064247b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -67,6 +67,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	if (!ctx->fences)
 		return -ENOMEM;
 
+	mutex_init(&ctx->lock);
+
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		ctx->rings[i].sequence = 1;
 		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
@@ -143,6 +145,8 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 				      &ctx->rings[i].entity);
 	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+	mutex_destroy(&ctx->lock);
 }
 
 static int
 amdgpu_ctx_alloc(struct amdgpu_device *adev,
@@ -313,12 +317,8 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 
 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
-	if (other) {
-		signed long r;
-		r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
-		if (r < 0)
-			return r;
-	}
+	if (other)
+		BUG_ON(!dma_fence_is_signaled(other));
 
 	dma_fence_get(fence);
 
@@ -389,6 +389,24 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
 	}
 }
 
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
+	struct dma_fence *other = cring->fences[idx];
+
+	if (other) {
+		signed long r;
+		r = kcl_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		if (r < 0) {
+			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+			return r;
+		}
+	}
+
+	return 0;
+}
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 {
 	mutex_init(&mgr->lock);
-- 
2.7.4
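
Note on the resulting locking scheme (an editor's sketch, not part of the
patch above): the CS ioctl path now takes ctx->lock before any buffer-object
reservation and performs the potentially long fence wait while only that
mutex is held; in the patch itself the wait is reached through
amdgpu_cs_ib_fill(). The sketch below strings the patched functions together
in one hypothetical helper, amdgpu_cs_flow_sketch(), with the driver's static
functions shown as if they were directly callable:

/*
 * Hypothetical flow after this patch. Lock ordering:
 * ctx->lock -> wait for the old fence -> BO reservation (ww_mutex) locks.
 */
static int amdgpu_cs_flow_sketch(struct amdgpu_cs_parser *p, void *data)
{
	int r;

	/* amdgpu_cs_parser_init() now takes p->ctx->lock */
	r = amdgpu_cs_parser_init(p, data);
	if (r)
		return r;

	/*
	 * Wait for the fence this submission will overwrite while no
	 * reservation is held yet, so a GPU reset that needs those
	 * reservations cannot deadlock against this wait.
	 */
	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->job->ring->idx);
	if (r)
		goto out;

	/* reservation locks are only acquired after the wait */
	r = amdgpu_cs_parser_bos(p, data);
out:
	/* amdgpu_cs_parser_fini() drops p->ctx->lock */
	amdgpu_cs_parser_fini(p, r, r == 0);
	return r;
}

This ordering is also why amdgpu_ctx_add_fence() can replace its blocking
wait with BUG_ON(!dma_fence_is_signaled(other)): by the time a new fence is
installed into a slot of the per-ring fence array, amdgpu_ctx_wait_prev_fence()
has already waited for the fence previously occupying that slot, while holding
ctx->lock but no reservations.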