aboutsummaryrefslogtreecommitdiffstats
path: root/common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch
diff options
context:
space:
mode:
Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch')
-rw-r--r--common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch278
1 files changed, 278 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch
new file mode 100644
index 00000000..f18cfe95
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4965-drm-amdgpu-remove-job-ring.patch
@@ -0,0 +1,278 @@
+From f91c9ea9bbc5f9d97125812d12f24aeaf499fc4c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 13 Jul 2018 15:08:44 +0200
+Subject: [PATCH 4965/5725] drm/amdgpu: remove job->ring
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+We can easily get that from the scheduler.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 18 +++++++++---------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 23 ++++++++++++-----------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 1 -
+ drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 8 ++++----
+ drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +-
+ 7 files changed, 29 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index d3a2e16..d83a206 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1047,6 +1047,7 @@ struct amdgpu_cs_parser {
+
+ /* scheduler job object */
+ struct amdgpu_job *job;
++ struct amdgpu_ring *ring;
+
+ /* buffer objects */
+ struct ww_acquire_ctx ticket;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 4385446..0e31215 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -914,11 +914,11 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
+ {
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+- struct amdgpu_ring *ring = p->job->ring;
++ struct amdgpu_ring *ring = p->ring;
+ int r;
+
+ /* Only for UVD/VCE VM emulation */
+- if (p->job->ring->funcs->parse_cs) {
++ if (p->ring->funcs->parse_cs) {
+ unsigned i, j;
+
+ for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
+@@ -1032,10 +1032,10 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
+ }
+ }
+
+- if (parser->job->ring && parser->job->ring != ring)
++ if (parser->ring && parser->ring != ring)
+ return -EINVAL;
+
+- parser->job->ring = ring;
++ parser->ring = ring;
+
+ r = amdgpu_ib_get(adev, vm,
+ ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
+@@ -1054,11 +1054,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
+
+ /* UVD & VCE fw doesn't support user fences */
+ if (parser->job->uf_addr && (
+- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
++ parser->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
++ parser->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
+ return -EINVAL;
+
+- return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
++ return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->ring->idx);
+ }
+
+ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
+@@ -1209,7 +1209,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
+ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
+ {
+- struct amdgpu_ring *ring = p->job->ring;
++ struct amdgpu_ring *ring = p->ring;
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ struct amdgpu_job *job;
+ unsigned i;
+@@ -1258,7 +1258,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ job->uf_sequence = seq;
+
+ amdgpu_job_free_resources(job);
+- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
++ amdgpu_ring_priority_get(p->ring, job->base.s_priority);
+
+ trace_amdgpu_cs_ioctl(job);
+ drm_sched_entity_push_job(&job->base, entity);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index ebf370c..ea19758 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3316,7 +3316,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+
+ kthread_park(ring->sched.thread);
+
+- if (job && job->ring->idx != i)
++ if (job && job->base.sched != &ring->sched)
+ continue;
+
+ drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
+@@ -3340,7 +3340,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ * or all rings (in the case @job is NULL)
+ * after above amdgpu_reset accomplished
+ */
+- if ((!job || job->ring->idx == i) && !r)
++ if ((!job || job->base.sched == &ring->sched) && !r)
+ drm_sched_job_recovery(&ring->sched);
+
+ kthread_unpark(adev->rings[i]->sched.thread);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+index 4dd5aac..27263f2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+@@ -30,12 +30,12 @@
+
+ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+ {
+- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
++ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
++ struct amdgpu_job *job = to_amdgpu_job(s_job);
+
+ DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
+- job->base.sched->name,
+- atomic_read(&job->ring->fence_drv.last_seq),
+- job->ring->fence_drv.sync_seq);
++ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
++ ring->fence_drv.sync_seq);
+
+ amdgpu_device_gpu_recover(job->adev, job, false);
+ }
+@@ -98,9 +98,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
+
+ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
+ {
+- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
++ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
++ struct amdgpu_job *job = to_amdgpu_job(s_job);
+
+- amdgpu_ring_priority_put(job->ring, s_job->s_priority);
++ amdgpu_ring_priority_put(ring, s_job->s_priority);
+ dma_fence_put(job->fence);
+ amdgpu_sync_free(&job->sync);
+ amdgpu_sync_free(&job->sched_sync);
+@@ -120,6 +121,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
+ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ void *owner, struct dma_fence **f)
+ {
++ struct amdgpu_ring *ring = to_amdgpu_ring(entity->sched);
+ int r;
+
+ if (!f)
+@@ -130,10 +132,9 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ return r;
+
+ job->owner = owner;
+- job->ring = to_amdgpu_ring(entity->sched);
+ *f = dma_fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
+- amdgpu_ring_priority_get(job->ring, job->base.s_priority);
++ amdgpu_ring_priority_get(ring, job->base.s_priority);
+ drm_sched_entity_push_job(&job->base, entity);
+
+ return 0;
+@@ -142,6 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+ {
++ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+ struct amdgpu_job *job = to_amdgpu_job(sched_job);
+ struct amdgpu_vm *vm = job->vm;
+ bool explicit = false;
+@@ -157,8 +159,6 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ }
+
+ while (fence == NULL && vm && !job->vmid) {
+- struct amdgpu_ring *ring = job->ring;
+-
+ r = amdgpu_vmid_grab(vm, ring, &job->sync,
+ &job->base.s_fence->finished,
+ job);
+@@ -173,6 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+
+ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ {
++ struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+ struct dma_fence *fence = NULL, *finished;
+ struct amdgpu_device *adev;
+ struct amdgpu_job *job;
+@@ -196,7 +197,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
+ if (finished->error < 0) {
+ DRM_INFO("Skip scheduling IBs!\n");
+ } else {
+- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
++ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
+ &fence);
+ if (r)
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+index 39f4230..c663c19 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+@@ -37,7 +37,6 @@ struct amdgpu_job {
+ struct drm_sched_job base;
+ struct amdgpu_device *adev;
+ struct amdgpu_vm *vm;
+- struct amdgpu_ring *ring;
+ struct amdgpu_sync sync;
+ struct amdgpu_sync sched_sync;
+ struct amdgpu_ib *ibs;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index e96e26d..7692003 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -150,10 +150,10 @@ TRACE_EVENT(amdgpu_cs,
+
+ TP_fast_assign(
+ __entry->bo_list = p->bo_list;
+- __entry->ring = p->job->ring->idx;
++ __entry->ring = p->ring->idx;
+ __entry->dw = p->job->ibs[i].length_dw;
+ __entry->fences = amdgpu_fence_count_emitted(
+- p->job->ring);
++ p->ring);
+ ),
+ TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
+ __entry->bo_list, __entry->ring, __entry->dw,
+@@ -178,7 +178,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __entry->context = job->base.s_fence->finished.context;
+ __entry->seqno = job->base.s_fence->finished.seqno;
+- __entry->ring_name = job->ring->name;
++ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __entry->num_ibs = job->num_ibs;
+ ),
+ TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
+@@ -203,7 +203,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
+ __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __entry->context = job->base.s_fence->finished.context;
+ __entry->seqno = job->base.s_fence->finished.seqno;
+- __entry->ring_name = job->ring->name;
++ __entry->ring_name = to_amdgpu_ring(job->base.sched)->name;
+ __entry->num_ibs = job->num_ibs;
+ ),
+ TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+index 5ee3151..7c3b3ac 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+@@ -693,11 +693,11 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
+ struct amdgpu_bo *bo, unsigned offset)
+ {
+ struct amdgpu_device *adev = ctx->parser->adev;
++ uint32_t ip_instance = ctx->parser->ring->me;
+ int32_t *msg, msg_type, handle;
+ void *ptr;
+ long r;
+ int i;
+- uint32_t ip_instance = ctx->parser->job->ring->me;
+
+ if (offset & 0x3F) {
+ DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance);
+--
+2.7.4
+