diff --git a/common/recipes-kernel/linux/files/0449-drm-amdgpu-get-rid-of-incorrect-TDR.patch b/common/recipes-kernel/linux/files/0449-drm-amdgpu-get-rid-of-incorrect-TDR.patch
new file mode 100644
index 00000000..6d7ec197
--- /dev/null
+++ b/common/recipes-kernel/linux/files/0449-drm-amdgpu-get-rid-of-incorrect-TDR.patch
@@ -0,0 +1,117 @@
+From 8ff407d79fae9ff8f93ab3e024697a24da1c893f Mon Sep 17 00:00:00 2001
+From: Monk Liu <Monk.Liu@amd.com>
+Date: Fri, 4 Mar 2016 14:42:26 +0800
+Subject: [PATCH 0449/1110] drm/amdgpu: get rid of incorrect TDR
+
+The original timeout detection routine is incorrect because it
+measures the gap from when the job was scheduled, but we should
+only measure the gap from when the hardware started processing it.
+
+Signed-off-by: Monk Liu <Monk.Liu@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 39 +--------------------------
+ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 4 ---
+ 2 files changed, 1 insertion(+), 42 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+index af846f2..9a9fffd 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+@@ -418,46 +418,18 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+ amd_sched_fence_signal(s_fence);
+- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+- cancel_delayed_work(&s_fence->dwork);
+- spin_lock_irqsave(&sched->fence_list_lock, flags);
+- list_del_init(&s_fence->list);
+- spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+- }
++
+ trace_amd_sched_process_job(s_fence);
+ fence_put(&s_fence->base);
+ wake_up_interruptible(&sched->wake_up_worker);
+ }
+
+-static void amd_sched_fence_work_func(struct work_struct *work)
+-{
+- struct amd_sched_fence *s_fence =
+- container_of(work, struct amd_sched_fence, dwork.work);
+- struct amd_gpu_scheduler *sched = s_fence->sched;
+- struct amd_sched_fence *entity, *tmp;
+- unsigned long flags;
+-
+- DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);
+-
+- /* Clean all pending fences */
+- spin_lock_irqsave(&sched->fence_list_lock, flags);
+- list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
+- DRM_ERROR(" fence no %d\n", entity->base.seqno);
+- cancel_delayed_work(&entity->dwork);
+- list_del_init(&entity->list);
+- fence_put(&entity->base);
+- }
+- spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+-}
+-
+ static int amd_sched_main(void *param)
+ {
+ struct sched_param sparam = {.sched_priority = 1};
+ struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+ int r, count;
+
+- spin_lock_init(&sched->fence_list_lock);
+- INIT_LIST_HEAD(&sched->fence_list);
+ sched_setscheduler(current, SCHED_FIFO, &sparam);
+
+ while (!kthread_should_stop()) {
+@@ -465,7 +437,6 @@ static int amd_sched_main(void *param)
+ struct amd_sched_fence *s_fence;
+ struct amd_sched_job *sched_job;
+ struct fence *fence;
+- unsigned long flags;
+
+ wait_event_interruptible(sched->wake_up_worker,
+ (entity = amd_sched_select_entity(sched)) ||
+@@ -480,14 +451,6 @@ static int amd_sched_main(void *param)
+
+ s_fence = sched_job->s_fence;
+
+- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
+- INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
+- schedule_delayed_work(&s_fence->dwork, sched->timeout);
+- spin_lock_irqsave(&sched->fence_list_lock, flags);
+- list_add_tail(&s_fence->list, &sched->fence_list);
+- spin_unlock_irqrestore(&sched->fence_list_lock, flags);
+- }
+-
+ atomic_inc(&sched->hw_rq_count);
+ amd_sched_job_pre_schedule(sched, sched_job);
+ fence = sched->ops->run_job(sched_job);
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+index 2e3b830..b26148d 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+@@ -74,8 +74,6 @@ struct amd_sched_fence {
+ struct amd_gpu_scheduler *sched;
+ spinlock_t lock;
+ void *owner;
+- struct delayed_work dwork;
+- struct list_head list;
+ struct amd_sched_job *s_job;
+ };
+
+@@ -127,8 +125,6 @@ struct amd_gpu_scheduler {
+ wait_queue_head_t wake_up_worker;
+ wait_queue_head_t job_scheduled;
+ atomic_t hw_rq_count;
+- struct list_head fence_list;
+- spinlock_t fence_list_lock;
+ struct task_struct *thread;
+ struct list_head ring_mirror_list;
+ spinlock_t job_list_lock;
+--
+2.7.4
+
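For context on the reasoning in the commit message: the removed code armed a delayed work item when the job was pushed to the scheduler, so time spent waiting in a busy queue counted against the timeout and a healthy job could be flagged as hung. Below is a minimal userspace C sketch of that distinction, not the driver code itself; the struct, field names, timestamps, and the 2-second threshold are illustrative assumptions.

/*
 * Sketch: a timeout armed at queue time vs. one measured from when the
 * hardware actually starts the job. All values here are made up.
 */
#include <stdio.h>
#include <stdbool.h>

struct job {
	const char *name;
	double queued_at;    /* seconds: when the job entered the queue    */
	double hw_start_at;  /* seconds: when the hardware began executing */
	double hw_done_at;   /* seconds: when the hardware finished        */
};

/* Incorrect: counts queue wait, so a long backlog looks like a GPU hang. */
static bool timed_out_from_queue(const struct job *j, double timeout)
{
	return (j->hw_done_at - j->queued_at) > timeout;
}

/* Corrected idea: count only the time the hardware spent on the job. */
static bool timed_out_from_hw_start(const struct job *j, double timeout)
{
	return (j->hw_done_at - j->hw_start_at) > timeout;
}

int main(void)
{
	/* Waits 3 s in a busy queue, then runs for 0.5 s on the hardware. */
	struct job j = { "healthy-but-delayed", 0.0, 3.0, 3.5 };
	double timeout = 2.0; /* hypothetical TDR threshold */

	printf("measured from queue time:    %s\n",
	       timed_out_from_queue(&j, timeout) ? "TIMEOUT (false positive)" : "ok");
	printf("measured from hw start time: %s\n",
	       timed_out_from_hw_start(&j, timeout) ? "TIMEOUT" : "ok");
	return 0;
}

The patch resolves the false positive simply by deleting the queue-time watchdog; a later reworked TDR path would have to arm its timer where run_job() hands the job to the hardware, which is the point this sketch illustrates.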