Diffstat (limited to 'common/recipes-kernel/linux/files/0506-drm-amdgpu-fix-wait-queue-handling-in-the-scheduler.patch')
-rw-r--r--  common/recipes-kernel/linux/files/0506-drm-amdgpu-fix-wait-queue-handling-in-the-scheduler.patch  120
1 file changed, 120 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/files/0506-drm-amdgpu-fix-wait-queue-handling-in-the-scheduler.patch b/common/recipes-kernel/linux/files/0506-drm-amdgpu-fix-wait-queue-handling-in-the-scheduler.patch
new file mode 100644
index 00000000..285cb9cc
--- /dev/null
+++ b/common/recipes-kernel/linux/files/0506-drm-amdgpu-fix-wait-queue-handling-in-the-scheduler.patch
@@ -0,0 +1,120 @@
+From c2b6bd7e91aad8440a2f55bdbde6f5a8ae19fac5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 25 Aug 2015 21:39:31 +0200
+Subject: [PATCH 0506/1050] drm/amdgpu: fix wait queue handling in the
+ scheduler
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Freeing up a queue after signalling it isn't race free.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 16 ++++++++--------
+ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 4 ++--
+ 2 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+index 6dfbdea..d99fe90 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+@@ -117,7 +117,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+ memset(entity, 0, sizeof(struct amd_sched_entity));
+ entity->belongto_rq = rq;
+ entity->scheduler = sched;
+- init_waitqueue_head(&entity->wait_queue);
+ entity->fence_context = fence_context_alloc(1);
+ if(kfifo_alloc(&entity->job_queue,
+ jobs * sizeof(void *),
+@@ -183,7 +182,7 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ * The client will not queue more IBs during this fini, consume existing
+ * queued IBs
+ */
+- wait_event(entity->wait_queue, amd_sched_entity_is_idle(entity));
++ wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
+
+ amd_sched_rq_remove_entity(rq, entity);
+ kfifo_free(&entity->job_queue);
+@@ -236,7 +235,7 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+ fence_get(&fence->base);
+ sched_job->s_fence = fence;
+
+- r = wait_event_interruptible(entity->wait_queue,
++ r = wait_event_interruptible(entity->scheduler->job_scheduled,
+ amd_sched_entity_in(sched_job));
+
+ return r;
+@@ -257,7 +256,7 @@ static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+ {
+ if (amd_sched_ready(sched))
+- wake_up_interruptible(&sched->wait_queue);
++ wake_up_interruptible(&sched->wake_up_worker);
+ }
+
+ /**
+@@ -290,7 +289,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
+ atomic_dec(&sched->hw_rq_count);
+ fence_put(&sched_job->s_fence->base);
+ sched->ops->process_job(sched_job);
+- wake_up_interruptible(&sched->wait_queue);
++ wake_up_interruptible(&sched->wake_up_worker);
+ }
+
+ static int amd_sched_main(void *param)
+@@ -306,7 +305,7 @@ static int amd_sched_main(void *param)
+ struct amd_sched_job *job;
+ struct fence *fence;
+
+- wait_event_interruptible(sched->wait_queue,
++ wait_event_interruptible(sched->wake_up_worker,
+ kthread_should_stop() ||
+ (c_entity = amd_sched_select_context(sched)));
+
+@@ -329,7 +328,7 @@ static int amd_sched_main(void *param)
+ fence_put(fence);
+ }
+
+- wake_up(&c_entity->wait_queue);
++ wake_up(&sched->job_scheduled);
+ }
+ return 0;
+ }
+@@ -361,7 +360,8 @@ struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
+ amd_sched_rq_init(&sched->sched_rq);
+ amd_sched_rq_init(&sched->kernel_rq);
+
+- init_waitqueue_head(&sched->wait_queue);
++ init_waitqueue_head(&sched->wake_up_worker);
++ init_waitqueue_head(&sched->job_scheduled);
+ atomic_set(&sched->hw_rq_count, 0);
+ /* Each scheduler will run on a seperate kernel thread */
+ sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+index 13349a6..e797796 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+@@ -44,7 +44,6 @@ struct amd_sched_entity {
+ struct kfifo job_queue;
+ spinlock_t queue_lock;
+ struct amd_gpu_scheduler *scheduler;
+- wait_queue_head_t wait_queue;
+ uint64_t fence_context;
+ };
+
+@@ -104,7 +103,8 @@ struct amd_gpu_scheduler {
+ atomic_t hw_rq_count;
+ struct amd_sched_backend_ops *ops;
+ uint32_t ring_id;
+- wait_queue_head_t wait_queue;
++ wait_queue_head_t wake_up_worker;
++ wait_queue_head_t job_scheduled;
+ uint32_t hw_submission_limit;
+ char name[20];
+ void *priv;
+--
+1.9.1
+
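Note (not part of the patch above): the race the commit message describes is that the entity-owned wait queue could be freed by the waiter as soon as the wake-up condition became true, leaving the waker touching freed memory; the fix hosts both wait queues in the longer-lived amd_gpu_scheduler. Below is a minimal userspace sketch of that same idea using pthreads in place of kernel wait queues; all names here (scheduler, entity, entity_fini, scheduler_thread) are invented for the illustration and assume a single entity and a single scheduler thread.

/*
 * Illustrative analogue only: the condition variable lives in the
 * scheduler, so nothing inside the entity is touched by the waker
 * after the waiter may have freed it.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct scheduler {
	pthread_mutex_t lock;
	pthread_cond_t  job_scheduled;   /* lives as long as the scheduler */
};

struct entity {
	struct scheduler *sched;
	bool idle;                       /* set once all queued jobs ran */
};

/* Waker side: after the entity's last job runs, signal through the
 * scheduler-owned condvar; the entity is never accessed after the
 * unlock, so freeing it in the waiter is safe. */
static void *scheduler_thread(void *arg)
{
	struct entity *e = arg;
	struct scheduler *s = e->sched;

	sleep(1);                        /* pretend to run the queued jobs */
	pthread_mutex_lock(&s->lock);
	e->idle = true;
	pthread_cond_broadcast(&s->job_scheduled);
	pthread_mutex_unlock(&s->lock);
	return NULL;
}

/* Waiter side: wait until the entity is idle, then free it.  Safe
 * because the wait primitive is not embedded in the freed object. */
static void entity_fini(struct entity *e)
{
	struct scheduler *s = e->sched;

	pthread_mutex_lock(&s->lock);
	while (!e->idle)
		pthread_cond_wait(&s->job_scheduled, &s->lock);
	pthread_mutex_unlock(&s->lock);
	free(e);
}

int main(void)
{
	static struct scheduler sched = {
		.lock          = PTHREAD_MUTEX_INITIALIZER,
		.job_scheduled = PTHREAD_COND_INITIALIZER,
	};
	struct entity *e = calloc(1, sizeof(*e));
	pthread_t tid;

	e->sched = &sched;
	pthread_create(&tid, NULL, scheduler_thread, e);
	entity_fini(e);                  /* blocks until the job ran */
	pthread_join(tid, NULL);
	puts("entity torn down without touching freed memory");
	return 0;
}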