Diffstat (limited to 'common/recipes-kernel/linux/files/0494-drm-amdgpu-fix-and-cleanup-amd_sched_entity_push_job.patch')
-rw-r--r--  common/recipes-kernel/linux/files/0494-drm-amdgpu-fix-and-cleanup-amd_sched_entity_push_job.patch  193
1 file changed, 193 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/files/0494-drm-amdgpu-fix-and-cleanup-amd_sched_entity_push_job.patch b/common/recipes-kernel/linux/files/0494-drm-amdgpu-fix-and-cleanup-amd_sched_entity_push_job.patch
new file mode 100644
index 00000000..c95279ad
--- /dev/null
+++ b/common/recipes-kernel/linux/files/0494-drm-amdgpu-fix-and-cleanup-amd_sched_entity_push_job.patch
@@ -0,0 +1,193 @@
+From 6c859274f363be9dc13f8849bdc59bb64f922f26 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Thu, 20 Aug 2015 16:12:50 +0200
+Subject: [PATCH 0494/1050] drm/amdgpu: fix and cleanup
+ amd_sched_entity_push_job
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Calling schedule() is probably the worst thing we can do.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 2 +-
+ drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 78 +++++++++++++++------------
+ drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | 4 +-
+ 4 files changed, 48 insertions(+), 38 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index 780c011..82e1432 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -857,7 +857,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+
+ job->free_job = amdgpu_cs_free_job;
+ mutex_lock(&job->job_lock);
+- r = amd_sched_push_job((struct amd_sched_job *)job);
++ r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+ if (r) {
+ mutex_unlock(&job->job_lock);
+ amdgpu_cs_free_job(job);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+index 964b543..1aa72ed 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+@@ -105,7 +105,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
+ mutex_init(&job->job_lock);
+ job->free_job = free_job;
+ mutex_lock(&job->job_lock);
+- r = amd_sched_push_job((struct amd_sched_job *)job);
++ r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+ if (r) {
+ mutex_unlock(&job->job_lock);
+ kfree(job);
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+index acae855..29c45ed 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+@@ -121,7 +121,6 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+ entity->fence_context = fence_context_alloc(1);
+ snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
+ memcpy(entity->name, name, 20);
+- entity->need_wakeup = false;
+ if(kfifo_alloc(&entity->job_queue,
+ jobs * sizeof(void *),
+ GFP_KERNEL))
+@@ -182,7 +181,7 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+
+ if (!amd_sched_entity_is_initialized(sched, entity))
+ return 0;
+- entity->need_wakeup = true;
++
+ /**
+ * The client will not queue more IBs during this fini, consume existing
+ * queued IBs
+@@ -201,38 +200,55 @@ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ }
+
+ /**
+- * Submit a normal job to the job queue
++ * Helper to submit a job to the job queue
+ *
+- * @sched The pointer to the scheduler
+- * @c_entity The pointer to amd_sched_entity
+ * @job The pointer to job required to submit
+- * return 0 if succeed. -1 if failed.
+- * -2 indicate queue is full for this client, client should wait untill
+- * scheduler consum some queued command.
+- * -1 other fail.
+-*/
+-int amd_sched_push_job(struct amd_sched_job *sched_job)
++ *
++ * Returns true if we could submit the job.
++ */
++static bool amd_sched_entity_in(struct amd_sched_job *job)
++{
++ struct amd_sched_entity *entity = job->s_entity;
++ bool added, first = false;
++
++ spin_lock(&entity->queue_lock);
++ added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
++
++ if (added && kfifo_len(&entity->job_queue) == sizeof(job))
++ first = true;
++
++ spin_unlock(&entity->queue_lock);
++
++ /* first job wakes up scheduler */
++ if (first)
++ wake_up_interruptible(&job->sched->wait_queue);
++
++ return added;
++}
++
++/**
++ * Submit a job to the job queue
++ *
++ * @job The pointer to job required to submit
++ *
++ * Returns 0 for success, negative error code otherwise.
++ */
++int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+ {
+- struct amd_sched_fence *fence =
+- amd_sched_fence_create(sched_job->s_entity);
++ struct amd_sched_entity *entity = sched_job->s_entity;
++ struct amd_sched_fence *fence = amd_sched_fence_create(entity);
++ int r;
++
+ if (!fence)
+- return -EINVAL;
++ return -ENOMEM;
++
+ fence_get(&fence->base);
+ sched_job->s_fence = fence;
+- while (kfifo_in_spinlocked(&sched_job->s_entity->job_queue,
+- &sched_job, sizeof(void *),
+- &sched_job->s_entity->queue_lock) !=
+- sizeof(void *)) {
+- /**
+- * Current context used up all its IB slots
+- * wait here, or need to check whether GPU is hung
+- */
+- schedule();
+- }
+- /* first job wake up scheduler */
+- if ((kfifo_len(&sched_job->s_entity->job_queue) / sizeof(void *)) == 1)
+- wake_up_interruptible(&sched_job->sched->wait_queue);
+- return 0;
++
++ r = wait_event_interruptible(entity->wait_queue,
++ amd_sched_entity_in(sched_job));
++
++ return r;
+ }
+
+ /**
+@@ -313,11 +329,7 @@ static int amd_sched_main(void *param)
+ fence_put(fence);
+ }
+
+- if (c_entity->need_wakeup) {
+- c_entity->need_wakeup = false;
+- wake_up(&c_entity->wait_queue);
+- }
+-
++ wake_up(&c_entity->wait_queue);
+ }
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+index df365ab..46b528d 100644
+--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
++++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+@@ -49,7 +49,6 @@ struct amd_sched_entity {
+ wait_queue_head_t wait_queue;
+ uint64_t fence_context;
+ char name[20];
+- bool need_wakeup;
+ };
+
+ /**
+@@ -119,14 +118,13 @@ amd_sched_create(struct amd_sched_backend_ops *ops,
+ uint32_t ring, uint32_t hw_submission);
+ int amd_sched_destroy(struct amd_gpu_scheduler *sched);
+
+-int amd_sched_push_job(struct amd_sched_job *sched_job);
+-
+ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity,
+ struct amd_sched_rq *rq,
+ uint32_t jobs);
+ int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
+ struct amd_sched_entity *entity);
++int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+
+ struct amd_sched_fence *amd_sched_fence_create(
+ struct amd_sched_entity *s_entity);
+--
+1.9.1
+
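The core of the patch is a change of waiting strategy: instead of busy-looping with schedule() until the entity's kfifo has room, the submitter now sleeps on entity->wait_queue via wait_event_interruptible() and is woken by the scheduler thread, while the consumer side is only woken for the job that makes the queue non-empty. The following is a minimal userspace sketch of that pattern using pthreads; it is not amdgpu code, and all names, sizes and helpers in it are made up for illustration.

/*
 * Minimal userspace sketch (not amdgpu code) of the pattern this patch
 * introduces: the submitter sleeps until the bounded job queue has room,
 * instead of busy-looping with schedule(), and only the job that makes
 * the queue non-empty wakes the consumer.  All names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define QUEUE_DEPTH 4
#define NUM_JOBS    8

static struct {
	int jobs[QUEUE_DEPTH];
	int count;
	pthread_mutex_t lock;
	pthread_cond_t space_avail;	/* submitter waits here (cf. entity->wait_queue) */
	pthread_cond_t work_avail;	/* consumer waits here (cf. sched->wait_queue)   */
} q = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.space_avail = PTHREAD_COND_INITIALIZER,
	.work_avail = PTHREAD_COND_INITIALIZER,
};

/* Cf. amd_sched_entity_push_job(): block until the job fits in the queue. */
static void push_job(int job)
{
	bool first;

	pthread_mutex_lock(&q.lock);
	while (q.count == QUEUE_DEPTH)		/* sleep instead of spinning */
		pthread_cond_wait(&q.space_avail, &q.lock);
	q.jobs[q.count++] = job;
	first = (q.count == 1);
	pthread_mutex_unlock(&q.lock);

	/* "first job wakes up scheduler" */
	if (first)
		pthread_cond_signal(&q.work_avail);
}

/* Consumer side: take one job, then signal that a slot is free again. */
static int pop_job(void)
{
	int job;

	pthread_mutex_lock(&q.lock);
	while (q.count == 0)
		pthread_cond_wait(&q.work_avail, &q.lock);
	job = q.jobs[0];
	memmove(q.jobs, q.jobs + 1, --q.count * sizeof(q.jobs[0]));
	pthread_mutex_unlock(&q.lock);

	pthread_cond_signal(&q.space_avail);
	return job;
}

static void *consumer(void *arg)
{
	(void)arg;
	for (int i = 0; i < NUM_JOBS; i++)
		printf("ran job %d\n", pop_job());
	return NULL;
}

int main(void)
{
	pthread_t thread;

	pthread_create(&thread, NULL, consumer, NULL);
	for (int i = 0; i < NUM_JOBS; i++)
		push_job(i);
	pthread_join(thread, NULL);
	return 0;
}

As in the patch, the wakeup on the consumer side is only issued when the queue transitions from empty to non-empty; while the consumer is still draining jobs it re-checks the predicate itself, so no wakeup is lost and no CPU time is burned in a schedule() loop on the submit path.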