Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.14.71/4447-drm-scheduler-Remove-obsolete-spinlock.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.14.71/4447-drm-scheduler-Remove-obsolete-spinlock.patch | 76
1 file changed, 76 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.14.71/4447-drm-scheduler-Remove-obsolete-spinlock.patch b/common/recipes-kernel/linux/linux-yocto-4.14.71/4447-drm-scheduler-Remove-obsolete-spinlock.patch
new file mode 100644
index 00000000..1bfe7393
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.14.71/4447-drm-scheduler-Remove-obsolete-spinlock.patch
@@ -0,0 +1,76 @@
+From 34147464b6e29c6468a3bec16b2e1e2f8b3200eb Mon Sep 17 00:00:00 2001
+From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Date: Tue, 15 May 2018 14:42:20 -0400
+Subject: [PATCH 4447/5725] drm/scheduler: Remove obsolete spinlock.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This spinlock is superfluous: any call to drm_sched_entity_push_job
+should already be made under a lock, together with the matching
+drm_sched_job_init, so that the order of insertion into the queue
+matches the job's fence sequence number.
+
+v2:
+Improve patch description.
+Add function documentation describing the locking considerations.
+
+Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+Acked-by: Chunming Zhou <david1.zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index 026e0d8..be5d321 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -138,7 +138,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ entity->last_scheduled = NULL;
+
+ spin_lock_init(&entity->rq_lock);
+- spin_lock_init(&entity->queue_lock);
+ spsc_queue_init(&entity->job_queue);
+
+ atomic_set(&entity->fence_seq, 0);
+@@ -412,6 +411,10 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ *
+ * @sched_job The pointer to job required to submit
+ *
++ * Note: To guarantee that the order of insertion to queue matches
++ * the job's fence sequence number this function should be
++ * called with drm_sched_job_init under common lock.
++ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+@@ -422,11 +425,8 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+
+ trace_drm_sched_job(sched_job, entity);
+
+- spin_lock(&entity->queue_lock);
+ first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+
+- spin_unlock(&entity->queue_lock);
+-
+ /* first job wakes up scheduler */
+ if (first) {
+ /* Add the entity to the run queue */
+@@ -592,7 +592,12 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+ }
+ EXPORT_SYMBOL(drm_sched_job_recovery);
+
+-/* init a sched_job with basic field */
++/**
++ * Init a sched_job with basic field
++ *
++ * Note: Refer to drm_sched_entity_push_job documentation
++ * for locking considerations.
++ */
+ int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+--
+2.7.4
+
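
For context, the locking contract this patch relies on looks roughly like the sketch below in a driver's submission path. This is an illustrative example, not part of the patch: the context structure, mutex field, and my_submit_job() helper are hypothetical, and the drm_sched_job_init()/drm_sched_entity_push_job() signatures are assumed to match the 4.14-era scheduler shown in the hunks above (later kernels changed them).

/*
 * Illustrative sketch only (not from the patch): a driver serializes
 * drm_sched_job_init() and drm_sched_entity_push_job() under one lock,
 * so the fence sequence number assigned at init matches the position
 * the job takes in the entity's queue. Everything except the two
 * drm_sched_* calls is a hypothetical name.
 */
#include <drm/gpu_scheduler.h>
#include <linux/mutex.h>

struct my_submit_ctx {				/* hypothetical driver context */
	struct mutex submit_lock;		/* common lock for init + push */
	struct drm_sched_entity entity;
	struct drm_gpu_scheduler *sched;
};

static int my_submit_job(struct my_submit_ctx *ctx,
			 struct drm_sched_job *job, void *owner)
{
	int ret;

	mutex_lock(&ctx->submit_lock);

	/* Assigns the job's fence sequence number. */
	ret = drm_sched_job_init(job, ctx->sched, &ctx->entity, owner);
	if (ret) {
		mutex_unlock(&ctx->submit_lock);
		return ret;
	}

	/*
	 * Queue insertion happens in the same critical section, so the
	 * queue order matches the sequence numbers just assigned and no
	 * per-entity queue_lock is needed inside the scheduler.
	 */
	drm_sched_entity_push_job(job, &ctx->entity);

	mutex_unlock(&ctx->submit_lock);
	return 0;
}

With every caller following this pattern (per-context serialization of init and push, as amdgpu's submission path does), the queue_lock removed by this patch protected nothing that was not already ordered by the caller's lock.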