Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.19.8/1131-drm-scheduler-Add-drm_sched_suspend-resume_timeout.patch')
-rw-r--r--  common/recipes-kernel/linux/linux-yocto-4.19.8/1131-drm-scheduler-Add-drm_sched_suspend-resume_timeout.patch  221
1 file changed, 221 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/linux-yocto-4.19.8/1131-drm-scheduler-Add-drm_sched_suspend-resume_timeout.patch b/common/recipes-kernel/linux/linux-yocto-4.19.8/1131-drm-scheduler-Add-drm_sched_suspend-resume_timeout.patch
new file mode 100644
index 00000000..ed663c77
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.19.8/1131-drm-scheduler-Add-drm_sched_suspend-resume_timeout.patch
@@ -0,0 +1,221 @@
+From 943206ea979d3e38024df99792be02ae2f285473 Mon Sep 17 00:00:00 2001
+From: Sharat Masetty <smasetty@codeaurora.org>
+Date: Thu, 29 Nov 2018 15:35:20 +0530
+Subject: [PATCH 1131/2940] drm/scheduler: Add
+ drm_sched_suspend/resume_timeout()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This patch adds two new functions to help client drivers suspend and
+resume the scheduler job timeout. This can be useful when the hardware
+has preemption support enabled: the timeout can then be kept active only
+for the ring that is currently active on the ringbuffer.
+This patch also makes the job_list_lock IRQ safe.
+
+Suggested-by: Christian Koenig <Christian.Koenig@amd.com>
+Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Chaudhary Amit Kumar <Chaudharyamit.Kumar@amd.com>
+---
+ drivers/gpu/drm/scheduler/sched_main.c | 83 ++++++++++++++++++++++----
+ include/drm/gpu_scheduler.h | 4 ++
+ 2 files changed, 77 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 0f9d473d572c..fef0282eb398 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -209,6 +209,62 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
+ schedule_delayed_work(&sched->work_tdr, sched->timeout);
+ }
+
++/**
++ * drm_sched_suspend_timeout - Suspend scheduler job timeout
++ *
++ * @sched: scheduler instance for which to suspend the timeout
++ *
++ * Suspend the delayed work timeout for the scheduler. This is done by
++ * modifying the delayed work timeout to an arbitrary large value,
++ * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
++ * called from an IRQ context.
++ *
++ * Returns the timeout remaining
++ *
++ */
++unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
++{
++ unsigned long sched_timeout, now = jiffies;
++
++ sched_timeout = sched->work_tdr.timer.expires;
++
++ /*
++ * Modify the timeout to an arbitrarily large value. This also prevents
++ * the timeout to be restarted when new submissions arrive
++ */
++ if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
++ && time_after(sched_timeout, now))
++ return sched_timeout - now;
++ else
++ return sched->timeout;
++}
++EXPORT_SYMBOL(drm_sched_suspend_timeout);
++
++/**
++ * drm_sched_resume_timeout - Resume scheduler job timeout
++ *
++ * @sched: scheduler instance for which to resume the timeout
++ * @remaining: remaining timeout
++ *
++ * Resume the delayed work timeout for the scheduler. Note that
++ * this function can be called from an IRQ context.
++ */
++void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
++ unsigned long remaining)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&sched->job_list_lock, flags);
++
++ if (list_empty(&sched->ring_mirror_list))
++ cancel_delayed_work(&sched->work_tdr);
++ else
++ mod_delayed_work(system_wq, &sched->work_tdr, remaining);
++
++ spin_unlock_irqrestore(&sched->job_list_lock, flags);
++}
++EXPORT_SYMBOL(drm_sched_resume_timeout);
++
+ /* job_finish is called after hw fence signaled
+ */
+ static void drm_sched_job_finish(struct work_struct *work)
+@@ -216,6 +272,7 @@ static void drm_sched_job_finish(struct work_struct *work)
+ struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
+ finish_work);
+ struct drm_gpu_scheduler *sched = s_job->sched;
++ unsigned long flags;
+
+ /*
+ * Canceling the timeout without removing our job from the ring mirror
+@@ -226,12 +283,12 @@ static void drm_sched_job_finish(struct work_struct *work)
+ */
+ cancel_delayed_work_sync(&sched->work_tdr);
+
+- spin_lock(&sched->job_list_lock);
++ spin_lock_irqsave(&sched->job_list_lock, flags);
+ /* remove job from ring_mirror_list */
+ list_del(&s_job->node);
+ /* queue TDR for next job */
+ drm_sched_start_timeout(sched);
+- spin_unlock(&sched->job_list_lock);
++ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+ sched->ops->free_job(s_job);
+ }
+@@ -247,20 +304,22 @@ static void drm_sched_job_finish_cb(struct dma_fence *f,
+ static void drm_sched_job_begin(struct drm_sched_job *s_job)
+ {
+ struct drm_gpu_scheduler *sched = s_job->sched;
++ unsigned long flags;
+
+ dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
+ drm_sched_job_finish_cb);
+
+- spin_lock(&sched->job_list_lock);
++ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_add_tail(&s_job->node, &sched->ring_mirror_list);
+ drm_sched_start_timeout(sched);
+- spin_unlock(&sched->job_list_lock);
++ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ }
+
+ static void drm_sched_job_timedout(struct work_struct *work)
+ {
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_job *job;
++ unsigned long flags;
+
+ sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
+ job = list_first_entry_or_null(&sched->ring_mirror_list,
+@@ -269,7 +328,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
+ if (job)
+ job->sched->ops->timedout_job(job);
+
++ spin_lock_irqsave(&sched->job_list_lock, flags);
+ drm_sched_start_timeout(sched);
++ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ }
+
+ /**
+@@ -283,9 +344,10 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
+ {
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *entity, *tmp;
++ unsigned long flags;
+ int i;
+
+- spin_lock(&sched->job_list_lock);
++ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
+ if (s_job->s_fence->parent &&
+ dma_fence_remove_callback(s_job->s_fence->parent,
+@@ -295,7 +357,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
+ atomic_dec(&sched->hw_rq_count);
+ }
+ }
+- spin_unlock(&sched->job_list_lock);
++ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+
+ if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
+ atomic_inc(&bad->karma);
+@@ -333,9 +395,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+ {
+ struct drm_sched_job *s_job, *tmp;
+ bool found_guilty = false;
++ unsigned long flags;
+ int r;
+
+- spin_lock(&sched->job_list_lock);
++ spin_lock_irqsave(&sched->job_list_lock, flags);
+ list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+ struct dma_fence *fence;
+@@ -349,7 +412,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+ if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
+ dma_fence_set_error(&s_fence->finished, -ECANCELED);
+
+- spin_unlock(&sched->job_list_lock);
++ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ fence = sched->ops->run_job(s_job);
+ atomic_inc(&sched->hw_rq_count);
+
+@@ -366,10 +429,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
+ } else {
+ drm_sched_process_job(NULL, &s_fence->cb);
+ }
+- spin_lock(&sched->job_list_lock);
++ spin_lock_irqsave(&sched->job_list_lock, flags);
+ }
+ drm_sched_start_timeout(sched);
+- spin_unlock(&sched->job_list_lock);
++ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ }
+ EXPORT_SYMBOL(drm_sched_job_recovery);
+
+diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
+index dfc0baa795d8..046620acd6de 100644
+--- a/include/drm/gpu_scheduler.h
++++ b/include/drm/gpu_scheduler.h
+@@ -344,4 +344,8 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity);
+ void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
++unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
++void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
++ unsigned long remaining);
++
+ #endif
+--
+2.17.1
+
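
Usage note (not part of the patch): the sketch below shows how a client driver with hardware preemption might pair the two new helpers. It is a minimal illustration under assumptions; my_gpu_ring, my_gpu_preempt_to() and saved_timeout are hypothetical driver-side names, and only drm_sched_suspend_timeout() and drm_sched_resume_timeout() are the real symbols added by this patch.

    #include <drm/gpu_scheduler.h>

    /* Hypothetical per-ring state; only the two drm_sched_* calls are real. */
    struct my_gpu_ring {
            struct drm_gpu_scheduler sched;
            unsigned long saved_timeout;    /* jiffies left while suspended */
    };

    /*
     * Hand the hardware over from @from to @to so that only the ring that
     * actually owns the hardware keeps an armed job timeout (TDR).
     * Both helpers may be called from IRQ context per their kernel-doc.
     */
    static void my_gpu_preempt_to(struct my_gpu_ring *from, struct my_gpu_ring *to)
    {
            /* Park the outgoing ring's timeout and remember what was left. */
            from->saved_timeout = drm_sched_suspend_timeout(&from->sched);

            /* ... driver-specific ring switch goes here ... */

            /*
             * Re-arm the incoming ring with whatever time it had left;
             * saved_timeout is assumed to be initialised to sched.timeout
             * when the ring is created.
             */
            drm_sched_resume_timeout(&to->sched, to->saved_timeout);
    }

Returning the remaining time from drm_sched_suspend_timeout(), rather than simply cancelling the delayed work, is what lets the caller resume later without granting the preempted job a fresh, full timeout.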