Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0697-drm-scheduler-move-idle-entities-to-scheduler-with-l.patch')
-rw-r--r-- meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0697-drm-scheduler-move-idle-entities-to-scheduler-with-l.patch  76
1 file changed, 76 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0697-drm-scheduler-move-idle-entities-to-scheduler-with-l.patch b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0697-drm-scheduler-move-idle-entities-to-scheduler-with-l.patch
new file mode 100644
index 00000000..8e12dda7
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0697-drm-scheduler-move-idle-entities-to-scheduler-with-l.patch
@@ -0,0 +1,76 @@
+From b5f0b2e0a9f772b8fe606053cc5c1856f5e5f07f Mon Sep 17 00:00:00 2001
+From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
+Date: Wed, 1 Aug 2018 13:50:02 +0530
+Subject: [PATCH 0697/2940] drm/scheduler: move idle entities to scheduler with
+ less load v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This is a first attempt at moving entities between schedulers to
+achieve dynamic load balancing. For now we only move entities with
+no queued jobs, since moving entities with jobs in flight raises
+further complications, such as ensuring that the other scheduler
+does not remove a job from the entity while it is being moved.
+
+v2: remove an unused variable and an unnecessary check
+
+Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index cc3b310da2e7..010211765daa 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -555,6 +555,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ if (!sched_job)
+ return NULL;
+
++ sched_job->sched = sched;
++ sched_job->s_fence->sched = sched;
+ while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
+ if (drm_sched_entity_add_dependency_cb(entity))
+ return NULL;
+@@ -585,11 +587,23 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *entity)
+ {
+- struct drm_gpu_scheduler *sched = sched_job->sched;
+- bool first = false;
++ struct drm_sched_rq *rq = entity->rq;
++ bool first, reschedule, idle;
+
+- trace_drm_sched_job(sched_job, entity);
++ idle = entity->last_scheduled == NULL ||
++ dma_fence_is_signaled(entity->last_scheduled);
++ first = spsc_queue_count(&entity->job_queue) == 0;
++ reschedule = idle && first && (entity->num_rq_list > 1);
+
++ if (reschedule) {
++ rq = drm_sched_entity_get_free_sched(entity);
++ spin_lock(&entity->rq_lock);
++ drm_sched_rq_remove_entity(entity->rq, entity);
++ entity->rq = rq;
++ spin_unlock(&entity->rq_lock);
++ }
++
++ trace_drm_sched_job(sched_job, entity);
+ atomic_inc(&entity->rq->sched->num_jobs);
+ WRITE_ONCE(entity->last_user, current->group_leader);
+ first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
+@@ -605,7 +619,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+ }
+ drm_sched_rq_add_entity(entity->rq, entity);
+ spin_unlock(&entity->rq_lock);
+- drm_sched_wakeup(sched);
++ drm_sched_wakeup(entity->rq->sched);
+ }
+ }
+ EXPORT_SYMBOL(drm_sched_entity_push_job);
+--
+2.17.1
+
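Editor's note: the reschedule path above calls drm_sched_entity_get_free_sched(),
which is defined elsewhere in this patch series and is not visible in this hunk.
As a rough sketch of the idea only, assuming the entity keeps its candidate run
queues in an rq_list[]/num_rq_list pair and that each scheduler's load is the
num_jobs counter this very patch increments on push, the helper would pick the
run queue whose scheduler currently has the fewest queued jobs:

    /*
     * Editorial sketch, not part of the patch: a plausible shape for
     * drm_sched_entity_get_free_sched().  The rq_list[]/num_rq_list
     * fields are assumed from the num_rq_list check in the hunk above.
     */
    static struct drm_sched_rq *
    drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
    {
            struct drm_sched_rq *rq = NULL;
            unsigned int min_jobs = UINT_MAX, num_jobs;
            int i;

            for (i = 0; i < entity->num_rq_list; ++i) {
                    /* Load metric: jobs currently queued on this scheduler. */
                    num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
                    if (num_jobs < min_jobs) {
                            min_jobs = num_jobs;
                            rq = entity->rq_list[i];
                    }
            }

            return rq;
    }

Note the guard in the hunk: reschedule is only true when the entity is idle
(last_scheduled is NULL or signaled), its job queue is empty, and it has more
than one run queue to choose from, so the entity can switch run queues without
an in-flight job racing the move.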