Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0703-drm-scheduler-change-entities-rq-even-earlier.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0703-drm-scheduler-change-entities-rq-even-earlier.patch  112
1 files changed, 112 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0703-drm-scheduler-change-entities-rq-even-earlier.patch b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0703-drm-scheduler-change-entities-rq-even-earlier.patch
new file mode 100644
index 00000000..87af5240
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0703-drm-scheduler-change-entities-rq-even-earlier.patch
@@ -0,0 +1,112 @@
+From 0246cd36606f3e5e5e218d62ed5b7445c5d668a9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Wed, 8 Aug 2018 13:07:11 +0200
+Subject: [PATCH 0703/2940] drm/scheduler: change entities rq even earlier
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Looks like for correct debugging we need to know the scheduler even
+earlier. So move picking a rq for an entity into job creation.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/scheduler/gpu_scheduler.c | 50 +++++++++++++++--------
+ drivers/gpu/drm/scheduler/sched_fence.c | 2 +-
+ 2 files changed, 33 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+index 4fa54f4e1d78..cf88f8346ccc 100644
+--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
++++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
+@@ -584,6 +584,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ return sched_job;
+ }
+
++/**
++ * drm_sched_entity_select_rq - select a new rq for the entity
++ *
++ * @entity: scheduler entity
++ *
++ * Check all prerequisites and select a new rq for the entity for load
++ * balancing.
++ */
++static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
++{
++ struct dma_fence *fence;
++ struct drm_sched_rq *rq;
++
++ if (!spsc_queue_count(&entity->job_queue) == 0 ||
++ entity->num_rq_list <= 1)
++ return;
++
++ fence = READ_ONCE(entity->last_scheduled);
++ if (fence && !dma_fence_is_signaled(fence))
++ return;
++
++ rq = drm_sched_entity_get_free_sched(entity);
++ spin_lock(&entity->rq_lock);
++ drm_sched_rq_remove_entity(entity->rq, entity);
++ entity->rq = rq;
++ spin_unlock(&entity->rq_lock);
++}
++
+ /**
+ * drm_sched_entity_push_job - Submit a job to the entity's job queue
+ *
+@@ -599,25 +627,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *entity)
+ {
+- struct drm_sched_rq *rq = entity->rq;
+ bool first;
+
+- first = spsc_queue_count(&entity->job_queue) == 0;
+- if (first && (entity->num_rq_list > 1)) {
+- struct dma_fence *fence;
+-
+- fence = READ_ONCE(entity->last_scheduled);
+- if (fence == NULL || dma_fence_is_signaled(fence)) {
+- rq = drm_sched_entity_get_free_sched(entity);
+- spin_lock(&entity->rq_lock);
+- drm_sched_rq_remove_entity(entity->rq, entity);
+- entity->rq = rq;
+- spin_unlock(&entity->rq_lock);
+- }
+- }
+-
+- sched_job->sched = entity->rq->sched;
+- sched_job->s_fence->sched = entity->rq->sched;
+ trace_drm_sched_job(sched_job, entity);
+ atomic_inc(&entity->rq->sched->num_jobs);
+ WRITE_ONCE(entity->last_user, current->group_leader);
+@@ -821,7 +832,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_sched_entity *entity,
+ void *owner)
+ {
+- struct drm_gpu_scheduler *sched = entity->rq->sched;
++ struct drm_gpu_scheduler *sched;
++
++ drm_sched_entity_select_rq(entity);
++ sched = entity->rq->sched;
+
+ job->sched = sched;
+ job->entity = entity;
+diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
+index 20e4da377890..d8d2dff9ea2f 100644
+--- a/drivers/gpu/drm/scheduler/sched_fence.c
++++ b/drivers/gpu/drm/scheduler/sched_fence.c
+@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
+ return NULL;
+
+ fence->owner = owner;
+- fence->sched = NULL;
++ fence->sched = entity->rq->sched;
+ spin_lock_init(&fence->lock);
+
+ seq = atomic_inc_return(&entity->fence_seq);
+--
+2.17.1
+
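
Note (not part of the patch): the change above moves run-queue selection out of drm_sched_entity_push_job() and into drm_sched_job_init(), and only re-selects a run queue when the entity has no queued jobs and more than one run queue is available. The following standalone C sketch models that decision rule outside the kernel; the struct names and the "fewest jobs" selection heuristic are simplified assumptions for illustration, not the scheduler's actual data structures or locking.

/*
 * Standalone model of the rq re-selection rule from the patch above:
 * only pick a new run queue when the entity has no pending jobs and
 * more than one run queue is available.  Types and the least-loaded
 * heuristic are illustrative assumptions, not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

struct model_rq {
	const char *name;
	int num_jobs;           /* jobs currently queued on this rq's scheduler */
};

struct model_entity {
	int queued_jobs;        /* jobs already queued on the entity itself */
	struct model_rq *rq;    /* currently selected run queue */
	struct model_rq **rq_list;
	size_t num_rq_list;
};

/* Pick the run queue with the fewest jobs (illustrative heuristic). */
static struct model_rq *model_get_free_rq(struct model_entity *e)
{
	struct model_rq *best = e->rq_list[0];

	for (size_t i = 1; i < e->num_rq_list; i++)
		if (e->rq_list[i]->num_jobs < best->num_jobs)
			best = e->rq_list[i];
	return best;
}

/* Mirrors the early-return conditions of drm_sched_entity_select_rq(). */
static void model_select_rq(struct model_entity *e)
{
	if (e->queued_jobs != 0 || e->num_rq_list <= 1)
		return;         /* keep the current rq */
	e->rq = model_get_free_rq(e);
}

int main(void)
{
	struct model_rq a = { "ring0", 3 }, b = { "ring1", 1 };
	struct model_rq *rqs[] = { &a, &b };
	struct model_entity e = { 0, &a, rqs, 2 };

	model_select_rq(&e);    /* entity is idle, so it may migrate to the less loaded rq */
	printf("selected rq: %s\n", e.rq->name);
	return 0;
}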