Diffstat (limited to 'common/recipes-kernel/linux/linux-yocto-4.19.8/1280-drm-sched-Fix-entities-with-0-rqs.patch')
-rw-r--r-- common/recipes-kernel/linux/linux-yocto-4.19.8/1280-drm-sched-Fix-entities-with-0-rqs.patch | 137
1 file changed, 137 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/linux-yocto-4.19.8/1280-drm-sched-Fix-entities-with-0-rqs.patch b/common/recipes-kernel/linux/linux-yocto-4.19.8/1280-drm-sched-Fix-entities-with-0-rqs.patch
new file mode 100644
index 00000000..b7a049f4
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.19.8/1280-drm-sched-Fix-entities-with-0-rqs.patch
@@ -0,0 +1,118 @@
+From feb7195c5ed2ee6d6fbcedd1072be2ac5d44eba4 Mon Sep 17 00:00:00 2001
+From: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
+Date: Wed, 30 Jan 2019 02:53:19 +0100
+Subject: [PATCH 1280/2940] drm/sched: Fix entities with 0 rqs.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Some blocks in amdgpu can have 0 rqs.
+
+Job creation already fails with -ENOENT when entity->rq is NULL,
+so jobs cannot be pushed. Without an rq there is no scheduler to
+pop jobs, and rq selection already does the right thing for an
+empty list.
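+
+For reference, the guard mentioned above sits at the top of
+drm_sched_job_init() and looks roughly like this (a sketch for
+context only, not part of this change):
+
+	drm_sched_entity_select_rq(entity);
+	if (!entity->rq)
+		return -ENOENT;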
+
+So the operations we need to fix are:
+ - Creation: do not set rq to rq_list[0], since the list can have length 0.
+ - Flushing: do not flush any jobs when there is no rq.
+ - Entity destruction: handle the rq == NULL case.
+ - set_priority: do not try to change the rq if it is NULL.
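+
+With the above fixed, an entity may be initialized with an empty
+rq list and simply stays unschedulable. A minimal, hypothetical
+caller (entity declared elsewhere) would be:
+
+	struct drm_sched_rq *rq_list[1] = { NULL }; /* no usable rqs */
+	int r;
+
+	/* Previously -EINVAL; now succeeds with entity->rq == NULL. */
+	r = drm_sched_entity_init(&entity, rq_list, 0, NULL);
+	/* Pushing a job to this entity still fails with -ENOENT. */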
+
+Signed-off-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+---
+ drivers/gpu/drm/scheduler/sched_entity.c | 39 ++++++++++++++++--------
+ 1 file changed, 26 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index e2942c9a11a7..35ddbec1375a 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -52,12 +52,12 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
+ {
+ int i;
+
+- if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
++ if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
+ return -EINVAL;
+
+ memset(entity, 0, sizeof(struct drm_sched_entity));
+ INIT_LIST_HEAD(&entity->list);
+- entity->rq = rq_list[0];
++ entity->rq = NULL;
+ entity->guilty = guilty;
+ entity->num_rq_list = num_rq_list;
+ entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
+@@ -67,6 +67,10 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
+
+ for (i = 0; i < num_rq_list; ++i)
+ entity->rq_list[i] = rq_list[i];
++
++ if (num_rq_list)
++ entity->rq = rq_list[0];
++
+ entity->last_scheduled = NULL;
+
+ spin_lock_init(&entity->rq_lock);
+@@ -165,6 +169,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
+ struct task_struct *last_user;
+ long ret = timeout;
+
++ if (!entity->rq)
++ return 0;
++
+ sched = entity->rq->sched;
+ /**
+ * The client will not queue more IBs during this fini, consume existing
+@@ -264,20 +271,24 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
+ */
+ void drm_sched_entity_fini(struct drm_sched_entity *entity)
+ {
+- struct drm_gpu_scheduler *sched;
++ struct drm_gpu_scheduler *sched = NULL;
+
+- sched = entity->rq->sched;
+- drm_sched_rq_remove_entity(entity->rq, entity);
++ if (entity->rq) {
++ sched = entity->rq->sched;
++ drm_sched_rq_remove_entity(entity->rq, entity);
++ }
+
+ /* Consumption of existing IBs wasn't completed. Forcefully
+ * remove them here.
+ */
+ if (spsc_queue_peek(&entity->job_queue)) {
+- /* Park the kernel for a moment to make sure it isn't processing
+- * our enity.
+- */
+- kthread_park(sched->thread);
+- kthread_unpark(sched->thread);
++ if (sched) {
++			/* Park the kernel thread for a moment to make sure it
++			 * isn't processing our entity.
++ */
++ kthread_park(sched->thread);
++ kthread_unpark(sched->thread);
++ }
+ if (entity->dependency) {
+ dma_fence_remove_callback(entity->dependency,
+ &entity->cb);
+@@ -362,9 +373,11 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
+ for (i = 0; i < entity->num_rq_list; ++i)
+ drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
+
+- drm_sched_rq_remove_entity(entity->rq, entity);
+- drm_sched_entity_set_rq_priority(&entity->rq, priority);
+- drm_sched_rq_add_entity(entity->rq, entity);
++ if (entity->rq) {
++ drm_sched_rq_remove_entity(entity->rq, entity);
++ drm_sched_entity_set_rq_priority(&entity->rq, priority);
++ drm_sched_rq_add_entity(entity->rq, entity);
++ }
+
+ spin_unlock(&entity->rq_lock);
+ }
+--
+2.17.1
+