From b5f0b2e0a9f772b8fe606053cc5c1856f5e5f07f Mon Sep 17 00:00:00 2001
From: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Date: Wed, 1 Aug 2018 13:50:02 +0530
Subject: [PATCH 0697/2940] drm/scheduler: move idle entities to scheduler with
 less load v2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is the first attempt at moving entities between schedulers to
achieve dynamic load balancing. For now we only move entities that
have no jobs queued, since moving entities with pending jobs raises
further complications, such as ensuring that the other scheduler
does not remove a job from the current entity while the move is in
progress.

v2: remove unused variable and an unnecessary check

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index cc3b310da2e7..010211765daa 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -555,6 +555,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 	if (!sched_job)
 		return NULL;
 
+	sched_job->sched = sched;
+	sched_job->s_fence->sched = sched;
 	while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
 		if (drm_sched_entity_add_dependency_cb(entity))
 			return NULL;
@@ -585,11 +587,23 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity)
 {
-	struct drm_gpu_scheduler *sched = sched_job->sched;
-	bool first = false;
+	struct drm_sched_rq *rq = entity->rq;
+	bool first, reschedule, idle;
 
-	trace_drm_sched_job(sched_job, entity);
+	idle = entity->last_scheduled == NULL ||
+		dma_fence_is_signaled(entity->last_scheduled);
+	first = spsc_queue_count(&entity->job_queue) == 0;
+	reschedule = idle && first && (entity->num_rq_list > 1);
 
+	if (reschedule) {
+		rq = drm_sched_entity_get_free_sched(entity);
+		spin_lock(&entity->rq_lock);
+		drm_sched_rq_remove_entity(entity->rq, entity);
+		entity->rq = rq;
+		spin_unlock(&entity->rq_lock);
+	}
+
+	trace_drm_sched_job(sched_job, entity);
 	atomic_inc(&entity->rq->sched->num_jobs);
 	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
@@ -605,7 +619,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 		}
 		drm_sched_rq_add_entity(entity->rq, entity);
 		spin_unlock(&entity->rq_lock);
-		drm_sched_wakeup(sched);
+		drm_sched_wakeup(entity->rq->sched);
 	}
 }
 EXPORT_SYMBOL(drm_sched_entity_push_job);
-- 
2.17.1
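
The hunk in drm_sched_entity_push_job() calls
drm_sched_entity_get_free_sched(), whose body is not included in this
patch; it is introduced elsewhere in the same series. As a rough sketch
of the idea, and not the verbatim upstream code, such a helper can walk
the entity's run-queue list and pick the run queue whose scheduler
currently has the fewest jobs, using the same per-scheduler num_jobs
counter that drm_sched_entity_push_job() increments above. The field
names (rq_list, num_rq_list, num_jobs) are assumed to follow the
4.19-era struct drm_sched_entity and struct drm_gpu_scheduler layout:

	/*
	 * Illustrative sketch only: select the run queue whose backing
	 * scheduler currently has the smallest num_jobs count. Assumes
	 * entity->rq_list[] / entity->num_rq_list as in the 4.19 GPU
	 * scheduler code.
	 */
	static struct drm_sched_rq *
	drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
	{
		struct drm_sched_rq *rq = NULL;
		unsigned int min_jobs = UINT_MAX, num_jobs;
		int i;

		for (i = 0; i < entity->num_rq_list; ++i) {
			/* num_jobs is the atomic_t this patch increments on push */
			num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
			if (num_jobs < min_jobs) {
				min_jobs = num_jobs;
				rq = entity->rq_list[i];
			}
		}

		return rq;
	}

With a selection like this in place, the push path only migrates an
entity when it is idle and its job queue is empty (the "reschedule"
condition above), so no in-flight job can be pulled away from the
scheduler that is still processing it.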