path: root/meta-amd-bsp/recipes-kernel/linux-4.19/linux-yocto-4.19.8/0703-drm-scheduler-change-entities-rq-even-earlier.patch
From 0246cd36606f3e5e5e218d62ed5b7445c5d668a9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Wed, 8 Aug 2018 13:07:11 +0200
Subject: [PATCH 0703/2940] drm/scheduler: change entities rq even earlier
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Looks like for correct debugging we need to know the scheduler even
earlier. So move picking a rq for an entity into job creation.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 50 +++++++++++++++--------
 drivers/gpu/drm/scheduler/sched_fence.c   |  2 +-
 2 files changed, 33 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 4fa54f4e1d78..cf88f8346ccc 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -584,6 +584,34 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 	return sched_job;
 }
 
+/**
+ * drm_sched_entity_select_rq - select a new rq for the entity
+ *
+ * @entity: scheduler entity
+ *
+ * Check all prerequisites and select a new rq for the entity for load
+ * balancing.
+ */
+static void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
+{
+	struct dma_fence *fence;
+	struct drm_sched_rq *rq;
+
+	if (!spsc_queue_count(&entity->job_queue) == 0 ||
+	    entity->num_rq_list <= 1)
+		return;
+
+	fence = READ_ONCE(entity->last_scheduled);
+	if (fence && !dma_fence_is_signaled(fence))
+		return;
+
+	rq = drm_sched_entity_get_free_sched(entity);
+	spin_lock(&entity->rq_lock);
+	drm_sched_rq_remove_entity(entity->rq, entity);
+	entity->rq = rq;
+	spin_unlock(&entity->rq_lock);
+}
+
 /**
  * drm_sched_entity_push_job - Submit a job to the entity's job queue
  *
@@ -599,25 +627,8 @@ drm_sched_entity_pop_job(struct drm_sched_entity *entity)
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity)
 {
-	struct drm_sched_rq *rq = entity->rq;
 	bool first;
 
-	first = spsc_queue_count(&entity->job_queue) == 0;
-	if (first && (entity->num_rq_list > 1)) {
-		struct dma_fence *fence;
-
-		fence = READ_ONCE(entity->last_scheduled);
-		if (fence == NULL || dma_fence_is_signaled(fence)) {
-			rq = drm_sched_entity_get_free_sched(entity);
-			spin_lock(&entity->rq_lock);
-			drm_sched_rq_remove_entity(entity->rq, entity);
-			entity->rq = rq;
-			spin_unlock(&entity->rq_lock);
-		}
-	}
-
-	sched_job->sched = entity->rq->sched;
-	sched_job->s_fence->sched = entity->rq->sched;
 	trace_drm_sched_job(sched_job, entity);
 	atomic_inc(&entity->rq->sched->num_jobs);
 	WRITE_ONCE(entity->last_user, current->group_leader);
@@ -821,7 +832,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
-	struct drm_gpu_scheduler *sched = entity->rq->sched;
+	struct drm_gpu_scheduler *sched;
+
+	drm_sched_entity_select_rq(entity);
+	sched = entity->rq->sched;
 
 	job->sched = sched;
 	job->entity = entity;
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 20e4da377890..d8d2dff9ea2f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
 		return NULL;
 
 	fence->owner = owner;
-	fence->sched = NULL;
+	fence->sched = entity->rq->sched;
 	spin_lock_init(&fence->lock);
 
 	seq = atomic_inc_return(&entity->fence_seq);
-- 
2.17.1