aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/4592-drm-scheduler-Avoid-using-wait_event_killable-for-dy.patch
blob: 8c2c49a28bb627d6ecf86c85a0c92424c19537ca (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
From 113558d1a3710de4b73789b286fed4e4859cab4c Mon Sep 17 00:00:00 2001
From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Date: Wed, 30 May 2018 15:11:01 -0400
Subject: [PATCH 4592/5725] drm/scheduler: Avoid using wait_event_killable for
 dying process (V4)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

A dying process might be blocked from receiving any more signals,
so avoid using wait_event_killable for it.

Also retire entity->fini_status and just check the SW queue;
if it's not empty, do the fallback cleanup.

Also handle the entity->last_scheduled == NULL use case, which
happens when the HW ring is already hung when a new entity
tries to enqueue jobs.

v2:
Return the remaining timeout and use that as parameter for the next call.
This way when we need to cleanup multiple queues we don't wait for the
entire TO period for each queue but rather in total.
Styling comments.
Rebase.

v3:
Update types from unsigned to long.
Work with jiffies instead of ms.
Return 0 when TO expires.
Rebase.

v4:
Remove unnecessary timeout calculation.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>

Conflicts:
      drivers/gpu/drm/scheduler/gpu_scheduler.c

Change-Id: I63143d46855d80439773f25e1a7cb3296eb755da
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 67 ++++++++++++++++++++++---------
 1 file changed, 49 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index a403e47..9eb6cca 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -181,7 +181,6 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 	entity->rq = rq;
 	entity->sched = sched;
 	entity->guilty = guilty;
-	entity->fini_status = 0;
 	entity->last_scheduled = NULL;
 
 	spin_lock_init(&entity->rq_lock);
@@ -219,7 +218,8 @@ static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
 static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
 {
 	rmb();
-	if (spsc_queue_peek(&entity->job_queue) == NULL)
+
+	if (!entity->rq || spsc_queue_peek(&entity->job_queue) == NULL)
 		return true;
 
 	return false;
@@ -260,25 +260,39 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
  *
  * @sched: scheduler instance
  * @entity: scheduler entity
+ * @timeout: time to wait in for Q to become empty in jiffies.
  *
  * Splitting drm_sched_entity_fini() into two functions, The first one does the waiting,
  * removes the entity from the runqueue and returns an error when the process was killed.
+ *
+ * Returns the remaining time in jiffies left from the input timeout
  */
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity)
+long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+			   struct drm_sched_entity *entity, long timeout)
 {
+	long ret = timeout;
+
 	if (!drm_sched_entity_is_initialized(sched, entity))
-		return;
+		return ret;
 	/**
 	 * The client will not queue more IBs during this fini, consume existing
 	 * queued IBs or discard them on SIGKILL
 	*/
-	if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
-		entity->fini_status = -ERESTARTSYS;
-	else
-		entity->fini_status = wait_event_killable(sched->job_scheduled,
-					drm_sched_entity_is_idle(entity));
-	drm_sched_entity_set_rq(entity, NULL);
+	if (current->flags & PF_EXITING) {
+		if (timeout)
+			ret = wait_event_timeout(
+					sched->job_scheduled,
+					drm_sched_entity_is_idle(entity),
+					timeout);
+	} else
+		wait_event_killable(sched->job_scheduled, drm_sched_entity_is_idle(entity));
+
+
+	/* For killed process disable any more IBs enqueue right now */
+	if ((current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+		drm_sched_entity_set_rq(entity, NULL);
+
+	return ret;
 }
 EXPORT_SYMBOL(drm_sched_entity_do_release);
 
@@ -290,11 +304,18 @@ EXPORT_SYMBOL(drm_sched_entity_do_release);
  *
  * This should be called after @drm_sched_entity_do_release. It goes over the
  * entity and signals all jobs with an error code if the process was killed.
+ *
  */
 void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity)
 {
-	if (entity->fini_status) {
+
+	drm_sched_entity_set_rq(entity, NULL);
+
+	/* Consumption of existing IBs wasn't completed. Forcefully
+	 * remove them here.
+	 */
+	if (spsc_queue_peek(&entity->job_queue)) {
 		struct drm_sched_job *job;
 		int r;
 
@@ -314,12 +335,22 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
 			struct drm_sched_fence *s_fence = job->s_fence;
 			drm_sched_fence_scheduled(s_fence);
 			dma_fence_set_error(&s_fence->finished, -ESRCH);
-			r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
-							drm_sched_entity_kill_jobs_cb);
-			if (r == -ENOENT)
+
+			/*
+			 * When pipe is hanged by older entity, new entity might
+			 * not even have chance to submit it's first job to HW
+			 * and so entity->last_scheduled will remain NULL
+			*/
+			if (!entity->last_scheduled) {
 				drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
-			else if (r)
-				DRM_ERROR("fence add callback failed (%d)\n", r);
+			} else {
+				r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb,
+								drm_sched_entity_kill_jobs_cb);
+				if (r == -ENOENT)
+					drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
+				else if (r)
+					DRM_ERROR("fence add callback failed (%d)\n", r);
+			}
 		}
 	}
 
@@ -339,7 +370,7 @@ EXPORT_SYMBOL(drm_sched_entity_cleanup);
 void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
 				struct drm_sched_entity *entity)
 {
-	drm_sched_entity_do_release(sched, entity);
+	drm_sched_entity_do_release(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
 	drm_sched_entity_cleanup(sched, entity);
 }
 EXPORT_SYMBOL(drm_sched_entity_fini);
-- 
2.7.4