path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2223-drm-amdgpu-mark-the-partial-job-as-preempted-in-mcbp.patch
From 883d8397f36cacdf2e98fdb91a9632deef436659 Mon Sep 17 00:00:00 2001
From: Jack Xiao <Jack.Xiao@amd.com>
Date: Wed, 23 Jan 2019 13:54:26 +0800
Subject: [PATCH 2223/2940] drm/amdgpu: mark the partial job as preempted in
 mcbp unit test

In the mcbp unit test, the test should detect the preempted job, which
may be a partially executed IB, and mark it as preempted so that the
gfx block can correctly generate the PM4 frame.

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 44 +++++++++++++++------
 1 file changed, 32 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index c30396b8a664..28614dd05f88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -976,12 +976,40 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
 	spin_unlock(&sched->job_list_lock);
 }
 
+static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
+{
+	struct amdgpu_job *job;
+	struct drm_sched_job *s_job;
+	uint32_t preempt_seq;
+	struct dma_fence *fence, **ptr;
+	struct amdgpu_fence_driver *drv = &ring->fence_drv;
+	struct drm_gpu_scheduler *sched = &ring->sched;
+
+	if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
+		return;
+
+	preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
+	if (preempt_seq <= atomic_read(&drv->last_seq))
+		return;
+
+	preempt_seq &= drv->num_fences_mask;
+	ptr = &drv->fences[preempt_seq];
+	fence = rcu_dereference_protected(*ptr, 1);
+
+	spin_lock(&sched->job_list_lock);
+	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+		job = to_amdgpu_job(s_job);
+		if (job->fence == fence)
+			/* mark the job as preempted */
+			job->preemption_status |= AMDGPU_IB_PREEMPTED;
+	}
+	spin_unlock(&sched->job_list_lock);
+}
+
 static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 {
 	int r, resched, length;
 	struct amdgpu_ring *ring;
-	struct drm_sched_job *s_job;
-	struct amdgpu_job *job;
 	struct dma_fence **fences = NULL;
 	struct amdgpu_device *adev = (struct amdgpu_device *)data;
 
@@ -1020,21 +1048,13 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
 	    ring->fence_drv.sync_seq) {
 		DRM_INFO("ring %d was preempted\n", ring->idx);
 
+		amdgpu_ib_preempt_mark_partial_job(ring);
+
 		/* swap out the old fences */
 		amdgpu_ib_preempt_fences_swap(ring, fences);
 
 		amdgpu_fence_driver_force_completion(ring);
 
-		s_job = list_first_entry_or_null(
-			&ring->sched.ring_mirror_list,
-			struct drm_sched_job, node);
-		if (s_job) {
-			job = to_amdgpu_job(s_job);
-			/* mark the job as preempted */
-			/* job->preemption_status |=
-			   AMDGPU_IB_PREEMPTED; */
-		}
-
 		/* resubmit unfinished jobs */
 		amdgpu_ib_preempt_job_recovery(&ring->sched);
 
-- 
2.17.1
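
The new amdgpu_ib_preempt_mark_partial_job() helper locates the preempted
fence by masking the sequence number read from the fence driver's
CPU-mapped buffer (drv->cpu_addr + 2) with num_fences_mask, which works
because the fence slot array is sized to a power of two. Below is a
minimal standalone sketch of that slot-lookup step only, using
hypothetical names (NUM_FENCES, NUM_FENCES_MASK) rather than the
driver's own structures:

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_FENCES      8u                 /* must be a power of two */
	#define NUM_FENCES_MASK (NUM_FENCES - 1u)  /* 0x7 */

	int main(void)
	{
		/* hypothetical sequence value read back from the fence buffer */
		uint32_t preempt_seq = 27;

		/* map the monotonically increasing seq to its ring slot */
		uint32_t slot = preempt_seq & NUM_FENCES_MASK;

		/* 27 & 0x7 == 3: the fence for seq 27 lives in slot 3 */
		printf("seq %u -> slot %u\n", (unsigned)preempt_seq, (unsigned)slot);
		return 0;
	}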