Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2223-drm-amdgpu-mark-the-partial-job-as-preempted-in-mcbp.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2223-drm-amdgpu-mark-the-partial-job-as-preempted-in-mcbp.patch  |  91 +
1 file changed, 91 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2223-drm-amdgpu-mark-the-partial-job-as-preempted-in-mcbp.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2223-drm-amdgpu-mark-the-partial-job-as-preempted-in-mcbp.patch
new file mode 100644
index 00000000..653496eb
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2223-drm-amdgpu-mark-the-partial-job-as-preempted-in-mcbp.patch
@@ -0,0 +1,91 @@
+From 883d8397f36cacdf2e98fdb91a9632deef436659 Mon Sep 17 00:00:00 2001
+From: Jack Xiao <Jack.Xiao@amd.com>
+Date: Wed, 23 Jan 2019 13:54:26 +0800
+Subject: [PATCH 2223/2940] drm/amdgpu: mark the partial job as preempted in
+ mcbp unit test
+
+In the MCBP unit test, the test should detect the preempted job, which
+may be a partially executed IB, and mark it as preempted so that the
+gfx block can correctly generate the PM4 frame.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 44 +++++++++++++++------
+ 1 file changed, 32 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index c30396b8a664..28614dd05f88 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -976,12 +976,40 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
+ spin_unlock(&sched->job_list_lock);
+ }
+
++static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
++{
++ struct amdgpu_job *job;
++ struct drm_sched_job *s_job;
++ uint32_t preempt_seq;
++ struct dma_fence *fence, **ptr;
++ struct amdgpu_fence_driver *drv = &ring->fence_drv;
++ struct drm_gpu_scheduler *sched = &ring->sched;
++
++ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
++ return;
++
++ preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
++ if (preempt_seq <= atomic_read(&drv->last_seq))
++ return;
++
++ preempt_seq &= drv->num_fences_mask;
++ ptr = &drv->fences[preempt_seq];
++ fence = rcu_dereference_protected(*ptr, 1);
++
++ spin_lock(&sched->job_list_lock);
++ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
++ job = to_amdgpu_job(s_job);
++ if (job->fence == fence)
++ /* mark the job as preempted */
++ job->preemption_status |= AMDGPU_IB_PREEMPTED;
++ }
++ spin_unlock(&sched->job_list_lock);
++}
++
+ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
+ {
+ int r, resched, length;
+ struct amdgpu_ring *ring;
+- struct drm_sched_job *s_job;
+- struct amdgpu_job *job;
+ struct dma_fence **fences = NULL;
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+@@ -1020,21 +1048,13 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
+ ring->fence_drv.sync_seq) {
+ DRM_INFO("ring %d was preempted\n", ring->idx);
+
++ amdgpu_ib_preempt_mark_partial_job(ring);
++
+ /* swap out the old fences */
+ amdgpu_ib_preempt_fences_swap(ring, fences);
+
+ amdgpu_fence_driver_force_completion(ring);
+
+- s_job = list_first_entry_or_null(
+- &ring->sched.ring_mirror_list,
+- struct drm_sched_job, node);
+- if (s_job) {
+- job = to_amdgpu_job(s_job);
+- /* mark the job as preempted */
+- /* job->preemption_status |=
+- AMDGPU_IB_PREEMPTED; */
+- }
+-
+ /* resubmit unfinished jobs */
+ amdgpu_ib_preempt_job_recovery(&ring->sched);
+
+--
+2.17.1
+
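
The core of amdgpu_ib_preempt_mark_partial_job() above is a fence-ring
lookup: the CP firmware writes the sequence number of the fence that was
in flight when the preemption request landed (the value read from
drv->cpu_addr + 2), and because the driver keeps outstanding fences in a
power-of-two ring, masking that seqno with num_fences_mask yields the
slot of the interrupted fence. Any job in the scheduler's mirror list
bound to that fence was only partially executed and gets flagged. Below
is a minimal standalone C sketch of that pattern; toy_fence_driver,
toy_job and mark_partial_jobs are illustrative stand-ins, not the real
amdgpu structures, and a plain singly linked list replaces the kernel's
list_for_each_entry() walk:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_NUM_FENCES 8u            /* ring size; must be a power of two */
#define TOY_IB_PREEMPTED (1u << 0)   /* stand-in for AMDGPU_IB_PREEMPTED */

struct toy_fence {
	uint32_t seqno;
};

struct toy_job {
	struct toy_fence *fence;        /* fence signalled when the job ends */
	uint32_t preemption_status;
	struct toy_job *next;           /* simplified mirror-list link */
};

struct toy_fence_driver {
	uint32_t last_seq;              /* newest fully completed seqno */
	uint32_t num_fences_mask;       /* TOY_NUM_FENCES - 1 */
	struct toy_fence *fences[TOY_NUM_FENCES];
};

static void mark_partial_jobs(struct toy_fence_driver *drv,
			      struct toy_job *mirror_list,
			      uint32_t preempt_seq)
{
	struct toy_fence *fence;
	struct toy_job *job;

	/* If the preempt point is already retired, nothing was cut short. */
	if (preempt_seq <= drv->last_seq)
		return;

	/* Power-of-two ring: the seqno masked by (size - 1) is the slot. */
	fence = drv->fences[preempt_seq & drv->num_fences_mask];

	for (job = mirror_list; job; job = job->next)
		if (job->fence == fence)
			job->preemption_status |= TOY_IB_PREEMPTED;
}

int main(void)
{
	struct toy_fence f = { .seqno = 2 };
	struct toy_job job = { .fence = &f };
	struct toy_fence_driver drv = {
		.last_seq = 1,
		.num_fences_mask = TOY_NUM_FENCES - 1,
	};

	drv.fences[f.seqno & drv.num_fences_mask] = &f;
	mark_partial_jobs(&drv, &job, 2 /* seqno reported by the CP */);
	printf("preempted: %u\n", job.preemption_status & TOY_IB_PREEMPTED);
	return 0;
}

Run as-is this prints "preempted: 1": the job tied to seqno 2 lies past
last_seq, so it is the partially executed one and receives the flag,
which is exactly the decision the debugfs test needs to make before
force-completing the fences and resubmitting the unfinished jobs.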