Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2222-drm-amdgpu-add-mcbp-unit-test-in-debugfs-v2.patch')
 meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2222-drm-amdgpu-add-mcbp-unit-test-in-debugfs-v2.patch | 239
 1 file changed, 239 insertions(+), 0 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2222-drm-amdgpu-add-mcbp-unit-test-in-debugfs-v2.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2222-drm-amdgpu-add-mcbp-unit-test-in-debugfs-v2.patch
new file mode 100644
index 00000000..cc7f79d9
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8/2222-drm-amdgpu-add-mcbp-unit-test-in-debugfs-v2.patch
@@ -0,0 +1,239 @@
+From 16bddd4927a7ff137338683d685b2d37d08ab11c Mon Sep 17 00:00:00 2001
+From: Jack Xiao <Jack.Xiao@amd.com>
+Date: Thu, 20 Jun 2019 10:17:31 -0500
+Subject: [PATCH 2222/2940] drm/amdgpu: add mcbp unit test in debugfs (v2)
+
+The MCBP (mid-command-buffer preemption) unit test exercises the MCBP
+functionality: it emulates sending a preemption request and then
+resubmits the unfinished jobs.
+
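+A minimal usage sketch (assumptions: debugfs is mounted at
+/sys/kernel/debug and the GPU is DRI primary minor 0; adjust the minor
+number for other setups). Write the index of the ring to preempt to the
+new debugfs file:
+
+  echo 0 > /sys/kernel/debug/dri/0/amdgpu_preempt_ib
+
+If there were unfinished jobs on the ring, the kernel log reports
+"ring N was preempted".
+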
+v2: squash in fixes (Alex)
+
+Change-Id: I8ff9448b0dbc5975c39d254c4d7f9eb142f194c0
+Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 156 ++++++++++++++++++++
+ drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 1 +
+ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 +
+ 4 files changed, 159 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 458a6eded228..1d562cbb21c0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -795,6 +795,7 @@ struct amdgpu_device {
+ struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
+ unsigned debugfs_count;
+ #if defined(CONFIG_DEBUG_FS)
++ struct dentry *debugfs_preempt;
+ struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
+ #endif
+ struct amdgpu_atif *atif;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+index 8930d66f2204..c30396b8a664 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+@@ -920,17 +920,173 @@ static const struct drm_info_list amdgpu_debugfs_list[] = {
+ {"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
+ };
+
++static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
++ struct dma_fence **fences)
++{
++ struct amdgpu_fence_driver *drv = &ring->fence_drv;
++ uint32_t sync_seq, last_seq;
++
++ last_seq = atomic_read(&ring->fence_drv.last_seq);
++ sync_seq = ring->fence_drv.sync_seq;
++
++ last_seq &= drv->num_fences_mask;
++ sync_seq &= drv->num_fences_mask;
++
++ do {
++ struct dma_fence *fence, **ptr;
++
++ ++last_seq;
++ last_seq &= drv->num_fences_mask;
++ ptr = &drv->fences[last_seq];
++
++ fence = rcu_dereference_protected(*ptr, 1);
++ RCU_INIT_POINTER(*ptr, NULL);
++
++ if (!fence)
++ continue;
++
++ fences[last_seq] = fence;
++
++ } while (last_seq != sync_seq);
++}
++
++static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
++ int length)
++{
++ int i;
++ struct dma_fence *fence;
++
++ for (i = 0; i < length; i++) {
++ fence = fences[i];
++ if (!fence)
++ continue;
++ dma_fence_signal(fence);
++ dma_fence_put(fence);
++ }
++}
++
++static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
++{
++ struct drm_sched_job *s_job;
++
++ spin_lock(&sched->job_list_lock);
++ list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
++ sched->ops->run_job(s_job);
++ }
++ spin_unlock(&sched->job_list_lock);
++}
++
++static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
++{
++ int r, resched, length;
++ struct amdgpu_ring *ring;
++ struct drm_sched_job *s_job;
++ struct amdgpu_job *job;
++ struct dma_fence **fences = NULL;
++ struct amdgpu_device *adev = (struct amdgpu_device *)data;
++
++ if (val >= AMDGPU_MAX_RINGS)
++ return -EINVAL;
++
++ ring = adev->rings[val];
++
++ if (!ring || !ring->funcs->preempt_ib || !ring->sched.thread)
++ return -EINVAL;
++
++ /* the last preemption failed */
++ if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
++ return -EBUSY;
++
++ length = ring->fence_drv.num_fences_mask + 1;
++ fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
++ if (!fences)
++ return -ENOMEM;
++
++ /* stop the scheduler */
++ kthread_park(ring->sched.thread);
++
++ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
++
++ /* preempt the IB */
++ r = amdgpu_ring_preempt_ib(ring);
++ if (r) {
++ DRM_WARN("failed to preempt ring %d\n", ring->idx);
++ goto failure;
++ }
++
++ amdgpu_fence_process(ring);
++
++ if (atomic_read(&ring->fence_drv.last_seq) !=
++ ring->fence_drv.sync_seq) {
++ DRM_INFO("ring %d was preempted\n", ring->idx);
++
++ /* swap out the old fences */
++ amdgpu_ib_preempt_fences_swap(ring, fences);
++
++ amdgpu_fence_driver_force_completion(ring);
++
++ s_job = list_first_entry_or_null(
++ &ring->sched.ring_mirror_list,
++ struct drm_sched_job, node);
++ if (s_job) {
++ job = to_amdgpu_job(s_job);
++ /* mark the job as preempted */
++ /* job->preemption_status |=
++ AMDGPU_IB_PREEMPTED; */
++ }
++
++ /* resubmit unfinished jobs */
++ amdgpu_ib_preempt_job_recovery(&ring->sched);
++
++ /* wait for the jobs to finish */
++ amdgpu_fence_wait_empty(ring);
++
++ /* signal the old fences */
++ amdgpu_ib_preempt_signal_fences(fences, length);
++ }
++
++failure:
++ /* restart the scheduler */
++ kthread_unpark(ring->sched.thread);
++
++ ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
++
++ if (fences)
++ kfree(fences);
++
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
++ amdgpu_debugfs_ib_preempt, "%llu\n");
++
+ int amdgpu_debugfs_init(struct amdgpu_device *adev)
+ {
++ adev->debugfs_preempt =
++ debugfs_create_file("amdgpu_preempt_ib", 0600,
++ adev->ddev->primary->debugfs_root,
++ (void *)adev, &fops_ib_preempt);
++ if (!(adev->debugfs_preempt)) {
++ DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
++ return -EIO;
++ }
++
+ return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
+ ARRAY_SIZE(amdgpu_debugfs_list));
+ }
+
++void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
++{
++ if (adev->debugfs_preempt)
++ debugfs_remove(adev->debugfs_preempt);
++}
++
+ #else
+ int amdgpu_debugfs_init(struct amdgpu_device *adev)
+ {
+ return 0;
+ }
++void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { }
+ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+ {
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+index 8260d8073c26..f289d28ad6b2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+@@ -34,6 +34,7 @@ struct amdgpu_debugfs {
+ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
+ void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
+ int amdgpu_debugfs_init(struct amdgpu_device *adev);
++void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev);
+ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 2acf005782bb..73d1f84640f1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -2841,6 +2841,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
+ amdgpu_debugfs_regs_cleanup(adev);
+ device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
+ amdgpu_ucode_sysfs_fini(adev);
++ amdgpu_debugfs_preempt_cleanup(adev);
+ }
+
+
+--
+2.17.1
+