Diffstat (limited to 'common/recipes-kernel/linux/files/0663-drm-amdgpu-use-a-timer-for-fence-fallback.patch')
 common/recipes-kernel/linux/files/0663-drm-amdgpu-use-a-timer-for-fence-fallback.patch | 198 +++++++++++++++++
 1 file changed, 198 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/files/0663-drm-amdgpu-use-a-timer-for-fence-fallback.patch b/common/recipes-kernel/linux/files/0663-drm-amdgpu-use-a-timer-for-fence-fallback.patch
new file mode 100644
index 00000000..73218ddb
--- /dev/null
+++ b/common/recipes-kernel/linux/files/0663-drm-amdgpu-use-a-timer-for-fence-fallback.patch
@@ -0,0 +1,198 @@
+From 41224d9abf95a78d4b89a41545f526812f380861 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Tue, 3 Nov 2015 13:27:39 +0100
+Subject: [PATCH 0663/1050] drm/amdgpu: use a timer for fence fallback
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Less overhead than a work item and also adds proper cleanup handling.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +-
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 78 ++++++++++++++-----------------
+ 2 files changed, 35 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index 81766db7..99d8b1c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -389,7 +389,6 @@ struct amdgpu_clock {
+ * Fences.
+ */
+ struct amdgpu_fence_driver {
+- struct amdgpu_ring *ring;
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
+ /* sync_seq is protected by ring emission lock */
+@@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
+ bool initialized;
+ struct amdgpu_irq_src *irq_src;
+ unsigned irq_type;
+- struct delayed_work lockup_work;
++ struct timer_list fallback_timer;
+ wait_queue_head_t fence_queue;
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index b92c193..257fce3 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -85,24 +85,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
+ }
+
+ /**
+- * amdgpu_fence_schedule_check - schedule lockup check
+- *
+- * @ring: pointer to struct amdgpu_ring
+- *
+- * Queues a delayed work item to check for lockups.
+- */
+-static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
+-{
+- /*
+- * Do not reset the timer here with mod_delayed_work,
+- * this can livelock in an interaction with TTM delayed destroy.
+- */
+- queue_delayed_work(system_power_efficient_wq,
+- &ring->fence_drv.lockup_work,
+- AMDGPU_FENCE_JIFFIES_TIMEOUT);
+-}
+-
+-/**
+ * amdgpu_fence_emit - emit a fence on the requested ring
+ *
+ * @ring: ring the fence is associated with
+@@ -136,6 +118,19 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
+ }
+
+ /**
++ * amdgpu_fence_schedule_fallback - schedule fallback check
++ *
++ * @ring: pointer to struct amdgpu_ring
++ *
++ * Start a timer as fallback to our interrupts.
++ */
++static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
++{
++ mod_timer(&ring->fence_drv.fallback_timer,
++ jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
++}
++
++/**
+ * amdgpu_fence_activity - check for fence activity
+ *
+ * @ring: pointer to struct amdgpu_ring
+@@ -201,45 +196,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
+ } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
+
+ if (seq < last_emitted)
+- amdgpu_fence_schedule_check(ring);
++ amdgpu_fence_schedule_fallback(ring);
+
+ return wake;
+ }
+
+ /**
+- * amdgpu_fence_check_lockup - check for hardware lockup
++ * amdgpu_fence_process - process a fence
+ *
+- * @work: delayed work item
++ * @adev: amdgpu_device pointer
++ * @ring: ring index the fence is associated with
+ *
+- * Checks for fence activity and if there is none probe
+- * the hardware if a lockup occured.
++ * Checks the current fence value and wakes the fence queue
++ * if the sequence number has increased (all asics).
+ */
+-static void amdgpu_fence_check_lockup(struct work_struct *work)
++void amdgpu_fence_process(struct amdgpu_ring *ring)
+ {
+- struct amdgpu_fence_driver *fence_drv;
+- struct amdgpu_ring *ring;
+-
+- fence_drv = container_of(work, struct amdgpu_fence_driver,
+- lockup_work.work);
+- ring = fence_drv->ring;
+-
+ if (amdgpu_fence_activity(ring))
+ wake_up_all(&ring->fence_drv.fence_queue);
+ }
+
+ /**
+- * amdgpu_fence_process - process a fence
++ * amdgpu_fence_fallback - fallback for hardware interrupts
+ *
+- * @adev: amdgpu_device pointer
+- * @ring: ring index the fence is associated with
++ * @work: delayed work item
+ *
+- * Checks the current fence value and wakes the fence queue
+- * if the sequence number has increased (all asics).
++ * Checks for fence activity.
+ */
+-void amdgpu_fence_process(struct amdgpu_ring *ring)
++static void amdgpu_fence_fallback(unsigned long arg)
+ {
+- if (amdgpu_fence_activity(ring))
+- wake_up_all(&ring->fence_drv.fence_queue);
++ struct amdgpu_ring *ring = (void *)arg;
++
++ amdgpu_fence_process(ring);
+ }
+
+ /**
+@@ -289,7 +277,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
+ if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
+ return 0;
+
+- amdgpu_fence_schedule_check(ring);
++ amdgpu_fence_schedule_fallback(ring);
+ wait_event(ring->fence_drv.fence_queue, (
+ (signaled = amdgpu_fence_seq_signaled(ring, seq))));
+
+@@ -490,9 +478,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+ atomic64_set(&ring->fence_drv.last_seq, 0);
+ ring->fence_drv.initialized = false;
+
+- INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
+- amdgpu_fence_check_lockup);
+- ring->fence_drv.ring = ring;
++ setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
++ (unsigned long)ring);
+
+ init_waitqueue_head(&ring->fence_drv.fence_queue);
+
+@@ -556,6 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+ mutex_lock(&adev->ring_lock);
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
++
+ if (!ring || !ring->fence_drv.initialized)
+ continue;
+ r = amdgpu_fence_wait_empty(ring);
+@@ -567,6 +555,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+ amd_sched_fini(&ring->sched);
++ del_timer_sync(&ring->fence_drv.fallback_timer);
+ ring->fence_drv.initialized = false;
+ }
+ mutex_unlock(&adev->ring_lock);
+@@ -750,7 +739,8 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
+ fence->fence_wake.func = amdgpu_fence_check_signaled;
+ __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
+ fence_get(f);
+- amdgpu_fence_schedule_check(ring);
++ if (!timer_pending(&ring->fence_drv.fallback_timer))
++ amdgpu_fence_schedule_fallback(ring);
+ FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+ return true;
+ }
+--
+1.9.1
+
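The patch replaces the per-ring delayed work item (lockup_work) with a timer_list (fallback_timer): the timer is armed with mod_timer() when a fallback check is needed and no timer is already pending, and it is cancelled with del_timer_sync() in amdgpu_fence_driver_fini(), which is the "proper cleanup handling" the commit message refers to. Below is a minimal standalone sketch of that pattern, assuming a kernel of the same era where setup_timer() and unsigned long timer callbacks are still available; the module and function names (timer_fallback_demo, fallback_fn) are illustrative only and are not part of amdgpu.

/*
 * Illustrative sketch only (not part of the patch above): arm a
 * timer_list as a fallback, re-arm it only when it is not already
 * pending, and cancel it synchronously on teardown.
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list fallback_timer;

/* Stands in for amdgpu_fence_fallback()/amdgpu_fence_process(). */
static void fallback_fn(unsigned long arg)
{
	pr_info("fallback timer fired\n");
}

static int __init timer_fallback_demo_init(void)
{
	setup_timer(&fallback_timer, fallback_fn, 0);

	/* Mirror amdgpu_fence_enable_signaling(): arm only if not pending. */
	if (!timer_pending(&fallback_timer))
		mod_timer(&fallback_timer, jiffies + HZ / 2);
	return 0;
}

static void __exit timer_fallback_demo_exit(void)
{
	/* Mirror amdgpu_fence_driver_fini(): wait for a running callback. */
	del_timer_sync(&fallback_timer);
}

module_init(timer_fallback_demo_init);
module_exit(timer_fallback_demo_exit);
MODULE_LICENSE("GPL");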