Diffstat (limited to 'common/recipes-kernel/linux/files/1050-drm-amdgpu-use-fence_context-to-judge-ctx-switch.patch')
-rw-r--r--   common/recipes-kernel/linux/files/1050-drm-amdgpu-use-fence_context-to-judge-ctx-switch.patch   72
1 file changed, 72 insertions, 0 deletions
diff --git a/common/recipes-kernel/linux/files/1050-drm-amdgpu-use-fence_context-to-judge-ctx-switch.patch b/common/recipes-kernel/linux/files/1050-drm-amdgpu-use-fence_context-to-judge-ctx-switch.patch
new file mode 100644
index 00000000..e0f4f541
--- /dev/null
+++ b/common/recipes-kernel/linux/files/1050-drm-amdgpu-use-fence_context-to-judge-ctx-switch.patch
@@ -0,0 +1,72 @@
+From 9bdfa3ac3efa9e42c24cd9263a18ab7639bd4fc1 Mon Sep 17 00:00:00 2001
+From: Monk Liu <Monk.Liu@amd.com>
+Date: Fri, 22 Apr 2016 18:15:44 +0800
+Subject: [PATCH 1050/1110] drm/amdgpu: use fence_context to judge ctx switch
+
+Using the ctx pointer is not safe, because the ctx may already
+have been freed and its memory reassigned to another ctx by the
+time the comparison is made.
+
+fence_context is always increasing and has only a negligible
+chance of wrapping back to an already-used value for jobs
+scheduled to the ring consecutively.
+
+Signed-off-by: Monk Liu <Monk.Liu@amd.com>
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+index 1d15ac6..f879ffb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+@@ -119,7 +119,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ {
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ib *ib = &ibs[0];
+- struct amdgpu_ctx *ctx, *old_ctx;
++ uint64_t fence_context = 0, old = ring->last_fence_context;
+ struct fence *hwf;
+ struct amdgpu_vm *vm = NULL;
+ unsigned i, patch_offset = ~0;
+@@ -129,9 +129,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ if (num_ibs == 0)
+ return -EINVAL;
+
+- ctx = ibs->ctx;
+- if (job) /* for domain0 job like ring test, ibs->job is not assigned */
++ if (job) {/* for domain0 job like ring test, ibs->job is not assigned */
+ vm = job->vm;
++ fence_context = job->fence_context;
++ }
+
+ if (!ring->ready) {
+ dev_err(adev->dev, "couldn't schedule ib\n");
+@@ -171,19 +172,19 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ for (i = 0; i < num_ibs; ++i) {
+ ib = &ibs[i];
+
+- amdgpu_ring_emit_ib(ring, ib);
+- ring->current_ctx = ctx;
++ amdgpu_ring_emit_ib(ring, ib, (i == 0 && old != fence_context));
+ }
+
+ if (vm) {
+ if (ring->funcs->emit_hdp_invalidate)
+ amdgpu_ring_emit_hdp_invalidate(ring);
+ }
++ ring->last_fence_context = fence_context;
+
+ r = amdgpu_fence_emit(ring, &hwf);
+ if (r) {
+ dev_err(adev->dev, "failed to emit fence (%d)\n", r);
+- ring->current_ctx = old_ctx;
++ ring->last_fence_context = old;
+ if (ib->vm_id)
+ amdgpu_vm_reset_id(adev, ib->vm_id);
+ amdgpu_ring_undo(ring);
+--
+2.7.4
+
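For reference, below is a minimal standalone sketch (not part of the patch itself) of the idea the commit message describes: the ring remembers the 64-bit fence_context of the last job it ran, and a context switch is detected by comparing these numbers rather than ctx pointers, whose memory can be freed and handed to a new ctx at the same address. The struct and function names are simplified stand-ins chosen for illustration, not the driver's actual types.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for struct amdgpu_ring: only the field this
 * patch introduces is kept. */
struct ring_state {
	uint64_t last_fence_context;
};

/* Simplified stand-in for the job: each job carries the fence
 * context of the submitting ctx.  Fence contexts are allocated
 * from a monotonically increasing counter, so a value is
 * effectively never reused. */
struct job_state {
	uint64_t fence_context;
};

/* Returns true when the incoming job belongs to a different context
 * than the last one that ran on this ring, i.e. the first IB of the
 * submission should be emitted with a context switch. */
static bool needs_ctx_switch(const struct ring_state *ring,
			     const struct job_state *job)
{
	return ring->last_fence_context != job->fence_context;
}

In the patch itself this comparison is folded into the amdgpu_ring_emit_ib() call for the first IB (i == 0 && old != fence_context), and last_fence_context is rolled back to the saved value if fence emission fails.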