Diffstat (limited to 'common/recipes-kernel/linux/files/0026-drm-amdgpu-unify-AMDGPU_CTX_MAX_CS_PENDING-and-amdgp.patch')
-rw-r--r--  common/recipes-kernel/linux/files/0026-drm-amdgpu-unify-AMDGPU_CTX_MAX_CS_PENDING-and-amdgp.patch  120
1 file changed, 120 insertions(+), 0 deletions(-)
diff --git a/common/recipes-kernel/linux/files/0026-drm-amdgpu-unify-AMDGPU_CTX_MAX_CS_PENDING-and-amdgp.patch b/common/recipes-kernel/linux/files/0026-drm-amdgpu-unify-AMDGPU_CTX_MAX_CS_PENDING-and-amdgp.patch
new file mode 100644
index 00000000..da130896
--- /dev/null
+++ b/common/recipes-kernel/linux/files/0026-drm-amdgpu-unify-AMDGPU_CTX_MAX_CS_PENDING-and-amdgp.patch
@@ -0,0 +1,120 @@
+From 12db7286ff75575c9cac9afc5309c26e8ae21527 Mon Sep 17 00:00:00 2001
+From: Chunming Zhou <David1.Zhou@amd.com>
+Date: Thu, 10 Dec 2015 15:45:11 +0800
+Subject: [PATCH 0026/1110] drm/amdgpu: unify AMDGPU_CTX_MAX_CS_PENDING and
+ amdgpu_sched_jobs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu.h | 5 ++---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 25 ++++++++++++++++++-------
+ 2 files changed, 20 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index e85ed1b..f6563fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -1022,11 +1022,9 @@ int amdgpu_vm_free_job(struct amdgpu_job *job);
+ * context related structures
+ */
+
+-#define AMDGPU_CTX_MAX_CS_PENDING 16
+-
+ struct amdgpu_ctx_ring {
+ uint64_t sequence;
+- struct fence *fences[AMDGPU_CTX_MAX_CS_PENDING];
++ struct fence **fences;
+ struct amd_sched_entity entity;
+ };
+
+@@ -1035,6 +1033,7 @@ struct amdgpu_ctx {
+ struct amdgpu_device *adev;
+ unsigned reset_counter;
+ spinlock_t ring_lock;
++ struct fence **fences;
+ struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
+ };
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 15e3416..ee121ec 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -35,15 +35,24 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
+ ctx->adev = adev;
+ kref_init(&ctx->refcount);
+ spin_lock_init(&ctx->ring_lock);
+- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+- ctx->rings[i].sequence = 1;
++ ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
++ AMDGPU_MAX_RINGS, GFP_KERNEL);
++ if (!ctx->fences)
++ return -ENOMEM;
+
++ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
++ ctx->rings[i].sequence = 1;
++ ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
++ amdgpu_sched_jobs * i;
++ }
+ if (amdgpu_enable_scheduler) {
+ /* create context entity for each ring */
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amd_sched_rq *rq;
+- if (pri >= AMD_SCHED_MAX_PRIORITY)
++ if (pri >= AMD_SCHED_MAX_PRIORITY) {
++ kfree(ctx->fences);
+ return -EINVAL;
++ }
+ rq = &adev->rings[i]->sched.sched_rq[pri];
+ r = amd_sched_entity_init(&adev->rings[i]->sched,
+ &ctx->rings[i].entity,
+@@ -56,6 +65,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
+ for (j = 0; j < i; j++)
+ amd_sched_entity_fini(&adev->rings[j]->sched,
+ &ctx->rings[j].entity);
++ kfree(ctx->fences);
+ return r;
+ }
+ }
+@@ -71,8 +81,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+ return;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+- for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
++ for (j = 0; j < amdgpu_sched_jobs; ++j)
+ fence_put(ctx->rings[i].fences[j]);
++ kfree(ctx->fences);
+
+ if (amdgpu_enable_scheduler) {
+ for (i = 0; i < adev->num_rings; i++)
+@@ -241,7 +252,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ unsigned idx = 0;
+ struct fence *other = NULL;
+
+- idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
++ idx = seq % amdgpu_sched_jobs;
+ other = cring->fences[idx];
+ if (other) {
+ signed long r;
+@@ -276,12 +287,12 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ }
+
+
+- if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
++ if (seq + amdgpu_sched_jobs < cring->sequence) {
+ spin_unlock(&ctx->ring_lock);
+ return NULL;
+ }
+
+- fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
++ fence = fence_get(cring->fences[seq % amdgpu_sched_jobs]);
+ spin_unlock(&ctx->ring_lock);
+
+ return fence;
+--
+2.7.4
+
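The patch above replaces the fixed per-ring fence array (AMDGPU_CTX_MAX_CS_PENDING, 16 slots) with a single per-context allocation sized by the amdgpu_sched_jobs module parameter, carved into per-ring slices and indexed by seq % amdgpu_sched_jobs. As a rough userspace model of that layout (not driver code; the names ctx_model, ring_model and NUM_RINGS are invented for illustration), the following standalone C sketch shows the shared allocation, the per-ring windows and the modulo indexing:

/* Userspace model of the patch's slot layout: one contiguous allocation
 * per context, sized by a runtime "sched_jobs" value, split into per-ring
 * windows and indexed by sequence % sched_jobs.  All names here are
 * illustrative only and do not exist in the driver. */
#include <stdio.h>
#include <stdlib.h>

#define NUM_RINGS 4              /* stands in for AMDGPU_MAX_RINGS */

struct ring_model {
	unsigned long sequence;  /* next sequence number to hand out */
	void **slots;            /* this ring's window into the shared array */
};

struct ctx_model {
	unsigned sched_jobs;     /* stands in for amdgpu_sched_jobs */
	void **slots;            /* sched_jobs * NUM_RINGS entries */
	struct ring_model rings[NUM_RINGS];
};

static int ctx_model_init(struct ctx_model *ctx, unsigned sched_jobs)
{
	unsigned i;

	ctx->sched_jobs = sched_jobs;
	ctx->slots = calloc((size_t)sched_jobs * NUM_RINGS, sizeof(void *));
	if (!ctx->slots)
		return -1;

	for (i = 0; i < NUM_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		/* each ring gets a contiguous sched_jobs-sized slice */
		ctx->rings[i].slots = ctx->slots + (size_t)sched_jobs * i;
	}
	return 0;
}

/* Store a payload under the ring's current sequence number.  The previous
 * occupant of the slot (sequence - sched_jobs) is simply overwritten here;
 * the driver instead waits on and releases that older fence first. */
static unsigned long ctx_model_add(struct ctx_model *ctx, unsigned ring,
				   void *payload)
{
	struct ring_model *r = &ctx->rings[ring];
	unsigned long seq = r->sequence++;

	r->slots[seq % ctx->sched_jobs] = payload;
	return seq;
}

int main(void)
{
	struct ctx_model ctx;
	int values[10];
	unsigned long seq;
	int i;

	if (ctx_model_init(&ctx, 4))   /* e.g. amdgpu_sched_jobs=4 */
		return 1;

	for (i = 0; i < 10; i++) {
		values[i] = i;
		seq = ctx_model_add(&ctx, 0, &values[i]);
		printf("ring 0: seq %lu -> slot %lu\n",
		       seq, seq % ctx.sched_jobs);
	}

	free(ctx.slots);
	return 0;
}

With sched_jobs = 4, sequences 1..10 wrap around slots 1, 2, 3, 0, 1, ... which is why the driver's lookup path can reject requests older than seq + amdgpu_sched_jobs: those slots have already been reused.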