From 9cb7e5a91f6cd4dc018cca7120d2da067f816d3a Mon Sep 17 00:00:00 2001
From: Chunming Zhou <david1.zhou@amd.com>
Date: Tue, 21 Jul 2015 13:17:19 +0800
Subject: [PATCH 0383/1050] drm/amdgpu: add context entity init

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  2 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 36 ++++++++++++++++++++++++++++++++-
 2 files changed, 37 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 815d40f..776339c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -994,10 +994,12 @@ struct amdgpu_vm_manager {
 struct amdgpu_ctx_ring {
 	uint64_t	sequence;
 	struct fence	*fences[AMDGPU_CTX_MAX_CS_PENDING];
+	struct amd_context_entity c_entity;
 };
 
 struct amdgpu_ctx {
 	struct kref		refcount;
+	struct amdgpu_device    *adev;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 144edc9..557fb60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -28,13 +28,23 @@
 static void amdgpu_ctx_do_release(struct kref *ref)
 {
 	struct amdgpu_ctx *ctx;
+	struct amdgpu_device *adev;
 	unsigned i, j;
 
 	ctx = container_of(ref, struct amdgpu_ctx, refcount);
+	adev = ctx->adev;
+
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
 			fence_put(ctx->rings[i].fences[j]);
+
+	if (amdgpu_enable_scheduler) {
+		for (i = 0; i < adev->num_rings; i++)
+			amd_context_entity_fini(adev->rings[i]->scheduler,
+						&ctx->rings[i].c_entity);
+	}
+
 	kfree(ctx);
 }
 
@@ -43,7 +53,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 {
 	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
-	int i, r;
+	int i, j, r;
 
 	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
@@ -59,11 +69,35 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
 	*id = (uint32_t)r;
 
 	memset(ctx, 0, sizeof(*ctx));
+	ctx->adev = adev;
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		ctx->rings[i].sequence = 1;
 	mutex_unlock(&mgr->lock);
+	if (amdgpu_enable_scheduler) {
+		/* create context entity for each ring */
+		for (i = 0; i < adev->num_rings; i++) {
+			struct amd_run_queue *rq;
+			if (fpriv)
+				rq = &adev->rings[i]->scheduler->sched_rq;
+			else
+				rq = &adev->rings[i]->scheduler->kernel_rq;
+			r = amd_context_entity_init(adev->rings[i]->scheduler,
+						    &ctx->rings[i].c_entity,
+						    NULL, rq, *id);
+			if (r)
+				break;
+		}
+
+		if (i < adev->num_rings) {
+			for (j = 0; j < i; j++)
+				amd_context_entity_fini(adev->rings[j]->scheduler,
+							&ctx->rings[j].c_entity);
+			kfree(ctx);
+			return -EINVAL;
+		}
+	}
 
 	return 0;
 }
-- 
1.9.1
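
For context, the two hunks above follow a common kernel idiom: initialize one
scheduler entity per ring when the context is allocated, pick a run queue based
on whether the context belongs to a userspace client (the patch's
"fpriv ? sched_rq : kernel_rq" selection), and on partial failure tear down only
the entities that were already set up before freeing the context. Below is a
minimal, self-contained C sketch of that all-or-roll-back pattern. The names
fake_rq, fake_entity, entity_init, entity_fini, ctx_alloc, and NUM_RINGS are
hypothetical stand-ins for illustration, not the real amdgpu or GPU-scheduler
API.

    /*
     * Standalone sketch of the pattern amdgpu_ctx_alloc() and
     * amdgpu_ctx_do_release() use above. All names here are
     * illustrative stand-ins, not the amdgpu API.
     */
    #include <errno.h>
    #include <stdio.h>

    #define NUM_RINGS 4

    struct fake_rq { const char *name; };

    struct fake_entity {
        struct fake_rq *rq;
        int initialized;
    };

    static struct fake_rq sched_rq  = { "normal" };
    static struct fake_rq kernel_rq = { "kernel" };

    /* Pretend per-ring init; fail on ring 2 to exercise the rollback path. */
    static int entity_init(struct fake_entity *e, struct fake_rq *rq, int ring)
    {
        if (ring == 2)
            return -EINVAL;
        e->rq = rq;
        e->initialized = 1;
        return 0;
    }

    static void entity_fini(struct fake_entity *e)
    {
        e->initialized = 0;
    }

    static int ctx_alloc(struct fake_entity *entities, int from_userspace)
    {
        int i, j, r = 0;

        for (i = 0; i < NUM_RINGS; i++) {
            /* Userspace contexts go to the normal run queue, in-kernel
             * contexts to the kernel run queue, mirroring the patch's
             * "fpriv ? sched_rq : kernel_rq" choice. */
            struct fake_rq *rq = from_userspace ? &sched_rq : &kernel_rq;

            r = entity_init(&entities[i], rq, i);
            if (r)
                break;
        }

        if (i < NUM_RINGS) {
            /* Roll back only entities 0..i-1, mirroring the j-loop
             * in the patch's error path. */
            for (j = 0; j < i; j++)
                entity_fini(&entities[j]);
            return r;
        }
        return 0;
    }

    int main(void)
    {
        struct fake_entity entities[NUM_RINGS] = { 0 };
        int r = ctx_alloc(entities, 1);

        printf("ctx_alloc returned %d\n", r); /* -22 here: ring 2 fails */
        return r ? 1 : 0;
    }

One difference worth noting: the patch's error path maps any per-ring init
failure to -EINVAL, whereas the sketch propagates the original error code from
the failing init, which is the more usual kernel style.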