From d229bf62740828143a32c7314d3ab300beb90738 Mon Sep 17 00:00:00 2001
From: Rex Zhu <Rex.Zhu@amd.com>
Date: Mon, 15 Oct 2018 17:08:38 +0800
Subject: [PATCH 0742/2940] drm/amdgpu: Refine CSA related functions

There are no functional changes. The SRIOV-specific values that were
hardcoded in these functions are now passed in as function arguments,
so the same functions can also be shared on bare metal.

Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |  6 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c    |  5 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c   | 32 +++++++++++-----------
 drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h   |  8 ++++--
 4 files changed, 29 insertions(+), 22 deletions(-)
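
For context, a minimal sketch of how a bare-metal caller might use the
refactored helpers after this change. The GTT domain, the local variable
names, and the surrounding adev/fpriv context are illustrative assumptions
and are not part of this patch:

	struct amdgpu_bo *csa_bo = NULL;	/* backing BO for the CSA */
	struct amdgpu_bo_va *csa_va = NULL;	/* per-VM mapping of the CSA */
	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
	int r;

	/* Domain and size are now chosen by the caller instead of being
	 * hardcoded to VRAM/AMDGPU_CSA_SIZE inside the helper. */
	r = amdgpu_allocate_static_csa(adev, &csa_bo,
				       AMDGPU_GEM_DOMAIN_GTT, AMDGPU_CSA_SIZE);
	if (r)
		return r;

	/* Map the BO at the caller-supplied CSA virtual address. */
	r = amdgpu_map_static_csa(adev, &fpriv->vm, csa_bo, &csa_va,
				  csa_addr, AMDGPU_CSA_SIZE);
	if (r) {
		amdgpu_free_static_csa(&csa_bo);
		return r;
	}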

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4c16bc088faa..f2bf778fa256 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1704,7 +1704,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
 			/* right after GMC hw init, we create CSA */
 			if (amdgpu_sriov_vf(adev)) {
-				r = amdgpu_allocate_static_csa(adev);
+				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+								AMDGPU_GEM_DOMAIN_VRAM,
+								AMDGPU_CSA_SIZE);
 				if (r) {
 					DRM_ERROR("allocate CSA failed %d\n", r);
 					return r;
@@ -1938,7 +1940,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			amdgpu_ucode_free_bo(adev);
-			amdgpu_free_static_csa(adev);
+			amdgpu_free_static_csa(&adev->virt.csa_obj);
 			amdgpu_device_wb_fini(adev);
 			amdgpu_device_vram_scratch_fini(adev);
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 090a06341e7d..3d33cea894a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1007,7 +1007,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
-		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+						&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
 		if (r)
 			goto error_vm;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 9ff16b790c92..f71bc6feea7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -41,25 +41,25 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 }
 
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size)
 {
 	int r;
 	void *ptr;
 
-	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
-				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
+	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+				domain, bo,
 				NULL, &ptr);
-	if (r)
-		return r;
+	if (!bo)
+		return -ENOMEM;
 
-	memset(ptr, 0, AMDGPU_CSA_SIZE);
+	memset(ptr, 0, size);
 	return 0;
 }
 
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
-	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
-						NULL,
-						NULL);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+	amdgpu_bo_free_kernel(bo, NULL, NULL);
 }
 
 /*
@@ -69,9 +69,9 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
  * package to support SRIOV gfx preemption.
  */
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va)
+			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+			  uint64_t csa_addr,  uint32_t size)
 {
-	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	struct amdgpu_bo_list_entry pd;
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &adev->virt.csa_obj->tbo;
+	csa_tv.bo = &bo->tbo;
 	csa_tv.shared = true;
 
 	list_add(&csa_tv.head, &list);
@@ -92,7 +92,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return r;
 	}
 
-	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 	if (!*bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		DRM_ERROR("failed to create bo_va for static CSA\n");
@@ -100,7 +100,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
-				AMDGPU_CSA_SIZE);
+				size);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
 		amdgpu_vm_bo_rmv(adev, *bo_va);
@@ -108,7 +108,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return r;
 	}
 
-	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
 			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 			     AMDGPU_PTE_EXECUTABLE);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f1a6a50d9444..09a7ebe964d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -280,10 +280,12 @@ struct amdgpu_vm;
 
 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size);
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
+			  struct amdgpu_bo *bo,
+			  struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
-- 
2.17.1