aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/1421-drm-amdgpu-Create-symmetry-between-map_bo_to_gpuvm-a.patch
blob: f381d30abe0adedd824deb1b328de528de7610f5 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
From 0f6374b004d7ce2479da3daff0cd448716bd3f85 Mon Sep 17 00:00:00 2001
From: Yong Zhao <yong.zhao@amd.com>
Date: Wed, 27 Apr 2016 20:18:31 -0400
Subject: [PATCH 1421/4131] drm/amdgpu: Create symmetry between map_bo_to_gpuvm
 and unmap_bo_from_gpuvm

1. Move all unpin operations into unmap_bo_from_gpuvm
2. Move the reservation of BOs and page tables out of unmap_bo_from_gpuvm
This way the BO and its page tables are reserved only once for all unmap
and unpin operations, instead of reserving and unreserving each individual
BO and page table for every unmap and unpin.

Change-Id: I64158cdd8eded0ec546ecab9ca3d78820660fdea
Signed-off-by: Yong Zhao <yong.zhao@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 76 ++++++++++--------------
 1 file changed, 31 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index c9f7af6..023ef5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -478,6 +478,7 @@ static int reserve_bo_and_vms(struct kgd_mem *mem,
 				ctx->n_vms++;
 		}
 	}
+
 	if (ctx->n_vms == 0)
 		ctx->vm_pd = NULL;
 	else {
@@ -1015,32 +1016,10 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
 }
 
 static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
+				struct amdgpu_bo *bo,
 				struct amdgpu_bo_va *bo_va)
 {
-	struct amdgpu_vm *vm;
-	int ret;
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct amdgpu_bo_list_entry vm_pd;
-	struct list_head list, duplicates;
-
-	INIT_LIST_HEAD(&list);
-	INIT_LIST_HEAD(&duplicates);
-
-	vm = bo_va->vm;
-	tv.bo = &bo_va->bo->tbo;
-	tv.shared = true;
-	list_add(&tv.head, &list);
-
-	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
-
-	ret = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
-	if (ret) {
-		pr_err("amdkfd: Failed to reserve buffers in ttm\n");
-		return ret;
-	}
-
-	amdgpu_vm_get_pt_bos(vm, &duplicates);
+	struct amdgpu_vm *vm = bo_va->vm;
 
 	/*
 	 * The previously "released" BOs are really released and their VAs are
@@ -1054,7 +1033,12 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 
 	amdgpu_vm_clear_invalids(adev, vm, NULL);
 
-	ttm_eu_backoff_reservation(&ticket, &list);
+	/* Unpin the PD directory*/
+	unpin_bo(bo_va->vm->page_directory, false);
+	/* Unpin PTs */
+	unpin_pts(bo_va, bo_va->vm, false);
+	/* Unpin BO*/
+	unpin_bo(bo, false);
 
 	return 0;
 }
@@ -1079,6 +1063,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 	struct amdgpu_device *adev;
 	unsigned mapped_before;
 	int ret = 0;
+	struct bo_vm_reservation_context ctx;
 
 	BUG_ON(kgd == NULL);
 	BUG_ON(mem == NULL);
@@ -1103,6 +1088,10 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 	}
 	mapped_before = mem->data2.mapped_to_gpu_memory;
 
+	ret = reserve_bo_and_vms(mem, vm, true, &ctx);
+	if (unlikely(ret != 0))
+		goto out;
+
 	list_for_each_entry(entry, &mem->data2.bo_va_list, bo_list) {
 		if (entry->kgd_dev == kgd &&
 				entry->bo_va->vm == vm &&
@@ -1114,7 +1103,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 				ret = kgd2kfd->resume_mm(adev->kfd,
 							 current->mm);
 				if (ret != 0)
-					goto out;
+					goto unreserve_out;
 				entry->is_mapped = false;
 				mem->data2.mapped_to_gpu_memory--;
 				continue;
@@ -1124,28 +1113,22 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 				mem->data2.va,
 				mem->data2.bo->tbo.mem.size);
 
-			ret = unmap_bo_from_gpuvm(adev, entry->bo_va);
+			ret = unmap_bo_from_gpuvm(adev, mem->data2.bo,
+						entry->bo_va);
 			if (ret == 0) {
 				entry->is_mapped = false;
 			} else {
 				pr_err("amdgpu: failed unmap va 0x%llx\n",
 						mem->data2.va);
-				goto out;
+				goto unreserve_out;
 			}
 
-			/* Unpin the PD directory*/
-			unpin_bo(entry->bo_va->vm->page_directory, true);
-			/* Unpin PTs */
-			unpin_pts(entry->bo_va, entry->bo_va->vm, true);
-
-			/* Unpin BO*/
-			unpin_bo(mem->data2.bo, true);
-
 			mem->data2.mapped_to_gpu_memory--;
 			pr_debug("amdgpu: DEC mapping count %d\n",
 					mem->data2.mapped_to_gpu_memory);
 		}
 	}
+
 	if (mapped_before == mem->data2.mapped_to_gpu_memory) {
 		pr_debug("BO size %lu bytes at va 0x%llx is not mapped on GPU %x:%x.%x\n",
 			 mem->data2.bo->tbo.mem.size, mem->data2.va,
@@ -1154,6 +1137,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		ret = -EINVAL;
 	}
 
+unreserve_out:
+	unreserve_bo_and_vms(&ctx, true);
 out:
 	mutex_unlock(&mem->data2.lock);
 	return ret;
@@ -1426,6 +1411,7 @@ int amdgpu_amdkfd_gpuvm_evict_mem(struct kgd_mem *mem, struct mm_struct *mm)
 	struct kfd_bo_va_list *entry;
 	unsigned n_evicted;
 	int r = 0;
+	struct bo_vm_reservation_context ctx;
 
 	pr_debug("Evicting buffer %p\n", mem);
 
@@ -1438,6 +1424,11 @@ int amdgpu_amdkfd_gpuvm_evict_mem(struct kgd_mem *mem, struct mm_struct *mm)
 	 * number of evicted mappings so we can roll back if something
 	 * goes wrong. */
 	n_evicted = 0;
+
+	r = reserve_bo_and_vms(mem, NULL, true, &ctx);
+	if (unlikely(r != 0))
+		return r;
+
 	list_for_each_entry(entry, &mem->data2.bo_va_list, bo_list) {
 		struct amdgpu_device *adev;
 
@@ -1452,7 +1443,7 @@ int amdgpu_amdkfd_gpuvm_evict_mem(struct kgd_mem *mem, struct mm_struct *mm)
 			goto fail;
 		}
 
-		r = unmap_bo_from_gpuvm(adev, entry->bo_va);
+		r = unmap_bo_from_gpuvm(adev, mem->data2.bo, entry->bo_va);
 		if (r != 0) {
 			pr_err("failed unmap va 0x%llx\n",
 			       mem->data2.va);
@@ -1460,20 +1451,15 @@ int amdgpu_amdkfd_gpuvm_evict_mem(struct kgd_mem *mem, struct mm_struct *mm)
 			goto fail;
 		}
 
-		/* Unpin the PD directory*/
-		unpin_bo(entry->bo_va->vm->page_directory, true);
-		/* Unpin PTs */
-		unpin_pts(entry->bo_va, entry->bo_va->vm, true);
-
-		/* Unpin BO*/
-		unpin_bo(mem->data2.bo, true);
-
 		n_evicted++;
 	}
 
+	unreserve_bo_and_vms(&ctx, true);
+
 	return 0;
 
 fail:
+	unreserve_bo_and_vms(&ctx, true);
 	/* To avoid hangs and keep state consistent, roll back partial
 	 * eviction by restoring queues and marking mappings as
 	 * unmapped. Access to now unmapped buffers will fault. */
-- 
2.7.4