From 89d9bef98dca8fe4c97a3386d3d74b285aff4a91 Mon Sep 17 00:00:00 2001
From: Felix Kuehling <Felix.Kuehling@amd.com>
Date: Wed, 5 Apr 2017 19:41:01 -0400
Subject: [PATCH 1653/4131] drm/amdgpu: Schedule restore of userptr BOs on
 first eviction

Scheduling restore in another MMU notifier (invalidate_range_end) can
result in queues being stalled indefinitely if the last userptr BO is
freed in the meantime. Instead, schedule the restore when the first
eviction happens.
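
In effect, the eviction path now does the following (a condensed sketch
of the amdgpu_amdkfd_evict_userptr() hunk below; the surrounding
function context is elided):

	/* On eviction, quiesce the KFD queues and immediately schedule
	 * the delayed restore worker (1 jiffy), instead of relying on a
	 * later invalidate_range_end notifier that may never fire if the
	 * last userptr BO is freed first.
	 */
	r = kgd2kfd->quiesce_mm(NULL, mm);
	if (r != 0)
		pr_err("Failed to quiesce KFD\n");
	schedule_delayed_work(&process_info->work, 1);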

Bug: SWDEV-117996
Change-Id: Idabadebf2a7e40609288d62170a0840d3b6f1d9b
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>

 Conflicts:
	drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h       |  2 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 18 +--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c           | 48 ------------------------
 3 files changed, 1 insertion(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 9e89aee..81b8bc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -140,8 +140,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
 
 int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
-int amdgpu_amdkfd_schedule_restore_userptr(struct kgd_mem *mem,
-					   unsigned long delay);
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 				uint32_t vmid, uint64_t gpu_addr,
 				uint32_t *ib_cmd, uint32_t ib_len);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b0527bd..bde4f6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1915,6 +1915,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem,
 		r = kgd2kfd->quiesce_mm(NULL, mm);
 		if (r != 0)
 			pr_err("Failed to quiesce KFD\n");
+		schedule_delayed_work(&process_info->work, 1);
 	}
 
 	return r;
@@ -2190,23 +2191,6 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
 		schedule_delayed_work(&process_info->work, 1);
 }
 
-/* Schedule delayed restoring of userptr BOs
- *
- * This runs in an MMU notifier. See limitations above. The scheduled
- * worker is free of those limitations. Delaying the restore allows
- * multiple MMU notifiers to happen in rapid succession, for example
- * when fork COWs many BOs at once.
- */
-int amdgpu_amdkfd_schedule_restore_userptr(struct kgd_mem *mem,
-					   unsigned long delay)
-{
-	struct amdkfd_process_info *process_info = mem->process_info;
-
-	schedule_delayed_work(&process_info->work, delay);
-
-	return 0;
-}
-
 /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
  *   KFD process identified by process_info
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 9d78a4f..fb960ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -235,53 +235,6 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 	mutex_unlock(&rmn->lock);
 }
 
-/**
- * amdgpu_mn_invalidate_range_end_hsa - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
- *
- * Restore BOs between start and end. Once the last BO is restored,
- * the queues can be reenabled. Restoring a BO can itself trigger
- * another recursive MMU notifier. Therefore this needs to be
- * scheduled in a worker thread. Adding a slight delay (1 jiffy)
- * avoids excessive repeated evictions.
- */
-static void amdgpu_mn_invalidate_range_end_hsa(struct mmu_notifier *mn,
-					       struct mm_struct *mm,
-					       unsigned long start,
-					       unsigned long end)
-{
-	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
-	struct interval_tree_node *it;
-
-	/* notification is exclusive, but interval is inclusive */
-	end -= 1;
-
-	mutex_lock(&rmn->lock);
-
-	it = interval_tree_iter_first(&rmn->objects, start, end);
-	while (it) {
-		struct amdgpu_mn_node *node;
-		struct amdgpu_bo *bo;
-
-		node = container_of(it, struct amdgpu_mn_node, it);
-		it = interval_tree_iter_next(it, start, end);
-
-		list_for_each_entry(bo, &node->bos, mn_list) {
-			struct kgd_mem *mem = bo->kfd_bo;
-
-			if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
-							 start, end))
-				amdgpu_amdkfd_schedule_restore_userptr(mem, 1);
-		}
-	}
-
-	mutex_unlock(&rmn->lock);
-}
-
 static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
 	[AMDGPU_MN_TYPE_GFX] = {
 		.release = amdgpu_mn_release,
@@ -290,7 +243,6 @@ static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
 	[AMDGPU_MN_TYPE_HSA] = {
 		.release = amdgpu_mn_release,
 		.invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa,
-		.invalidate_range_end = amdgpu_mn_invalidate_range_end_hsa,
 	},
 };
 
-- 
2.7.4