From cbce205156a7873406e007a7b5bf6103a926e337 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Mon, 11 Sep 2017 18:24:37 -0400
Subject: [PATCH 1805/4131] drm/amdgpu: keep the MMU lock until the update ends
 v4
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is quite controversial because it adds another lock which is held
during page table updates, but I don't see much of an alternative.

v2: allow multiple updates to be in flight at the same time
v3: simplify the patch, take the read side only once
v4: correctly fix rebase conflict

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>

 Conflicts:
	drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c

Change-Id: I932e17b4e1564a3974004b501b26e00d8259782a
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  4 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 56 ++++++++++++++++++++++++++++++++--
 2 files changed, 56 insertions(+), 4 deletions(-)
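
The scheme pairs the rw_semaphore with a mutex and a recursion counter so
that any number of overlapping invalidate_range_start/end pairs share a
single read-side acquisition. A minimal user-space sketch of the same
pattern, using pthreads and C11 atomics in place of the kernel primitives
(all names below are illustrative, not amdgpu symbols):

    #include <pthread.h>
    #include <stdatomic.h>

    /* Stand-ins for the kernel primitives; illustrative only. */
    struct mn_lock {
        pthread_rwlock_t lock;      /* plays the role of struct rw_semaphore */
        pthread_mutex_t  read_lock; /* serializes the first/last reader */
        atomic_int       recursion; /* number of in-flight invalidations */
    };

    /* invalidate_range_start analogue: only the first of several
     * overlapping invalidations actually takes the read side. */
    static void mn_read_lock(struct mn_lock *m)
    {
        pthread_mutex_lock(&m->read_lock);
        if (atomic_fetch_add(&m->recursion, 1) == 0)
            pthread_rwlock_rdlock(&m->lock);
        pthread_mutex_unlock(&m->read_lock);
    }

    /* invalidate_range_end analogue: the last invalidation to finish
     * drops the read side again. */
    static void mn_read_unlock(struct mn_lock *m)
    {
        if (atomic_fetch_sub(&m->recursion, 1) == 1)
            pthread_rwlock_unlock(&m->lock);
    }

The kernel version needs down_read_non_owner()/up_read_non_owner() because,
with the recursion counter, the task that releases the semaphore is not
necessarily the one that acquired it; POSIX read-write locks carry the same
owner restriction, so this sketch shares that caveat.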

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5bf1051..85a557c 100755
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1230,11 +1230,11 @@ void amdgpu_test_moves(struct amdgpu_device *adev);
  * MMU Notifier
  */
 #if defined(CONFIG_MMU_NOTIFIER)
+void amdgpu_mn_lock(struct amdgpu_mn *mn);
+void amdgpu_mn_unlock(struct amdgpu_mn *mn);
 struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-void amdgpu_mn_lock(struct amdgpu_mn *mn);
-void amdgpu_mn_unlock(struct amdgpu_mn *mn);
 #else
 static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
 static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d0e42b0..f8093c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -59,6 +59,8 @@ struct amdgpu_mn {
 	/* objects protected by lock */
 	struct rw_semaphore	lock;
 	struct rb_root		objects;
+	struct mutex		read_lock;
+	atomic_t		recursion;
 };
 
 struct amdgpu_mn_node {
@@ -133,6 +135,34 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 }
 
 /**
+ * amdgpu_mn_read_lock - take the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Take the rmn read side lock.
+ */
+static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+{
+	mutex_lock(&rmn->read_lock);
+	if (atomic_inc_return(&rmn->recursion) == 1)
+		down_read_non_owner(&rmn->lock);
+	mutex_unlock(&rmn->read_lock);
+}
+
+/**
+ * amdgpu_mn_read_unlock - drop the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Drop the rmn read side lock.
+ */
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+{
+	if (atomic_dec_return(&rmn->recursion) == 0)
+		up_read_non_owner(&rmn->lock);
+}
+
+/**
  * amdgpu_mn_invalidate_node - unmap all BOs of a node
  *
  * @node: the node with the BOs to unmap
@@ -183,7 +213,7 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	down_read(&rmn->lock);
+	amdgpu_mn_read_lock(rmn);
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
@@ -194,8 +224,26 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 
 		amdgpu_mn_invalidate_node(node, start, end);
 	}
+}
+
+/**
+ * amdgpu_mn_invalidate_range_end - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * Release the lock again to allow new command submissions.
+ */
+static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
+					   struct mm_struct *mm,
+					   unsigned long start,
+					   unsigned long end)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
 
-	up_read(&rmn->lock);
+	amdgpu_mn_read_unlock(rmn);
 }
 
 /**
@@ -247,10 +295,12 @@ static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
 	[AMDGPU_MN_TYPE_GFX] = {
 		.release = amdgpu_mn_release,
 		.invalidate_range_start = amdgpu_mn_invalidate_range_start_gfx,
+		.invalidate_range_end = amdgpu_mn_invalidate_range_end,
 	},
 	[AMDGPU_MN_TYPE_HSA] = {
 		.release = amdgpu_mn_release,
 		.invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa,
+		.invalidate_range_end = amdgpu_mn_invalidate_range_end,
 	},
 };
 
@@ -308,6 +358,8 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 	rmn->mn.ops = &amdgpu_mn_ops[type];
 	init_rwsem(&rmn->lock);
 	rmn->objects = RB_ROOT;
+	mutex_init(&rmn->read_lock);
+	atomic_set(&rmn->recursion, 0);
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
 	if (r)
-- 
2.7.4
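
A hedged usage sketch, continuing the pthread analogue above: in the driver,
the write side of this rwsem is taken around command submission and page
table updates via amdgpu_mn_lock()/amdgpu_mn_unlock() (declared in the
header hunk but not shown in this patch), so an update waits for in-flight
invalidations and new invalidations wait until the update ends, which is
what the patch title describes.

    /* Write side, as the driver's amdgpu_mn_lock()/amdgpu_mn_unlock()
     * do with down_write()/up_write() on the real semaphore. */
    static void mn_lock(struct mn_lock *m)   { pthread_rwlock_wrlock(&m->lock); }
    static void mn_unlock(struct mn_lock *m) { pthread_rwlock_unlock(&m->lock); }

    static struct mn_lock g = {
        .lock      = PTHREAD_RWLOCK_INITIALIZER,
        .read_lock = PTHREAD_MUTEX_INITIALIZER,
    };

    /* MMU notifier path */
    void invalidation(void)
    {
        mn_read_lock(&g);       /* invalidate_range_start */
        /* ... unmap the userptr BOs overlapping the range ... */
        mn_read_unlock(&g);     /* invalidate_range_end */
    }

    /* Command submission path */
    void update_page_tables(void)
    {
        mn_lock(&g);            /* waits for in-flight invalidations */
        /* ... validate userptrs and update the page tables ... */
        mn_unlock(&g);
    }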