From a19f5011670341975abb182247038c378baf4234 Mon Sep 17 00:00:00 2001
From: Christian König <christian.koenig@amd.com>
Date: Mon, 11 Sep 2017 18:24:37 -0400
Subject: [PATCH 0920/4131] drm/amdgpu: keep the MMU lock until the update ends
 v4
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is quite controversial because it adds another lock which is held during
page table updates, but I don't see many other options.
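
In this scheme the notifier holds the read side of rmn->lock from
invalidate_range_start() until invalidate_range_end(), i.e. across the
whole CPU page table update, while command submission takes the write side
around its own update. A hypothetical caller-side pairing (the surrounding
CS code here is illustrative and not part of this patch; it assumes, as in
the upstream code of this era, that amdgpu_mn_lock()/amdgpu_mn_unlock()
take the write side of rmn->lock):

    struct amdgpu_mn *mn = amdgpu_mn_get(adev);

    amdgpu_mn_lock(mn);   /* blocks while an invalidation is in flight */
    /* ... validate userptr BOs and submit the command stream ... */
    amdgpu_mn_unlock(mn); /* pending invalidate_range_start() may proceed */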

v2: allow multiple updates to be in flight at the same time
v3: simplify the patch, take the read side only once
v4: correctly fix rebase conflict
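
For reference, the v3 "take the read side only once" scheme maps nested or
overlapping invalidations onto a single rwsem read acquisition via a
recursion counter. A minimal userspace sketch of the same pattern, using
pthreads in place of the kernel primitives (the names and the plain counter
under the mutex are illustrative; the patch itself uses an atomic counter
and down_read_non_owner()/up_read_non_owner()):

    #include <pthread.h>

    struct mn_lock {
        pthread_rwlock_t lock;      /* stands in for rmn->lock */
        pthread_mutex_t  read_lock; /* serializes the first reader */
        int              recursion; /* invalidations in flight */
    };

    static struct mn_lock g_mn = {
        .lock      = PTHREAD_RWLOCK_INITIALIZER,
        .read_lock = PTHREAD_MUTEX_INITIALIZER,
    };

    /* invalidate_range_start: only the first invalidation takes the lock */
    static void mn_read_lock(struct mn_lock *mn)
    {
        pthread_mutex_lock(&mn->read_lock);
        if (mn->recursion++ == 0)
            pthread_rwlock_rdlock(&mn->lock);
        pthread_mutex_unlock(&mn->read_lock);
    }

    /* invalidate_range_end: the last invalidation drops it again */
    static void mn_read_unlock(struct mn_lock *mn)
    {
        pthread_mutex_lock(&mn->read_lock);
        if (--mn->recursion == 0)
            pthread_rwlock_unlock(&mn->lock);
        pthread_mutex_unlock(&mn->read_lock);
    }

Note that, as with the kernel's _non_owner variants, the thread that drops
the last read lock need not be the one that took it; POSIX leaves that case
undefined for rwlocks, so this is a sketch of the idea, not production code.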

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>

 Conflicts:
        drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c

Change-Id: I932e17b4e1564a3974004b501b26e00d8259782a
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h    |  4 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 55 +++++++++++++++++++++++++++++++++-
 2 files changed, 56 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3c7a9bf..547fc1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1221,11 +1221,11 @@ void amdgpu_test_moves(struct amdgpu_device *adev);
  * MMU Notifier
  */
 #if defined(CONFIG_MMU_NOTIFIER)
+void amdgpu_mn_lock(struct amdgpu_mn *mn);
+void amdgpu_mn_unlock(struct amdgpu_mn *mn);
 struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-void amdgpu_mn_lock(struct amdgpu_mn *mn);
-void amdgpu_mn_unlock(struct amdgpu_mn *mn);
 #else
 static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
 static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index da6ffaa..89d45aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -52,6 +52,8 @@ struct amdgpu_mn {
 	/* objects protected by lock */
 	struct rw_semaphore     lock;
 	struct rb_root_cached	objects;
+	struct mutex            read_lock;
+	atomic_t                recursion;
 };
 
 struct amdgpu_mn_node {
@@ -126,6 +128,34 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 }
 
 /**
+ * amdgpu_mn_read_lock - take the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Take the rmn read side lock.
+ */
+static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+{
+	mutex_lock(&rmn->read_lock);
+	if (atomic_inc_return(&rmn->recursion) == 1)
+		down_read_non_owner(&rmn->lock);
+	mutex_unlock(&rmn->read_lock);
+}
+
+/**
+ * amdgpu_mn_read_unlock - drop the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Drop the rmn read side lock.
+ */
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+{
+	if (atomic_dec_return(&rmn->recursion) == 0)
+		up_read_non_owner(&rmn->lock);
+}
+
+/**
  * amdgpu_mn_invalidate_node - unmap all BOs of a node
  *
  * @node: the node with the BOs to unmap
@@ -176,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	down_read(&rmn->lock);
+	amdgpu_mn_read_lock(rmn);
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
@@ -191,9 +221,30 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	up_read(&rmn->lock);
 }
 
+/**
+ * amdgpu_mn_invalidate_range_end - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * Release the lock again to allow new command submissions.
+ */
+static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
+					   struct mm_struct *mm,
+					   unsigned long start,
+					   unsigned long end)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+
+	amdgpu_mn_read_unlock(rmn);
+}
+
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
 	.release = amdgpu_mn_release,
 	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
+	.invalidate_range_end = amdgpu_mn_invalidate_range_end,
 };
 
 /**
@@ -231,6 +282,8 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 	rmn->mn.ops = &amdgpu_mn_ops;
 	init_rwsem(&rmn->lock);
 	rmn->objects = RB_ROOT_CACHED;
+	mutex_init(&rmn->read_lock);
+	atomic_set(&rmn->recursion, 0);
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
 	if (r)
-- 
2.7.4