From 75943799af069bd6d8e1a603250a49089ee8d6ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
Date: Fri, 18 Mar 2016 19:29:51 +0100
Subject: [PATCH 0416/1110] drm/amdgpu: Revert "remove the userptr rmn->lock"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit c02196834456f2d5fad334088b70e98ce4967c34.

In the meantime we moved get_user_pages() outside of the reservation
lock, so that shouldn't be an issue any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d7ec9bd..c47f222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -48,7 +48,8 @@ struct amdgpu_mn {
/* protected by adev->mn_lock */
struct hlist_node node;
- /* objects protected by mm->mmap_sem */
+ /* objects protected by lock */
+ struct mutex lock;
struct rb_root objects;
};
@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
- down_write(&rmn->mm->mmap_sem);
+ mutex_lock(&rmn->lock);
hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
}
kfree(node);
}
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
mutex_unlock(&adev->mn_lock);
mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn);
@@ -126,6 +127,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end -= 1;
+ mutex_lock(&rmn->lock);
+
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
@@ -160,6 +163,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
amdgpu_bo_unreserve(bo);
}
}
+
+ mutex_unlock(&rmn->lock);
}
static const struct mmu_notifier_ops amdgpu_mn_ops = {
@@ -196,6 +201,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->adev = adev;
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
+ mutex_init(&rmn->lock);
rmn->objects = RB_ROOT;
r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +248,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
- down_write(&rmn->mm->mmap_sem);
+ mutex_lock(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node);
@@ -256,7 +262,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
if (!node) {
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
return -ENOMEM;
}
}
@@ -271,7 +277,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
interval_tree_insert(&node->it, &rmn->objects);
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
return 0;
}
@@ -297,7 +303,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
return;
}
- down_write(&rmn->mm->mmap_sem);
+ mutex_lock(&rmn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@@ -312,6 +318,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
kfree(node);
}
- up_write(&rmn->mm->mmap_sem);
+ mutex_unlock(&rmn->lock);
mutex_unlock(&adev->mn_lock);
}
--
2.7.4
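
For illustration only, below is a small userspace C analogue of the serialization this
revert restores; it is not kernel code and not part of the patch. A dedicated mutex
(standing in for rmn->lock) protects the object collection: registration inserts under
the lock, and the invalidation path walks the objects under the same lock, rather than
relying on mmap_sem. A plain linked list stands in for the rmn->objects interval tree,
and every name in the sketch is made up for the example.

/* Userspace sketch of the rmn->lock pattern (illustrative only). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long start, last;	/* stands in for the interval_tree node */
	struct node *next;
};

static struct node *objects;		/* stands in for rmn->objects */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* rmn->lock */

/* analogue of amdgpu_mn_register(): insert an object under the lock */
static int register_object(unsigned long start, unsigned long last)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return -1;
	n->start = start;
	n->last = last;

	pthread_mutex_lock(&lock);
	n->next = objects;
	objects = n;
	pthread_mutex_unlock(&lock);
	return 0;
}

/* analogue of the invalidate callback: walk the objects under the same lock */
static void invalidate_range(unsigned long start, unsigned long end)
{
	end -= 1;	/* notification is exclusive, interval is inclusive */

	pthread_mutex_lock(&lock);
	for (struct node *n = objects; n; n = n->next) {
		if (n->last >= start && n->start <= end)
			printf("invalidating [0x%lx, 0x%lx]\n", n->start, n->last);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	register_object(0x1000, 0x1fff);
	register_object(0x3000, 0x3fff);
	invalidate_range(0x1800, 0x2000);	/* overlaps only the first object */
	return 0;
}

The point of the sketch is the lock scope: because get_user_pages() no longer runs
under the reservation lock in the real driver, the notifier side can take a private
mutex around the whole tree walk without risking the ordering problems that motivated
the original removal.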