Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch  340
1 file changed, 340 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch
new file mode 100644
index 00000000..2ce2b0eb
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71/4617-drm-amdgpu-rename-rmn-to-amn-in-the-MMU-notifier-cod.patch
@@ -0,0 +1,340 @@
+From 0649da717ce493d05db377795b66e2dfb4ceae34 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Christian=20K=C3=B6nig?= <christian.koenig@amd.com>
+Date: Fri, 1 Jun 2018 16:53:11 +0200
+Subject: [PATCH 4617/5725] drm/amdgpu: rename rmn to amn in the MMU notifier
+ code
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Just a copy&paste leftover from radeon.
+
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
+
+Conflicts:
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+
+Change-Id: I5ab6787fad32f873b859588661090e2b54c77a42
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 140 ++++++++++++++++-----------------
+ 1 file changed, 70 insertions(+), 70 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+index 3fc0917..211539f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -68,7 +68,7 @@ struct amdgpu_mn_node {
+ };
+
+ /**
+- * amdgpu_mn_destroy - destroy the rmn
++ * amdgpu_mn_destroy - destroy the amn
+ *
+ * @work: previously sheduled work item
+ *
+@@ -76,19 +76,19 @@ struct amdgpu_mn_node {
+ */
+ static void amdgpu_mn_destroy(struct work_struct *work)
+ {
+- struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
+- struct amdgpu_device *adev = rmn->adev;
++ struct amdgpu_mn *amn = container_of(work, struct amdgpu_mn, work);
++ struct amdgpu_device *adev = amn->adev;
+ struct amdgpu_mn_node *node, *next_node;
+ struct amdgpu_bo *bo, *next_bo;
+
+ mutex_lock(&adev->mn_lock);
+- down_write(&rmn->lock);
+- hash_del(&rmn->node);
++ down_write(&amn->lock);
++ hash_del(&amn->node);
+ rbtree_postorder_for_each_entry_safe(node, next_node,
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+- &rmn->objects, it.rb) {
++ &amn->objects, it.rb) {
+ #else
+- &rmn->objects.rb_root, it.rb) {
++ &amn->objects.rb_root, it.rb) {
+ #endif
+ list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
+ bo->mn = NULL;
+@@ -96,10 +96,10 @@ static void amdgpu_mn_destroy(struct work_struct *work)
+ }
+ kfree(node);
+ }
+- up_write(&rmn->lock);
++ up_write(&amn->lock);
+ mutex_unlock(&adev->mn_lock);
+- mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
+- kfree(rmn);
++ mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
++ kfree(amn);
+ }
+
+ /**
+@@ -113,9 +113,9 @@ static void amdgpu_mn_destroy(struct work_struct *work)
+ static void amdgpu_mn_release(struct mmu_notifier *mn,
+ struct mm_struct *mm)
+ {
+- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+- INIT_WORK(&rmn->work, amdgpu_mn_destroy);
+- schedule_work(&rmn->work);
++ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
++ INIT_WORK(&amn->work, amdgpu_mn_destroy);
++ schedule_work(&amn->work);
+ }
+
+
+@@ -138,31 +138,31 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+ }
+
+ /**
+- * amdgpu_mn_read_lock - take the rmn read lock
++ * amdgpu_mn_read_lock - take the amn read lock
+ *
+- * @rmn: our notifier
++ * @amn: our notifier
+ *
+- * Take the rmn read side lock.
++ * Take the amn read side lock.
+ */
+-static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
++static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+ {
+- mutex_lock(&rmn->read_lock);
+- if (atomic_inc_return(&rmn->recursion) == 1)
+- down_read_non_owner(&rmn->lock);
+- mutex_unlock(&rmn->read_lock);
++ mutex_lock(&amn->read_lock);
++ if (atomic_inc_return(&amn->recursion) == 1)
++ down_read_non_owner(&amn->lock);
++ mutex_unlock(&amn->read_lock);
+ }
+
+ /**
+- * amdgpu_mn_read_unlock - drop the rmn read lock
++ * amdgpu_mn_read_unlock - drop the amn read lock
+ *
+- * @rmn: our notifier
++ * @amn: our notifier
+ *
+- * Drop the rmn read side lock.
++ * Drop the amn read side lock.
+ */
+-static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
++static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
+ {
+- if (atomic_dec_return(&rmn->recursion) == 0)
+- up_read_non_owner(&rmn->lock);
++ if (atomic_dec_return(&amn->recursion) == 0)
++ up_read_non_owner(&amn->lock);
+ }
+
+ /**
+@@ -210,15 +210,15 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end)
+ {
+- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
++ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+ struct interval_tree_node *it;
+
+ /* notification is exclusive, but interval is inclusive */
+ end -= 1;
+
+- amdgpu_mn_read_lock(rmn);
++ amdgpu_mn_read_lock(amn);
+
+- it = interval_tree_iter_first(&rmn->objects, start, end);
++ it = interval_tree_iter_first(&amn->objects, start, end);
+ while (it) {
+ struct amdgpu_mn_node *node;
+
+@@ -244,9 +244,9 @@ static void amdgpu_mn_invalidate_range_end_gfx(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end)
+ {
+- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
++ struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+
+- amdgpu_mn_read_unlock(rmn);
++ amdgpu_mn_read_unlock(amn);
+ }
+
+ /**
+@@ -332,7 +332,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
+ enum amdgpu_mn_type type)
+ {
+ struct mm_struct *mm = current->mm;
+- struct amdgpu_mn *rmn;
++ struct amdgpu_mn *amn;
+ unsigned long key = AMDGPU_MN_KEY(mm, type);
+ int r;
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+@@ -350,48 +350,48 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
+ #endif
+
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+- hash_for_each_possible(adev->mn_hash, rmn, node, node, key)
++ hash_for_each_possible(adev->mn_hash, amn, node, node, key)
+ #else
+- hash_for_each_possible(adev->mn_hash, rmn, node, key)
++ hash_for_each_possible(adev->mn_hash, amn, node, key)
+ #endif
+- if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key)
++ if (AMDGPU_MN_KEY(amn->mm, amn->type) == key)
+ goto release_locks;
+
+- rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
+- if (!rmn) {
+- rmn = ERR_PTR(-ENOMEM);
++ amn = kzalloc(sizeof(*amn), GFP_KERNEL);
++ if (!amn) {
++ amn = ERR_PTR(-ENOMEM);
+ goto release_locks;
+ }
+
+- rmn->adev = adev;
+- rmn->mm = mm;
+- init_rwsem(&rmn->lock);
++ amn->adev = adev;
++ amn->mm = mm;
++ init_rwsem(&amn->lock);
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+- rmn->objects = RB_ROOT;
++ amn->objects = RB_ROOT;
+ #else
+- rmn->objects = RB_ROOT_CACHED;
++ amn->objects = RB_ROOT_CACHED;
+ #endif
+- rmn->type = type;
+- rmn->mn.ops = &amdgpu_mn_ops[type];
+- mutex_init(&rmn->read_lock);
+- atomic_set(&rmn->recursion, 0);
++ amn->type = type;
++ amn->mn.ops = &amdgpu_mn_ops[type];
++ mutex_init(&amn->read_lock);
++ atomic_set(&amn->recursion, 0);
+
+- r = __mmu_notifier_register(&rmn->mn, mm);
++ r = __mmu_notifier_register(&amn->mn, mm);
+ if (r)
+- goto free_rmn;
++ goto free_amn;
+
+- hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type));
++ hash_add(adev->mn_hash, &amn->node, AMDGPU_MN_KEY(mm, type));
+
+ release_locks:
+ up_write(&mm->mmap_sem);
+ mutex_unlock(&adev->mn_lock);
+
+- return rmn;
++ return amn;
+
+-free_rmn:
++free_amn:
+ up_write(&mm->mmap_sem);
+ mutex_unlock(&adev->mn_lock);
+- kfree(rmn);
++ kfree(amn);
+
+ return ERR_PTR(r);
+ }
+@@ -411,14 +411,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ enum amdgpu_mn_type type =
+ bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
+- struct amdgpu_mn *rmn;
++ struct amdgpu_mn *amn;
+ struct amdgpu_mn_node *node = NULL, *new_node;
+ struct list_head bos;
+ struct interval_tree_node *it;
+
+- rmn = amdgpu_mn_get(adev, type);
+- if (IS_ERR(rmn))
+- return PTR_ERR(rmn);
++ amn = amdgpu_mn_get(adev, type);
++ if (IS_ERR(amn))
++ return PTR_ERR(amn);
+
+ new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node)
+@@ -426,12 +426,12 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+
+ INIT_LIST_HEAD(&bos);
+
+- down_write(&rmn->lock);
++ down_write(&amn->lock);
+
+- while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
++ while ((it = interval_tree_iter_first(&amn->objects, addr, end))) {
+ kfree(node);
+ node = container_of(it, struct amdgpu_mn_node, it);
+- interval_tree_remove(&node->it, &rmn->objects);
++ interval_tree_remove(&node->it, &amn->objects);
+ addr = min(it->start, addr);
+ end = max(it->last, end);
+ list_splice(&node->bos, &bos);
+@@ -442,7 +442,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ else
+ kfree(new_node);
+
+- bo->mn = rmn;
++ bo->mn = amn;
+
+ node->it.start = addr;
+ node->it.last = end;
+@@ -450,9 +450,9 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ list_splice(&bos, &node->bos);
+ list_add(&bo->mn_list, &node->bos);
+
+- interval_tree_insert(&node->it, &rmn->objects);
++ interval_tree_insert(&node->it, &amn->objects);
+
+- up_write(&rmn->lock);
++ up_write(&amn->lock);
+
+ return 0;
+ }
+@@ -467,18 +467,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
+ {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+- struct amdgpu_mn *rmn;
++ struct amdgpu_mn *amn;
+ struct list_head *head;
+
+ mutex_lock(&adev->mn_lock);
+
+- rmn = bo->mn;
+- if (rmn == NULL) {
++ amn = bo->mn;
++ if (amn == NULL) {
+ mutex_unlock(&adev->mn_lock);
+ return;
+ }
+
+- down_write(&rmn->lock);
++ down_write(&amn->lock);
+
+ /* save the next list entry for later */
+ head = bo->mn_list.next;
+@@ -489,11 +489,11 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
+ if (list_empty(head)) {
+ struct amdgpu_mn_node *node;
+ node = container_of(head, struct amdgpu_mn_node, bos);
+- interval_tree_remove(&node->it, &rmn->objects);
++ interval_tree_remove(&node->it, &amn->objects);
+ kfree(node);
+ }
+
+- up_write(&rmn->lock);
++ up_write(&amn->lock);
+ mutex_unlock(&adev->mn_lock);
+ }
+
+--
+2.7.4
+