diff --git a/common/recipes-kernel/linux/linux-yocto-4.19.8/1286-mm-hmm-use-a-structure-for-update-callback-parameter.patch b/common/recipes-kernel/linux/linux-yocto-4.19.8/1286-mm-hmm-use-a-structure-for-update-callback-parameter.patch
new file mode 100644
index 00000000..dc77b315
--- /dev/null
+++ b/common/recipes-kernel/linux/linux-yocto-4.19.8/1286-mm-hmm-use-a-structure-for-update-callback-parameter.patch
@@ -0,0 +1,215 @@
+From fcac15a1101c75ab73cac6d3e403e6588743de83 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=A9r=C3=B4me=20Glisse?= <jglisse@redhat.com>
+Date: Tue, 30 Oct 2018 15:04:24 -0700
+Subject: [PATCH 1286/2940] mm/hmm: use a structure for update callback
+ parameters
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Use a structure to gather all the parameters for the update callback.
+This makes it easier to add new parameters, as it avoids having to
+update every callback function signature.
+
+The hmm_update structure is always associated with an mmu_notifier
+callback, so we are not planning on grouping multiple updates together.
+Nor do we care about the page size for the range, as the range always
+fully covers the pages being invalidated (this is an mmu_notifier property).
+
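For orientation only, here is the signature change in condensed form (the
complete definitions appear in the hunks below); nothing in this snippet goes
beyond what the patch itself adds:

    /* before */
    void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
                                       enum hmm_update_type update_type,
                                       unsigned long start,
                                       unsigned long end);

    /* after: a single descriptor, so new fields such as ->blockable can be
     * added without touching every mirror implementation */
    int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
                                      const struct hmm_update *update);
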
+Link: http://lkml.kernel.org/r/20181019160442.18723-6-jglisse@redhat.com
+Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
+Cc: Ralph Campbell <rcampbell@nvidia.com>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chaudhary Amit Kumar <Chaudharyamit.Kumar@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 30 -----------------------
+ include/linux/hmm.h | 31 +++++++++++++++++-------
+ mm/hmm.c | 33 +++++++++++++++++---------
+ 3 files changed, 44 insertions(+), 50 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+index 4ac567dc4646..e483bf5191cd 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+@@ -269,26 +269,6 @@ static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
+ return 0;
+ }
+
+-/**
+- * amdgpu_mn_invalidate_range_end_gfx - callback to notify about mm change
+- *
+- * @mn: our notifier
+- * @mm: the mm this callback is about
+- * @start: start of updated range
+- * @end: end of updated range
+- *
+- * Release the lock again to allow new command submissions.
+- */
+-static void amdgpu_mn_invalidate_range_end_gfx(struct mmu_notifier *mn,
+- struct mm_struct *mm,
+- unsigned long start,
+- unsigned long end)
+-{
+- struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+-
+- amdgpu_mn_read_unlock(amn);
+-}
+-
+ /**
+ * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
+ *
+@@ -348,16 +328,6 @@ static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
+
+ #define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
+
+-static void amdgpu_mn_invalidate_range_end_hsa(struct mmu_notifier *mn,
+- struct mm_struct *mm,
+- unsigned long start,
+- unsigned long end)
+-{
+- struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+-
+- amdgpu_mn_read_unlock(amn);
+-}
+-
+ static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
+ [AMDGPU_MN_TYPE_GFX] = {
+ .sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
+diff --git a/include/linux/hmm.h b/include/linux/hmm.h
+index 1d0d19e8db54..eeb9a8fbc873 100644
+--- a/include/linux/hmm.h
++++ b/include/linux/hmm.h
+@@ -276,13 +276,28 @@ static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
+ struct hmm_mirror;
+
+ /*
+- * enum hmm_update_type - type of update
++ * enum hmm_update_event - type of update
+ * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
+ */
+-enum hmm_update_type {
++enum hmm_update_event {
+ HMM_UPDATE_INVALIDATE,
+ };
+
++/*
++ * struct hmm_update - HMM update information for callback
++ *
++ * @start: virtual start address of the range to update
++ * @end: virtual end address of the range to update
++ * @event: event triggering the update (what is happening)
++ * @blockable: can the callback block/sleep?
++ */
++struct hmm_update {
++ unsigned long start;
++ unsigned long end;
++ enum hmm_update_event event;
++ bool blockable;
++};
++
+ /*
+ * struct hmm_mirror_ops - HMM mirror device operations callback
+ *
+@@ -302,9 +317,9 @@ struct hmm_mirror_ops {
+ /* sync_cpu_device_pagetables() - synchronize page tables
+ *
+ * @mirror: pointer to struct hmm_mirror
+- * @update_type: type of update that occurred to the CPU page table
+- * @start: virtual start address of the range to update
+- * @end: virtual end address of the range to update
++ * @update: update information (see struct hmm_update)
++ * Returns: -EAGAIN if update->blockable is false and the callback needs
++ * to block, 0 otherwise.
+ *
+ * This callback ultimately originates from mmu_notifiers when the CPU
+ * page table is updated. The device driver must update its page table
+@@ -315,10 +330,8 @@ struct hmm_mirror_ops {
+ * page tables are completely updated (TLBs flushed, etc); this is a
+ * synchronous call.
+ */
+- void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
+- enum hmm_update_type update_type,
+- unsigned long start,
+- unsigned long end);
++ int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
++ const struct hmm_update *update);
+ };
+
+ /*
+diff --git a/mm/hmm.c b/mm/hmm.c
+index 36593a494756..c5cb352f5276 100644
+--- a/mm/hmm.c
++++ b/mm/hmm.c
+@@ -166,10 +166,8 @@ void hmm_mm_destroy(struct mm_struct *mm)
+ spin_unlock(&mm->page_table_lock);
+ }
+
+-static void hmm_invalidate_range(struct hmm *hmm,
+- enum hmm_update_type action,
+- unsigned long start,
+- unsigned long end)
++static int hmm_invalidate_range(struct hmm *hmm,
++ const struct hmm_update *update)
+ {
+ struct hmm_mirror *mirror;
+ struct hmm_range *range;
+@@ -178,22 +176,30 @@ static void hmm_invalidate_range(struct hmm *hmm,
+ list_for_each_entry(range, &hmm->ranges, list) {
+ unsigned long addr, idx, npages;
+
+- if (end < range->start || start >= range->end)
++ if (update->end < range->start || update->start >= range->end)
+ continue;
+
+ range->valid = false;
+- addr = max(start, range->start);
++ addr = max(update->start, range->start);
+ idx = (addr - range->start) >> PAGE_SHIFT;
+- npages = (min(range->end, end) - addr) >> PAGE_SHIFT;
++ npages = (min(range->end, update->end) - addr) >> PAGE_SHIFT;
+ memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);
+ }
+ spin_unlock(&hmm->lock);
+
+ down_read(&hmm->mirrors_sem);
+- list_for_each_entry(mirror, &hmm->mirrors, list)
+- mirror->ops->sync_cpu_device_pagetables(mirror, action,
+- start, end);
++ list_for_each_entry(mirror, &hmm->mirrors, list) {
++ int ret;
++
++ ret = mirror->ops->sync_cpu_device_pagetables(mirror, update);
++ if (!update->blockable && ret == -EAGAIN) {
++ up_read(&hmm->mirrors_sem);
++ return -EAGAIN;
++ }
++ }
+ up_read(&hmm->mirrors_sem);
++
++ return 0;
+ }
+
+ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+@@ -244,11 +250,16 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end)
+ {
++ struct hmm_update update;
+ struct hmm *hmm = mm->hmm;
+
+ VM_BUG_ON(!hmm);
+
+- hmm_invalidate_range(mm->hmm, HMM_UPDATE_INVALIDATE, start, end);
++ update.start = start;
++ update.end = end;
++ update.event = HMM_UPDATE_INVALIDATE;
++ update.blockable = true;
++ hmm_invalidate_range(hmm, &update);
+ }
+
+ static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
+--
+2.17.1
+
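As a rough illustration of how a mirror driver consumes the new interface, the
sketch below honours update->blockable and returns -EAGAIN when it cannot
sleep, matching the callback documentation above. Only struct hmm_update and
the -EAGAIN convention come from this patch; my_mirror, my_invalidate() and
the mutex are hypothetical driver-side names.

    #include <linux/hmm.h>
    #include <linux/mutex.h>

    /* Hypothetical driver state wrapping the HMM mirror. */
    struct my_mirror {
            struct hmm_mirror mirror;
            struct mutex lock;      /* protects the device page tables */
    };

    /* Hypothetical helper that drops device mappings for a VA range. */
    static void my_invalidate(struct my_mirror *m,
                              unsigned long start, unsigned long end);

    static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
                                             const struct hmm_update *update)
    {
            struct my_mirror *m = container_of(mirror, struct my_mirror, mirror);

            /* If the notifier may not sleep and the lock is contended,
             * tell HMM to back off instead of blocking. */
            if (!update->blockable) {
                    if (!mutex_trylock(&m->lock))
                            return -EAGAIN;
            } else {
                    mutex_lock(&m->lock);
            }

            my_invalidate(m, update->start, update->end);

            mutex_unlock(&m->lock);
            return 0;
    }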