about summary refs log tree commit diff stats
path: root/features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
diff options
context:
space:
mode:
Diffstat (limited to 'features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch')
-rw-r--r--  features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch  84
1 file changed, 84 insertions(+), 0 deletions(-)
diff --git a/features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch b/features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
new file mode 100644
index 00000000..30c5cea1
--- /dev/null
+++ b/features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
@@ -0,0 +1,84 @@
+From a93cd2c33ab23c4dce5b28971b24ad3bb0a85d55 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 30 Oct 2020 13:59:06 +0100
+Subject: [PATCH 001/191] highmem: Don't disable preemption on RT in
+ kmap_atomic()
+
+Disabling preemption makes it impossible to acquire sleeping locks within
+kmap_atomic() section.
+For PREEMPT_RT it is sufficient to disable migration.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem-internal.h | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
+index 7902c7d8b55f..4aa1031d3e4c 100644
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -90,7 +90,11 @@ static inline void __kunmap_local(void *vaddr)
+
+ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
++
+ pagefault_disable();
+ return __kmap_local_page_prot(page, prot);
+ }
+@@ -102,7 +106,11 @@ static inline void *kmap_atomic(struct page *page)
+
+ static inline void *kmap_atomic_pfn(unsigned long pfn)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
++
+ pagefault_disable();
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+ }
+@@ -111,7 +119,10 @@ static inline void __kunmap_atomic(void *addr)
+ {
+ kunmap_local_indexed(addr);
+ pagefault_enable();
+- preempt_enable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_enable();
++ else
++ preempt_enable();
+ }
+
+ unsigned int __nr_free_highpages(void);
+@@ -179,7 +190,10 @@ static inline void __kunmap_local(void *addr)
+
+ static inline void *kmap_atomic(struct page *page)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+ }
+@@ -200,7 +214,10 @@ static inline void __kunmap_atomic(void *addr)
+ kunmap_flush_on_unmap(addr);
+ #endif
+ pagefault_enable();
+- preempt_enable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_enable();
++ else
++ preempt_enable();
+ }
+
+ static inline unsigned int nr_free_highpages(void) { return 0; }
+--
+2.19.1
+