-rw-r--r--  features/rt/ARM-Allow-to-enable-RT.patch  35
-rw-r--r--  features/rt/ARM-enable-irq-in-translation-section-permission-fau.patch  94
-rw-r--r--  features/rt/ARM64-Allow-to-enable-RT.patch  35
-rw-r--r--  features/rt/Add-localversion-for-RT-release.patch  21
-rw-r--r--  features/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch  58
-rw-r--r--  features/rt/PCI-hv-Use-tasklet_disable_in_atomic.patch  45
-rw-r--r--  features/rt/POWERPC-Allow-to-enable-RT.patch  35
-rw-r--r--  features/rt/arch-arm64-Add-lazy-preempt-support.patch  170
-rw-r--r--  features/rt/arm-Add-support-for-lazy-preemption.patch  136
-rw-r--r--  features/rt/arm64-fpsimd-Delay-freeing-memory-in-fpsimd_flush_th.patch  65
-rw-r--r--  features/rt/ath9k-Use-tasklet_disable_in_atomic.patch  47
-rw-r--r--  features/rt/atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch  41
-rw-r--r--  features/rt/block-mq-do-not-invoke-preempt_disable.patch  39
-rw-r--r--  features/rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch  49
-rw-r--r--  features/rt/console-add-write_atomic-interface.patch  162
-rw-r--r--  features/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch  328
-rw-r--r--  features/rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch  84
-rw-r--r--  features/rt/crypto-limit-more-FPU-enabled-sections.patch  73
-rw-r--r--  features/rt/debugobjects-Make-RT-aware.patch  31
-rw-r--r--  features/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch  92
-rw-r--r--  features/rt/drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch  79
-rw-r--r--  features/rt/drm-i915-disable-tracing-on-RT.patch  46
-rw-r--r--  features/rt/drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch  51
-rw-r--r--  features/rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch  32
-rw-r--r--  features/rt/drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch  60
-rw-r--r--  features/rt/efi-Allow-efi-runtime.patch  31
-rw-r--r--  features/rt/efi-Disable-runtime-services-on-RT.patch  45
-rw-r--r--  features/rt/firewire-ohci-Use-tasklet_disable_in_atomic-where-re.patch  60
-rw-r--r--  features/rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch  98
-rw-r--r--  features/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch  262
-rw-r--r--  features/rt/fs-namespace-Use-cpu_chill-in-trylock-loops.patch  43
-rw-r--r--  features/rt/futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch  117
-rw-r--r--  features/rt/genirq-Disable-irqpoll-on-rt.patch  42
-rw-r--r--  features/rt/genirq-Move-prio-assignment-into-the-newly-created-t.patch  61
-rw-r--r--  features/rt/genirq-update-irq_set_irqchip_state-documentation.patch  31
-rw-r--r--  features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch  84
-rw-r--r--  features/rt/irqtime-Make-accounting-correct-on-RT.patch  53
-rw-r--r--  features/rt/irqwork-push-most-work-into-softirq-context.patch  200
-rw-r--r--  features/rt/jump-label-disable-if-stop_machine-is-used.patch  41
-rw-r--r--  features/rt/kconfig-Disable-config-options-which-are-not-RT-comp.patch  29
-rw-r--r--  features/rt/kcov-Remove-kcov-include-from-sched.h-and-move-it-to.patch  109
-rw-r--r--  features/rt/kernel-sched-add-put-get-_cpu_light.patch  27
-rw-r--r--  features/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch  80
-rw-r--r--  features/rt/kthread-Move-prio-affinite-change-into-the-newly-cre.patch  85
-rw-r--r--  features/rt/leds-trigger-disable-CPU-trigger-on-RT.patch  39
-rw-r--r--  features/rt/lockdep-Make-it-RT-aware.patch  77
-rw-r--r--  features/rt/lockdep-Reduce-header-files-in-debug_locks.h.patch  32
-rw-r--r--  features/rt/lockdep-disable-self-test.patch  34
-rw-r--r--  features/rt/lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch  61
-rw-r--r--  features/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch  148
-rw-r--r--  features/rt/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch  125
-rw-r--r--  features/rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch  165
-rw-r--r--  features/rt/locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch  36
-rw-r--r--  features/rt/locking-rtmutex-Avoid-include-hell.patch  29
-rw-r--r--  features/rt/locking-rtmutex-Handle-the-various-new-futex-race-co.patch  255
-rw-r--r--  features/rt/locking-rtmutex-Make-lock_killable-work.patch  49
-rw-r--r--  features/rt/locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch  59
-rw-r--r--  features/rt/locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch  144
-rw-r--r--  features/rt/locking-rtmutex-Remove-cruft.patch  98
-rw-r--r--  features/rt/locking-rtmutex-Remove-output-from-deadlock-detector.patch  311
-rw-r--r--  features/rt/locking-rtmutex-Remove-rt_mutex_timed_lock.patch  97
-rw-r--r--  features/rt/locking-rtmutex-Use-custom-scheduling-function-for-s.patch  242
-rw-r--r--  features/rt/locking-rtmutex-add-mutex-implementation-based-on-rt.patch  384
-rw-r--r--  features/rt/locking-rtmutex-add-rwlock-implementation-based-on-r.patch  557
-rw-r--r--  features/rt/locking-rtmutex-add-rwsem-implementation-based-on-rt.patch  454
-rw-r--r--  features/rt/locking-rtmutex-add-sleeping-lock-implementation.patch  1232
-rw-r--r--  features/rt/locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch  455
-rw-r--r--  features/rt/locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch  129
-rw-r--r--  features/rt/locking-rtmutex-wire-up-RT-s-locking.patch  346
-rw-r--r--  features/rt/locking-spinlock-Split-the-lock-types-header.patch  252
-rw-r--r--  features/rt/locking-split-out-the-rbtree-definition.patch  119
-rw-r--r--  features/rt/md-raid5-Make-raid5_percpu-handling-RT-aware.patch  68
-rw-r--r--  features/rt/mm-Allow-only-SLUB-on-RT.patch  46
-rw-r--r--  features/rt/mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch  43
-rw-r--r--  features/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch  73
-rw-r--r--  features/rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch  143
-rw-r--r--  features/rt/mm-memcontrol-Replace-local_irq_disable-with-local-l.patch  122
-rw-r--r--  features/rt/mm-page_alloc-Use-a-local_lock-instead-of-explicit-l.patch  210
-rw-r--r--  features/rt/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch  38
-rw-r--r--  features/rt/mm-scatterlist-Do-not-disable-irqs-on-RT.patch  29
-rw-r--r--  features/rt/mm-sl-au-b-Change-list_lock-to-raw_spinlock_t.patch  602
-rw-r--r--  features/rt/mm-slub-Don-t-enable-partial-CPU-caches-on-PREEMPT_R.patch  32
-rw-r--r--  features/rt/mm-slub-Don-t-resize-the-location-tracking-cache-on-.patch  35
-rw-r--r--  features/rt/mm-slub-Enable-irqs-for-__GFP_WAIT.patch  76
-rw-r--r--  features/rt/mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch  49
-rw-r--r--  features/rt/mm-slub-Move-discard_slab-invocations-out-of-IRQ-off.patch  416
-rw-r--r--  features/rt/mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch  120
-rw-r--r--  features/rt/mm-vmalloc-Another-preempt-disable-region-which-suck.patch  72
-rw-r--r--  features/rt/mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch  144
-rw-r--r--  features/rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch  48
-rw-r--r--  features/rt/mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch  211
-rw-r--r--  features/rt/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch  36
-rw-r--r--  features/rt/net-Move-lockdep-where-it-belongs.patch  46
-rw-r--r--  features/rt/net-Properly-annotate-the-try-lock-for-the-seqlock.patch  69
-rw-r--r--  features/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch  298
-rw-r--r--  features/rt/net-Remove-preemption-disabling-in-netif_rx.patch  67
-rw-r--r--  features/rt/net-Use-skbufhead-with-raw-lock.patch  73
-rw-r--r--  features/rt/net-core-disable-NET_RX_BUSY_POLL-on-RT.patch  43
-rw-r--r--  features/rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch  40
-rw-r--r--  features/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch  41
-rw-r--r--  features/rt/net-jme-Replace-link-change-tasklet-with-work.patch  87
-rw-r--r--  features/rt/net-sundance-Use-tasklet_disable_in_atomic.patch  38
-rw-r--r--  features/rt/notifier-Make-atomic_notifiers-use-raw_spinlock.patch  131
-rw-r--r--  features/rt/panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch  33
-rw-r--r--  features/rt/pid.h-include-atomic.h.patch  42
-rw-r--r--  features/rt/powerpc-Add-support-for-lazy-preemption.patch  215
-rw-r--r--  features/rt/powerpc-Avoid-recursive-header-includes.patch  47
-rw-r--r--  features/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch  44
-rw-r--r--  features/rt/powerpc-mm-Move-the-linear_mapping_mutex-to-the-ifde.patch  43
-rw-r--r--  features/rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch  116
-rw-r--r--  features/rt/powerpc-stackprotector-work-around-stack-guard-init-.patch  35
-rw-r--r--  features/rt/powerpc-traps-Use-PREEMPT_RT.patch  38
-rw-r--r--  features/rt/preempt-Provide-preempt_-_-no-rt-variants.patch  52
-rw-r--r--  features/rt/printk-add-console-handover.patch  75
-rw-r--r--  features/rt/printk-add-pr_flush.patch  210
-rw-r--r--  features/rt/printk-add-syslog_lock.patch  158
-rw-r--r--  features/rt/printk-change-console_seq-to-atomic64_t.patch  131
-rw-r--r--  features/rt/printk-combine-boot_delay_msec-into-printk_delay.patch  43
-rw-r--r--  features/rt/printk-console-remove-unnecessary-safe-buffer-usage.patch  47
-rw-r--r--  features/rt/printk-consolidate-kmsg_dump_get_buffer-syslog_print.patch  146
-rw-r--r--  features/rt/printk-convert-syslog_lock-to-spin_lock.patch  118
-rw-r--r--  features/rt/printk-introduce-CONSOLE_LOG_MAX-for-improved-multi-.patch  94
-rw-r--r--  features/rt/printk-introduce-a-kmsg_dump-iterator.patch  560
-rw-r--r--  features/rt/printk-introduce-kernel-sync-mode.patch  308
-rw-r--r--  features/rt/printk-kmsg_dump-remove-_nolock-variants.patch  225
-rw-r--r--  features/rt/printk-kmsg_dump-remove-unused-fields.patch  42
-rw-r--r--  features/rt/printk-kmsg_dump-use-kmsg_dump_rewind.patch  41
-rw-r--r--  features/rt/printk-limit-second-loop-of-syslog_print_all.patch  55
-rw-r--r--  features/rt/printk-move-console-printing-to-kthreads.patch  846
-rw-r--r--  features/rt/printk-refactor-kmsg_dump_get_buffer.patch  144
-rw-r--r--  features/rt/printk-relocate-printk_delay-and-vprintk_default.patch  88
-rw-r--r--  features/rt/printk-remove-deferred-printing.patch  432
-rw-r--r--  features/rt/printk-remove-logbuf_lock.patch  485
-rw-r--r--  features/rt/printk-remove-safe-buffers.patch  875
-rw-r--r--  features/rt/printk-track-limit-recursion.patch  142
-rw-r--r--  features/rt/printk-use-atomic64_t-for-devkmsg_user.seq.patch  111
-rw-r--r--  features/rt/printk-use-seqcount_latch-for-clear_seq.patch  146
-rw-r--r--  features/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch  165
-rw-r--r--  features/rt/ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch  64
-rw-r--r--  features/rt/random-Make-it-work-on-rt.patch  185
-rw-r--r--  features/rt/rcu-Delay-RCU-selftests.patch  75
-rw-r--r--  features/rt/rcu-Prevent-false-positive-softirq-warning-on-RT.patch  34
-rw-r--r--  features/rt/rcutorture-Avoid-problematic-critical-section-nestin.patch  195
-rw-r--r--  features/rt/rt-Add-local-irq-locks.patch  210
-rw-r--r--  features/rt/rt-Introduce-cpu_chill.patch  121
-rw-r--r--  features/rt/rt.scc  191
-rw-r--r--  features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch  115
-rw-r--r--  features/rt/sched-Add-support-for-lazy-preemption.patch  690
-rw-r--r--  features/rt/sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch  34
-rw-r--r--  features/rt/sched-Disable-TTWU_QUEUE-on-RT.patch  37
-rw-r--r--  features/rt/sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch  56
-rw-r--r--  features/rt/sched-Limit-the-number-of-task-migrations-per-batch.patch  32
-rw-r--r--  features/rt/sched-Move-mmdrop-to-RCU-on-RT.patch  114
-rw-r--r--  features/rt/scsi-fcoe-Make-RT-aware.patch  115
-rw-r--r--  features/rt/serial-8250-implement-write_atomic.patch  499
-rw-r--r--  features/rt/shmem-Use-raw_spinlock_t-for-stat_lock.patch  146
-rw-r--r--  features/rt/signal-Revert-ptrace-preempt-magic.patch  38
-rw-r--r--  features/rt/signal-x86-Delay-calling-signals-in-atomic.patch  139
-rw-r--r--  features/rt/signals-Allow-RT-tasks-to-cache-one-sigqueue-struct.patch  216
-rw-r--r--  features/rt/smp-Wake-ksoftirqd-on-PREEMPT_RT-instead-do_softirq.patch  47
-rw-r--r--  features/rt/softirq-Add-RT-specific-softirq-accounting.patch  74
-rw-r--r--  features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch  150
-rw-r--r--  features/rt/softirq-Disable-softirq-stacks-for-RT.patch  174
-rw-r--r--  features/rt/softirq-Make-softirq-control-and-processing-RT-aware.patch  266
-rw-r--r--  features/rt/softirq-Move-various-protections-into-inline-helpers.patch  107
-rw-r--r--  features/rt/sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch  59
-rw-r--r--  features/rt/sysfs-Add-sys-kernel-realtime-entry.patch  53
-rw-r--r--  features/rt/tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch  108
-rw-r--r--  features/rt/tasklets-Provide-tasklet_disable_in_atomic.patch  67
-rw-r--r--  features/rt/tasklets-Replace-barrier-with-cpu_relax-in-tasklet_u.patch  34
-rw-r--r--  features/rt/tasklets-Replace-spin-wait-in-tasklet_kill.patch  73
-rw-r--r--  features/rt/tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch  89
-rw-r--r--  features/rt/tasklets-Switch-tasklet_disable-to-the-sleep-wait-va.patch  34
-rw-r--r--  features/rt/tasklets-Use-spin-wait-in-tasklet_disable-temporaril.patch  32
-rw-r--r--  features/rt/tasklets-Use-static-inlines-for-stub-implementations.patch  34
-rw-r--r--  features/rt/tcp-Remove-superfluous-BH-disable-around-listening_h.patch  108
-rw-r--r--  features/rt/tick-sched-Prevent-false-positive-softirq-pending-wa.patch  83
-rw-r--r--  features/rt/timers-Move-clearing-of-base-timer_running-under-bas.patch  62
-rw-r--r--  features/rt/tpm_tis-fix-stall-after-iowrite-s.patch  83
-rw-r--r--  features/rt/trace-Add-migrate-disabled-counter-to-tracing-output.patch  122
-rw-r--r--  features/rt/tty-serial-omap-Make-the-locking-RT-aware.patch  48
-rw-r--r--  features/rt/tty-serial-pl011-Make-the-locking-work-on-RT.patch  59
-rw-r--r--  features/rt/u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch  151
-rw-r--r--  features/rt/um-synchronize-kmsg_dumper.patch  60
-rw-r--r--  features/rt/wait.h-include-atomic.h.patch  41
-rw-r--r--  features/rt/x86-Allow-to-enable-RT.patch  27
-rw-r--r--  features/rt/x86-Enable-RT-also-on-32bit.patch  33
-rw-r--r--  features/rt/x86-Support-for-lazy-preemption.patch  155
-rw-r--r--  features/rt/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch  34
-rw-r--r--  features/rt/x86-kvm-Require-const-tsc-for-RT.patch  37
-rw-r--r--  features/rt/x86-stackprotector-Avoid-random-pool-on-rt.patch  50
-rw-r--r--  features/rt/xfrm-Use-sequence-counter-with-associated-spinlock.patch  65
192 files changed, 25158 insertions, 0 deletions
diff --git a/features/rt/ARM-Allow-to-enable-RT.patch b/features/rt/ARM-Allow-to-enable-RT.patch
new file mode 100644
index 00000000..cf78c0cb
--- /dev/null
+++ b/features/rt/ARM-Allow-to-enable-RT.patch
@@ -0,0 +1,35 @@
+From b543f40eada157accc05e9e419a6ffc0168647d6 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 11 Oct 2019 13:14:29 +0200
+Subject: [PATCH 178/191] ARM: Allow to enable RT
+
+Allow to select RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index fe49c7cb76bd..876e4d478e57 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -31,6 +31,7 @@ config ARM
+ select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
+ select ARCH_SUPPORTS_ATOMIC_RMW
++ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
+@@ -123,6 +124,7 @@ config ARM
+ select OLD_SIGSUSPEND3
+ select PCI_SYSCALL if PCI
+ select PERF_USE_VMALLOC
++ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
+ select RTC_LIB
+ select SET_FS
+ select SYS_SUPPORTS_APM_EMULATION
+--
+2.19.1
+
diff --git a/features/rt/ARM-enable-irq-in-translation-section-permission-fau.patch b/features/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
new file mode 100644
index 00000000..47dd0be4
--- /dev/null
+++ b/features/rt/ARM-enable-irq-in-translation-section-permission-fau.patch
@@ -0,0 +1,94 @@
+From ced2a927997124c1cceecd5da04917ad03992490 Mon Sep 17 00:00:00 2001
+From: "Yadi.hu" <yadi.hu@windriver.com>
+Date: Wed, 10 Dec 2014 10:32:09 +0800
+Subject: [PATCH 173/191] ARM: enable irq in translation/section permission
+ fault handlers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Probably happens on all ARM, with
+CONFIG_PREEMPT_RT
+CONFIG_DEBUG_ATOMIC_SLEEP
+
+This simple program....
+
+int main() {
+ *((char*)0xc0001000) = 0;
+};
+
+[ 512.742724] BUG: sleeping function called from invalid context at kernel/rtmutex.c:658
+[ 512.743000] in_atomic(): 0, irqs_disabled(): 128, pid: 994, name: a
+[ 512.743217] INFO: lockdep is turned off.
+[ 512.743360] irq event stamp: 0
+[ 512.743482] hardirqs last enabled at (0): [< (null)>] (null)
+[ 512.743714] hardirqs last disabled at (0): [<c0426370>] copy_process+0x3b0/0x11c0
+[ 512.744013] softirqs last enabled at (0): [<c0426370>] copy_process+0x3b0/0x11c0
+[ 512.744303] softirqs last disabled at (0): [< (null)>] (null)
+[ 512.744631] [<c041872c>] (unwind_backtrace+0x0/0x104)
+[ 512.745001] [<c09af0c4>] (dump_stack+0x20/0x24)
+[ 512.745355] [<c0462490>] (__might_sleep+0x1dc/0x1e0)
+[ 512.745717] [<c09b6770>] (rt_spin_lock+0x34/0x6c)
+[ 512.746073] [<c0441bf0>] (do_force_sig_info+0x34/0xf0)
+[ 512.746457] [<c0442668>] (force_sig_info+0x18/0x1c)
+[ 512.746829] [<c041d880>] (__do_user_fault+0x9c/0xd8)
+[ 512.747185] [<c041d938>] (do_bad_area+0x7c/0x94)
+[ 512.747536] [<c041d990>] (do_sect_fault+0x40/0x48)
+[ 512.747898] [<c040841c>] (do_DataAbort+0x40/0xa0)
+[ 512.748181] Exception stack(0xecaa1fb0 to 0xecaa1ff8)
+
+Addresses from 0xc0000000 up belong to the kernel address space, which a user
+task must not be allowed to access. For the above condition, the correct result
+is that the test case receives a "segmentation fault" and exits instead of the splat above.
+
+The root cause is commit 02fe2845d6a8 ("avoid enabling interrupts in
+prefetch/data abort handlers"): it deletes the irq enable block from the
+data abort assembly code and moves it into the page/breakpoint/alignment
+fault handlers instead, but does not enable irqs in the translation/
+section permission fault handlers. ARM disables irqs when it enters
+exception/interrupt mode, so if the kernel does not re-enable them, they
+remain disabled while handling a translation/section permission fault.
+
+We see the above splat because do_force_sig_info is still called with
+IRQs off, and that code eventually does a:
+
+ spin_lock_irqsave(&t->sighand->siglock, flags);
+
+As this is architecture-independent code, and we have not seen any other
+architecture need the siglock converted to a raw lock, we can conclude
+that we should enable irqs in the ARM translation/section permission
+fault handlers.
+
+Signed-off-by: Yadi.hu <yadi.hu@windriver.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/mm/fault.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index da78740faf7b..07f79e533a29 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -400,6 +400,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+ if (addr < TASK_SIZE)
+ return do_page_fault(addr, fsr, regs);
+
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ if (user_mode(regs))
+ goto bad_area;
+
+@@ -479,6 +482,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
+ static int
+ do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+ {
++ if (interrupts_enabled(regs))
++ local_irq_enable();
++
+ do_bad_area(addr, fsr, regs);
+ return 0;
+ }
+--
+2.19.1
+
diff --git a/features/rt/ARM64-Allow-to-enable-RT.patch b/features/rt/ARM64-Allow-to-enable-RT.patch
new file mode 100644
index 00000000..3f265dc0
--- /dev/null
+++ b/features/rt/ARM64-Allow-to-enable-RT.patch
@@ -0,0 +1,35 @@
+From 49cc082d1b5e913dd79529b9fbed93801c11af68 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 11 Oct 2019 13:14:35 +0200
+Subject: [PATCH 179/191] ARM64: Allow to enable RT
+
+Allow to select RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 93a4dd767825..9cc92459812c 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -78,6 +78,7 @@ config ARM64
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
+ select ARCH_SUPPORTS_NUMA_BALANCING
++ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
+ select ARCH_WANT_DEFAULT_BPF_JIT
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+@@ -203,6 +204,7 @@ config ARM64
+ select PCI_DOMAINS_GENERIC if PCI
+ select PCI_ECAM if (ACPI && PCI)
+ select PCI_SYSCALL if PCI
++ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
+ select POWER_RESET
+ select POWER_SUPPLY
+ select SPARSE_IRQ
+--
+2.19.1
+
diff --git a/features/rt/Add-localversion-for-RT-release.patch b/features/rt/Add-localversion-for-RT-release.patch
new file mode 100644
index 00000000..f18bce5a
--- /dev/null
+++ b/features/rt/Add-localversion-for-RT-release.patch
@@ -0,0 +1,21 @@
+From 98e2d66a6a93107c8552dfa1023d83c9f757dfd5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 8 Jul 2011 20:25:16 +0200
+Subject: [PATCH 191/191] Add localversion for -RT release
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ localversion-rt | 1 +
+ 1 file changed, 1 insertion(+)
+ create mode 100644 localversion-rt
+
+diff --git a/localversion-rt b/localversion-rt
+new file mode 100644
+index 000000000000..6f206be67cd2
+--- /dev/null
++++ b/localversion-rt
+@@ -0,0 +1 @@
++-rt1
+--
+2.19.1
+
diff --git a/features/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/features/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
new file mode 100644
index 00000000..b42375b5
--- /dev/null
+++ b/features/rt/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -0,0 +1,58 @@
+From 67396da80e5247521bb3a295c0a17ea970b143d6 Mon Sep 17 00:00:00 2001
+From: Josh Cartwright <joshc@ni.com>
+Date: Thu, 11 Feb 2016 11:54:01 -0600
+Subject: [PATCH 175/191] KVM: arm/arm64: downgrade preempt_disable()d region
+ to migrate_disable()
+
+kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating
+the vgic and timer states to prevent the calling task from migrating to
+another CPU. It does so to prevent the task from writing to the
+incorrect per-CPU GIC distributor registers.
+
+On -rt kernels, it's possible to maintain the same guarantee with the
+use of migrate_{disable,enable}(), with the added benefit that the
+migrate-disabled region is preemptible. Update
+kvm_arch_vcpu_ioctl_run() to do so.
+
+Cc: Christoffer Dall <christoffer.dall@linaro.org>
+Reported-by: Manish Jaggi <Manish.Jaggi@caviumnetworks.com>
+Signed-off-by: Josh Cartwright <joshc@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/kvm/arm.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 7f06ba76698d..f9318256d6fb 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -737,7 +737,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ * involves poking the GIC, which must be done in a
+ * non-preemptible context.
+ */
+- preempt_disable();
++ migrate_disable();
+
+ kvm_pmu_flush_hwstate(vcpu);
+
+@@ -786,7 +786,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ kvm_timer_sync_user(vcpu);
+ kvm_vgic_sync_hwstate(vcpu);
+ local_irq_enable();
+- preempt_enable();
++ migrate_enable();
+ continue;
+ }
+
+@@ -858,7 +858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ /* Exit types that need handling before we can be preempted */
+ handle_exit_early(vcpu, ret);
+
+- preempt_enable();
++ migrate_enable();
+
+ /*
+ * The ARMv8 architecture doesn't give the hypervisor
+--
+2.19.1
+
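The guarantee this patch relies on is that migrate_disable() keeps the task on its current CPU exactly as preempt_disable() does, while the region stays preemptible on PREEMPT_RT. A minimal sketch of that per-CPU access pattern, with made-up names (demo_pcpu and demo_update are illustrative only, not kernel APIs):

#include <linux/percpu.h>
#include <linux/preempt.h>

struct demo_pcpu {
	unsigned long count;			/* hypothetical per-CPU statistic */
};

static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu);

static void demo_update(void)
{
	struct demo_pcpu *p;

	/*
	 * Pin the task to this CPU so both accesses below hit the same
	 * per-CPU instance. Unlike preempt_disable(), the section stays
	 * preemptible on PREEMPT_RT, so sleeping locks may be taken here.
	 */
	migrate_disable();
	p = this_cpu_ptr(&demo_pcpu);
	p->count++;
	migrate_enable();
}

The same trade-off applies in the vcpu run loop above: the vgic and timer state is per-CPU, so staying on one CPU is what matters, not being non-preemptible.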
diff --git a/features/rt/PCI-hv-Use-tasklet_disable_in_atomic.patch b/features/rt/PCI-hv-Use-tasklet_disable_in_atomic.patch
new file mode 100644
index 00000000..497e17e0
--- /dev/null
+++ b/features/rt/PCI-hv-Use-tasklet_disable_in_atomic.patch
@@ -0,0 +1,45 @@
+From 9fc84c65954200bfb07bc299f48f8fed7905ba92 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:15 +0100
+Subject: [PATCH 052/191] PCI: hv: Use tasklet_disable_in_atomic()
+
+The hv_compose_msi_msg() callback in irq_chip::irq_compose_msi_msg is
+invoked via irq_chip_compose_msi_msg(), which itself is always invoked from
+atomic contexts from the guts of the interrupt core code.
+
+There is no way to change this w/o rewriting the whole driver, so use
+tasklet_disable_in_atomic() which allows to make tasklet_disable()
+sleepable once the remaining atomic users are addressed.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "K. Y. Srinivasan" <kys@microsoft.com>
+Cc: Haiyang Zhang <haiyangz@microsoft.com>
+Cc: Stephen Hemminger <sthemmin@microsoft.com>
+Cc: Wei Liu <wei.liu@kernel.org>
+Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Cc: Rob Herring <robh@kernel.org>
+Cc: Bjorn Helgaas <bhelgaas@google.com>
+Cc: linux-hyperv@vger.kernel.org
+Cc: linux-pci@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/pci/controller/pci-hyperv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
+index 27a17a1e4a7c..a313708bcf75 100644
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -1458,7 +1458,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+ * Prevents hv_pci_onchannelcallback() from running concurrently
+ * in the tasklet.
+ */
+- tasklet_disable(&channel->callback_event);
++ tasklet_disable_in_atomic(&channel->callback_event);
+
+ /*
+ * Since this function is called with IRQ locks held, can't
+--
+2.19.1
+
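tasklet_disable_in_atomic() is provided by tasklets-Provide-tasklet_disable_in_atomic.patch later in this series: it keeps the spin-waiting semantics for callers that cannot sleep, so that plain tasklet_disable() can eventually become sleepable. A rough sketch of the resulting calling convention, using a hypothetical driver (demo_dev and the demo_* functions are illustrative only):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;
	struct tasklet_struct task;
};

/* Called under dev->lock or from interrupt context: must not sleep. */
static void demo_quiesce_atomic(struct demo_dev *dev)
{
	tasklet_disable_in_atomic(&dev->task);	/* spin-waits until the tasklet is idle */
	/* ... touch state shared with the tasklet ... */
	tasklet_enable(&dev->task);
}

/* Called from preemptible context: may sleep once tasklet_disable() is converted. */
static void demo_quiesce(struct demo_dev *dev)
{
	tasklet_disable(&dev->task);
	/* ... */
	tasklet_enable(&dev->task);
}

hv_compose_msi_msg() falls into the first category because the irq_chip callback is always invoked from atomic context; the ath9k and atm/eni patches later in the series make the same call for the same reason.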
diff --git a/features/rt/POWERPC-Allow-to-enable-RT.patch b/features/rt/POWERPC-Allow-to-enable-RT.patch
new file mode 100644
index 00000000..b0222f0a
--- /dev/null
+++ b/features/rt/POWERPC-Allow-to-enable-RT.patch
@@ -0,0 +1,35 @@
+From 76fabe2ab5dd6e98cc51f7289944552db0f5480e Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 11 Oct 2019 13:14:41 +0200
+Subject: [PATCH 185/191] POWERPC: Allow to enable RT
+
+Allow to select RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index bbee9b2f2bc7..a4fe480b4e9d 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -147,6 +147,7 @@ config PPC
+ select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64
++ select ARCH_SUPPORTS_RT if HAVE_POSIX_CPU_TIMERS_TASK_WORK
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+ select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS
+@@ -240,6 +241,7 @@ config PPC
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_VIRT_CPU_ACCOUNTING
+ select HAVE_IRQ_TIME_ACCOUNTING
++ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
+ select HAVE_RSEQ
+ select IOMMU_HELPER if PPC64
+ select IRQ_DOMAIN
+--
+2.19.1
+
diff --git a/features/rt/arch-arm64-Add-lazy-preempt-support.patch b/features/rt/arch-arm64-Add-lazy-preempt-support.patch
new file mode 100644
index 00000000..cc7a56d7
--- /dev/null
+++ b/features/rt/arch-arm64-Add-lazy-preempt-support.patch
@@ -0,0 +1,170 @@
+From f4aa6daf2286c2e06200d7909bb812c7ceb09e69 Mon Sep 17 00:00:00 2001
+From: Anders Roxell <anders.roxell@linaro.org>
+Date: Thu, 14 May 2015 17:52:17 +0200
+Subject: [PATCH 168/191] arch/arm64: Add lazy preempt support
+
+arm64 is missing support for PREEMPT_RT. The main feature which is
+lacking is support for lazy preemption. The arch-specific entry code,
+thread information structure definitions, and associated data tables
+have to be extended to provide this support. Then the Kconfig file has
+to be extended to indicate the support is available, and also to
+indicate that support for full RT preemption is now available.
+
+Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
+---
+ arch/arm64/Kconfig | 1 +
+ arch/arm64/include/asm/preempt.h | 25 ++++++++++++++++++++++++-
+ arch/arm64/include/asm/thread_info.h | 8 +++++++-
+ arch/arm64/kernel/asm-offsets.c | 1 +
+ arch/arm64/kernel/entry.S | 13 +++++++++++--
+ arch/arm64/kernel/signal.c | 2 +-
+ 6 files changed, 45 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 5656e7aacd69..93a4dd767825 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -182,6 +182,7 @@ config ARM64
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_REGS_AND_STACK_ACCESS_API
++ select HAVE_PREEMPT_LAZY
+ select HAVE_FUNCTION_ARG_ACCESS_API
+ select HAVE_FUTEX_CMPXCHG if FUTEX
+ select MMU_GATHER_RCU_TABLE_FREE
+diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
+index f06a23898540..994f997b1572 100644
+--- a/arch/arm64/include/asm/preempt.h
++++ b/arch/arm64/include/asm/preempt.h
+@@ -70,13 +70,36 @@ static inline bool __preempt_count_dec_and_test(void)
+ * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
+ * pair.
+ */
+- return !pc || !READ_ONCE(ti->preempt_count);
++ if (!pc || !READ_ONCE(ti->preempt_count))
++ return true;
++#ifdef CONFIG_PREEMPT_LAZY
++ if ((pc & ~PREEMPT_NEED_RESCHED))
++ return false;
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++ return false;
++#endif
+ }
+
+ static inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++ u64 pc = READ_ONCE(current_thread_info()->preempt_count);
++ if (pc == preempt_offset)
++ return true;
++
++ if ((pc & ~PREEMPT_NEED_RESCHED) != preempt_offset)
++ return false;
++
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+ u64 pc = READ_ONCE(current_thread_info()->preempt_count);
+ return pc == preempt_offset;
++#endif
+ }
+
+ #ifdef CONFIG_PREEMPTION
+diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
+index 9f4e3b266f21..d3fa570c7235 100644
+--- a/arch/arm64/include/asm/thread_info.h
++++ b/arch/arm64/include/asm/thread_info.h
+@@ -26,6 +26,7 @@ struct thread_info {
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ u64 ttbr0; /* saved TTBR0_EL1 */
+ #endif
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ union {
+ u64 preempt_count; /* 0 => preemptible, <0 => bug */
+ struct {
+@@ -65,6 +66,7 @@ void arch_release_task_struct(struct task_struct *tsk);
+ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
+ #define TIF_MTE_ASYNC_FAULT 5 /* MTE Asynchronous Tag Check Fault */
+ #define TIF_NOTIFY_SIGNAL 6 /* signal notifications exist */
++#define TIF_NEED_RESCHED_LAZY 7
+ #define TIF_SYSCALL_TRACE 8 /* syscall trace active */
+ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
+ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
+@@ -95,8 +97,10 @@ void arch_release_task_struct(struct task_struct *tsk);
+ #define _TIF_SVE (1 << TIF_SVE)
+ #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT)
+ #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+
+-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
++#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
++ _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+ _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \
+ _TIF_NOTIFY_SIGNAL)
+@@ -105,6 +109,8 @@ void arch_release_task_struct(struct task_struct *tsk);
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+ _TIF_SYSCALL_EMU)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #ifdef CONFIG_SHADOW_CALL_STACK
+ #define INIT_SCS \
+ .scs_base = init_shadow_call_stack, \
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index a36e2fc330d4..b94354a3af96 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -30,6 +30,7 @@ int main(void)
+ BLANK();
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
++ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count));
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
+ #endif
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index a31a0a713c85..63e1ad8c2ea8 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -678,9 +678,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ mrs x0, daif
+ orr x24, x24, x0
+ alternative_else_nop_endif
+- cbnz x24, 1f // preempt count != 0 || NMI return path
+- bl arm64_preempt_schedule_irq // irq en/disable is done inside
++
++ cbz x24, 1f // (need_resched + count) == 0
++ cbnz w24, 2f // count != 0
++
++ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
++ cbnz w24, 2f // preempt lazy count != 0
++
++ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
+ 1:
++ bl arm64_preempt_schedule_irq // irq en/disable is done inside
++2:
+ #endif
+
+ mov x0, sp
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index 6237486ff6bb..ab411f336c39 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -915,7 +915,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_flags)
+ {
+ do {
+- if (thread_flags & _TIF_NEED_RESCHED) {
++ if (thread_flags & _TIF_NEED_RESCHED_MASK) {
+ /* Unmask Debug and SError for the next task */
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+--
+2.19.1
+
diff --git a/features/rt/arm-Add-support-for-lazy-preemption.patch b/features/rt/arm-Add-support-for-lazy-preemption.patch
new file mode 100644
index 00000000..b6e2718c
--- /dev/null
+++ b/features/rt/arm-Add-support-for-lazy-preemption.patch
@@ -0,0 +1,136 @@
+From 71f7a0c901c01218f119d2e16cb22e1069a61900 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 31 Oct 2012 12:04:11 +0100
+Subject: [PATCH 166/191] arm: Add support for lazy preemption
+
+Implement the arm pieces for lazy preempt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/Kconfig | 1 +
+ arch/arm/include/asm/thread_info.h | 6 +++++-
+ arch/arm/kernel/asm-offsets.c | 1 +
+ arch/arm/kernel/entry-armv.S | 19 ++++++++++++++++---
+ arch/arm/kernel/signal.c | 3 ++-
+ 5 files changed, 25 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 5da96f5df48f..5179eb3a35d5 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -107,6 +107,7 @@ config ARM
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
+diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
+index 70d4cbc49ae1..b86418b4dfef 100644
+--- a/arch/arm/include/asm/thread_info.h
++++ b/arch/arm/include/asm/thread_info.h
+@@ -54,6 +54,7 @@ struct cpu_context_save {
+ struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => bug */
++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ __u32 cpu; /* cpu */
+@@ -146,6 +147,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
+ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+ #define TIF_SECCOMP 7 /* seccomp syscall filtering active */
+ #define TIF_NOTIFY_SIGNAL 8 /* signal notifications exist */
++#define TIF_NEED_RESCHED_LAZY 9
+
+ #define TIF_USING_IWMMXT 17
+ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
+@@ -160,6 +162,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
+ #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+ #define _TIF_SECCOMP (1 << TIF_SECCOMP)
+ #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
+
+ /* Checks for any syscall work in entry-common.S */
+@@ -169,7 +172,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
+ /*
+ * Change these and you break ASM code in entry-common.S
+ */
+-#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
++#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
++ _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_NOTIFY_SIGNAL)
+
+diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
+index be8050b0c3df..884e40a525ce 100644
+--- a/arch/arm/kernel/asm-offsets.c
++++ b/arch/arm/kernel/asm-offsets.c
+@@ -42,6 +42,7 @@ int main(void)
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
+index 0ea8529a4872..fa0d155d21b3 100644
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -206,11 +206,18 @@ __irq_svc:
+
+ #ifdef CONFIG_PREEMPTION
+ ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
+- ldr r0, [tsk, #TI_FLAGS] @ get flags
+ teq r8, #0 @ if preempt count != 0
++ bne 1f @ return from exeption
++ ldr r0, [tsk, #TI_FLAGS] @ get flags
++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set
++ blne svc_preempt @ preempt!
++
++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r8, #0 @ if preempt lazy count != 0
+ movne r0, #0 @ force flags to 0
+- tst r0, #_TIF_NEED_RESCHED
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ blne svc_preempt
++1:
+ #endif
+
+ svc_exit r5, irq = 1 @ return from exception
+@@ -225,8 +232,14 @@ svc_preempt:
+ 1: bl preempt_schedule_irq @ irq en/disable is done inside
+ ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
+ tst r0, #_TIF_NEED_RESCHED
++ bne 1b
++ tst r0, #_TIF_NEED_RESCHED_LAZY
+ reteq r8 @ go again
+- b 1b
++ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count
++ teq r0, #0 @ if preempt lazy count != 0
++ beq 1b
++ ret r8 @ go again
++
+ #endif
+
+ __und_fault:
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index a3a38d0a4c85..f04ccf19ab1f 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -649,7 +649,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
+ */
+ trace_hardirqs_off();
+ do {
+- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
++ if (likely(thread_flags & (_TIF_NEED_RESCHED |
++ _TIF_NEED_RESCHED_LAZY))) {
+ schedule();
+ } else {
+ if (unlikely(!user_mode(regs)))
+--
+2.19.1
+
diff --git a/features/rt/arm64-fpsimd-Delay-freeing-memory-in-fpsimd_flush_th.patch b/features/rt/arm64-fpsimd-Delay-freeing-memory-in-fpsimd_flush_th.patch
new file mode 100644
index 00000000..75e9f204
--- /dev/null
+++ b/features/rt/arm64-fpsimd-Delay-freeing-memory-in-fpsimd_flush_th.patch
@@ -0,0 +1,65 @@
+From c17705510939368390130106b37160bb2ff08e1c Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 25 Jul 2018 14:02:38 +0200
+Subject: [PATCH 176/191] arm64: fpsimd: Delay freeing memory in
+ fpsimd_flush_thread()
+
+fpsimd_flush_thread() invokes kfree() via sve_free() within a preempt-disabled
+section, which does not work on -RT.
+
+Delay freeing of memory until preemption is enabled again.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/kernel/fpsimd.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 062b21f30f94..0ea2df6554e5 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -226,6 +226,16 @@ static void sve_free(struct task_struct *task)
+ __sve_free(task);
+ }
+
++static void *sve_free_atomic(struct task_struct *task)
++{
++ void *sve_state = task->thread.sve_state;
++
++ WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
++
++ task->thread.sve_state = NULL;
++ return sve_state;
++}
++
+ /*
+ * TIF_SVE controls whether a task can use SVE without trapping while
+ * in userspace, and also the way a task's FPSIMD/SVE state is stored
+@@ -1022,6 +1032,7 @@ void fpsimd_thread_switch(struct task_struct *next)
+ void fpsimd_flush_thread(void)
+ {
+ int vl, supported_vl;
++ void *mem = NULL;
+
+ if (!system_supports_fpsimd())
+ return;
+@@ -1034,7 +1045,7 @@ void fpsimd_flush_thread(void)
+
+ if (system_supports_sve()) {
+ clear_thread_flag(TIF_SVE);
+- sve_free(current);
++ mem = sve_free_atomic(current);
+
+ /*
+ * Reset the task vector length as required.
+@@ -1068,6 +1079,7 @@ void fpsimd_flush_thread(void)
+ }
+
+ put_cpu_fpsimd_context();
++ kfree(mem);
+ }
+
+ /*
+--
+2.19.1
+
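The fix follows a recurring PREEMPT_RT pattern: detach the pointer while inside the non-preemptible section and defer the kfree() until preemption is enabled again, because the allocator may take sleeping locks on RT. A condensed sketch of the pattern with a hypothetical structure (demo_obj and demo_reset are illustrative):

#include <linux/preempt.h>
#include <linux/slab.h>

struct demo_obj {
	void *buf;
};

static void demo_reset(struct demo_obj *obj)
{
	void *mem;

	preempt_disable();
	mem = obj->buf;		/* detach inside the non-preemptible section */
	obj->buf = NULL;
	preempt_enable();

	kfree(mem);		/* safe here: the allocator may sleep on PREEMPT_RT */
}

sve_free_atomic() above is the "detach" half; the kfree() runs only after put_cpu_fpsimd_context().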
diff --git a/features/rt/ath9k-Use-tasklet_disable_in_atomic.patch b/features/rt/ath9k-Use-tasklet_disable_in_atomic.patch
new file mode 100644
index 00000000..d14560b6
--- /dev/null
+++ b/features/rt/ath9k-Use-tasklet_disable_in_atomic.patch
@@ -0,0 +1,47 @@
+From 0fdf02e085f667a14612fe8555ffffcdc7b3044e Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:13 +0100
+Subject: [PATCH 050/191] ath9k: Use tasklet_disable_in_atomic()
+
+All callers of ath9k_beacon_ensure_primary_slot() are preemptible /
+acquire a mutex except for this callchain:
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+ ath_complete_reset()
+ -> ath9k_calculate_summary_state()
+ -> ath9k_beacon_ensure_primary_slot()
+
+It's unclear how that can be disentangled, so use tasklet_disable_in_atomic()
+for now. This allows tasklet_disable() to become sleepable once the
+remaining atomic users are cleaned up.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: ath9k-devel@qca.qualcomm.com
+Cc: Kalle Valo <kvalo@codeaurora.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Jakub Kicinski <kuba@kernel.org>
+Cc: linux-wireless@vger.kernel.org
+Cc: netdev@vger.kernel.org
+Acked-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/wireless/ath/ath9k/beacon.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
+index 71e2ada86793..72e2e71aac0e 100644
+--- a/drivers/net/wireless/ath/ath9k/beacon.c
++++ b/drivers/net/wireless/ath/ath9k/beacon.c
+@@ -251,7 +251,7 @@ void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc)
+ int first_slot = ATH_BCBUF;
+ int slot;
+
+- tasklet_disable(&sc->bcon_tasklet);
++ tasklet_disable_in_atomic(&sc->bcon_tasklet);
+
+ /* Find first taken slot. */
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+--
+2.19.1
+
diff --git a/features/rt/atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch b/features/rt/atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch
new file mode 100644
index 00000000..475b8b02
--- /dev/null
+++ b/features/rt/atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch
@@ -0,0 +1,41 @@
+From 3a6d1acf6c1ff722cb9479fba99a7ea0c6c50ee7 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:14 +0100
+Subject: [PATCH 051/191] atm: eni: Use tasklet_disable_in_atomic() in the
+ send() callback
+
+The atmdev_ops::send callback which calls tasklet_disable() is invoked with
+bottom halfs disabled from net_device_ops::ndo_start_xmit(). All other
+invocations of tasklet_disable() in this driver happen in preemptible
+context.
+
+Change the send() call to use tasklet_disable_in_atomic() which allows
+tasklet_disable() to be made sleepable once the remaining atomic context
+usage sites are cleaned up.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Chas Williams <3chas3@gmail.com>
+Cc: linux-atm-general@lists.sourceforge.net
+Cc: netdev@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/atm/eni.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
+index b574cce98dc3..422753d52244 100644
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -2054,7 +2054,7 @@ static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
+ }
+ submitted++;
+ ATM_SKB(skb)->vcc = vcc;
+- tasklet_disable(&ENI_DEV(vcc->dev)->task);
++ tasklet_disable_in_atomic(&ENI_DEV(vcc->dev)->task);
+ res = do_tx(skb);
+ tasklet_enable(&ENI_DEV(vcc->dev)->task);
+ if (res == enq_ok) return 0;
+--
+2.19.1
+
diff --git a/features/rt/block-mq-do-not-invoke-preempt_disable.patch b/features/rt/block-mq-do-not-invoke-preempt_disable.patch
new file mode 100644
index 00000000..c869f8a1
--- /dev/null
+++ b/features/rt/block-mq-do-not-invoke-preempt_disable.patch
@@ -0,0 +1,39 @@
+From daef692486de5a4273e1fdff8121fca46992bc5b Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 14 Jul 2015 14:26:34 +0200
+Subject: [PATCH 134/191] block/mq: do not invoke preempt_disable()
+
+preempt_disable() and get_cpu() don't play well together with the sleeping
+locks taken later on this path on PREEMPT_RT.
+It seems to be enough to replace them with get_cpu_light() and migrate_disable().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ block/blk-mq.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index d4d7c1caa439..2003865f725b 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1560,14 +1560,14 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+ return;
+
+ if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
+- int cpu = get_cpu();
++ int cpu = get_cpu_light();
+ if (cpumask_test_cpu(cpu, hctx->cpumask)) {
+ __blk_mq_run_hw_queue(hctx);
+- put_cpu();
++ put_cpu_light();
+ return;
+ }
+
+- put_cpu();
++ put_cpu_light();
+ }
+
+ kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
+--
+2.19.1
+
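get_cpu_light() and put_cpu_light() come from kernel-sched-add-put-get-_cpu_light.patch elsewhere in this series; the working model (the authoritative definition is in that patch) is that they pin the task with migrate_disable() instead of disabling preemption:

/* Approximation of the helpers added by kernel-sched-add-put-get-_cpu_light.patch: */
#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
#define put_cpu_light()		migrate_enable()

The hunk above therefore still learns which CPU it runs on and stays there, but the section remains preemptible, which is what makes the sleeping locks taken further down this path legal on RT.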
diff --git a/features/rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/features/rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
new file mode 100644
index 00000000..a215e848
--- /dev/null
+++ b/features/rt/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
@@ -0,0 +1,49 @@
+From 79683d22afc9a06abcf0b901d55a36b471da6a43 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 3 Jul 2018 18:19:48 +0200
+Subject: [PATCH 035/191] cgroup: use irqsave in cgroup_rstat_flush_locked()
+
+All callers of cgroup_rstat_flush_locked() acquire cgroup_rstat_lock
+either with spin_lock_irq() or spin_lock_irqsave().
+cgroup_rstat_flush_locked() itself acquires cgroup_rstat_cpu_lock which
+is a raw_spin_lock. This lock is also acquired in cgroup_rstat_updated()
+in IRQ context and therefore requires _irqsave() locking suffix in
+cgroup_rstat_flush_locked().
+Since there is no difference between spinlock_t and raw_spinlock_t
+on !RT, lockdep does not complain here. On RT, lockdep complains because
+interrupts are not disabled here and a deadlock is possible.
+
+Acquire the raw_spin_lock_t with disabled interrupts.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cgroup/rstat.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
+index d51175cedfca..b424f3157b34 100644
+--- a/kernel/cgroup/rstat.c
++++ b/kernel/cgroup/rstat.c
+@@ -149,8 +149,9 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+ raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
+ cpu);
+ struct cgroup *pos = NULL;
++ unsigned long flags;
+
+- raw_spin_lock(cpu_lock);
++ raw_spin_lock_irqsave(cpu_lock, flags);
+ while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
+ struct cgroup_subsys_state *css;
+
+@@ -162,7 +163,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+ css->ss->css_rstat_flush(css, cpu);
+ rcu_read_unlock();
+ }
+- raw_spin_unlock(cpu_lock);
++ raw_spin_unlock_irqrestore(cpu_lock, flags);
+
+ /* if @may_sleep, play nice and yield if necessary */
+ if (may_sleep && (need_resched() ||
+--
+2.19.1
+
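The rule being applied is the usual one for a lock that is also taken from interrupt context: every other acquisition must disable interrupts, otherwise the interrupt can hit the lock holder on the same CPU and spin on the lock forever. A minimal illustration with a hypothetical lock (demo_lock and the demo_* functions are made up):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

/* Runs in hard interrupt context, interrupts already disabled. */
static void demo_irq_update(void)
{
	raw_spin_lock(&demo_lock);
	/* ... */
	raw_spin_unlock(&demo_lock);
}

/* Runs in task context. */
static void demo_flush(void)
{
	unsigned long flags;

	/*
	 * A plain raw_spin_lock() here could be interrupted by
	 * demo_irq_update() on this CPU while holding the lock,
	 * which would self-deadlock.
	 */
	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}

On !RT the callers' spin_lock_irq()/spin_lock_irqsave() on cgroup_rstat_lock already disabled interrupts; only on RT, where spin_lock_irq() does not disable interrupts, is the problem exposed.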
diff --git a/features/rt/console-add-write_atomic-interface.patch b/features/rt/console-add-write_atomic-interface.patch
new file mode 100644
index 00000000..3495b552
--- /dev/null
+++ b/features/rt/console-add-write_atomic-interface.patch
@@ -0,0 +1,162 @@
+From 92323b9fa8b3aad4e8ba5e08e23fdf18e9ad1545 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:01 +0106
+Subject: [PATCH 024/191] console: add write_atomic interface
+
+Add a write_atomic() callback to the console. This is an optional
+function for console drivers. The function must be atomic (including
+NMI safe) for writing to the console.
+
+Console drivers must still implement the write() callback. The
+write_atomic() callback will only be used in special situations,
+such as when the kernel panics.
+
+Creating an NMI safe write_atomic() that must synchronize with
+write() requires a careful implementation of the console driver. To
+aid with the implementation, a set of console_atomic_*() functions
+are provided:
+
+ void console_atomic_lock(unsigned int *flags);
+ void console_atomic_unlock(unsigned int flags);
+
+These functions synchronize using a processor-reentrant spinlock
+(called a cpulock).
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 4 ++
+ kernel/printk/printk.c | 100 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 104 insertions(+)
+
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 20874db50bc8..919c8d43d986 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -140,6 +140,7 @@ static inline int con_debug_leave(void)
+ struct console {
+ char name[16];
+ void (*write)(struct console *, const char *, unsigned);
++ void (*write_atomic)(struct console *co, const char *s, unsigned int count);
+ int (*read)(struct console *, char *, unsigned);
+ struct tty_driver *(*device)(struct console *, int *);
+ void (*unblank)(void);
+@@ -229,4 +230,7 @@ extern void console_init(void);
+ void dummycon_register_output_notifier(struct notifier_block *nb);
+ void dummycon_unregister_output_notifier(struct notifier_block *nb);
+
++extern void console_atomic_lock(unsigned int *flags);
++extern void console_atomic_unlock(unsigned int flags);
++
+ #endif /* _LINUX_CONSOLE_H */
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 3e0b9bf28e12..5af6f757818f 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3546,3 +3546,103 @@ void kmsg_dump_rewind(struct kmsg_dumper_iter *iter)
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+
+ #endif
++
++struct prb_cpulock {
++ atomic_t owner;
++ unsigned long __percpu *irqflags;
++};
++
++#define DECLARE_STATIC_PRINTKRB_CPULOCK(name) \
++static DEFINE_PER_CPU(unsigned long, _##name##_percpu_irqflags); \
++static struct prb_cpulock name = { \
++ .owner = ATOMIC_INIT(-1), \
++ .irqflags = &_##name##_percpu_irqflags, \
++}
++
++static bool __prb_trylock(struct prb_cpulock *cpu_lock,
++ unsigned int *cpu_store)
++{
++ unsigned long *flags;
++ unsigned int cpu;
++
++ cpu = get_cpu();
++
++ *cpu_store = atomic_read(&cpu_lock->owner);
++ /* memory barrier to ensure the current lock owner is visible */
++ smp_rmb();
++ if (*cpu_store == -1) {
++ flags = per_cpu_ptr(cpu_lock->irqflags, cpu);
++ local_irq_save(*flags);
++ if (atomic_try_cmpxchg_acquire(&cpu_lock->owner,
++ cpu_store, cpu)) {
++ return true;
++ }
++ local_irq_restore(*flags);
++ } else if (*cpu_store == cpu) {
++ return true;
++ }
++
++ put_cpu();
++ return false;
++}
++
++/*
++ * prb_lock: Perform a processor-reentrant spin lock.
++ * @cpu_lock: A pointer to the lock object.
++ * @cpu_store: A "flags" pointer to store lock status information.
++ *
++ * If no processor has the lock, the calling processor takes the lock and
++ * becomes the owner. If the calling processor is already the owner of the
++ * lock, this function succeeds immediately. If lock is locked by another
++ * processor, this function spins until the calling processor becomes the
++ * owner.
++ *
++ * It is safe to call this function from any context and state.
++ */
++static void prb_lock(struct prb_cpulock *cpu_lock, unsigned int *cpu_store)
++{
++ for (;;) {
++ if (__prb_trylock(cpu_lock, cpu_store))
++ break;
++ cpu_relax();
++ }
++}
++
++/*
++ * prb_unlock: Perform a processor-reentrant spin unlock.
++ * @cpu_lock: A pointer to the lock object.
++ * @cpu_store: A "flags" object storing lock status information.
++ *
++ * Release the lock. The calling processor must be the owner of the lock.
++ *
++ * It is safe to call this function from any context and state.
++ */
++static void prb_unlock(struct prb_cpulock *cpu_lock, unsigned int cpu_store)
++{
++ unsigned long *flags;
++ unsigned int cpu;
++
++ cpu = atomic_read(&cpu_lock->owner);
++ atomic_set_release(&cpu_lock->owner, cpu_store);
++
++ if (cpu_store == -1) {
++ flags = per_cpu_ptr(cpu_lock->irqflags, cpu);
++ local_irq_restore(*flags);
++ }
++
++ put_cpu();
++}
++
++DECLARE_STATIC_PRINTKRB_CPULOCK(printk_cpulock);
++
++void console_atomic_lock(unsigned int *flags)
++{
++ prb_lock(&printk_cpulock, flags);
++}
++EXPORT_SYMBOL(console_atomic_lock);
++
++void console_atomic_unlock(unsigned int flags)
++{
++ prb_unlock(&printk_cpulock, flags);
++}
++EXPORT_SYMBOL(console_atomic_unlock);
+--
+2.19.1
+
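A console driver wires the new callback up next to its regular write() implementation. A rough sketch of what that could look like for a polled UART (demo_uart_putc(), demo_write() and demo_console are hypothetical; only the write_atomic member and console_atomic_lock()/console_atomic_unlock() come from the patch above):

#include <linux/console.h>

static void demo_uart_putc(char c)
{
	/* poll the (hypothetical) UART for space, then write c to its data register */
}

static void demo_write(struct console *con, const char *s, unsigned int count)
{
	while (count--)
		demo_uart_putc(*s++);
}

static void demo_write_atomic(struct console *con, const char *s,
			      unsigned int count)
{
	unsigned int flags;

	console_atomic_lock(&flags);	/* processor-reentrant, usable from NMI */
	while (count--)
		demo_uart_putc(*s++);
	console_atomic_unlock(flags);
}

static struct console demo_console = {
	.name		= "demo",
	.write		= demo_write,		/* still mandatory */
	.write_atomic	= demo_write_atomic,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

register_console(&demo_console) hooks it up as usual. As the commit message notes, write() and write_atomic() must synchronize with each other; serial-8250-implement-write_atomic.patch later in the series is the worked example of doing that against real hardware.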
diff --git a/features/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/features/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
new file mode 100644
index 00000000..115f4072
--- /dev/null
+++ b/features/rt/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -0,0 +1,328 @@
+From f2031242ce940802f8703826b47dfd2e1290f97c Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <efault@gmx.de>
+Date: Sun, 8 Jan 2017 09:32:25 +0100
+Subject: [PATCH 160/191] cpuset: Convert callback_lock to raw_spinlock_t
+
+The two commits below add up to a cpuset might_sleep() splat for RT:
+
+8447a0fee974 cpuset: convert callback_mutex to a spinlock
+344736f29b35 cpuset: simplify cpuset_node_allowed API
+
+BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:995
+in_atomic(): 0, irqs_disabled(): 1, pid: 11718, name: cset
+CPU: 135 PID: 11718 Comm: cset Tainted: G E 4.10.0-rt1-rt #4
+Hardware name: Intel Corporation BRICKLAND/BRICKLAND, BIOS BRHSXSD1.86B.0056.R01.1409242327 09/24/2014
+Call Trace:
+ ? dump_stack+0x5c/0x81
+ ? ___might_sleep+0xf4/0x170
+ ? rt_spin_lock+0x1c/0x50
+ ? __cpuset_node_allowed+0x66/0xc0
+ ? ___slab_alloc+0x390/0x570 <disables IRQs>
+ ? anon_vma_fork+0x8f/0x140
+ ? copy_page_range+0x6cf/0xb00
+ ? anon_vma_fork+0x8f/0x140
+ ? __slab_alloc.isra.74+0x5a/0x81
+ ? anon_vma_fork+0x8f/0x140
+ ? kmem_cache_alloc+0x1b5/0x1f0
+ ? anon_vma_fork+0x8f/0x140
+ ? copy_process.part.35+0x1670/0x1ee0
+ ? _do_fork+0xdd/0x3f0
+ ? _do_fork+0xdd/0x3f0
+ ? do_syscall_64+0x61/0x170
+ ? entry_SYSCALL64_slow_path+0x25/0x25
+
+The latter ensured that a NUMA box WILL take callback_lock in atomic
+context by removing the allocator and reclaim path __GFP_HARDWALL
+usage which prevented such contexts from taking callback_mutex.
+
+One option would be to reinstate __GFP_HARDWALL protections for
+RT, however, as the 8447a0fee974 changelog states:
+
+The callback_mutex is only used to synchronize reads/updates of cpusets'
+flags and cpu/node masks. These operations should always proceed fast so
+there's no reason why we can't use a spinlock instead of the mutex.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cgroup/cpuset.c | 70 +++++++++++++++++++++---------------------
+ 1 file changed, 35 insertions(+), 35 deletions(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 5258b68153e0..fb8715c1edfc 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -345,7 +345,7 @@ void cpuset_read_unlock(void)
+ percpu_up_read(&cpuset_rwsem);
+ }
+
+-static DEFINE_SPINLOCK(callback_lock);
++static DEFINE_RAW_SPINLOCK(callback_lock);
+
+ static struct workqueue_struct *cpuset_migrate_mm_wq;
+
+@@ -1280,7 +1280,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
+ * Newly added CPUs will be removed from effective_cpus and
+ * newly deleted ones will be added back to effective_cpus.
+ */
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ if (adding) {
+ cpumask_or(parent->subparts_cpus,
+ parent->subparts_cpus, tmp->addmask);
+@@ -1299,7 +1299,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
+ }
+
+ parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+
+ return cmd == partcmd_update;
+ }
+@@ -1404,7 +1404,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
+ continue;
+ rcu_read_unlock();
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+
+ cpumask_copy(cp->effective_cpus, tmp->new_cpus);
+ if (cp->nr_subparts_cpus &&
+@@ -1435,7 +1435,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
+ = cpumask_weight(cp->subparts_cpus);
+ }
+ }
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+
+ WARN_ON(!is_in_v2_mode() &&
+ !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
+@@ -1553,7 +1553,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ return -EINVAL;
+ }
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+
+ /*
+@@ -1564,7 +1564,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
+ cs->cpus_allowed);
+ cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
+ }
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+
+ update_cpumasks_hier(cs, &tmp);
+
+@@ -1758,9 +1758,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
+ continue;
+ rcu_read_unlock();
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cp->effective_mems = *new_mems;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+
+ WARN_ON(!is_in_v2_mode() &&
+ !nodes_equal(cp->mems_allowed, cp->effective_mems));
+@@ -1828,9 +1828,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
+ if (retval < 0)
+ goto done;
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cs->mems_allowed = trialcs->mems_allowed;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+
+ /* use trialcs->mems_allowed as a temp variable */
+ update_nodemasks_hier(cs, &trialcs->mems_allowed);
+@@ -1921,9 +1921,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+ spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
+ || (is_spread_page(cs) != is_spread_page(trialcs)));
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cs->flags = trialcs->flags;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+
+ if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
+ rebuild_sched_domains_locked();
+@@ -2432,7 +2432,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
+ cpuset_filetype_t type = seq_cft(sf)->private;
+ int ret = 0;
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+
+ switch (type) {
+ case FILE_CPULIST:
+@@ -2454,7 +2454,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
+ ret = -EINVAL;
+ }
+
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ return ret;
+ }
+
+@@ -2767,14 +2767,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+
+ cpuset_inc();
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ if (is_in_v2_mode()) {
+ cpumask_copy(cs->effective_cpus, parent->effective_cpus);
+ cs->effective_mems = parent->effective_mems;
+ cs->use_parent_ecpus = true;
+ parent->child_ecpus_count++;
+ }
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+
+ if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
+ goto out_unlock;
+@@ -2801,12 +2801,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
+ }
+ rcu_read_unlock();
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cs->mems_allowed = parent->mems_allowed;
+ cs->effective_mems = parent->mems_allowed;
+ cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+ cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ out_unlock:
+ percpu_up_write(&cpuset_rwsem);
+ put_online_cpus();
+@@ -2862,7 +2862,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
+ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ {
+ percpu_down_write(&cpuset_rwsem);
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+
+ if (is_in_v2_mode()) {
+ cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
+@@ -2873,7 +2873,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ top_cpuset.mems_allowed = top_cpuset.effective_mems;
+ }
+
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ percpu_up_write(&cpuset_rwsem);
+ }
+
+@@ -2970,12 +2970,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
+ {
+ bool is_empty;
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cpumask_copy(cs->cpus_allowed, new_cpus);
+ cpumask_copy(cs->effective_cpus, new_cpus);
+ cs->mems_allowed = *new_mems;
+ cs->effective_mems = *new_mems;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+
+ /*
+ * Don't call update_tasks_cpumask() if the cpuset becomes empty,
+@@ -3012,10 +3012,10 @@ hotplug_update_tasks(struct cpuset *cs,
+ if (nodes_empty(*new_mems))
+ *new_mems = parent_cs(cs)->effective_mems;
+
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ cpumask_copy(cs->effective_cpus, new_cpus);
+ cs->effective_mems = *new_mems;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+
+ if (cpus_updated)
+ update_tasks_cpumask(cs);
+@@ -3170,7 +3170,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+
+ /* synchronize cpus_allowed to cpu_active_mask */
+ if (cpus_updated) {
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ if (!on_dfl)
+ cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+ /*
+@@ -3190,17 +3190,17 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+ }
+ }
+ cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ /* we don't mess with cpumasks of tasks in top_cpuset */
+ }
+
+ /* synchronize mems_allowed to N_MEMORY */
+ if (mems_updated) {
+- spin_lock_irq(&callback_lock);
++ raw_spin_lock_irq(&callback_lock);
+ if (!on_dfl)
+ top_cpuset.mems_allowed = new_mems;
+ top_cpuset.effective_mems = new_mems;
+- spin_unlock_irq(&callback_lock);
++ raw_spin_unlock_irq(&callback_lock);
+ update_tasks_nodemask(&top_cpuset);
+ }
+
+@@ -3301,11 +3301,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&callback_lock, flags);
++ raw_spin_lock_irqsave(&callback_lock, flags);
+ rcu_read_lock();
+ guarantee_online_cpus(task_cs(tsk), pmask);
+ rcu_read_unlock();
+- spin_unlock_irqrestore(&callback_lock, flags);
++ raw_spin_unlock_irqrestore(&callback_lock, flags);
+ }
+
+ /**
+@@ -3366,11 +3366,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
+ nodemask_t mask;
+ unsigned long flags;
+
+- spin_lock_irqsave(&callback_lock, flags);
++ raw_spin_lock_irqsave(&callback_lock, flags);
+ rcu_read_lock();
+ guarantee_online_mems(task_cs(tsk), &mask);
+ rcu_read_unlock();
+- spin_unlock_irqrestore(&callback_lock, flags);
++ raw_spin_unlock_irqrestore(&callback_lock, flags);
+
+ return mask;
+ }
+@@ -3462,14 +3462,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
+ return true;
+
+ /* Not hardwall and node outside mems_allowed: scan up cpusets */
+- spin_lock_irqsave(&callback_lock, flags);
++ raw_spin_lock_irqsave(&callback_lock, flags);
+
+ rcu_read_lock();
+ cs = nearest_hardwall_ancestor(task_cs(current));
+ allowed = node_isset(node, cs->mems_allowed);
+ rcu_read_unlock();
+
+- spin_unlock_irqrestore(&callback_lock, flags);
++ raw_spin_unlock_irqrestore(&callback_lock, flags);
+ return allowed;
+ }
+
+--
+2.19.1
+
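+The conversion above follows a general PREEMPT_RT rule: a lock that may be
+taken while preemption or interrupts are disabled must be a raw_spinlock_t,
+because spinlock_t turns into a sleeping lock on RT. A schematic sketch of
+the pattern, with invented names:
+
+  static DEFINE_RAW_SPINLOCK(example_lock);
+
+  static void example_update(unsigned long new_val, unsigned long *shared)
+  {
+          unsigned long flags;
+
+          /* Never sleeps, so keep the critical section short and bounded. */
+          raw_spin_lock_irqsave(&example_lock, flags);
+          *shared = new_val;
+          raw_spin_unlock_irqrestore(&example_lock, flags);
+  }
+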
diff --git a/features/rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/features/rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
new file mode 100644
index 00000000..68cc1e5a
--- /dev/null
+++ b/features/rt/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
@@ -0,0 +1,84 @@
+From bdce470739405260aab0f96686cd9b15cecde177 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Jul 2018 18:52:00 +0200
+Subject: [PATCH 146/191] crypto: cryptd - add a lock instead
+ preempt_disable/local_bh_disable
+
+cryptd has a per-CPU lock which is protected with local_bh_disable() and
+preempt_disable().
+Add an explicit spin_lock to make the locking context more obvious and
+visible to lockdep. Since it is a per-CPU lock, there should be no lock
+contention on the actual spinlock.
+There is a small race window where we could be migrated to another CPU
+after the cpu_queue has been obtained. This is not a problem because the
+actual resource is protected by the spinlock.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ crypto/cryptd.c | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index a1bea0f4baa8..5f8ca8c1f59c 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -36,6 +36,7 @@ static struct workqueue_struct *cryptd_wq;
+ struct cryptd_cpu_queue {
+ struct crypto_queue queue;
+ struct work_struct work;
++ spinlock_t qlock;
+ };
+
+ struct cryptd_queue {
+@@ -105,6 +106,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+ INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
++ spin_lock_init(&cpu_queue->qlock);
+ }
+ pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
+ return 0;
+@@ -129,8 +131,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ struct cryptd_cpu_queue *cpu_queue;
+ refcount_t *refcnt;
+
+- cpu = get_cpu();
+- cpu_queue = this_cpu_ptr(queue->cpu_queue);
++ cpu_queue = raw_cpu_ptr(queue->cpu_queue);
++ spin_lock_bh(&cpu_queue->qlock);
++ cpu = smp_processor_id();
++
+ err = crypto_enqueue_request(&cpu_queue->queue, request);
+
+ refcnt = crypto_tfm_ctx(request->tfm);
+@@ -146,7 +150,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ refcount_inc(refcnt);
+
+ out_put_cpu:
+- put_cpu();
++ spin_unlock_bh(&cpu_queue->qlock);
+
+ return err;
+ }
+@@ -162,16 +166,11 @@ static void cryptd_queue_worker(struct work_struct *work)
+ cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+ /*
+ * Only handle one request at a time to avoid hogging crypto workqueue.
+- * preempt_disable/enable is used to prevent being preempted by
+- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
+- * cryptd_enqueue_request() being accessed from software interrupts.
+ */
+- local_bh_disable();
+- preempt_disable();
++ spin_lock_bh(&cpu_queue->qlock);
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+- preempt_enable();
+- local_bh_enable();
++ spin_unlock_bh(&cpu_queue->qlock);
+
+ if (!req)
+ return;
+--
+2.19.1
+
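+The locking scheme above - a per-CPU queue whose data is protected by its
+own spinlock rather than by staying on one CPU - can be sketched in
+isolation as follows; all names are illustrative and the lock and list are
+assumed to be initialised at setup time:
+
+  struct example_cpu_queue {
+          struct list_head        items;
+          spinlock_t              lock;
+  };
+
+  static DEFINE_PER_CPU(struct example_cpu_queue, example_queues);
+
+  static void example_enqueue(struct list_head *item)
+  {
+          struct example_cpu_queue *q = raw_cpu_ptr(&example_queues);
+
+          /* Being migrated after raw_cpu_ptr() is harmless: the queue is
+           * protected by q->lock, not by running on a particular CPU.
+           */
+          spin_lock_bh(&q->lock);
+          list_add_tail(item, &q->items);
+          spin_unlock_bh(&q->lock);
+  }
+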
diff --git a/features/rt/crypto-limit-more-FPU-enabled-sections.patch b/features/rt/crypto-limit-more-FPU-enabled-sections.patch
new file mode 100644
index 00000000..b807de7f
--- /dev/null
+++ b/features/rt/crypto-limit-more-FPU-enabled-sections.patch
@@ -0,0 +1,73 @@
+From 7773b9611cf2e303a1744ba642f49ba967c24ad0 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 30 Nov 2017 13:40:10 +0100
+Subject: [PATCH 145/191] crypto: limit more FPU-enabled sections
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Those crypto drivers use SSE/AVX/… for their crypto work and in order to
+do so in the kernel they need to enable the "FPU" in kernel mode, which
+disables preemption.
+There are two problems with the way they are used:
+- the while loop which processes X bytes may create latency spikes and
+ should be avoided or limited.
+- the cipher-walk-next part may allocate/free memory and may use
+ kmap_atomic().
+
+The whole kernel_fpu_begin()/end() processing probably isn't that cheap.
+It most likely makes sense to process as much of those as possible in one
+go. The new *_fpu_sched_rt() schedules only if an RT task is pending.
+
+We should probably measure the performance of those ciphers in pure SW
+mode and with these optimisations to see if it makes sense to keep them
+for RT.
+
+This kernel_fpu_resched() makes the code more preemptible which might hurt
+performance.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/include/asm/fpu/api.h | 1 +
+ arch/x86/kernel/fpu/core.c | 12 ++++++++++++
+ 2 files changed, 13 insertions(+)
+
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index ed33a14188f6..716727dfd1af 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -28,6 +28,7 @@ extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
+ extern void kernel_fpu_end(void);
+ extern bool irq_fpu_usable(void);
+ extern void fpregs_mark_activate(void);
++extern void kernel_fpu_resched(void);
+
+ /* Code that is unaware of kernel_fpu_begin_mask() can use this */
+ static inline void kernel_fpu_begin(void)
+diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
+index 571220ac8bea..d315d45b64fa 100644
+--- a/arch/x86/kernel/fpu/core.c
++++ b/arch/x86/kernel/fpu/core.c
+@@ -159,6 +159,18 @@ void kernel_fpu_end(void)
+ }
+ EXPORT_SYMBOL_GPL(kernel_fpu_end);
+
++void kernel_fpu_resched(void)
++{
++ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
++
++ if (should_resched(PREEMPT_OFFSET)) {
++ kernel_fpu_end();
++ cond_resched();
++ kernel_fpu_begin();
++ }
++}
++EXPORT_SYMBOL_GPL(kernel_fpu_resched);
++
+ /*
+ * Save the FPU state (mark it for reload if necessary):
+ *
+--
+2.19.1
+
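+A sketch of the intended call pattern for the new kernel_fpu_resched()
+helper inside a long SIMD loop; example_process_chunk() is an assumed
+SIMD routine, not a real kernel function:
+
+  static void example_cipher(const u8 *src, u8 *dst, unsigned int len)
+  {
+          kernel_fpu_begin();
+          while (len) {
+                  unsigned int chunk = min(len, 4096U);
+
+                  example_process_chunk(src, dst, chunk);
+                  src += chunk;
+                  dst += chunk;
+                  len -= chunk;
+
+                  /* Briefly leave the FPU section if a reschedule is due. */
+                  kernel_fpu_resched();
+          }
+          kernel_fpu_end();
+  }
+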
diff --git a/features/rt/debugobjects-Make-RT-aware.patch b/features/rt/debugobjects-Make-RT-aware.patch
new file mode 100644
index 00000000..59a4e281
--- /dev/null
+++ b/features/rt/debugobjects-Make-RT-aware.patch
@@ -0,0 +1,31 @@
+From e4cdd4565da1b21cf7b71738d85a794dab064aa5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 21:41:35 +0200
+Subject: [PATCH 140/191] debugobjects: Make RT aware
+
+Avoid filling the pool / allocating memory with irqs off.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ lib/debugobjects.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/lib/debugobjects.c b/lib/debugobjects.c
+index 9e14ae02306b..083882a3cf2f 100644
+--- a/lib/debugobjects.c
++++ b/lib/debugobjects.c
+@@ -557,7 +557,10 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
+ struct debug_obj *obj;
+ unsigned long flags;
+
+- fill_pool();
++#ifdef CONFIG_PREEMPT_RT
++ if (preempt_count() == 0 && !irqs_disabled())
++#endif
++ fill_pool();
+
+ db = get_bucket((unsigned long) addr);
+
+--
+2.19.1
+
diff --git a/features/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/features/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
new file mode 100644
index 00000000..4ed6aa46
--- /dev/null
+++ b/features/rt/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -0,0 +1,92 @@
+From 66e6d402c94d47b29c4a7f3f5872fee8d2be6ebb Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Thu, 31 Mar 2016 04:08:28 +0200
+Subject: [PATCH 186/191] drivers/block/zram: Replace bit spinlocks with
+ rtmutex for -rt
+
+They're nondeterministic, and lead to ___might_sleep() splats in -rt.
+OTOH, they're a lot less wasteful than an rtmutex per page.
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/block/zram/zram_drv.c | 36 +++++++++++++++++++++++++++++++++++
+ drivers/block/zram/zram_drv.h | 1 +
+ 2 files changed, 37 insertions(+)
+
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index cf8deecc39ef..5c7999ebc4e0 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -59,6 +59,40 @@ static void zram_free_page(struct zram *zram, size_t index);
+ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+ u32 index, int offset, struct bio *bio);
+
++#ifdef CONFIG_PREEMPT_RT
++static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages)
++{
++ size_t index;
++
++ for (index = 0; index < num_pages; index++)
++ spin_lock_init(&zram->table[index].lock);
++}
++
++static int zram_slot_trylock(struct zram *zram, u32 index)
++{
++ int ret;
++
++ ret = spin_trylock(&zram->table[index].lock);
++ if (ret)
++ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
++ return ret;
++}
++
++static void zram_slot_lock(struct zram *zram, u32 index)
++{
++ spin_lock(&zram->table[index].lock);
++ __set_bit(ZRAM_LOCK, &zram->table[index].flags);
++}
++
++static void zram_slot_unlock(struct zram *zram, u32 index)
++{
++ __clear_bit(ZRAM_LOCK, &zram->table[index].flags);
++ spin_unlock(&zram->table[index].lock);
++}
++
++#else
++
++static void zram_meta_init_table_locks(struct zram *zram, size_t num_pages) { }
+
+ static int zram_slot_trylock(struct zram *zram, u32 index)
+ {
+@@ -74,6 +108,7 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
+ {
+ bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
+ }
++#endif
+
+ static inline bool init_done(struct zram *zram)
+ {
+@@ -1169,6 +1204,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
+
+ if (!huge_class_size)
+ huge_class_size = zs_huge_class_size(zram->mem_pool);
++ zram_meta_init_table_locks(zram, num_pages);
+ return true;
+ }
+
+diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
+index 419a7e8281ee..561c7ba1421f 100644
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -63,6 +63,7 @@ struct zram_table_entry {
+ unsigned long element;
+ };
+ unsigned long flags;
++ spinlock_t lock;
+ #ifdef CONFIG_ZRAM_MEMORY_TRACKING
+ ktime_t ac_time;
+ #endif
+--
+2.19.1
+
diff --git a/features/rt/drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch b/features/rt/drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
new file mode 100644
index 00000000..2c1a9fa8
--- /dev/null
+++ b/features/rt/drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
@@ -0,0 +1,79 @@
+From bd74da1edb71b84b90b8f68b9a8d28b0f580b1f0 Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Sat, 27 Feb 2016 09:01:42 +0100
+Subject: [PATCH 156/191] drm/i915: Don't disable interrupts on PREEMPT_RT
+ during atomic updates
+
+Commit
+ 8d7849db3eab7 ("drm/i915: Make sprite updates atomic")
+
+started disabling interrupts across atomic updates. This breaks on PREEMPT_RT
+because within this section the code attempts to acquire spinlock_t locks which
+are sleeping locks on PREEMPT_RT.
+
+According to the comment the interrupts are disabled to avoid random delays and
+are not required for protection or synchronisation.
+
+Don't disable interrupts on PREEMPT_RT during atomic updates.
+
+[bigeasy: drop local locks, commit message]
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/display/intel_sprite.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
+index 993543334a1e..cc0dd8a1e420 100644
+--- a/drivers/gpu/drm/i915/display/intel_sprite.c
++++ b/drivers/gpu/drm/i915/display/intel_sprite.c
+@@ -127,7 +127,8 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
+ "PSR idle timed out 0x%x, atomic update may fail\n",
+ psr_status);
+
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+
+ crtc->debug.min_vbl = min;
+ crtc->debug.max_vbl = max;
+@@ -152,11 +153,13 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
+ break;
+ }
+
+- local_irq_enable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_enable();
+
+ timeout = schedule_timeout(timeout);
+
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+ }
+
+ finish_wait(wq, &wait);
+@@ -189,7 +192,8 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
+ return;
+
+ irq_disable:
+- local_irq_disable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_disable();
+ }
+
+ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
+@@ -268,7 +272,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
+ new_crtc_state->uapi.event = NULL;
+ }
+
+- local_irq_enable();
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ local_irq_enable();
+
+ /* Send VRR Push to terminate Vblank */
+ intel_vrr_send_push(new_crtc_state);
+--
+2.19.1
+
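+The guard used in the hunks above follows a generic shape: interrupts are
+only disabled to reduce jitter, so on PREEMPT_RT the update is left
+preemptible and may take sleeping spinlock_t locks. Schematically, with an
+invented helper:
+
+  static void example_atomic_update(void)
+  {
+          if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+                  local_irq_disable();
+
+          example_program_hw();           /* may acquire spinlock_t locks */
+
+          if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+                  local_irq_enable();
+  }
+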
diff --git a/features/rt/drm-i915-disable-tracing-on-RT.patch b/features/rt/drm-i915-disable-tracing-on-RT.patch
new file mode 100644
index 00000000..41898919
--- /dev/null
+++ b/features/rt/drm-i915-disable-tracing-on-RT.patch
@@ -0,0 +1,46 @@
+From 5e250a1f215a8a39bc712c70a8a6215f570e3a9c Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 6 Dec 2018 09:52:20 +0100
+Subject: [PATCH 157/191] drm/i915: disable tracing on -RT
+
+Luca Abeni reported this:
+| BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003
+| CPU: 1 PID: 15203 Comm: kworker/u8:2 Not tainted 4.19.1-rt3 #10
+| Call Trace:
+| rt_spin_lock+0x3f/0x50
+| gen6_read32+0x45/0x1d0 [i915]
+| g4x_get_vblank_counter+0x36/0x40 [i915]
+| trace_event_raw_event_i915_pipe_update_start+0x7d/0xf0 [i915]
+
+The tracing events, trace_i915_pipe_update_start() among others,
+use functions which acquire spin locks. A few trace points use
+intel_get_crtc_scanline(), others use ->get_vblank_counter() which also
+might acquire a sleeping lock.
+
+Based on this I don't see any other way than to disable trace points on RT.
+
+Cc: stable-rt@vger.kernel.org
+Reported-by: Luca Abeni <lucabe72@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_trace.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
+index a4addcc64978..0ba5a0a0fd25 100644
+--- a/drivers/gpu/drm/i915/i915_trace.h
++++ b/drivers/gpu/drm/i915/i915_trace.h
+@@ -2,6 +2,10 @@
+ #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+ #define _I915_TRACE_H_
+
++#ifdef CONFIG_PREEMPT_RT
++#define NOTRACE
++#endif
++
+ #include <linux/stringify.h>
+ #include <linux/types.h>
+ #include <linux/tracepoint.h>
+--
+2.19.1
+
diff --git a/features/rt/drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch b/features/rt/drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch
new file mode 100644
index 00000000..e8de6823
--- /dev/null
+++ b/features/rt/drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch
@@ -0,0 +1,51 @@
+From 0dfb9c223f7310e28f5a5384a96aa99b563e219b Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 7 Jul 2020 12:25:11 +0200
+Subject: [PATCH 159/191] drm/i915/gt: Only disable interrupts for the timeline
+ lock on !force-threaded
+
+According to commit
+ d67739268cf0e ("drm/i915/gt: Mark up the nested engine-pm timeline lock as irqsafe")
+
+the interrupts are disabled because the code may be called from an
+interrupt handler and from preemptible context.
+With `force_irqthreads' set the timeline mutex is never observed in IRQ
+context so there is no need to disable interrupts.
+
+Only disable interrupts if not in `force_irqthreads' mode.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/gt/intel_engine_pm.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+index e67d09259dd0..2a480b47dac2 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
++++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+@@ -81,9 +81,10 @@ static int __engine_unpark(struct intel_wakeref *wf)
+
+ static unsigned long __timeline_mark_lock(struct intel_context *ce)
+ {
+- unsigned long flags;
++ unsigned long flags = 0;
+
+- local_irq_save(flags);
++ if (!force_irqthreads)
++ local_irq_save(flags);
+ mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
+
+ return flags;
+@@ -93,7 +94,8 @@ static void __timeline_mark_unlock(struct intel_context *ce,
+ unsigned long flags)
+ {
+ mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_);
+- local_irq_restore(flags);
++ if (!force_irqthreads)
++ local_irq_restore(flags);
+ }
+
+ #else
+--
+2.19.1
+
diff --git a/features/rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch b/features/rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
new file mode 100644
index 00000000..16b96b72
--- /dev/null
+++ b/features/rt/drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
@@ -0,0 +1,32 @@
+From 90efb159f7ef762ff40fc78ca4a4aad63dcb530c Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 19 Dec 2018 10:47:02 +0100
+Subject: [PATCH 158/191] drm/i915: skip DRM_I915_LOW_LEVEL_TRACEPOINTS with
+ NOTRACE
+
+The order of the header files is important. If this header file is
+included after tracepoint.h was included then the NOTRACE here becomes a
+nop. Currently this happens for two .c files which use the tracepoints
+behind DRM_I915_LOW_LEVEL_TRACEPOINTS.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_trace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
+index 0ba5a0a0fd25..396b6598694d 100644
+--- a/drivers/gpu/drm/i915/i915_trace.h
++++ b/drivers/gpu/drm/i915/i915_trace.h
+@@ -782,7 +782,7 @@ DEFINE_EVENT(i915_request, i915_request_add,
+ TP_ARGS(rq)
+ );
+
+-#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
++#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) && !defined(NOTRACE)
+ DEFINE_EVENT(i915_request, i915_request_submit,
+ TP_PROTO(struct i915_request *rq),
+ TP_ARGS(rq)
+--
+2.19.1
+
diff --git a/features/rt/drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch b/features/rt/drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch
new file mode 100644
index 00000000..5535ee9e
--- /dev/null
+++ b/features/rt/drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch
@@ -0,0 +1,60 @@
+From bcd7c523e0e70fe91424916891e64bc520ea1262 Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Sat, 27 Feb 2016 08:09:11 +0100
+Subject: [PATCH 155/191] drm,radeon,i915: Use preempt_disable/enable_rt()
+ where recommended
+
+DRM folks identified the spots, so use them.
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: linux-rt-users <linux-rt-users@vger.kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_irq.c | 2 ++
+ drivers/gpu/drm/radeon/radeon_display.c | 2 ++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 1a701367a718..32db99bead7d 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -888,6 +888,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_disable_rt();
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+@@ -952,6 +953,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_enable_rt();
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 652af7a134bd..a2f5a4c0134f 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1813,6 +1813,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_disable_rt();
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+@@ -1905,6 +1906,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_enable_rt();
+
+ /* Decode into vertical and horizontal scanout position. */
+ *vpos = position & 0x1fff;
+--
+2.19.1
+
diff --git a/features/rt/efi-Allow-efi-runtime.patch b/features/rt/efi-Allow-efi-runtime.patch
new file mode 100644
index 00000000..84be1d7e
--- /dev/null
+++ b/features/rt/efi-Allow-efi-runtime.patch
@@ -0,0 +1,31 @@
+From 2b3184121c862ea3b57dc901f864eed390eed7c8 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Jul 2018 15:06:10 +0200
+Subject: [PATCH 098/191] efi: Allow efi=runtime
+
+In case the command line option "efi=noruntime" is the default at build time,
+the user can override it with `efi=runtime' and allow runtime services again.
+
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/firmware/efi/efi.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 1819da1c9fec..709c65c0a816 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -97,6 +97,9 @@ static int __init parse_efi_cmdline(char *str)
+ if (parse_option_str(str, "noruntime"))
+ disable_runtime = true;
+
++ if (parse_option_str(str, "runtime"))
++ disable_runtime = false;
++
+ if (parse_option_str(str, "nosoftreserve"))
+ set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
+
+--
+2.19.1
+
diff --git a/features/rt/efi-Disable-runtime-services-on-RT.patch b/features/rt/efi-Disable-runtime-services-on-RT.patch
new file mode 100644
index 00000000..fbc4df1d
--- /dev/null
+++ b/features/rt/efi-Disable-runtime-services-on-RT.patch
@@ -0,0 +1,45 @@
+From 8ff328d63beab0f17efde608bcaa09ff553a8e94 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Jul 2018 15:03:16 +0200
+Subject: [PATCH 097/191] efi: Disable runtime services on RT
+
+Based on measurements the EFI functions get_variable /
+get_next_variable take up to 2us which looks okay.
+The functions get_time, set_time take around 10ms. Those 10ms are too
+much. Even one ms would be too much.
+Ard mentioned that SetVariable might even trigger larger latencies if
+the firmware will erase flash blocks on NOR.
+
+The time-functions are used by efi-rtc and can be triggered during
+runtime (either via explicit read/write or ntp sync).
+
+The variable write could be used by pstore.
+These functions can be disabled without much of a loss. The poweroff /
+reboot hooks may be provided by PSCI.
+
+Disable EFI's runtime wrappers.
+
+This was observed on "EFI v2.60 by SoftIron Overdrive 1000".
+
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/firmware/efi/efi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index df3f9bcab581..1819da1c9fec 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -66,7 +66,7 @@ struct mm_struct efi_mm = {
+
+ struct workqueue_struct *efi_rts_wq;
+
+-static bool disable_runtime;
++static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT);
+ static int __init setup_noefi(char *arg)
+ {
+ disable_runtime = true;
+--
+2.19.1
+
diff --git a/features/rt/firewire-ohci-Use-tasklet_disable_in_atomic-where-re.patch b/features/rt/firewire-ohci-Use-tasklet_disable_in_atomic-where-re.patch
new file mode 100644
index 00000000..60edded7
--- /dev/null
+++ b/features/rt/firewire-ohci-Use-tasklet_disable_in_atomic-where-re.patch
@@ -0,0 +1,60 @@
+From 784277e7d29b56d2fc367068d909e1219c082315 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:16 +0100
+Subject: [PATCH 053/191] firewire: ohci: Use tasklet_disable_in_atomic() where
+ required
+
+tasklet_disable() is invoked in several places. Some of them are in atomic
+context which prevents a conversion of tasklet_disable() to a sleepable
+function.
+
+The atomic callchains are:
+
+ ar_context_tasklet()
+ ohci_cancel_packet()
+ tasklet_disable()
+
+ ...
+ ohci_flush_iso_completions()
+ tasklet_disable()
+
+The invocation of tasklet_disable() from at_context_flush() is always in
+preemptible context.
+
+Use tasklet_disable_in_atomic() for the two invocations in
+ohci_cancel_packet() and ohci_flush_iso_completions().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Stefan Richter <stefanr@s5r6.in-berlin.de>
+Cc: linux1394-devel@lists.sourceforge.net
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/firewire/ohci.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index 9811c40956e5..17c9d825188b 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -2545,7 +2545,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
+ struct driver_data *driver_data = packet->driver_data;
+ int ret = -ENOENT;
+
+- tasklet_disable(&ctx->tasklet);
++ tasklet_disable_in_atomic(&ctx->tasklet);
+
+ if (packet->ack != 0)
+ goto out;
+@@ -3465,7 +3465,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ int ret = 0;
+
+- tasklet_disable(&ctx->context.tasklet);
++ tasklet_disable_in_atomic(&ctx->context.tasklet);
+
+ if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
+ context_tasklet((unsigned long)&ctx->context);
+--
+2.19.1
+
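+For callers which are themselves atomic, the pattern above is simply to use
+the non-sleeping variant of the disable helper. A sketch with invented
+names:
+
+  static void example_cancel(struct example_ctx *ctx)
+  {
+          /* Usable from atomic context; plain tasklet_disable() may sleep. */
+          tasklet_disable_in_atomic(&ctx->tasklet);
+          example_do_cancel(ctx);
+          tasklet_enable(&ctx->tasklet);
+  }
+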
diff --git a/features/rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/features/rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
new file mode 100644
index 00000000..0e3c58f2
--- /dev/null
+++ b/features/rt/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
@@ -0,0 +1,98 @@
+From 3280c5c16fc06b062dce646186bf6d9449b03a5e Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 20 Oct 2017 11:29:53 +0200
+Subject: [PATCH 090/191] fs/dcache: disable preemption on i_dir_seq's write
+ side
+
+i_dir_seq is an open-coded sequence counter. Based on the code it looks like we
+could have two writers in parallel despite the fact that the d_lock is
+held. The problem is that during the write process on RT the preemption
+is still enabled and if this process is interrupted by a reader with RT
+priority then we lock up.
+To avoid that lockup I am disabling preemption during the update.
+The rename of i_dir_seq is here to ensure that new write sides are
+caught in the future.
+
+Cc: stable-rt@vger.kernel.org
+Reported-by: Oleg.Karfich@wago.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/dcache.c | 12 +++++++-----
+ fs/inode.c | 2 +-
+ include/linux/fs.h | 2 +-
+ 3 files changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 2c36711a222e..cd6405fd483a 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2537,9 +2537,10 @@ EXPORT_SYMBOL(d_rehash);
+ static inline unsigned start_dir_add(struct inode *dir)
+ {
+
++ preempt_disable_rt();
+ for (;;) {
+- unsigned n = dir->i_dir_seq;
+- if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
++ unsigned n = dir->__i_dir_seq;
++ if (!(n & 1) && cmpxchg(&dir->__i_dir_seq, n, n + 1) == n)
+ return n;
+ cpu_relax();
+ }
+@@ -2547,7 +2548,8 @@ static inline unsigned start_dir_add(struct inode *dir)
+
+ static inline void end_dir_add(struct inode *dir, unsigned n)
+ {
+- smp_store_release(&dir->i_dir_seq, n + 2);
++ smp_store_release(&dir->__i_dir_seq, n + 2);
++ preempt_enable_rt();
+ }
+
+ static void d_wait_lookup(struct dentry *dentry)
+@@ -2583,7 +2585,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
+
+ retry:
+ rcu_read_lock();
+- seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
++ seq = smp_load_acquire(&parent->d_inode->__i_dir_seq);
+ r_seq = read_seqbegin(&rename_lock);
+ dentry = __d_lookup_rcu(parent, name, &d_seq);
+ if (unlikely(dentry)) {
+@@ -2611,7 +2613,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
+ }
+
+ hlist_bl_lock(b);
+- if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
++ if (unlikely(READ_ONCE(parent->d_inode->__i_dir_seq) != seq)) {
+ hlist_bl_unlock(b);
+ rcu_read_unlock();
+ goto retry;
+diff --git a/fs/inode.c b/fs/inode.c
+index 0672530acf7d..70a1230062e3 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -158,7 +158,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
+ inode->i_pipe = NULL;
+ inode->i_cdev = NULL;
+ inode->i_link = NULL;
+- inode->i_dir_seq = 0;
++ inode->__i_dir_seq = 0;
+ inode->i_rdev = 0;
+ inode->dirtied_when = 0;
+
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 4fa4b24535fd..9ac35f563f6e 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -700,7 +700,7 @@ struct inode {
+ struct pipe_inode_info *i_pipe;
+ struct cdev *i_cdev;
+ char *i_link;
+- unsigned i_dir_seq;
++ unsigned __i_dir_seq;
+ };
+
+ __u32 i_generation;
+--
+2.19.1
+
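+The write side above pairs with a lockless, retrying read side. A generic
+sketch of that read pattern; example_lookup() is an assumed lockless
+(RCU-protected) lookup, not the real dcache code:
+
+  static unsigned int example_read(struct inode *dir)
+  {
+          unsigned int seq, val;
+
+          for (;;) {
+                  seq = smp_load_acquire(&dir->__i_dir_seq);
+                  if (seq & 1) {                  /* writer in progress */
+                          cpu_relax();
+                          continue;
+                  }
+                  val = example_lookup(dir);
+                  if (READ_ONCE(dir->__i_dir_seq) == seq)
+                          return val;             /* sequence unchanged */
+          }
+  }
+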
diff --git a/features/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/features/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
new file mode 100644
index 00000000..281405e5
--- /dev/null
+++ b/features/rt/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -0,0 +1,262 @@
+From 36dc3d1f121e7e2b2d5d102de86220b6eb1d827d Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Sep 2016 14:35:49 +0200
+Subject: [PATCH 089/191] fs/dcache: use swait_queue instead of waitqueue
+
+__d_lookup_done() invokes wake_up_all() while holding a hlist_bl_lock()
+which disables preemption. As a workaround convert it to swait.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/afs/dir_silly.c | 2 +-
+ fs/cifs/readdir.c | 2 +-
+ fs/dcache.c | 27 +++++++++++++++------------
+ fs/fuse/readdir.c | 2 +-
+ fs/namei.c | 4 ++--
+ fs/nfs/dir.c | 4 ++--
+ fs/nfs/unlink.c | 4 ++--
+ fs/proc/base.c | 3 ++-
+ fs/proc/proc_sysctl.c | 2 +-
+ include/linux/dcache.h | 4 ++--
+ include/linux/nfs_xdr.h | 2 +-
+ kernel/sched/swait.c | 1 +
+ 12 files changed, 31 insertions(+), 26 deletions(-)
+
+diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
+index 04f75a44f243..60cbce1995a5 100644
+--- a/fs/afs/dir_silly.c
++++ b/fs/afs/dir_silly.c
+@@ -236,7 +236,7 @@ int afs_silly_iput(struct dentry *dentry, struct inode *inode)
+ struct dentry *alias;
+ int ret;
+
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ _enter("%p{%pd},%llx", dentry, dentry, vnode->fid.vnode);
+
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index 80bf4c6f4c7b..a975cfed14f5 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -82,7 +82,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
+ struct inode *inode;
+ struct super_block *sb = parent->d_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 00e97c9ae7c4..2c36711a222e 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2552,21 +2552,24 @@ static inline void end_dir_add(struct inode *dir, unsigned n)
+
+ static void d_wait_lookup(struct dentry *dentry)
+ {
+- if (d_in_lookup(dentry)) {
+- DECLARE_WAITQUEUE(wait, current);
+- add_wait_queue(dentry->d_wait, &wait);
+- do {
+- set_current_state(TASK_UNINTERRUPTIBLE);
+- spin_unlock(&dentry->d_lock);
+- schedule();
+- spin_lock(&dentry->d_lock);
+- } while (d_in_lookup(dentry));
+- }
++ struct swait_queue __wait;
++
++ if (!d_in_lookup(dentry))
++ return;
++
++ INIT_LIST_HEAD(&__wait.task_list);
++ do {
++ prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
++ spin_unlock(&dentry->d_lock);
++ schedule();
++ spin_lock(&dentry->d_lock);
++ } while (d_in_lookup(dentry));
++ finish_swait(dentry->d_wait, &__wait);
+ }
+
+ struct dentry *d_alloc_parallel(struct dentry *parent,
+ const struct qstr *name,
+- wait_queue_head_t *wq)
++ struct swait_queue_head *wq)
+ {
+ unsigned int hash = name->hash;
+ struct hlist_bl_head *b = in_lookup_hash(parent, hash);
+@@ -2681,7 +2684,7 @@ void __d_lookup_done(struct dentry *dentry)
+ hlist_bl_lock(b);
+ dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
+ __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
+- wake_up_all(dentry->d_wait);
++ swake_up_all(dentry->d_wait);
+ dentry->d_wait = NULL;
+ hlist_bl_unlock(b);
+ INIT_HLIST_NODE(&dentry->d_u.d_alias);
+diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
+index 3441ffa740f3..2fcae5cfd272 100644
+--- a/fs/fuse/readdir.c
++++ b/fs/fuse/readdir.c
+@@ -158,7 +158,7 @@ static int fuse_direntplus_link(struct file *file,
+ struct inode *dir = d_inode(parent);
+ struct fuse_conn *fc;
+ struct inode *inode;
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ if (!o->nodeid) {
+ /*
+diff --git a/fs/namei.c b/fs/namei.c
+index 216f16e74351..06ffd9490a34 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1602,7 +1602,7 @@ static struct dentry *__lookup_slow(const struct qstr *name,
+ {
+ struct dentry *dentry, *old;
+ struct inode *inode = dir->d_inode;
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ /* Don't go there if it's already dead */
+ if (unlikely(IS_DEADDIR(inode)))
+@@ -3131,7 +3131,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file,
+ struct dentry *dentry;
+ int error, create_error = 0;
+ umode_t mode = op->mode;
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+
+ if (unlikely(IS_DEADDIR(dir_inode)))
+ return ERR_PTR(-ENOENT);
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index fc4f490f2d78..8f00a87b7ae2 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -636,7 +636,7 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry,
+ unsigned long dir_verifier)
+ {
+ struct qstr filename = QSTR_INIT(entry->name, entry->len);
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ struct dentry *dentry;
+ struct dentry *alias;
+ struct inode *inode;
+@@ -1868,7 +1868,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned open_flags,
+ umode_t mode)
+ {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ struct nfs_open_context *ctx;
+ struct dentry *res;
+ struct iattr attr = { .ia_valid = ATTR_OPEN };
+diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
+index 5fa11e1aca4c..984f26eb888c 100644
+--- a/fs/nfs/unlink.c
++++ b/fs/nfs/unlink.c
+@@ -13,7 +13,7 @@
+ #include <linux/sunrpc/clnt.h>
+ #include <linux/nfs_fs.h>
+ #include <linux/sched.h>
+-#include <linux/wait.h>
++#include <linux/swait.h>
+ #include <linux/namei.h>
+ #include <linux/fsnotify.h>
+
+@@ -180,7 +180,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
+
+ data->cred = get_current_cred();
+ data->res.dir_attr = &data->dir_attr;
+- init_waitqueue_head(&data->wq);
++ init_swait_queue_head(&data->wq);
+
+ status = -EBUSY;
+ spin_lock(&dentry->d_lock);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index d632ddd5f5ee..4ea6111fae71 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -95,6 +95,7 @@
+ #include <linux/posix-timers.h>
+ #include <linux/time_namespace.h>
+ #include <linux/resctrl.h>
++#include <linux/swait.h>
+ #include <trace/events/oom.h>
+ #include "internal.h"
+ #include "fd.h"
+@@ -2037,7 +2038,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
+
+ child = d_hash_and_lookup(dir, &qname);
+ if (!child) {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ child = d_alloc_parallel(dir, &qname, &wq);
+ if (IS_ERR(child))
+ goto end_instantiate;
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 984e42f8cb11..2bc8e2409308 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -683,7 +683,7 @@ static bool proc_sys_fill_cache(struct file *file,
+
+ child = d_lookup(dir, &qname);
+ if (!child) {
+- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
++ DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq);
+ child = d_alloc_parallel(dir, &qname, &wq);
+ if (IS_ERR(child))
+ return false;
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index c1e48014106f..f2dc84edbcf1 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -107,7 +107,7 @@ struct dentry {
+
+ union {
+ struct list_head d_lru; /* LRU list */
+- wait_queue_head_t *d_wait; /* in-lookup ones only */
++ struct swait_queue_head *d_wait; /* in-lookup ones only */
+ };
+ struct list_head d_child; /* child of parent list */
+ struct list_head d_subdirs; /* our children */
+@@ -239,7 +239,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
+ extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
+ extern struct dentry * d_alloc_anon(struct super_block *);
+ extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
+- wait_queue_head_t *);
++ struct swait_queue_head *);
+ extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
+ extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
+ extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 0d62b8db4d24..0278c359e3ef 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1684,7 +1684,7 @@ struct nfs_unlinkdata {
+ struct nfs_removeargs args;
+ struct nfs_removeres res;
+ struct dentry *dentry;
+- wait_queue_head_t wq;
++ struct swait_queue_head wq;
+ const struct cred *cred;
+ struct nfs_fattr dir_attr;
+ long timeout;
+diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
+index e1c655f928c7..f230b1ac7f91 100644
+--- a/kernel/sched/swait.c
++++ b/kernel/sched/swait.c
+@@ -64,6 +64,7 @@ void swake_up_all(struct swait_queue_head *q)
+ struct swait_queue *curr;
+ LIST_HEAD(tmp);
+
++ WARN_ON(irqs_disabled());
+ raw_spin_lock_irq(&q->lock);
+ list_splice_init(&q->task_list, &tmp);
+ while (!list_empty(&tmp)) {
+--
+2.19.1
+
diff --git a/features/rt/fs-namespace-Use-cpu_chill-in-trylock-loops.patch b/features/rt/fs-namespace-Use-cpu_chill-in-trylock-loops.patch
new file mode 100644
index 00000000..a4536630
--- /dev/null
+++ b/features/rt/fs-namespace-Use-cpu_chill-in-trylock-loops.patch
@@ -0,0 +1,43 @@
+From 411705d51a89aa7c844f7cae5379a406d720f619 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 7 Mar 2012 21:00:34 +0100
+Subject: [PATCH 139/191] fs: namespace: Use cpu_chill() in trylock loops
+
+Retry loops on RT might loop forever when the modifying side was
+preempted. Use cpu_chill() instead of cpu_relax() to let the system
+make progress.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ fs/namespace.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 74957d5edaa8..a02b35e791a3 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -14,6 +14,7 @@
+ #include <linux/mnt_namespace.h>
+ #include <linux/user_namespace.h>
+ #include <linux/namei.h>
++#include <linux/hrtimer.h>
+ #include <linux/security.h>
+ #include <linux/cred.h>
+ #include <linux/idr.h>
+@@ -342,8 +343,11 @@ int __mnt_want_write(struct vfsmount *m)
+ * incremented count after it has set MNT_WRITE_HOLD.
+ */
+ smp_mb();
+- while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
+- cpu_relax();
++ while (READ_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
++ preempt_enable();
++ cpu_chill();
++ preempt_disable();
++ }
+ /*
+ * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
+ * be set to match its requirements. So we must not load that until
+--
+2.19.1
+
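+The loop above is one instance of a general RT pattern: a busy wait on
+state owned by a preemptible writer must let that writer run. A sketch of
+the generic shape, assuming the caller runs with preemption disabled and
+EXAMPLE_HOLD is an invented flag:
+
+  static void example_wait_for_release(struct example_obj *obj)
+  {
+          while (READ_ONCE(obj->flags) & EXAMPLE_HOLD) {
+                  preempt_enable();
+                  cpu_chill();    /* short sleep on RT instead of spinning */
+                  preempt_disable();
+          }
+  }
+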
diff --git a/features/rt/futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch b/features/rt/futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
new file mode 100644
index 00000000..1e9a9e63
--- /dev/null
+++ b/features/rt/futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
@@ -0,0 +1,117 @@
+From 1301c0809848cdcd4a0fae002d13707fa3e3db5d Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Tue, 14 Jul 2015 14:26:34 +0200
+Subject: [PATCH 066/191] futex: Fix bug on when a requeued RT task times out
+
+Requeue with timeout causes a bug with PREEMPT_RT.
+
+The bug comes from a timed out condition.
+
+ TASK 1 TASK 2
+ ------ ------
+ futex_wait_requeue_pi()
+ futex_wait_queue_me()
+ <timed out>
+
+ double_lock_hb();
+
+ raw_spin_lock(pi_lock);
+ if (current->pi_blocked_on) {
+ } else {
+ current->pi_blocked_on = PI_WAKE_INPROGRESS;
+ run_spin_unlock(pi_lock);
+ spin_lock(hb->lock); <-- blocked!
+
+ plist_for_each_entry_safe(this) {
+ rt_mutex_start_proxy_lock();
+ task_blocks_on_rt_mutex();
+ BUG_ON(task->pi_blocked_on)!!!!
+
+The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the
+problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to
+grab the hb->lock, which it fails to do. As the hb->lock is a mutex,
+it will block and set the "pi_blocked_on" to the hb->lock.
+
+When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGRESS fails
+because task1's pi_blocked_on is no longer set to that, but instead,
+set to the hb->lock.
+
+The fix:
+
+When calling rt_mutex_start_proxy_lock() a check is made to see
+if the proxy task's pi_blocked_on is set. If so, exit out early.
+Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies
+the proxy task that it is being requeued, and will handle things
+appropriately.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 31 ++++++++++++++++++++++++++++++-
+ kernel/locking/rtmutex_common.h | 1 +
+ 2 files changed, 31 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 7fff3b88b96b..4d2a57e8dcb9 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -138,7 +138,8 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+
+ static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+ {
+- return waiter && waiter != PI_WAKEUP_INPROGRESS;
++ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
++ waiter != PI_REQUEUE_INPROGRESS;
+ }
+
+ /*
+@@ -1707,6 +1708,34 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ if (try_to_take_rt_mutex(lock, task, NULL))
+ return 1;
+
++#ifdef CONFIG_PREEMPT_RT
++ /*
++ * In PREEMPT_RT there's an added race.
++ * If the task, that we are about to requeue, times out,
++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
++ * to skip this task. But right after the task sets
++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
++ * This will replace the PI_WAKEUP_INPROGRESS with the actual
++ * lock that it blocks on. We *must not* place this task
++ * on this proxy lock in that case.
++ *
++ * To prevent this race, we first take the task's pi_lock
++ * and check if it has updated its pi_blocked_on. If it has,
++ * we assume that it woke up and we return -EAGAIN.
++ * Otherwise, we set the task's pi_blocked_on to
++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
++ * it will know that we are in the process of requeuing it.
++ */
++ raw_spin_lock(&task->pi_lock);
++ if (task->pi_blocked_on) {
++ raw_spin_unlock(&task->pi_lock);
++ return -EAGAIN;
++ }
++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
++ raw_spin_unlock(&task->pi_lock);
++#endif
++
+ /* We enforce deadlock detection for futexes */
+ ret = task_blocks_on_rt_mutex(lock, waiter, task,
+ RT_MUTEX_FULL_CHAINWALK);
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 096b16cfb096..37cd6b3bf6f4 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -126,6 +126,7 @@ enum rtmutex_chainwalk {
+ * PI-futex support (proxy locking functions, etc.):
+ */
+ #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
+
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+--
+2.19.1
+
diff --git a/features/rt/genirq-Disable-irqpoll-on-rt.patch b/features/rt/genirq-Disable-irqpoll-on-rt.patch
new file mode 100644
index 00000000..37c2cf92
--- /dev/null
+++ b/features/rt/genirq-Disable-irqpoll-on-rt.patch
@@ -0,0 +1,42 @@
+From 838f295da60b853b16a3bfbfdd0790472ce70d94 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:57 -0500
+Subject: [PATCH 189/191] genirq: Disable irqpoll on -rt
+
+Creates long latencies for no value
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/irq/spurious.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
+index f865e5f4d382..dc7311dd74b1 100644
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -443,6 +443,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
+
+ static int __init irqfixup_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT\n");
++ return 1;
++#endif
+ irqfixup = 1;
+ printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+ printk(KERN_WARNING "This may impact system performance.\n");
+@@ -455,6 +459,10 @@ module_param(irqfixup, int, 0644);
+
+ static int __init irqpoll_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT\n");
++ return 1;
++#endif
+ irqfixup = 2;
+ printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+ "enabled\n");
+--
+2.19.1
+
diff --git a/features/rt/genirq-Move-prio-assignment-into-the-newly-created-t.patch b/features/rt/genirq-Move-prio-assignment-into-the-newly-created-t.patch
new file mode 100644
index 00000000..3c62cff6
--- /dev/null
+++ b/features/rt/genirq-Move-prio-assignment-into-the-newly-created-t.patch
@@ -0,0 +1,61 @@
+From a5c8ce0345b9379be44f67042816beb0afda8176 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 9 Nov 2020 23:32:39 +0100
+Subject: [PATCH 004/191] genirq: Move prio assignment into the newly created
+ thread
+
+With threaded interrupts enabled the nouveau driver reported the
+following:
+| Chain exists of:
+| &mm->mmap_lock#2 --> &device->mutex --> &cpuset_rwsem
+|
+| Possible unsafe locking scenario:
+|
+| CPU0 CPU1
+| ---- ----
+| lock(&cpuset_rwsem);
+| lock(&device->mutex);
+| lock(&cpuset_rwsem);
+| lock(&mm->mmap_lock#2);
+
+The device->mutex is nvkm_device::mutex.
+
+Unblocking the lockchain at `cpuset_rwsem' is probably the easiest thing
+to do.
+Move the priority assignment to the start of the newly created thread.
+
+Fixes: 710da3c8ea7df ("sched/core: Prevent race condition between cpuset and __sched_setscheduler()")
+Reported-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[bigeasy: Patch description]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/a23a826af7c108ea5651e73b8fbae5e653f16e86.camel@gmx.de
+---
+ kernel/irq/manage.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index dec3f73e8db9..ac2c57429750 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1225,6 +1225,8 @@ static int irq_thread(void *data)
+ irqreturn_t (*handler_fn)(struct irq_desc *desc,
+ struct irqaction *action);
+
++ sched_set_fifo(current);
++
+ if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
+ &action->thread_flags))
+ handler_fn = irq_forced_thread_fn;
+@@ -1390,8 +1392,6 @@ setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+
+- sched_set_fifo(t);
+-
+ /*
+ * We keep the reference to the task struct even if
+ * the thread dies to avoid that the interrupt code
+--
+2.19.1
+
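The same pattern, outside the genirq code, looks roughly like the sketch below; my_irq_thread_fn() and start_handler_thread() are made-up names, only sched_set_fifo() and kthread_run() are real interfaces.

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int my_irq_thread_fn(void *data)
    {
            /* Runs in the new thread's context, so no creator-side locks are held. */
            sched_set_fifo(current);

            /* ... handler loop ... */
            return 0;
    }

    static struct task_struct *start_handler_thread(void *data)
    {
            /*
             * No sched_set_fifo(t) here any more: the creator may already hold
             * unrelated mutexes, and changing the priority from its context
             * would pull cpuset_rwsem into that lock chain.
             */
            return kthread_run(my_irq_thread_fn, data, "my-irqthread");
    }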
diff --git a/features/rt/genirq-update-irq_set_irqchip_state-documentation.patch b/features/rt/genirq-update-irq_set_irqchip_state-documentation.patch
new file mode 100644
index 00000000..d203983a
--- /dev/null
+++ b/features/rt/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -0,0 +1,31 @@
+From a7f1d1036ba191caf447313670c2d682537c6f51 Mon Sep 17 00:00:00 2001
+From: Josh Cartwright <joshc@ni.com>
+Date: Thu, 11 Feb 2016 11:54:00 -0600
+Subject: [PATCH 174/191] genirq: update irq_set_irqchip_state documentation
+
+On -rt kernels, the use of migrate_disable()/migrate_enable() is
+sufficient to guarantee a task isn't moved to another CPU. Update the
+irq_set_irqchip_state() documentation to reflect this.
+
+Signed-off-by: Josh Cartwright <joshc@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/irq/manage.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index f4d18858d079..b279a8683c38 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -2787,7 +2787,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
+ * This call sets the internal irqchip state of an interrupt,
+ * depending on the value of @which.
+ *
+- * This function should be called with preemption disabled if the
++ * This function should be called with migration disabled if the
+ * interrupt controller has per-cpu registers.
+ */
+ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+--
+2.19.1
+
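On the caller side the updated documentation amounts to something like the following sketch; clear_pending_on_this_cpu() is a made-up name and the chosen irqchip state is arbitrary.

    #include <linux/interrupt.h>
    #include <linux/preempt.h>

    static void clear_pending_on_this_cpu(unsigned int irq)
    {
            /*
             * On RT, migrate_disable() keeps the task on this CPU while
             * leaving it preemptible -- that is all this call needs.
             */
            migrate_disable();
            irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
            migrate_enable();
    }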
diff --git a/features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch b/features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
new file mode 100644
index 00000000..30c5cea1
--- /dev/null
+++ b/features/rt/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
@@ -0,0 +1,84 @@
+From a93cd2c33ab23c4dce5b28971b24ad3bb0a85d55 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 30 Oct 2020 13:59:06 +0100
+Subject: [PATCH 001/191] highmem: Don't disable preemption on RT in
+ kmap_atomic()
+
+Disabling preemption makes it impossible to acquire sleeping locks within a
+kmap_atomic() section.
+For PREEMPT_RT it is sufficient to disable migration.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem-internal.h | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
+index 7902c7d8b55f..4aa1031d3e4c 100644
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -90,7 +90,11 @@ static inline void __kunmap_local(void *vaddr)
+
+ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
++
+ pagefault_disable();
+ return __kmap_local_page_prot(page, prot);
+ }
+@@ -102,7 +106,11 @@ static inline void *kmap_atomic(struct page *page)
+
+ static inline void *kmap_atomic_pfn(unsigned long pfn)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
++
+ pagefault_disable();
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+ }
+@@ -111,7 +119,10 @@ static inline void __kunmap_atomic(void *addr)
+ {
+ kunmap_local_indexed(addr);
+ pagefault_enable();
+- preempt_enable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_enable();
++ else
++ preempt_enable();
+ }
+
+ unsigned int __nr_free_highpages(void);
+@@ -179,7 +190,10 @@ static inline void __kunmap_local(void *addr)
+
+ static inline void *kmap_atomic(struct page *page)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+ }
+@@ -200,7 +214,10 @@ static inline void __kunmap_atomic(void *addr)
+ kunmap_flush_on_unmap(addr);
+ #endif
+ pagefault_enable();
+- preempt_enable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_enable();
++ else
++ preempt_enable();
+ }
+
+ static inline unsigned int nr_free_highpages(void) { return 0; }
+--
+2.19.1
+
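The hunks above all repeat one pattern; a helper pair like the one below captures it. This is a sketch only: kmap_atomic_pin()/kmap_atomic_unpin() do not exist in the tree, the patch open-codes the two branches at every call site.

    #include <linux/preempt.h>
    #include <linux/uaccess.h>

    static inline void kmap_atomic_pin(void)
    {
            if (IS_ENABLED(CONFIG_PREEMPT_RT))
                    migrate_disable();      /* stay on this CPU, stay preemptible */
            else
                    preempt_disable();      /* !RT keeps the old semantics */
            pagefault_disable();
    }

    static inline void kmap_atomic_unpin(void)
    {
            pagefault_enable();
            if (IS_ENABLED(CONFIG_PREEMPT_RT))
                    migrate_enable();
            else
                    preempt_enable();
    }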
diff --git a/features/rt/irqtime-Make-accounting-correct-on-RT.patch b/features/rt/irqtime-Make-accounting-correct-on-RT.patch
new file mode 100644
index 00000000..bb84cb23
--- /dev/null
+++ b/features/rt/irqtime-Make-accounting-correct-on-RT.patch
@@ -0,0 +1,53 @@
+From 02a22f4cb3638326b34be9b98415ab6f2718f071 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:55:54 +0100
+Subject: [PATCH 056/191] irqtime: Make accounting correct on RT
+
+vtime_account_irq() and irqtime_account_irq() base their checks on preempt_count(),
+which fails on RT because preempt_count() does not contain the softirq
+accounting, which is separate on RT.
+
+These checks do not need the full preempt count as they only operate on the
+hard and softirq sections.
+
+Use irq_count() instead, which provides the correct value on both RT and non-
+RT kernels. The compiler is clever enough to fold the masking for !RT:
+
+ 99b: 65 8b 05 00 00 00 00 mov %gs:0x0(%rip),%eax
+ - 9a2: 25 ff ff ff 7f and $0x7fffffff,%eax
+ + 9a2: 25 00 ff ff 00 and $0xffff00,%eax
+
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/cputime.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 5f611658eeab..2c36a5fad589 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -60,7 +60,7 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
+ cpu = smp_processor_id();
+ delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
+ irqtime->irq_start_time += delta;
+- pc = preempt_count() - offset;
++ pc = irq_count() - offset;
+
+ /*
+ * We do not account for softirq time from ksoftirqd here.
+@@ -421,7 +421,7 @@ void vtime_task_switch(struct task_struct *prev)
+
+ void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
+ {
+- unsigned int pc = preempt_count() - offset;
++ unsigned int pc = irq_count() - offset;
+
+ if (pc & HARDIRQ_OFFSET) {
+ vtime_account_hardirq(tsk);
+--
+2.19.1
+
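The disassembly above follows from the usual mainline preempt_count() layout (8 preempt bits, 8 softirq bits, 4 hardirq bits, 4 NMI bits); the note below sketches that arithmetic and is not part of the patch.

    /* Assumed mainline preempt_count() field masks: */
    #define SOFTIRQ_MASK    0x0000ff00UL
    #define HARDIRQ_MASK    0x000f0000UL
    #define NMI_MASK        0x00f00000UL
    /*
     * irq_count() == preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK)
     *             == preempt_count() & 0x00ffff00
     * which is the new "and $0xffff00,%eax".  The old "and $0x7fffffff,%eax"
     * is x86's preempt_count() stripping the PREEMPT_NEED_RESCHED bit (bit 31).
     */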
diff --git a/features/rt/irqwork-push-most-work-into-softirq-context.patch b/features/rt/irqwork-push-most-work-into-softirq-context.patch
new file mode 100644
index 00000000..0b69634b
--- /dev/null
+++ b/features/rt/irqwork-push-most-work-into-softirq-context.patch
@@ -0,0 +1,200 @@
+From 79182fbaac5c336ea2aadecca97ad3e96ba2d2f0 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 23 Jun 2015 15:32:51 +0200
+Subject: [PATCH 144/191] irqwork: push most work into softirq context
+
+Initially we deferred all irqwork into softirq because we didn't want the
+latency spikes if perf or another user was busy and delayed the RT task.
+The NOHZ trigger (nohz_full_kick_work) was the first user that did not work
+as expected if it did not run in the original irqwork context, so we had to
+bring that behaviour back for it. push_irq_work_func is the second one that
+requires this.
+
+This patch adds IRQ_WORK_HARD_IRQ, which makes sure the callback runs
+in raw-irq context. Everything else is deferred into softirq context. Without
+-RT we have the original behavior.
+
+This patch incorporates tglx's original work, reworked a little to bring back
+arch_irq_work_raise() where possible, plus a few fixes from Steven Rostedt and
+Mike Galbraith.
+
+[bigeasy: melt tglx's irq_work_tick_soft() which splits irq_work_tick() into a
+ hard and soft variant]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/irq_work.h | 6 ++++
+ kernel/irq_work.c | 69 ++++++++++++++++++++++++++++++++--------
+ kernel/sched/topology.c | 3 +-
+ kernel/time/timer.c | 2 ++
+ 4 files changed, 66 insertions(+), 14 deletions(-)
+
+diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
+index ec2a47a81e42..dbbef9089789 100644
+--- a/include/linux/irq_work.h
++++ b/include/linux/irq_work.h
+@@ -64,4 +64,10 @@ static inline void irq_work_run(void) { }
+ static inline void irq_work_single(void *arg) { }
+ #endif
+
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT)
++void irq_work_tick_soft(void);
++#else
++static inline void irq_work_tick_soft(void) { }
++#endif
++
+ #endif /* _LINUX_IRQ_WORK_H */
+diff --git a/kernel/irq_work.c b/kernel/irq_work.c
+index e8da1e71583a..c3455910196f 100644
+--- a/kernel/irq_work.c
++++ b/kernel/irq_work.c
+@@ -18,6 +18,7 @@
+ #include <linux/cpu.h>
+ #include <linux/notifier.h>
+ #include <linux/smp.h>
++#include <linux/interrupt.h>
+ #include <asm/processor.h>
+
+
+@@ -52,13 +53,27 @@ void __weak arch_irq_work_raise(void)
+ /* Enqueue on current CPU, work must already be claimed and preempt disabled */
+ static void __irq_work_queue_local(struct irq_work *work)
+ {
+- /* If the work is "lazy", handle it from next tick if any */
+- if (atomic_read(&work->node.a_flags) & IRQ_WORK_LAZY) {
+- if (llist_add(&work->node.llist, this_cpu_ptr(&lazy_list)) &&
+- tick_nohz_tick_stopped())
+- arch_irq_work_raise();
+- } else {
+- if (llist_add(&work->node.llist, this_cpu_ptr(&raised_list)))
++ struct llist_head *list;
++ bool lazy_work;
++ int work_flags;
++
++ work_flags = atomic_read(&work->node.a_flags);
++ if (work_flags & IRQ_WORK_LAZY)
++ lazy_work = true;
++ else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
++ !(work_flags & IRQ_WORK_HARD_IRQ))
++ lazy_work = true;
++ else
++ lazy_work = false;
++
++ if (lazy_work)
++ list = this_cpu_ptr(&lazy_list);
++ else
++ list = this_cpu_ptr(&raised_list);
++
++ if (llist_add(&work->node.llist, list)) {
++ /* If the work is "lazy", handle it from next tick if any */
++ if (!lazy_work || tick_nohz_tick_stopped())
+ arch_irq_work_raise();
+ }
+ }
+@@ -102,7 +117,14 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
+ if (cpu != smp_processor_id()) {
+ /* Arch remote IPI send/receive backend aren't NMI safe */
+ WARN_ON_ONCE(in_nmi());
+- __smp_call_single_queue(cpu, &work->node.llist);
++
++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {
++ if (llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
++ /* && tick_nohz_tick_stopped_cpu(cpu) */
++ arch_send_call_function_single_ipi(cpu);
++ } else {
++ __smp_call_single_queue(cpu, &work->node.llist);
++ }
+ } else {
+ __irq_work_queue_local(work);
+ }
+@@ -120,9 +142,8 @@ bool irq_work_needs_cpu(void)
+ raised = this_cpu_ptr(&raised_list);
+ lazy = this_cpu_ptr(&lazy_list);
+
+- if (llist_empty(raised) || arch_irq_work_has_interrupt())
+- if (llist_empty(lazy))
+- return false;
++ if (llist_empty(raised) && llist_empty(lazy))
++ return false;
+
+ /* All work should have been flushed before going offline */
+ WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+@@ -165,8 +186,12 @@ static void irq_work_run_list(struct llist_head *list)
+ struct irq_work *work, *tmp;
+ struct llist_node *llnode;
+
++#ifndef CONFIG_PREEMPT_RT
++ /*
++ * nort: On RT IRQ-work may run in SOFTIRQ context.
++ */
+ BUG_ON(!irqs_disabled());
+-
++#endif
+ if (llist_empty(list))
+ return;
+
+@@ -182,7 +207,16 @@ static void irq_work_run_list(struct llist_head *list)
+ void irq_work_run(void)
+ {
+ irq_work_run_list(this_cpu_ptr(&raised_list));
+- irq_work_run_list(this_cpu_ptr(&lazy_list));
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ /*
++ * NOTE: we raise softirq via IPI for safety,
++ * and execute in irq_work_tick() to move the
++ * overhead from hard to soft irq context.
++ */
++ if (!llist_empty(this_cpu_ptr(&lazy_list)))
++ raise_softirq(TIMER_SOFTIRQ);
++ } else
++ irq_work_run_list(this_cpu_ptr(&lazy_list));
+ }
+ EXPORT_SYMBOL_GPL(irq_work_run);
+
+@@ -192,8 +226,17 @@ void irq_work_tick(void)
+
+ if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
+ irq_work_run_list(raised);
++
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
++ irq_work_run_list(this_cpu_ptr(&lazy_list));
++}
++
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT)
++void irq_work_tick_soft(void)
++{
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
+ }
++#endif
+
+ /*
+ * Synchronize against the irq_work @entry, ensures the entry is not
+diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
+index 09d35044bd88..6f7c0f493be3 100644
+--- a/kernel/sched/topology.c
++++ b/kernel/sched/topology.c
+@@ -526,7 +526,8 @@ static int init_rootdomain(struct root_domain *rd)
+ #ifdef HAVE_RT_PUSH_IPI
+ rd->rto_cpu = -1;
+ raw_spin_lock_init(&rd->rto_lock);
+- init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
++// init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
++ rd->rto_push_work = IRQ_WORK_INIT_HARD(rto_push_irq_work_func);
+ #endif
+
+ rd->visit_gen = 0;
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index a0ec4450b1d8..4f7602724f9a 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1757,6 +1757,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
+ {
+ struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
++ irq_work_tick_soft();
++
+ __run_timers(base);
+ if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
+ __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+--
+2.19.1
+
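Condensed, the queueing decision introduced above reads like the sketch below; routes_to_lazy_list() is a made-up name, the real logic lives in __irq_work_queue_local().

    #include <linux/irq_work.h>

    static bool routes_to_lazy_list(struct irq_work *work)
    {
            int flags = atomic_read(&work->node.a_flags);

            if (flags & IRQ_WORK_LAZY)
                    return true;    /* always deferred to the next tick */
            if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(flags & IRQ_WORK_HARD_IRQ))
                    return true;    /* RT: runs from TIMER_SOFTIRQ via irq_work_tick_soft() */
            return false;           /* raised_list: runs in hard interrupt context */
    }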
diff --git a/features/rt/jump-label-disable-if-stop_machine-is-used.patch b/features/rt/jump-label-disable-if-stop_machine-is-used.patch
new file mode 100644
index 00000000..7ed59db6
--- /dev/null
+++ b/features/rt/jump-label-disable-if-stop_machine-is-used.patch
@@ -0,0 +1,41 @@
+From 14fd102dd9a45ac6a7e986617ed27fcdcd6f860f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 8 Jul 2015 17:14:48 +0200
+Subject: [PATCH 169/191] jump-label: disable if stop_machine() is used
+
+Some architectures are using stop_machine() while switching the opcode, which
+leads to latency spikes.
+The architectures which use stop_machine() atm:
+- ARM stop machine
+- s390 stop machine
+
+The architectures which use other sorcery:
+- MIPS
+- X86
+- powerpc
+- sparc
+- arm64
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[bigeasy: only ARM for now]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 5179eb3a35d5..fe49c7cb76bd 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -67,7 +67,7 @@ config ARM
+ select HARDIRQS_SW_RESEND
+ select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT
+ select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
+ select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
+--
+2.19.1
+
diff --git a/features/rt/kconfig-Disable-config-options-which-are-not-RT-comp.patch b/features/rt/kconfig-Disable-config-options-which-are-not-RT-comp.patch
new file mode 100644
index 00000000..3bb7f987
--- /dev/null
+++ b/features/rt/kconfig-Disable-config-options-which-are-not-RT-comp.patch
@@ -0,0 +1,29 @@
+From 6e80f68f3fae004cd7506a5584ed2e5216bacd62 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 24 Jul 2011 12:11:43 +0200
+Subject: [PATCH 093/191] kconfig: Disable config options which are not RT
+ compatible
+
+Disable stuff which is known to have issues on RT
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 24c045b24b95..ec06984ec4ac 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -387,7 +387,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+
+ config TRANSPARENT_HUGEPAGE
+ bool "Transparent Hugepage Support"
+- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE
++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
+ select COMPACTION
+ select XARRAY_MULTI
+ help
+--
+2.19.1
+
diff --git a/features/rt/kcov-Remove-kcov-include-from-sched.h-and-move-it-to.patch b/features/rt/kcov-Remove-kcov-include-from-sched.h-and-move-it-to.patch
new file mode 100644
index 00000000..2cb22e3a
--- /dev/null
+++ b/features/rt/kcov-Remove-kcov-include-from-sched.h-and-move-it-to.patch
@@ -0,0 +1,109 @@
+From 4dcdff69e55d504a126793e1126d5d475b8c40b0 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 18 Feb 2021 18:31:24 +0100
+Subject: [PATCH 034/191] kcov: Remove kcov include from sched.h and move it to
+ its users.
+
+The recent addition of in_serving_softirq() to kcov.h results in a
+compile failure on PREEMPT_RT because it requires
+task_struct::softirq_disable_cnt. This is not available if kcov.h is
+included from sched.h.
+
+There is no need to include kcov.h from sched.h. All but the net/ users
+already include the kcov header file.
+
+Move the include of the kcov.h header from sched.h to its users.
+Additionally include sched.h from kcov.h to ensure that everything
+task_struct related is available.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Johannes Berg <johannes@sipsolutions.net>
+Acked-by: Andrey Konovalov <andreyknvl@google.com>
+Link: https://lkml.kernel.org/r/20210218173124.iy5iyqv3a4oia4vv@linutronix.de
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/usb/usbip/usbip_common.h | 1 +
+ include/linux/kcov.h | 1 +
+ include/linux/sched.h | 1 -
+ net/core/skbuff.c | 1 +
+ net/mac80211/iface.c | 1 +
+ net/mac80211/rx.c | 1 +
+ 6 files changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
+index d60ce17d3dd2..a7dd6c66aee5 100644
+--- a/drivers/usb/usbip/usbip_common.h
++++ b/drivers/usb/usbip/usbip_common.h
+@@ -18,6 +18,7 @@
+ #include <linux/usb.h>
+ #include <linux/wait.h>
+ #include <linux/sched/task.h>
++#include <linux/kcov.h>
+ #include <uapi/linux/usbip.h>
+
+ #undef pr_fmt
+diff --git a/include/linux/kcov.h b/include/linux/kcov.h
+index 4e3037dc1204..55dc338f6bcd 100644
+--- a/include/linux/kcov.h
++++ b/include/linux/kcov.h
+@@ -2,6 +2,7 @@
+ #ifndef _LINUX_KCOV_H
+ #define _LINUX_KCOV_H
+
++#include <linux/sched.h>
+ #include <uapi/linux/kcov.h>
+
+ struct task_struct;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index ef00bb22164c..cf245bc237e7 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -14,7 +14,6 @@
+ #include <linux/pid.h>
+ #include <linux/sem.h>
+ #include <linux/shm.h>
+-#include <linux/kcov.h>
+ #include <linux/mutex.h>
+ #include <linux/plist.h>
+ #include <linux/hrtimer.h>
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c421c8f80925..4275b88726f4 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -60,6 +60,7 @@
+ #include <linux/prefetch.h>
+ #include <linux/if_vlan.h>
+ #include <linux/mpls.h>
++#include <linux/kcov.h>
+
+ #include <net/protocol.h>
+ #include <net/dst.h>
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index b80c9b016b2b..c127debdc12e 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -15,6 +15,7 @@
+ #include <linux/if_arp.h>
+ #include <linux/netdevice.h>
+ #include <linux/rtnetlink.h>
++#include <linux/kcov.h>
+ #include <net/mac80211.h>
+ #include <net/ieee80211_radiotap.h>
+ #include "ieee80211_i.h"
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index c1343c028b76..62047e93e217 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -17,6 +17,7 @@
+ #include <linux/etherdevice.h>
+ #include <linux/rcupdate.h>
+ #include <linux/export.h>
++#include <linux/kcov.h>
+ #include <linux/bitops.h>
+ #include <net/mac80211.h>
+ #include <net/ieee80211_radiotap.h>
+--
+2.19.1
+
diff --git a/features/rt/kernel-sched-add-put-get-_cpu_light.patch b/features/rt/kernel-sched-add-put-get-_cpu_light.patch
new file mode 100644
index 00000000..8cb9d552
--- /dev/null
+++ b/features/rt/kernel-sched-add-put-get-_cpu_light.patch
@@ -0,0 +1,27 @@
+From 7497b8dc12591dd237a63c9748b71666d48a2eb4 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Sat, 27 May 2017 19:02:06 +0200
+Subject: [PATCH 101/191] kernel/sched: add {put|get}_cpu_light()
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/smp.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/include/linux/smp.h b/include/linux/smp.h
+index 70c6f6284dcf..4c602ca3bc13 100644
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -238,6 +238,9 @@ static inline int get_boot_cpu_id(void)
+ #define get_cpu() ({ preempt_disable(); __smp_processor_id(); })
+ #define put_cpu() preempt_enable()
+
++#define get_cpu_light() ({ migrate_disable(); __smp_processor_id(); })
++#define put_cpu_light() migrate_enable()
++
+ /*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+--
+2.19.1
+
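A usage sketch for the new helpers; example_per_cpu_work() is a made-up name. Code that only needs a stable CPU number, but may take sleeping locks on RT, can use the _light variants instead of disabling preemption.

    #include <linux/printk.h>
    #include <linux/smp.h>

    static void example_per_cpu_work(void)
    {
            int cpu = get_cpu_light();      /* migrate_disable() + __smp_processor_id() */

            pr_debug("per-CPU work on CPU %d\n", cpu);
            /* ... may take sleeping locks here on RT ... */

            put_cpu_light();                /* migrate_enable() */
    }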
diff --git a/features/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/features/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
new file mode 100644
index 00000000..085edde5
--- /dev/null
+++ b/features/rt/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -0,0 +1,80 @@
+From cb9ddd5eec36831a80572aa3d33be62eff791e74 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Nov 2016 19:31:08 +0100
+Subject: [PATCH 121/191] kernel/sched: move stack + kprobe clean up to
+ __put_task_struct()
+
+There is no need to free the stack before the task struct (except for reasons
+mentioned in commit 68f24b08ee89 ("sched/core: Free the stack early if
+CONFIG_THREAD_INFO_IN_TASK")). This also comes handy on -RT because we can't
+free memory in preempt disabled region.
+vfree_atomic() delays the memory cleanup to a worker. Since we move everything
+to the RCU callback, we can also free it immediately.
+
+Cc: stable-rt@vger.kernel.org #for kprobe_flush_task()
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/fork.c | 12 +++++++++++-
+ kernel/sched/core.c | 9 ---------
+ 2 files changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index da1b307cbf73..5fdb0a1bbad8 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -42,6 +42,7 @@
+ #include <linux/mmu_notifier.h>
+ #include <linux/fs.h>
+ #include <linux/mm.h>
++#include <linux/kprobes.h>
+ #include <linux/vmacache.h>
+ #include <linux/nsproxy.h>
+ #include <linux/capability.h>
+@@ -288,7 +289,7 @@ static inline void free_thread_stack(struct task_struct *tsk)
+ return;
+ }
+
+- vfree_atomic(tsk->stack);
++ vfree(tsk->stack);
+ return;
+ }
+ #endif
+@@ -743,6 +744,15 @@ void __put_task_struct(struct task_struct *tsk)
+ WARN_ON(refcount_read(&tsk->usage));
+ WARN_ON(tsk == current);
+
++ /*
++ * Remove function-return probe instances associated with this
++ * task and put them back on the free list.
++ */
++ kprobe_flush_task(tsk);
++
++ /* Task is done with its stack. */
++ put_task_stack(tsk);
++
+ io_uring_free(tsk);
+ cgroup_free(tsk);
+ task_numa_free(tsk, true);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9ec24e4188f4..2069022bdce5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4282,15 +4282,6 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+ if (prev->sched_class->task_dead)
+ prev->sched_class->task_dead(prev);
+
+- /*
+- * Remove function-return probe instances associated with this
+- * task and put them back on the free list.
+- */
+- kprobe_flush_task(prev);
+-
+- /* Task is done with its stack. */
+- put_task_stack(prev);
+-
+ put_task_struct_rcu_user(prev);
+ }
+
+--
+2.19.1
+
diff --git a/features/rt/kthread-Move-prio-affinite-change-into-the-newly-cre.patch b/features/rt/kthread-Move-prio-affinite-change-into-the-newly-cre.patch
new file mode 100644
index 00000000..ee7fef7e
--- /dev/null
+++ b/features/rt/kthread-Move-prio-affinite-change-into-the-newly-cre.patch
@@ -0,0 +1,85 @@
+From 278ed17d1f399724a529b1b7c3730b364f136e16 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 9 Nov 2020 21:30:41 +0100
+Subject: [PATCH 003/191] kthread: Move prio/affinity change into the newly
+ created thread
+
+With threaded interrupts enabled, the nouveau driver reported the
+following:
+| Chain exists of:
+| &mm->mmap_lock#2 --> &device->mutex --> &cpuset_rwsem
+|
+| Possible unsafe locking scenario:
+|
+| CPU0 CPU1
+| ---- ----
+| lock(&cpuset_rwsem);
+| lock(&device->mutex);
+| lock(&cpuset_rwsem);
+| lock(&mm->mmap_lock#2);
+
+The device->mutex is nvkm_device::mutex.
+
+Unblocking the lockchain at `cpuset_rwsem' is probably the easiest thing
+to do.
+Move the priority reset to the start of the newly created thread.
+
+Fixes: 710da3c8ea7df ("sched/core: Prevent race condition between cpuset and __sched_setscheduler()")
+Reported-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/a23a826af7c108ea5651e73b8fbae5e653f16e86.camel@gmx.de
+---
+ kernel/kthread.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/kthread.c b/kernel/kthread.c
+index 1578973c5740..bb0602597ffd 100644
+--- a/kernel/kthread.c
++++ b/kernel/kthread.c
+@@ -243,6 +243,7 @@ EXPORT_SYMBOL_GPL(kthread_parkme);
+
+ static int kthread(void *_create)
+ {
++ static const struct sched_param param = { .sched_priority = 0 };
+ /* Copy data: it's on kthread's stack */
+ struct kthread_create_info *create = _create;
+ int (*threadfn)(void *data) = create->threadfn;
+@@ -273,6 +274,13 @@ static int kthread(void *_create)
+ init_completion(&self->parked);
+ current->vfork_done = &self->exited;
+
++ /*
++ * The new thread inherited kthreadd's priority and CPU mask. Reset
++ * back to default in case they have been changed.
++ */
++ sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
++ set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_KTHREAD));
++
+ /* OK, tell user we're spawned, wait for stop or wakeup */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ create->result = current;
+@@ -370,7 +378,6 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
+ }
+ task = create->result;
+ if (!IS_ERR(task)) {
+- static const struct sched_param param = { .sched_priority = 0 };
+ char name[TASK_COMM_LEN];
+
+ /*
+@@ -379,13 +386,6 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
+ */
+ vsnprintf(name, sizeof(name), namefmt, args);
+ set_task_comm(task, name);
+- /*
+- * root may have changed our (kthreadd's) priority or CPU mask.
+- * The kernel thread should not inherit these properties.
+- */
+- sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
+- set_cpus_allowed_ptr(task,
+- housekeeping_cpumask(HK_FLAG_KTHREAD));
+ }
+ kfree(create);
+ return task;
+--
+2.19.1
+
diff --git a/features/rt/leds-trigger-disable-CPU-trigger-on-RT.patch b/features/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
new file mode 100644
index 00000000..c0182d45
--- /dev/null
+++ b/features/rt/leds-trigger-disable-CPU-trigger-on-RT.patch
@@ -0,0 +1,39 @@
+From fc70c36c08df5b41b07e2c8aa2833f78c0b5b090 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 23 Jan 2014 14:45:59 +0100
+Subject: [PATCH 170/191] leds: trigger: disable CPU trigger on -RT
+
+as it triggers:
+|CPU: 0 PID: 0 Comm: swapper Not tainted 3.12.8-rt10 #141
+|[<c0014aa4>] (unwind_backtrace+0x0/0xf8) from [<c0012788>] (show_stack+0x1c/0x20)
+|[<c0012788>] (show_stack+0x1c/0x20) from [<c043c8dc>] (dump_stack+0x20/0x2c)
+|[<c043c8dc>] (dump_stack+0x20/0x2c) from [<c004c5e8>] (__might_sleep+0x13c/0x170)
+|[<c004c5e8>] (__might_sleep+0x13c/0x170) from [<c043f270>] (__rt_spin_lock+0x28/0x38)
+|[<c043f270>] (__rt_spin_lock+0x28/0x38) from [<c043fa00>] (rt_read_lock+0x68/0x7c)
+|[<c043fa00>] (rt_read_lock+0x68/0x7c) from [<c036cf74>] (led_trigger_event+0x2c/0x5c)
+|[<c036cf74>] (led_trigger_event+0x2c/0x5c) from [<c036e0bc>] (ledtrig_cpu+0x54/0x5c)
+|[<c036e0bc>] (ledtrig_cpu+0x54/0x5c) from [<c000ffd8>] (arch_cpu_idle_exit+0x18/0x1c)
+|[<c000ffd8>] (arch_cpu_idle_exit+0x18/0x1c) from [<c00590b8>] (cpu_startup_entry+0xa8/0x234)
+|[<c00590b8>] (cpu_startup_entry+0xa8/0x234) from [<c043b2cc>] (rest_init+0xb8/0xe0)
+|[<c043b2cc>] (rest_init+0xb8/0xe0) from [<c061ebe0>] (start_kernel+0x2c4/0x380)
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/leds/trigger/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
+index b77a01bd27f4..aa74e2a05798 100644
+--- a/drivers/leds/trigger/Kconfig
++++ b/drivers/leds/trigger/Kconfig
+@@ -64,6 +64,7 @@ config LEDS_TRIGGER_BACKLIGHT
+
+ config LEDS_TRIGGER_CPU
+ bool "LED CPU Trigger"
++ depends on !PREEMPT_RT
+ help
+ This allows LEDs to be controlled by active CPUs. This shows
+ the active CPUs across an array of LEDs so you can see which
+--
+2.19.1
+
diff --git a/features/rt/lockdep-Make-it-RT-aware.patch b/features/rt/lockdep-Make-it-RT-aware.patch
new file mode 100644
index 00000000..7b9c216e
--- /dev/null
+++ b/features/rt/lockdep-Make-it-RT-aware.patch
@@ -0,0 +1,77 @@
+From 41fd5cd7a4baa5c51e83bd2637358791447960a7 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 18:51:23 +0200
+Subject: [PATCH 151/191] lockdep: Make it RT aware
+
+Teach lockdep that we don't really do softirqs on -RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/irqflags.h | 23 +++++++++++++++--------
+ kernel/locking/lockdep.c | 2 ++
+ 2 files changed, 17 insertions(+), 8 deletions(-)
+
+diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
+index 600c10da321a..4b140938b03e 100644
+--- a/include/linux/irqflags.h
++++ b/include/linux/irqflags.h
+@@ -71,14 +71,6 @@ do { \
+ do { \
+ __this_cpu_dec(hardirq_context); \
+ } while (0)
+-# define lockdep_softirq_enter() \
+-do { \
+- current->softirq_context++; \
+-} while (0)
+-# define lockdep_softirq_exit() \
+-do { \
+- current->softirq_context--; \
+-} while (0)
+
+ # define lockdep_hrtimer_enter(__hrtimer) \
+ ({ \
+@@ -140,6 +132,21 @@ do { \
+ # define lockdep_irq_work_exit(__work) do { } while (0)
+ #endif
+
++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
++# define lockdep_softirq_enter() \
++do { \
++ current->softirq_context++; \
++} while (0)
++# define lockdep_softirq_exit() \
++do { \
++ current->softirq_context--; \
++} while (0)
++
++#else
++# define lockdep_softirq_enter() do { } while (0)
++# define lockdep_softirq_exit() do { } while (0)
++#endif
++
+ #if defined(CONFIG_IRQSOFF_TRACER) || \
+ defined(CONFIG_PREEMPT_TRACER)
+ extern void stop_critical_timings(void);
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index ac177ade7d9b..82fd5cacc492 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -5360,6 +5360,7 @@ static noinstr void check_flags(unsigned long flags)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ /*
+ * We dont accurately track softirq state in e.g.
+ * hardirq contexts (such as on 4KSTACKS), so only
+@@ -5374,6 +5375,7 @@ static noinstr void check_flags(unsigned long flags)
+ DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+ }
+ }
++#endif
+
+ if (!debug_locks)
+ print_irqtrace_events(current);
+--
+2.19.1
+
diff --git a/features/rt/lockdep-Reduce-header-files-in-debug_locks.h.patch b/features/rt/lockdep-Reduce-header-files-in-debug_locks.h.patch
new file mode 100644
index 00000000..24d5c811
--- /dev/null
+++ b/features/rt/lockdep-Reduce-header-files-in-debug_locks.h.patch
@@ -0,0 +1,32 @@
+From b127ff5cf25163e47794c314b66398cef8fec54f Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 14 Aug 2020 16:55:25 +0200
+Subject: [PATCH 070/191] lockdep: Reduce header files in debug_locks.h
+
+The inclusion of printk.h leads to a circular dependency if spinlock_t is
+based on rt_mutex.
+
+Include only atomic.h (xchg()) and cache.h (__read_mostly).
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/debug_locks.h | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
+index 2915f56ad421..5a9e3e3769ce 100644
+--- a/include/linux/debug_locks.h
++++ b/include/linux/debug_locks.h
+@@ -3,8 +3,7 @@
+ #define __LINUX_DEBUG_LOCKING_H
+
+ #include <linux/atomic.h>
+-#include <linux/bug.h>
+-#include <linux/printk.h>
++#include <linux/cache.h>
+
+ struct task_struct;
+
+--
+2.19.1
+
diff --git a/features/rt/lockdep-disable-self-test.patch b/features/rt/lockdep-disable-self-test.patch
new file mode 100644
index 00000000..fae3936a
--- /dev/null
+++ b/features/rt/lockdep-disable-self-test.patch
@@ -0,0 +1,34 @@
+From 87bd0ee2b2c1a55ad355c37f1143996b0c9f654d Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 17 Oct 2017 16:36:18 +0200
+Subject: [PATCH 154/191] lockdep: disable self-test
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The self-test wasn't always 100% accurate for RT. We disabled a few
+tests which failed because they had different semantics for RT. Some
+still reported false positives. Now the selftest locks up the system
+during boot and it needs to be investigated…
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ lib/Kconfig.debug | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 2779c29d9981..74a0a9e54fdb 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1392,7 +1392,7 @@ config DEBUG_ATOMIC_SLEEP
+
+ config DEBUG_LOCKING_API_SELFTESTS
+ bool "Locking API boot-time self-tests"
+- depends on DEBUG_KERNEL
++ depends on DEBUG_KERNEL && !PREEMPT_RT
+ help
+ Say Y here if you want the kernel to run a short self-test during
+ bootup. The self-test checks whether common types of locking bugs
+--
+2.19.1
+
diff --git a/features/rt/lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch b/features/rt/lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch
new file mode 100644
index 00000000..decdc693
--- /dev/null
+++ b/features/rt/lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch
@@ -0,0 +1,61 @@
+From 976fc02f2b164dce981f5e3c6a5aca9d9aba1a73 Mon Sep 17 00:00:00 2001
+From: Yong Zhang <yong.zhang@windriver.com>
+Date: Mon, 16 Apr 2012 15:01:56 +0800
+Subject: [PATCH 152/191] lockdep: selftest: Only do hardirq context test for
+ raw spinlock
+
+On -rt there is no softirq context any more and rwlock is sleepable, so
+disable the softirq context test and the rwlock+irq test.
+
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Cc: Yong Zhang <yong.zhang@windriver.com>
+Link: http://lkml.kernel.org/r/1334559716-18447-3-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ lib/locking-selftest.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
+index 2d85abac1744..5ff07ae1cc67 100644
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -2841,6 +2841,7 @@ void locking_selftest(void)
+
+ printk(" --------------------------------------------------------------------------\n");
+
++#ifndef CONFIG_PREEMPT_RT
+ /*
+ * irq-context testcases:
+ */
+@@ -2855,6 +2856,28 @@ void locking_selftest(void)
+ DO_TESTCASE_6x2x2RW("irq read-recursion #2", irq_read_recursion2);
+ DO_TESTCASE_6x2x2RW("irq read-recursion #3", irq_read_recursion3);
+
++#else
++ /* On -rt, we only do hardirq context test for raw spinlock */
++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
++
++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
++
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
++
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
++#endif
+ ww_tests();
+
+ force_read_lock_recursive = 0;
+--
+2.19.1
+
diff --git a/features/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/features/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
new file mode 100644
index 00000000..a0354b31
--- /dev/null
+++ b/features/rt/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
@@ -0,0 +1,148 @@
+From d161010c471cc0d0b18c8875379b75194cd124e8 Mon Sep 17 00:00:00 2001
+From: Josh Cartwright <josh.cartwright@ni.com>
+Date: Wed, 28 Jan 2015 13:08:45 -0600
+Subject: [PATCH 153/191] lockdep: selftest: fix warnings due to missing
+ PREEMPT_RT conditionals
+
+"lockdep: Selftest: Only do hardirq context test for raw spinlock"
+disabled the execution of certain tests with PREEMPT_RT, but did
+not prevent the tests from still being defined. This leads to warnings
+like:
+
+ ./linux/lib/locking-selftest.c:574:1: warning: 'irqsafe1_hard_rlock_12' defined but not used [-Wunused-function]
+ ./linux/lib/locking-selftest.c:574:1: warning: 'irqsafe1_hard_rlock_21' defined but not used [-Wunused-function]
+ ./linux/lib/locking-selftest.c:577:1: warning: 'irqsafe1_hard_wlock_12' defined but not used [-Wunused-function]
+ ./linux/lib/locking-selftest.c:577:1: warning: 'irqsafe1_hard_wlock_21' defined but not used [-Wunused-function]
+ ./linux/lib/locking-selftest.c:580:1: warning: 'irqsafe1_soft_spin_12' defined but not used [-Wunused-function]
+ ...
+
+Fixed by wrapping the test definitions in #ifndef CONFIG_PREEMPT_RT
+conditionals.
+
+Signed-off-by: Josh Cartwright <josh.cartwright@ni.com>
+Signed-off-by: Xander Huff <xander.huff@ni.com>
+Acked-by: Gratian Crisan <gratian.crisan@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ lib/locking-selftest.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
+index 5ff07ae1cc67..3d2d99d8ed13 100644
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -794,6 +794,8 @@ GENERATE_TESTCASE(init_held_rtmutex);
+ #include "locking-selftest-spin-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
+
++#ifndef CONFIG_PREEMPT_RT
++
+ #include "locking-selftest-rlock-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
+
+@@ -809,9 +811,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+
++#endif
++
+ #undef E1
+ #undef E2
+
++#ifndef CONFIG_PREEMPT_RT
+ /*
+ * Enabling hardirqs with a softirq-safe lock held:
+ */
+@@ -844,6 +849,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+ #undef E1
+ #undef E2
+
++#endif
++
+ /*
+ * Enabling irqs with an irq-safe lock held:
+ */
+@@ -867,6 +874,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
+ #include "locking-selftest-spin-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
+
++#ifndef CONFIG_PREEMPT_RT
++
+ #include "locking-selftest-rlock-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
+
+@@ -882,6 +891,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+
++#endif
++
+ #undef E1
+ #undef E2
+
+@@ -913,6 +924,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+ #include "locking-selftest-spin-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
+
++#ifndef CONFIG_PREEMPT_RT
++
+ #include "locking-selftest-rlock-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
+
+@@ -928,6 +941,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+
++#endif
++
+ #undef E1
+ #undef E2
+ #undef E3
+@@ -961,6 +976,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+ #include "locking-selftest-spin-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
+
++#ifndef CONFIG_PREEMPT_RT
++
+ #include "locking-selftest-rlock-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
+
+@@ -976,10 +993,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+
++#endif
++
+ #undef E1
+ #undef E2
+ #undef E3
+
++#ifndef CONFIG_PREEMPT_RT
++
+ /*
+ * read-lock / write-lock irq inversion.
+ *
+@@ -1169,6 +1190,11 @@ GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3_R3W1)
+ #undef E1
+ #undef E2
+ #undef E3
++
++#endif
++
++#ifndef CONFIG_PREEMPT_RT
++
+ /*
+ * read-lock / write-lock recursion that is actually safe.
+ */
+@@ -1215,6 +1241,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
+ #undef E2
+ #undef E3
+
++#endif
++
+ /*
+ * read-lock / write-lock recursion that is unsafe.
+ */
+--
+2.19.1
+
diff --git a/features/rt/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch b/features/rt/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
new file mode 100644
index 00000000..665a4bb1
--- /dev/null
+++ b/features/rt/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
@@ -0,0 +1,125 @@
+From 634b7dbcbf43a51f9c149ff014b99cf95ec798d6 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 19 Nov 2019 09:25:04 +0100
+Subject: [PATCH 131/191] locking: Make spinlock_t and rwlock_t a RCU section
+ on RT
+
+On !RT a locked spinlock_t and rwlock_t disable preemption, which
+implies an RCU read section. There is code that relies on that behaviour.
+
+Add an explicit RCU read section on RT while a sleeping lock (a lock
+which would disable preemption on !RT) is acquired.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 6 ++++++
+ kernel/locking/rwlock-rt.c | 6 ++++++
+ 2 files changed, 12 insertions(+)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 4cd9d6c4cd68..d4da971759fb 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1118,6 +1118,7 @@ void __lockfunc rt_spin_lock(spinlock_t *lock)
+ {
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ rcu_read_lock();
+ migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_spin_lock);
+@@ -1132,6 +1133,7 @@ void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+ {
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ rcu_read_lock();
+ migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_spin_lock_nested);
+@@ -1141,6 +1143,7 @@ void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock,
+ {
+ spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ rcu_read_lock();
+ migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_spin_lock_nest_lock);
+@@ -1151,6 +1154,7 @@ void __lockfunc rt_spin_unlock(spinlock_t *lock)
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ spin_release(&lock->dep_map, _RET_IP_);
+ migrate_enable();
++ rcu_read_unlock();
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+ }
+ EXPORT_SYMBOL(rt_spin_unlock);
+@@ -1180,6 +1184,7 @@ int __lockfunc rt_spin_trylock(spinlock_t *lock)
+ ret = __rt_mutex_trylock(&lock->lock);
+ if (ret) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ rcu_read_lock();
+ migrate_disable();
+ }
+ return ret;
+@@ -1194,6 +1199,7 @@ int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
+ ret = __rt_mutex_trylock(&lock->lock);
+ if (ret) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ rcu_read_lock();
+ migrate_disable();
+ } else {
+ local_bh_enable();
+diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c
+index 948d10214ee2..cc8bc2ef4ba2 100644
+--- a/kernel/locking/rwlock-rt.c
++++ b/kernel/locking/rwlock-rt.c
+@@ -270,6 +270,7 @@ int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+ ret = __read_rt_trylock(rwlock);
+ if (ret) {
+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
++ rcu_read_lock();
+ migrate_disable();
+ }
+ return ret;
+@@ -283,6 +284,7 @@ int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+ ret = __write_rt_trylock(rwlock);
+ if (ret) {
+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ rcu_read_lock();
+ migrate_disable();
+ }
+ return ret;
+@@ -293,6 +295,7 @@ void __lockfunc rt_read_lock(rwlock_t *rwlock)
+ {
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __read_rt_lock(rwlock);
++ rcu_read_lock();
+ migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_read_lock);
+@@ -301,6 +304,7 @@ void __lockfunc rt_write_lock(rwlock_t *rwlock)
+ {
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __write_rt_lock(rwlock);
++ rcu_read_lock();
+ migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_write_lock);
+@@ -309,6 +313,7 @@ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+ {
+ rwlock_release(&rwlock->dep_map, _RET_IP_);
+ migrate_enable();
++ rcu_read_unlock();
+ __read_rt_unlock(rwlock);
+ }
+ EXPORT_SYMBOL(rt_read_unlock);
+@@ -317,6 +322,7 @@ void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+ {
+ rwlock_release(&rwlock->dep_map, _RET_IP_);
+ migrate_enable();
++ rcu_read_unlock();
+ __write_rt_unlock(rwlock);
+ }
+ EXPORT_SYMBOL(rt_write_unlock);
+--
+2.19.1
+
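From the caller's side, the behaviour preserved by the patch above looks like the sketch below; struct my_item and example_count() are made up, and 'head' stands for an RCU-protected list whose writers are not serialized by 'lock'.

    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    struct my_item {
            struct list_head node;
    };

    static int example_count(spinlock_t *lock, struct list_head *head)
    {
            struct my_item *it;
            int n = 0;

            spin_lock(lock);        /* on RT this now also enters an RCU read section */
            list_for_each_entry_rcu(it, head, node)
                    n++;
            spin_unlock(lock);      /* ...and leaves it again */
            return n;
    }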
diff --git a/features/rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/features/rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
new file mode 100644
index 00000000..198af701
--- /dev/null
+++ b/features/rt/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
@@ -0,0 +1,165 @@
+From 4c06cc850cb5404dedbdefec937342ce96422992 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 4 Aug 2017 17:40:42 +0200
+Subject: [PATCH 103/191] locking: don't check for __LINUX_SPINLOCK_TYPES_H on
+ -RT archs
+
+Upstream uses arch_spinlock_t within spinlock_t and requests that the
+spinlock_types.h header file be included first.
+On -RT we have the rt_mutex with its raw_lock wait_lock, which needs the
+architecture's spinlock_types.h header file for its definition. However
+we need rt_mutex first because it is used to build the spinlock_t, so
+that check does not work for us.
+Therefore I am dropping that check.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/alpha/include/asm/spinlock_types.h | 4 ----
+ arch/arm/include/asm/spinlock_types.h | 4 ----
+ arch/arm64/include/asm/spinlock_types.h | 4 ----
+ arch/hexagon/include/asm/spinlock_types.h | 4 ----
+ arch/ia64/include/asm/spinlock_types.h | 4 ----
+ arch/powerpc/include/asm/spinlock_types.h | 4 ----
+ arch/s390/include/asm/spinlock_types.h | 4 ----
+ arch/sh/include/asm/spinlock_types.h | 4 ----
+ arch/xtensa/include/asm/spinlock_types.h | 4 ----
+ 9 files changed, 36 deletions(-)
+
+diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h
+index 1d5716bc060b..6883bc952d22 100644
+--- a/arch/alpha/include/asm/spinlock_types.h
++++ b/arch/alpha/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef _ALPHA_SPINLOCK_TYPES_H
+ #define _ALPHA_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
+index 5976958647fe..a37c0803954b 100644
+--- a/arch/arm/include/asm/spinlock_types.h
++++ b/arch/arm/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #define TICKET_SHIFT 16
+
+ typedef struct {
+diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
+index 18782f0c4721..6672b05350b4 100644
+--- a/arch/arm64/include/asm/spinlock_types.h
++++ b/arch/arm64/include/asm/spinlock_types.h
+@@ -5,10 +5,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
+-# error "please don't include this file directly"
+-#endif
+-
+ #include <asm-generic/qspinlock_types.h>
+ #include <asm-generic/qrwlock_types.h>
+
+diff --git a/arch/hexagon/include/asm/spinlock_types.h b/arch/hexagon/include/asm/spinlock_types.h
+index 19d233497ba5..de72fb23016d 100644
+--- a/arch/hexagon/include/asm/spinlock_types.h
++++ b/arch/hexagon/include/asm/spinlock_types.h
+@@ -8,10 +8,6 @@
+ #ifndef _ASM_SPINLOCK_TYPES_H
+ #define _ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
+index 6e345fefcdca..681408d6816f 100644
+--- a/arch/ia64/include/asm/spinlock_types.h
++++ b/arch/ia64/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_IA64_SPINLOCK_TYPES_H
+ #define _ASM_IA64_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
+index c5d742f18021..cc6922a011ba 100644
+--- a/arch/powerpc/include/asm/spinlock_types.h
++++ b/arch/powerpc/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
+ #define _ASM_POWERPC_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ #ifdef CONFIG_PPC_QUEUED_SPINLOCKS
+ #include <asm-generic/qspinlock_types.h>
+ #include <asm-generic/qrwlock_types.h>
+diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
+index cfed272e4fd5..8e28e8176ec8 100644
+--- a/arch/s390/include/asm/spinlock_types.h
++++ b/arch/s390/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ int lock;
+ } __attribute__ ((aligned (4))) arch_spinlock_t;
+diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h
+index e82369f286a2..22ca9a98bbb8 100644
+--- a/arch/sh/include/asm/spinlock_types.h
++++ b/arch/sh/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SH_SPINLOCK_TYPES_H
+ #define __ASM_SH_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
+-# error "please don't include this file directly"
+-#endif
+-
+ typedef struct {
+ volatile unsigned int lock;
+ } arch_spinlock_t;
+diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
+index 64c9389254f1..dc846323b1cd 100644
+--- a/arch/xtensa/include/asm/spinlock_types.h
++++ b/arch/xtensa/include/asm/spinlock_types.h
+@@ -2,10 +2,6 @@
+ #ifndef __ASM_SPINLOCK_TYPES_H
+ #define __ASM_SPINLOCK_TYPES_H
+
+-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
+-# error "please don't include this file directly"
+-#endif
+-
+ #include <asm-generic/qspinlock_types.h>
+ #include <asm-generic/qrwlock_types.h>
+
+--
+2.19.1
+
diff --git a/features/rt/locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch b/features/rt/locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
new file mode 100644
index 00000000..2e0997d6
--- /dev/null
+++ b/features/rt/locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
@@ -0,0 +1,36 @@
+From 50f8ea4c311b4bc5100595c2c43f02754afa2798 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 2 Dec 2015 11:34:07 +0100
+Subject: [PATCH 076/191] locking/rtmutex: Allow rt_mutex_trylock() on
+ PREEMPT_RT
+
+A non-PREEMPT_RT kernel can deadlock on rt_mutex_trylock() in softirq
+context.
+On PREEMPT_RT the softirq context is handled in thread context. This
+avoids the deadlock in the slow path and PI-boosting will be done on the
+correct thread.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 03ffb955b286..f0bc7fcae441 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1866,7 +1866,11 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+
+ int __sched __rt_mutex_trylock(struct rt_mutex *lock)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ if (WARN_ON_ONCE(in_irq() || in_nmi()))
++#else
+ if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
++#endif
+ return 0;
+
+ return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+--
+2.19.1
+
diff --git a/features/rt/locking-rtmutex-Avoid-include-hell.patch b/features/rt/locking-rtmutex-Avoid-include-hell.patch
new file mode 100644
index 00000000..57b13749
--- /dev/null
+++ b/features/rt/locking-rtmutex-Avoid-include-hell.patch
@@ -0,0 +1,29 @@
+From 1fec4e5db16df775436f8ababd922f46ee58e7e0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 29 Jun 2011 20:06:39 +0200
+Subject: [PATCH 069/191] locking/rtmutex: Avoid include hell
+
+Include only the required raw types. This avoids pulling in the
+complete spinlock header which in turn requires rtmutex.h at some point.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rtmutex.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 243fabc2c85f..add1dab27df5 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -15,7 +15,7 @@
+
+ #include <linux/linkage.h>
+ #include <linux/rbtree.h>
+-#include <linux/spinlock_types.h>
++#include <linux/spinlock_types_raw.h>
+
+ extern int max_lock_depth; /* for sysctl */
+
+--
+2.19.1
+
diff --git a/features/rt/locking-rtmutex-Handle-the-various-new-futex-race-co.patch b/features/rt/locking-rtmutex-Handle-the-various-new-futex-race-co.patch
new file mode 100644
index 00000000..12a83eab
--- /dev/null
+++ b/features/rt/locking-rtmutex-Handle-the-various-new-futex-race-co.patch
@@ -0,0 +1,255 @@
+From d8b0c4e14162b18ee6a955f443a6a1b3fa8473e1 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 10 Jun 2011 11:04:15 +0200
+Subject: [PATCH 065/191] locking/rtmutex: Handle the various new futex race
+ conditions
+
+RT opens a few new interesting race conditions in the rtmutex/futex
+combo because the futex hash bucket lock is a 'sleeping' spinlock and
+therefore does not disable preemption.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/futex.c | 78 ++++++++++++++++++++++++++-------
+ kernel/locking/rtmutex.c | 36 ++++++++++++---
+ kernel/locking/rtmutex_common.h | 2 +
+ 3 files changed, 95 insertions(+), 21 deletions(-)
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index e68db7745039..0315333c0587 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2154,6 +2154,16 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ */
+ requeue_pi_wake_futex(this, &key2, hb2);
+ continue;
++ } else if (ret == -EAGAIN) {
++ /*
++ * Waiter was woken by timeout or
++ * signal and has set pi_blocked_on to
++ * PI_WAKEUP_INPROGRESS before we
++ * tried to enqueue it on the rtmutex.
++ */
++ this->pi_state = NULL;
++ put_pi_state(pi_state);
++ continue;
+ } else if (ret) {
+ /*
+ * rt_mutex_start_proxy_lock() detected a
+@@ -3171,7 +3181,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ {
+ struct hrtimer_sleeper timeout, *to;
+ struct rt_mutex_waiter rt_waiter;
+- struct futex_hash_bucket *hb;
++ struct futex_hash_bucket *hb, *hb2;
+ union futex_key key2 = FUTEX_KEY_INIT;
+ struct futex_q q = futex_q_init;
+ int res, ret;
+@@ -3223,20 +3233,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+- spin_lock(&hb->lock);
+- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+- spin_unlock(&hb->lock);
+- if (ret)
+- goto out;
++ /*
++ * On RT we must avoid races with requeue and trying to block
++ * on two mutexes (hb->lock and uaddr2's rtmutex) by
++ * serializing access to pi_blocked_on with pi_lock.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ if (current->pi_blocked_on) {
++ /*
++ * We have been requeued or are in the process of
++ * being requeued.
++ */
++ raw_spin_unlock_irq(&current->pi_lock);
++ } else {
++ /*
++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
++ * prevents a concurrent requeue from moving us to the
++ * uaddr2 rtmutex. After that we can safely acquire
++ * (and possibly block on) hb->lock.
++ */
++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ spin_lock(&hb->lock);
++
++ /*
++ * Clean up pi_blocked_on. We might leak it otherwise
++ * when we succeeded with the hb->lock in the fast
++ * path.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ current->pi_blocked_on = NULL;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
++ spin_unlock(&hb->lock);
++ if (ret)
++ goto out;
++ }
+
+ /*
+- * In order for us to be here, we know our q.key == key2, and since
+- * we took the hb->lock above, we also know that futex_requeue() has
+- * completed and we no longer have to concern ourselves with a wakeup
+- * race with the atomic proxy lock acquisition by the requeue code. The
+- * futex_requeue dropped our key1 reference and incremented our key2
+- * reference count.
++ * In order to be here, we have either been requeued, are in
++ * the process of being requeued, or requeue successfully
++ * acquired uaddr2 on our behalf. If pi_blocked_on was
++ * non-null above, we may be racing with a requeue. Do not
++ * rely on q->lock_ptr to be hb2->lock until after blocking on
++ * hb->lock or hb2->lock. The futex_requeue dropped our key1
++ * reference and incremented our key2 reference count.
+ */
++ hb2 = hash_futex(&key2);
+
+ /* Check if the requeue code acquired the second futex for us. */
+ if (!q.rt_waiter) {
+@@ -3245,14 +3290,16 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (q.pi_state && (q.pi_state->owner != current)) {
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+ */
+ put_pi_state(q.pi_state);
+- spin_unlock(q.lock_ptr);
++ spin_unlock(&hb2->lock);
++
+ /*
+ * Adjust the return value. It's either -EFAULT or
+ * success (1) but the caller expects 0 for success.
+@@ -3271,7 +3318,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ pi_mutex = &q.pi_state->pi_mutex;
+ ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
+
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
+ ret = 0;
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index c9a37b6d6ea0..7fff3b88b96b 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -136,6 +136,11 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+ }
+
++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
++{
++ return waiter && waiter != PI_WAKEUP_INPROGRESS;
++}
++
+ /*
+ * We can speed up the acquire/release, if there's no debugging state to be
+ * set up.
+@@ -360,7 +365,8 @@ int max_lock_depth = 1024;
+
+ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+ {
+- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
++ return rt_mutex_real_waiter(p->pi_blocked_on) ?
++ p->pi_blocked_on->lock : NULL;
+ }
+
+ /*
+@@ -496,7 +502,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * reached or the state of the chain has changed while we
+ * dropped the locks.
+ */
+- if (!waiter)
++ if (!rt_mutex_real_waiter(waiter))
+ goto out_unlock_pi;
+
+ /*
+@@ -929,6 +935,22 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ return -EDEADLK;
+
+ raw_spin_lock(&task->pi_lock);
++ /*
++ * In the case of futex requeue PI, this will be a proxy
++	 * lock. The task will wake unaware that it is enqueued on
++ * this lock. Avoid blocking on two locks and corrupting
++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS
++ * flag. futex_wait_requeue_pi() sets this when it wakes up
++ * before requeue (due to a signal or timeout). Do not enqueue
++ * the task if PI_WAKEUP_INPROGRESS is set.
++ */
++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
++ raw_spin_unlock(&task->pi_lock);
++ return -EAGAIN;
++ }
++
++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
++
+ waiter->task = task;
+ waiter->lock = lock;
+ waiter->prio = task->prio;
+@@ -952,7 +974,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ rt_mutex_enqueue_pi(owner, waiter);
+
+ rt_mutex_adjust_prio(owner);
+- if (owner->pi_blocked_on)
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ chain_walk = 1;
+ } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
+ chain_walk = 1;
+@@ -1048,7 +1070,7 @@ static void remove_waiter(struct rt_mutex *lock,
+ {
+ bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
+ struct task_struct *owner = rt_mutex_owner(lock);
+- struct rt_mutex *next_lock;
++ struct rt_mutex *next_lock = NULL;
+
+ lockdep_assert_held(&lock->wait_lock);
+
+@@ -1074,7 +1096,8 @@ static void remove_waiter(struct rt_mutex *lock,
+ rt_mutex_adjust_prio(owner);
+
+ /* Store the lock on which owner is blocked or NULL */
+- next_lock = task_blocked_on_lock(owner);
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
++ next_lock = task_blocked_on_lock(owner);
+
+ raw_spin_unlock(&owner->pi_lock);
+
+@@ -1110,7 +1133,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+- if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
++ if (!rt_mutex_real_waiter(waiter) ||
++ rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index b1455dc2366f..096b16cfb096 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -125,6 +125,8 @@ enum rtmutex_chainwalk {
+ /*
+ * PI-futex support (proxy locking functions, etc.):
+ */
++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
++
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+--
+2.19.1
+
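The requeue handling above hinges on PI_WAKEUP_INPROGRESS, a small constant stored in the pi_blocked_on pointer so other code can tell "wakeup in progress" apart from NULL and from a real waiter. The following is a minimal standalone sketch of that sentinel-pointer pattern; struct waiter, struct task and real_waiter() are invented names for illustration only.

/* Sentinel-pointer sketch: a pointer field distinguishes "not blocked"
 * (NULL), "wakeup in progress" (sentinel) and "really blocked"
 * (a valid waiter). */
#include <stdio.h>
#include <stdbool.h>

struct waiter { int prio; };

/* small integer cast to a pointer: never a valid waiter address */
#define WAKEUP_INPROGRESS ((struct waiter *)1)

struct task {
	struct waiter *blocked_on;
};

static bool real_waiter(const struct waiter *w)
{
	return w && w != WAKEUP_INPROGRESS;
}

int main(void)
{
	struct waiter w = { .prio = 10 };
	struct task t = { .blocked_on = NULL };

	printf("NULL        -> real? %d\n", real_waiter(t.blocked_on));
	t.blocked_on = WAKEUP_INPROGRESS;  /* woken early, requeue must back off */
	printf("sentinel    -> real? %d\n", real_waiter(t.blocked_on));
	t.blocked_on = &w;                 /* genuinely enqueued on a lock */
	printf("real waiter -> real? %d\n", real_waiter(t.blocked_on));
	return 0;
}
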
diff --git a/features/rt/locking-rtmutex-Make-lock_killable-work.patch b/features/rt/locking-rtmutex-Make-lock_killable-work.patch
new file mode 100644
index 00000000..f68466c3
--- /dev/null
+++ b/features/rt/locking-rtmutex-Make-lock_killable-work.patch
@@ -0,0 +1,49 @@
+From fee27667d6f81e0cd5a7e9ca9fe1422c613775d0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 1 Apr 2017 12:50:59 +0200
+Subject: [PATCH 067/191] locking/rtmutex: Make lock_killable work
+
+Locking an rt mutex killable does not work because signal handling is
+restricted to TASK_INTERRUPTIBLE.
+
+Use signal_pending_state() unconditionally.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 19 +++++++------------
+ 1 file changed, 7 insertions(+), 12 deletions(-)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 4d2a57e8dcb9..b346dbc37d6d 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1179,18 +1179,13 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ if (try_to_take_rt_mutex(lock, current, waiter))
+ break;
+
+- /*
+- * TASK_INTERRUPTIBLE checks for signals and
+- * timeout. Ignored otherwise.
+- */
+- if (likely(state == TASK_INTERRUPTIBLE)) {
+- /* Signal pending? */
+- if (signal_pending(current))
+- ret = -EINTR;
+- if (timeout && !timeout->task)
+- ret = -ETIMEDOUT;
+- if (ret)
+- break;
++ if (timeout && !timeout->task) {
++ ret = -ETIMEDOUT;
++ break;
++ }
++ if (signal_pending_state(state, current)) {
++ ret = -EINTR;
++ break;
+ }
+
+ raw_spin_unlock_irq(&lock->wait_lock);
+--
+2.19.1
+
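signal_pending_state() lets the wait loop above treat signals uniformly: whether a pending signal interrupts the sleep depends on the sleep state. Below is a simplified standalone sketch of that decision; the TASK_* values and struct task are stand-ins, not the kernel definitions (TASK_KILLABLE in particular is reduced to a single bit here).

/* Sketch of the state-aware signal check: interruptible sleeps wake on
 * any signal, killable sleeps only on a fatal one, uninterruptible
 * sleeps on none. */
#include <stdio.h>
#include <stdbool.h>

#define TASK_UNINTERRUPTIBLE 0x0
#define TASK_INTERRUPTIBLE   0x1
#define TASK_KILLABLE        0x2  /* simplified stand-in */

struct task {
	bool signal_pending;
	bool fatal_signal_pending;
};

static bool signal_pending_state(unsigned int state, const struct task *t)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_KILLABLE)))
		return false;                     /* uninterruptible sleep */
	if (!t->signal_pending)
		return false;
	return (state & TASK_INTERRUPTIBLE) || t->fatal_signal_pending;
}

int main(void)
{
	struct task t = { .signal_pending = true, .fatal_signal_pending = false };

	printf("interruptible: %d\n", signal_pending_state(TASK_INTERRUPTIBLE, &t));
	printf("killable:      %d\n", signal_pending_state(TASK_KILLABLE, &t));
	t.fatal_signal_pending = true;
	printf("killable+kill: %d\n", signal_pending_state(TASK_KILLABLE, &t));
	return 0;
}
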
diff --git a/features/rt/locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch b/features/rt/locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch
new file mode 100644
index 00000000..61fe6760
--- /dev/null
+++ b/features/rt/locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch
@@ -0,0 +1,59 @@
+From d3f167aba99034929675a15535bcbd7deccf97bc Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 29 Sep 2020 16:32:49 +0200
+Subject: [PATCH 063/191] locking/rtmutex: Move rt_mutex_init() outside of
+ CONFIG_DEBUG_RT_MUTEXES
+
+rt_mutex_init() only initializes lockdep if CONFIG_DEBUG_RT_MUTEXES is
+enabled. The static initializer (DEFINE_RT_MUTEX) does not have such a
+restriction.
+
+Move rt_mutex_init() outside of CONFIG_DEBUG_RT_MUTEXES.
+Move the remaining functions in this CONFIG_DEBUG_RT_MUTEXES block to
+the upper block.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 12 +++---------
+ 1 file changed, 3 insertions(+), 9 deletions(-)
+
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 88a0ba806066..2dc10b582d4a 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -43,6 +43,7 @@ struct hrtimer_sleeper;
+ extern int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len);
+ extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
++ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+ #else
+ static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len)
+@@ -50,22 +51,15 @@ struct hrtimer_sleeper;
+ return 0;
+ }
+ # define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
++# define rt_mutex_debug_task_free(t) do { } while (0)
+ #endif
+
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+-
+-# define rt_mutex_init(mutex) \
++#define rt_mutex_init(mutex) \
+ do { \
+ static struct lock_class_key __key; \
+ __rt_mutex_init(mutex, __func__, &__key); \
+ } while (0)
+
+- extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+-#else
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
+-# define rt_mutex_debug_task_free(t) do { } while (0)
+-#endif
+-
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+ , .dep_map = { .name = #mutexname }
+--
+2.19.1
+
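The rt_mutex_init() macro being moved works by declaring a static struct lock_class_key inside the macro body, so every textual call site gets its own key address for lockdep to classify the lock. A standalone sketch of that per-call-site trick follows; the demo_lock names are invented for illustration.

/* Each invocation of the macro declares its own static key, so every
 * call site ends up with a distinct key address. */
#include <stdio.h>

struct lock_class_key { char dummy; };

struct demo_lock {
	const char *name;
	struct lock_class_key *key;
};

static void __demo_lock_init(struct demo_lock *l, const char *name,
			     struct lock_class_key *key)
{
	l->name = name;
	l->key  = key;
}

#define demo_lock_init(l)				\
do {							\
	static struct lock_class_key __key;		\
	__demo_lock_init((l), #l, &__key);		\
} while (0)

int main(void)
{
	struct demo_lock a, b;

	demo_lock_init(&a);	/* call site 1 -> key address 1 */
	demo_lock_init(&b);	/* call site 2 -> key address 2 */

	printf("%s key=%p\n", a.name, (void *)a.key);
	printf("%s key=%p\n", b.name, (void *)b.key);
	return 0;
}
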
diff --git a/features/rt/locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch b/features/rt/locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
new file mode 100644
index 00000000..3d8c655e
--- /dev/null
+++ b/features/rt/locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
@@ -0,0 +1,144 @@
+From e5c1aab430c174bc70abbda6936ea83bd7eb4db6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 12 Oct 2017 16:14:22 +0200
+Subject: [PATCH 072/191] locking/rtmutex: Provide rt_mutex_slowlock_locked()
+
+This is the inner part of rt_mutex_slowlock(), required for rwsem-rt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 67 +++++++++++++++++++--------------
+ kernel/locking/rtmutex_common.h | 7 ++++
+ 2 files changed, 45 insertions(+), 29 deletions(-)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index b346dbc37d6d..670c4a577322 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1216,35 +1216,16 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ }
+ }
+
+-/*
+- * Slow path lock function:
+- */
+-static int __sched
+-rt_mutex_slowlock(struct rt_mutex *lock, int state,
+- struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk)
++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
++ struct hrtimer_sleeper *timeout,
++ enum rtmutex_chainwalk chwalk,
++ struct rt_mutex_waiter *waiter)
+ {
+- struct rt_mutex_waiter waiter;
+- unsigned long flags;
+- int ret = 0;
+-
+- rt_mutex_init_waiter(&waiter);
+-
+- /*
+- * Technically we could use raw_spin_[un]lock_irq() here, but this can
+- * be called in early boot if the cmpxchg() fast path is disabled
+- * (debug, no architecture support). In this case we will acquire the
+- * rtmutex with lock->wait_lock held. But we cannot unconditionally
+- * enable interrupts in that early boot case. So we need to use the
+- * irqsave/restore variants.
+- */
+- raw_spin_lock_irqsave(&lock->wait_lock, flags);
++ int ret;
+
+ /* Try to acquire the lock again: */
+- if (try_to_take_rt_mutex(lock, current, NULL)) {
+- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ if (try_to_take_rt_mutex(lock, current, NULL))
+ return 0;
+- }
+
+ set_current_state(state);
+
+@@ -1252,16 +1233,16 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ if (unlikely(timeout))
+ hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+
+- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
++ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
+
+ if (likely(!ret))
+ /* sleep on the mutex */
+- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
++ ret = __rt_mutex_slowlock(lock, state, timeout, waiter);
+
+ if (unlikely(ret)) {
+ __set_current_state(TASK_RUNNING);
+- remove_waiter(lock, &waiter);
+- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++ remove_waiter(lock, waiter);
++ rt_mutex_handle_deadlock(ret, chwalk, waiter);
+ }
+
+ /*
+@@ -1269,6 +1250,34 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ * unconditionally. We might have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);
++ return ret;
++}
++
++/*
++ * Slow path lock function:
++ */
++static int __sched
++rt_mutex_slowlock(struct rt_mutex *lock, int state,
++ struct hrtimer_sleeper *timeout,
++ enum rtmutex_chainwalk chwalk)
++{
++ struct rt_mutex_waiter waiter;
++ unsigned long flags;
++ int ret = 0;
++
++ rt_mutex_init_waiter(&waiter);
++
++ /*
++ * Technically we could use raw_spin_[un]lock_irq() here, but this can
++ * be called in early boot if the cmpxchg() fast path is disabled
++ * (debug, no architecture support). In this case we will acquire the
++ * rtmutex with lock->wait_lock held. But we cannot unconditionally
++ * enable interrupts in that early boot case. So we need to use the
++ * irqsave/restore variants.
++ */
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
++ ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, &waiter);
+
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 37cd6b3bf6f4..b5a2affa59d5 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -15,6 +15,7 @@
+
+ #include <linux/rtmutex.h>
+ #include <linux/sched/wake_q.h>
++#include <linux/sched/debug.h>
+
+ /*
+ * This is the control structure for tasks blocked on a rt_mutex,
+@@ -153,6 +154,12 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
+ struct wake_q_head *wqh);
+
+ extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
++/* RW semaphore special interface */
++
++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
++ struct hrtimer_sleeper *timeout,
++ enum rtmutex_chainwalk chwalk,
++ struct rt_mutex_waiter *waiter);
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # include "rtmutex-debug.h"
+--
+2.19.1
+
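The split above follows a common pattern: an outer function takes wait_lock with irqsave and an inner *_locked() helper assumes the lock is already held, so the RT rwsem code can call the inner part under its own locking. A standalone sketch of the pattern with pthreads; the counter type and function names are invented (build with -pthread).

/* Outer/inner "_locked" split: the inner function assumes the lock is
 * held, the outer one is the convenience wrapper. */
#include <pthread.h>
#include <stdio.h>

struct counter {
	pthread_mutex_t lock;
	long value;
};

/* inner part: caller must hold c->lock */
static long counter_add_locked(struct counter *c, long n)
{
	c->value += n;
	return c->value;
}

/* outer part: takes and releases the lock around the inner part */
static long counter_add(struct counter *c, long n)
{
	long v;

	pthread_mutex_lock(&c->lock);
	v = counter_add_locked(c, n);
	pthread_mutex_unlock(&c->lock);
	return v;
}

int main(void)
{
	struct counter c = { .lock = PTHREAD_MUTEX_INITIALIZER, .value = 0 };

	/* a caller that already holds the lock uses the inner variant */
	pthread_mutex_lock(&c.lock);
	counter_add_locked(&c, 5);
	pthread_mutex_unlock(&c.lock);

	/* everyone else uses the wrapper */
	printf("value = %ld\n", counter_add(&c, 37));
	return 0;
}
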
diff --git a/features/rt/locking-rtmutex-Remove-cruft.patch b/features/rt/locking-rtmutex-Remove-cruft.patch
new file mode 100644
index 00000000..e2db5d4c
--- /dev/null
+++ b/features/rt/locking-rtmutex-Remove-cruft.patch
@@ -0,0 +1,98 @@
+From e859f18657493031c5e83651fea4298b1e5dd97b Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 29 Sep 2020 15:21:17 +0200
+Subject: [PATCH 061/191] locking/rtmutex: Remove cruft
+
+Most of this has been around since the very beginning. I'm not sure if
+it was used while the rtmutex deadlock tester existed, but today it only
+seems to waste memory:
+- save_state: No users.
+- name: Assigned and printed if a deadlock was detected. I'm keeping it
+ but want to point out that lockdep has the same information.
+- file + line: Printed if ::name was NULL. This is only used for
+ in-kernel locks, so ::name shouldn't be NULL and then ::file and
+ ::line aren't used.
+- magic: Assigned to NULL by rt_mutex_destroy().
+
+Remove members of rt_mutex which are not used.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 7 ++-----
+ kernel/locking/rtmutex-debug.c | 7 +------
+ kernel/locking/rtmutex.c | 3 ---
+ kernel/locking/rtmutex_common.h | 1 -
+ 4 files changed, 3 insertions(+), 15 deletions(-)
+
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 6fd615a0eea9..16f974a22f51 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -32,10 +32,7 @@ struct rt_mutex {
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+- int save_state;
+- const char *name, *file;
+- int line;
+- void *magic;
++ const char *name;
+ #endif
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+@@ -60,7 +57,7 @@ struct hrtimer_sleeper;
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+- , .name = #mutexname, .file = __FILE__, .line = __LINE__
++ , .name = #mutexname
+
+ # define rt_mutex_init(mutex) \
+ do { \
+diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
+index 36e69100e8e0..7e411b946d4c 100644
+--- a/kernel/locking/rtmutex-debug.c
++++ b/kernel/locking/rtmutex-debug.c
+@@ -42,12 +42,7 @@ static void printk_task(struct task_struct *p)
+
+ static void printk_lock(struct rt_mutex *lock, int print_owner)
+ {
+- if (lock->name)
+- printk(" [%p] {%s}\n",
+- lock, lock->name);
+- else
+- printk(" [%p] {%s:%d}\n",
+- lock, lock->file, lock->line);
++ printk(" [%p] {%s}\n", lock, lock->name);
+
+ if (print_owner && rt_mutex_owner(lock)) {
+ printk(".. ->owner: %p\n", lock->owner);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 48fff6437901..170e160fc0b5 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1640,9 +1640,6 @@ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
+ void rt_mutex_destroy(struct rt_mutex *lock)
+ {
+ WARN_ON(rt_mutex_is_locked(lock));
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+- lock->magic = NULL;
+-#endif
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index ca6fb489007b..e6913103d7ff 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -30,7 +30,6 @@ struct rt_mutex_waiter {
+ struct task_struct *task;
+ struct rt_mutex *lock;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+- unsigned long ip;
+ struct pid *deadlock_task_pid;
+ struct rt_mutex *deadlock_lock;
+ #endif
+--
+2.19.1
+
diff --git a/features/rt/locking-rtmutex-Remove-output-from-deadlock-detector.patch b/features/rt/locking-rtmutex-Remove-output-from-deadlock-detector.patch
new file mode 100644
index 00000000..fbcd6d07
--- /dev/null
+++ b/features/rt/locking-rtmutex-Remove-output-from-deadlock-detector.patch
@@ -0,0 +1,311 @@
+From 485d93ce4bd0dfae3a0324cc8b0257ae3de02d01 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 29 Sep 2020 16:05:11 +0200
+Subject: [PATCH 062/191] locking/rtmutex: Remove output from deadlock
+ detector.
+
+In commit
+ f5694788ad8da ("rt_mutex: Add lockdep annotations")
+
+rtmutex gained lockdep annotations for rt_mutex_lock() and related
+functions.
+lockdep will see the locking order and may complain about a deadlock
+before rtmutex's own mechanism gets a chance to detect it.
+The rtmutex deadlock detector only complains about locks acquired with
+RT_MUTEX_MIN_CHAINWALK while a waiter is pending. That means it
+works only for in-kernel locks because the futex interface always uses
+RT_MUTEX_FULL_CHAINWALK.
+The requirement for an active waiter limits the detector to actual
+deadlocks and makes it impossible to report potential deadlocks the way
+lockdep does.
+It looks like lockdep is better suited for reporting deadlocks.
+
+Remove rtmutex' debug print on deadlock detection.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 7 ---
+ kernel/locking/rtmutex-debug.c | 97 ---------------------------------
+ kernel/locking/rtmutex-debug.h | 11 ----
+ kernel/locking/rtmutex.c | 9 ---
+ kernel/locking/rtmutex.h | 7 ---
+ kernel/locking/rtmutex_common.h | 4 --
+ 6 files changed, 135 deletions(-)
+
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 16f974a22f51..88a0ba806066 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -31,9 +31,6 @@ struct rt_mutex {
+ raw_spinlock_t wait_lock;
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+- const char *name;
+-#endif
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ #endif
+@@ -56,8 +53,6 @@ struct hrtimer_sleeper;
+ #endif
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+- , .name = #mutexname
+
+ # define rt_mutex_init(mutex) \
+ do { \
+@@ -67,7 +62,6 @@ do { \
+
+ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+ #else
+-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+ # define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
+ # define rt_mutex_debug_task_free(t) do { } while (0)
+ #endif
+@@ -83,7 +77,6 @@ do { \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .waiters = RB_ROOT_CACHED \
+ , .owner = NULL \
+- __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
+
+ #define DEFINE_RT_MUTEX(mutexname) \
+diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
+index 7e411b946d4c..fb150100335f 100644
+--- a/kernel/locking/rtmutex-debug.c
++++ b/kernel/locking/rtmutex-debug.c
+@@ -32,105 +32,12 @@
+
+ #include "rtmutex_common.h"
+
+-static void printk_task(struct task_struct *p)
+-{
+- if (p)
+- printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
+- else
+- printk("<none>");
+-}
+-
+-static void printk_lock(struct rt_mutex *lock, int print_owner)
+-{
+- printk(" [%p] {%s}\n", lock, lock->name);
+-
+- if (print_owner && rt_mutex_owner(lock)) {
+- printk(".. ->owner: %p\n", lock->owner);
+- printk(".. held by: ");
+- printk_task(rt_mutex_owner(lock));
+- printk("\n");
+- }
+-}
+-
+ void rt_mutex_debug_task_free(struct task_struct *task)
+ {
+ DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
+ DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
+ }
+
+-/*
+- * We fill out the fields in the waiter to store the information about
+- * the deadlock. We print when we return. act_waiter can be NULL in
+- * case of a remove waiter operation.
+- */
+-void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+- struct rt_mutex_waiter *act_waiter,
+- struct rt_mutex *lock)
+-{
+- struct task_struct *task;
+-
+- if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
+- return;
+-
+- task = rt_mutex_owner(act_waiter->lock);
+- if (task && task != current) {
+- act_waiter->deadlock_task_pid = get_pid(task_pid(task));
+- act_waiter->deadlock_lock = lock;
+- }
+-}
+-
+-void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
+-{
+- struct task_struct *task;
+-
+- if (!waiter->deadlock_lock || !debug_locks)
+- return;
+-
+- rcu_read_lock();
+- task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
+- if (!task) {
+- rcu_read_unlock();
+- return;
+- }
+-
+- if (!debug_locks_off()) {
+- rcu_read_unlock();
+- return;
+- }
+-
+- pr_warn("\n");
+- pr_warn("============================================\n");
+- pr_warn("WARNING: circular locking deadlock detected!\n");
+- pr_warn("%s\n", print_tainted());
+- pr_warn("--------------------------------------------\n");
+- printk("%s/%d is deadlocking current task %s/%d\n\n",
+- task->comm, task_pid_nr(task),
+- current->comm, task_pid_nr(current));
+-
+- printk("\n1) %s/%d is trying to acquire this lock:\n",
+- current->comm, task_pid_nr(current));
+- printk_lock(waiter->lock, 1);
+-
+- printk("\n2) %s/%d is blocked on this lock:\n",
+- task->comm, task_pid_nr(task));
+- printk_lock(waiter->deadlock_lock, 1);
+-
+- debug_show_held_locks(current);
+- debug_show_held_locks(task);
+-
+- printk("\n%s/%d's [blocked] stackdump:\n\n",
+- task->comm, task_pid_nr(task));
+- show_stack(task, NULL, KERN_DEFAULT);
+- printk("\n%s/%d's [current] stackdump:\n\n",
+- current->comm, task_pid_nr(current));
+- dump_stack();
+- debug_show_all_locks();
+- rcu_read_unlock();
+-
+- printk("[ turning off deadlock detection."
+- "Please report this trace. ]\n\n");
+-}
+-
+ void debug_rt_mutex_lock(struct rt_mutex *lock)
+ {
+ }
+@@ -153,12 +60,10 @@ void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
+ void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+ {
+ memset(waiter, 0x11, sizeof(*waiter));
+- waiter->deadlock_task_pid = NULL;
+ }
+
+ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
+ {
+- put_pid(waiter->deadlock_task_pid);
+ memset(waiter, 0x22, sizeof(*waiter));
+ }
+
+@@ -168,10 +73,8 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_cl
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+- lock->name = name;
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+ #endif
+ }
+-
+diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
+index fc549713bba3..659e93e256c6 100644
+--- a/kernel/locking/rtmutex-debug.h
++++ b/kernel/locking/rtmutex-debug.h
+@@ -18,20 +18,9 @@ extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
+ extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
+ struct task_struct *powner);
+ extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
+-extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+- struct rt_mutex_waiter *waiter,
+- struct rt_mutex *lock);
+-extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
+-# define debug_rt_mutex_reset_waiter(w) \
+- do { (w)->deadlock_lock = NULL; } while (0)
+
+ static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+ enum rtmutex_chainwalk walk)
+ {
+ return (waiter != NULL);
+ }
+-
+-static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+-{
+- debug_rt_mutex_print_deadlock(w);
+-}
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 170e160fc0b5..5e04d7a247d9 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -579,7 +579,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * walk, we detected a deadlock.
+ */
+ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+- debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
+ raw_spin_unlock(&lock->wait_lock);
+ ret = -EDEADLK;
+ goto out_unlock_pi;
+@@ -1171,8 +1170,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+- debug_rt_mutex_print_deadlock(waiter);
+-
+ schedule();
+
+ raw_spin_lock_irq(&lock->wait_lock);
+@@ -1193,10 +1190,6 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ if (res != -EDEADLOCK || detect_deadlock)
+ return;
+
+- /*
+- * Yell lowdly and stop the task right here.
+- */
+- rt_mutex_print_deadlock(w);
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+@@ -1750,8 +1743,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ ret = 0;
+ }
+
+- debug_rt_mutex_print_deadlock(waiter);
+-
+ return ret;
+ }
+
+diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
+index 732f96abf462..338ccd29119a 100644
+--- a/kernel/locking/rtmutex.h
++++ b/kernel/locking/rtmutex.h
+@@ -19,15 +19,8 @@
+ #define debug_rt_mutex_proxy_unlock(l) do { } while (0)
+ #define debug_rt_mutex_unlock(l) do { } while (0)
+ #define debug_rt_mutex_init(m, n, k) do { } while (0)
+-#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
+-#define debug_rt_mutex_print_deadlock(w) do { } while (0)
+ #define debug_rt_mutex_reset_waiter(w) do { } while (0)
+
+-static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+-{
+- WARN(1, "rtmutex deadlock detected\n");
+-}
+-
+ static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w,
+ enum rtmutex_chainwalk walk)
+ {
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index e6913103d7ff..b1455dc2366f 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -29,10 +29,6 @@ struct rt_mutex_waiter {
+ struct rb_node pi_tree_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+- struct pid *deadlock_task_pid;
+- struct rt_mutex *deadlock_lock;
+-#endif
+ int prio;
+ u64 deadline;
+ };
+--
+2.19.1
+
diff --git a/features/rt/locking-rtmutex-Remove-rt_mutex_timed_lock.patch b/features/rt/locking-rtmutex-Remove-rt_mutex_timed_lock.patch
new file mode 100644
index 00000000..33f0f4da
--- /dev/null
+++ b/features/rt/locking-rtmutex-Remove-rt_mutex_timed_lock.patch
@@ -0,0 +1,97 @@
+From 0f2d138c83afbe1bb580d0d1ab2e50418bdb7e6e Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 7 Oct 2020 12:11:33 +0200
+Subject: [PATCH 064/191] locking/rtmutex: Remove rt_mutex_timed_lock()
+
+rt_mutex_timed_lock() has no callers since commit
+ c051b21f71d1f ("rtmutex: Confine deadlock logic to futex")
+
+Remove rt_mutex_timed_lock().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 3 ---
+ kernel/locking/rtmutex.c | 46 ----------------------------------------
+ 2 files changed, 49 deletions(-)
+
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index 2dc10b582d4a..243fabc2c85f 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -99,9 +99,6 @@ extern void rt_mutex_lock(struct rt_mutex *lock);
+ #endif
+
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+-extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+- struct hrtimer_sleeper *timeout);
+-
+ extern int rt_mutex_trylock(struct rt_mutex *lock);
+
+ extern void rt_mutex_unlock(struct rt_mutex *lock);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 5e04d7a247d9..c9a37b6d6ea0 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1387,21 +1387,6 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
+ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+ }
+
+-static inline int
+-rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+- struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk,
+- int (*slowfn)(struct rt_mutex *lock, int state,
+- struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk))
+-{
+- if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+- return 0;
+-
+- return slowfn(lock, state, timeout, chwalk);
+-}
+-
+ static inline int
+ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ int (*slowfn)(struct rt_mutex *lock))
+@@ -1509,37 +1494,6 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+ return __rt_mutex_slowtrylock(lock);
+ }
+
+-/**
+- * rt_mutex_timed_lock - lock a rt_mutex interruptible
+- * the timeout structure is provided
+- * by the caller
+- *
+- * @lock: the rt_mutex to be locked
+- * @timeout: timeout structure or NULL (no timeout)
+- *
+- * Returns:
+- * 0 on success
+- * -EINTR when interrupted by a signal
+- * -ETIMEDOUT when the timeout expired
+- */
+-int
+-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
+-{
+- int ret;
+-
+- might_sleep();
+-
+- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+- ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+- RT_MUTEX_MIN_CHAINWALK,
+- rt_mutex_slowlock);
+- if (ret)
+- mutex_release(&lock->dep_map, _RET_IP_);
+-
+- return ret;
+-}
+-EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+-
+ /**
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+--
+2.19.1
+
diff --git a/features/rt/locking-rtmutex-Use-custom-scheduling-function-for-s.patch b/features/rt/locking-rtmutex-Use-custom-scheduling-function-for-s.patch
new file mode 100644
index 00000000..3ba23b2f
--- /dev/null
+++ b/features/rt/locking-rtmutex-Use-custom-scheduling-function-for-s.patch
@@ -0,0 +1,242 @@
+From 7e9de9093f6ab9c69a00c038c12e21054214f301 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 6 Oct 2020 13:07:17 +0200
+Subject: [PATCH 082/191] locking/rtmutex: Use custom scheduling function for
+ spin-schedule()
+
+PREEMPT_RT builds the rwsem, mutex, spinlock and rwlock typed locks on
+top of an rtmutex. While blocked, task->pi_blocked_on is set
+(tsk_is_pi_blocked()) and the task needs to schedule away while waiting.
+
+The schedule path must distinguish between blocking on a regular
+sleeping lock (rwsem and mutex) and an RT-only sleeping lock (spinlock
+and rwlock):
+- rwsem and mutex must flush block requests (blk_schedule_flush_plug())
+ even if blocked on a lock. This cannot deadlock because this also
+ happens for non-RT.
+ There should be a warning if the scheduling point is within an RCU
+ read-side section.
+
+- spinlock and rwlock must not flush block requests. This would deadlock
+ if the callback attempted to acquire a lock which is already held.
+ As when being preempted, there should be no warning if the
+ scheduling point is within an RCU read-side section.
+
+Add preempt_schedule_lock() which is invoked if scheduling is required
+while blocking on a PREEMPT_RT-only sleeping lock.
+Remove tsk_is_pi_blocked() from the scheduler path which is no longer
+needed with the additional scheduler entry point.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/include/asm/preempt.h | 3 +++
+ arch/x86/include/asm/preempt.h | 3 +++
+ include/asm-generic/preempt.h | 3 +++
+ include/linux/sched/rt.h | 8 --------
+ kernel/locking/rtmutex.c | 2 +-
+ kernel/locking/rwlock-rt.c | 2 +-
+ kernel/sched/core.c | 32 +++++++++++++++++++++-----------
+ 7 files changed, 32 insertions(+), 21 deletions(-)
+
+diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
+index 80e946b2abee..f06a23898540 100644
+--- a/arch/arm64/include/asm/preempt.h
++++ b/arch/arm64/include/asm/preempt.h
+@@ -81,6 +81,9 @@ static inline bool should_resched(int preempt_offset)
+
+ #ifdef CONFIG_PREEMPTION
+ void preempt_schedule(void);
++#ifdef CONFIG_PREEMPT_RT
++void preempt_schedule_lock(void);
++#endif
+ #define __preempt_schedule() preempt_schedule()
+ void preempt_schedule_notrace(void);
+ #define __preempt_schedule_notrace() preempt_schedule_notrace()
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index f8cb8af4de5c..7e0358f99d22 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -104,6 +104,9 @@ static __always_inline bool should_resched(int preempt_offset)
+ }
+
+ #ifdef CONFIG_PREEMPTION
++#ifdef CONFIG_PREEMPT_RT
++ extern void preempt_schedule_lock(void);
++#endif
+
+ extern asmlinkage void preempt_schedule(void);
+ extern asmlinkage void preempt_schedule_thunk(void);
+diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
+index d683f5e6d791..71c1535db56a 100644
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -79,6 +79,9 @@ static __always_inline bool should_resched(int preempt_offset)
+ }
+
+ #ifdef CONFIG_PREEMPTION
++#ifdef CONFIG_PREEMPT_RT
++extern void preempt_schedule_lock(void);
++#endif
+ extern asmlinkage void preempt_schedule(void);
+ #define __preempt_schedule() preempt_schedule()
+ extern asmlinkage void preempt_schedule_notrace(void);
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index e5af028c08b4..994c25640e15 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -39,20 +39,12 @@ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
+ }
+ extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
+-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+-{
+- return tsk->pi_blocked_on != NULL;
+-}
+ #else
+ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+ {
+ return NULL;
+ }
+ # define rt_mutex_adjust_pi(p) do { } while (0)
+-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+-{
+- return false;
+-}
+ #endif
+
+ extern void normalize_rt_tasks(void);
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 3fc947dd2e60..4cd9d6c4cd68 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1049,7 +1049,7 @@ void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
+- schedule();
++ preempt_schedule_lock();
+
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c
+index 64c4f5728b11..948d10214ee2 100644
+--- a/kernel/locking/rwlock-rt.c
++++ b/kernel/locking/rwlock-rt.c
+@@ -211,7 +211,7 @@ static void __write_rt_lock(struct rt_rw_lock *lock)
+ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+
+ if (atomic_read(&lock->readers) != 0)
+- schedule();
++ preempt_schedule_lock();
+
+ raw_spin_lock_irqsave(&m->wait_lock, flags);
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 46e7db92b343..59fb8b18799f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5004,7 +5004,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ *
+ * WARNING: must be called with preemption disabled!
+ */
+-static void __sched notrace __schedule(bool preempt)
++static void __sched notrace __schedule(bool preempt, bool spinning_lock)
+ {
+ struct task_struct *prev, *next;
+ unsigned long *switch_count;
+@@ -5057,7 +5057,7 @@ static void __sched notrace __schedule(bool preempt)
+ * - ptrace_{,un}freeze_traced() can change ->state underneath us.
+ */
+ prev_state = prev->state;
+- if (!preempt && prev_state) {
++ if ((!preempt || spinning_lock) && prev_state) {
+ if (signal_pending_state(prev_state, prev)) {
+ prev->state = TASK_RUNNING;
+ } else {
+@@ -5141,7 +5141,7 @@ void __noreturn do_task_dead(void)
+ /* Tell freezer to ignore us: */
+ current->flags |= PF_NOFREEZE;
+
+- __schedule(false);
++ __schedule(false, false);
+ BUG();
+
+ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
+@@ -5174,9 +5174,6 @@ static inline void sched_submit_work(struct task_struct *tsk)
+ preempt_enable_no_resched();
+ }
+
+- if (tsk_is_pi_blocked(tsk))
+- return;
+-
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+@@ -5202,7 +5199,7 @@ asmlinkage __visible void __sched schedule(void)
+ sched_submit_work(tsk);
+ do {
+ preempt_disable();
+- __schedule(false);
++ __schedule(false, false);
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
+ sched_update_worker(tsk);
+@@ -5230,7 +5227,7 @@ void __sched schedule_idle(void)
+ */
+ WARN_ON_ONCE(current->state);
+ do {
+- __schedule(false);
++ __schedule(false, false);
+ } while (need_resched());
+ }
+
+@@ -5283,7 +5280,7 @@ static void __sched notrace preempt_schedule_common(void)
+ */
+ preempt_disable_notrace();
+ preempt_latency_start(1);
+- __schedule(true);
++ __schedule(true, false);
+ preempt_latency_stop(1);
+ preempt_enable_no_resched_notrace();
+
+@@ -5313,6 +5310,19 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
+ NOKPROBE_SYMBOL(preempt_schedule);
+ EXPORT_SYMBOL(preempt_schedule);
+
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace preempt_schedule_lock(void)
++{
++ do {
++ preempt_disable();
++ __schedule(true, true);
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++}
++NOKPROBE_SYMBOL(preempt_schedule_lock);
++EXPORT_SYMBOL(preempt_schedule_lock);
++#endif
++
+ #ifdef CONFIG_PREEMPT_DYNAMIC
+ DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func);
+ EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
+@@ -5362,7 +5372,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+ * an infinite recursion.
+ */
+ prev_ctx = exception_enter();
+- __schedule(true);
++ __schedule(true, false);
+ exception_exit(prev_ctx);
+
+ preempt_latency_stop(1);
+@@ -5580,7 +5590,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
+ do {
+ preempt_disable();
+ local_irq_enable();
+- __schedule(true);
++ __schedule(true, false);
+ local_irq_disable();
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
+--
+2.19.1
+
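The new spinning_lock argument lets one __schedule() implementation serve two entry points: the regular sleeping-lock path, which still flushes queued block I/O before sleeping, and preempt_schedule_lock() for RT spinlocks/rwlocks, which must not. A rough standalone sketch of that split follows; every function is a stand-in, and for brevity the plug flush is folded into the sleeping-lock entry point, standing in for sched_submit_work()/_mutex_lock_blk_flush().

/* Two entry points over one schedule core: only the sleeping-lock path
 * flushes queued block requests before blocking. */
#include <stdbool.h>
#include <stdio.h>

static void flush_block_plug(void)
{
	/* stand-in for blk_schedule_flush_plug(current) */
	printf("  flushing queued block requests\n");
}

static void __demo_schedule(bool preempt, bool spinning_lock)
{
	/* stand-in for __schedule(preempt, spinning_lock) */
	printf("  __schedule(preempt=%d, spinning_lock=%d)\n",
	       preempt, spinning_lock);
}

/* mutex/rwsem style blocking: may flush, mirrors schedule() */
static void demo_schedule(void)
{
	flush_block_plug();
	__demo_schedule(false, false);
}

/* RT spinlock/rwlock blocking: no flush, mirrors preempt_schedule_lock() */
static void demo_preempt_schedule_lock(void)
{
	__demo_schedule(true, true);
}

int main(void)
{
	printf("blocking on a mutex:\n");
	demo_schedule();
	printf("blocking on an RT spinlock:\n");
	demo_preempt_schedule_lock();
	return 0;
}
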
diff --git a/features/rt/locking-rtmutex-add-mutex-implementation-based-on-rt.patch b/features/rt/locking-rtmutex-add-mutex-implementation-based-on-rt.patch
new file mode 100644
index 00000000..287478c2
--- /dev/null
+++ b/features/rt/locking-rtmutex-add-mutex-implementation-based-on-rt.patch
@@ -0,0 +1,384 @@
+From d1a9c2854e127015b149b26f5d1bf143c7f2c8ce Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 12 Oct 2017 17:17:03 +0200
+Subject: [PATCH 077/191] locking/rtmutex: add mutex implementation based on
+ rtmutex
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/mutex_rt.h | 130 ++++++++++++++++++++++
+ kernel/locking/mutex-rt.c | 224 ++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 354 insertions(+)
+ create mode 100644 include/linux/mutex_rt.h
+ create mode 100644 kernel/locking/mutex-rt.c
+
+diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h
+new file mode 100644
+index 000000000000..f0b2e07cd5c5
+--- /dev/null
++++ b/include/linux/mutex_rt.h
+@@ -0,0 +1,130 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#ifndef __LINUX_MUTEX_RT_H
++#define __LINUX_MUTEX_RT_H
++
++#ifndef __LINUX_MUTEX_H
++#error "Please include mutex.h"
++#endif
++
++#include <linux/rtmutex.h>
++
++/* FIXME: Just for __lockfunc */
++#include <linux/spinlock.h>
++
++struct mutex {
++ struct rt_mutex lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __MUTEX_INITIALIZER(mutexname) \
++ { \
++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
++ }
++
++#define DEFINE_MUTEX(mutexname) \
++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
++
++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
++extern void __lockfunc _mutex_lock(struct mutex *lock);
++extern void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
++extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_trylock(struct mutex *lock);
++extern void __lockfunc _mutex_unlock(struct mutex *lock);
++
++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
++#define mutex_lock(l) _mutex_lock(l)
++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
++#define mutex_lock_killable(l) _mutex_lock_killable(l)
++#define mutex_trylock(l) _mutex_trylock(l)
++#define mutex_unlock(l) _mutex_unlock(l)
++#define mutex_lock_io(l) _mutex_lock_io_nested(l, 0);
++
++#define __mutex_owner(l) ((l)->lock.owner)
++
++#ifdef CONFIG_DEBUG_MUTEXES
++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
++#else
++static inline void mutex_destroy(struct mutex *lock) {}
++#endif
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible_nested(l, s)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable_nested(l, s)
++# define mutex_lock_io_nested(l, s) _mutex_lock_io_nested(l, s)
++
++# define mutex_lock_nest_lock(lock, nest_lock) \
++do { \
++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
++} while (0)
++
++#else
++# define mutex_lock_nested(l, s) _mutex_lock(l)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible(l)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable(l)
++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
++# define mutex_lock_io_nested(l, s) _mutex_lock_io_nested(l, s)
++#endif
++
++# define mutex_init(mutex) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), #mutex, &__key); \
++} while (0)
++
++# define __mutex_init(mutex, name, key) \
++do { \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), name, key); \
++} while (0)
++
++/**
++ * These values are chosen such that FAIL and SUCCESS match the
++ * values of the regular mutex_trylock().
++ */
++enum mutex_trylock_recursive_enum {
++ MUTEX_TRYLOCK_FAILED = 0,
++ MUTEX_TRYLOCK_SUCCESS = 1,
++ MUTEX_TRYLOCK_RECURSIVE,
++};
++/**
++ * mutex_trylock_recursive - trylock variant that allows recursive locking
++ * @lock: mutex to be locked
++ *
++ * This function should not be used, _ever_. It is purely for hysterical GEM
++ * raisins, and once those are gone this will be removed.
++ *
++ * Returns:
++ * MUTEX_TRYLOCK_FAILED - trylock failed,
++ * MUTEX_TRYLOCK_SUCCESS - lock acquired,
++ * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
++ */
++int __rt_mutex_owner_current(struct rt_mutex *lock);
++
++static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
++mutex_trylock_recursive(struct mutex *lock)
++{
++ if (unlikely(__rt_mutex_owner_current(&lock->lock)))
++ return MUTEX_TRYLOCK_RECURSIVE;
++
++ return mutex_trylock(lock);
++}
++
++extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
++
++#endif
+diff --git a/kernel/locking/mutex-rt.c b/kernel/locking/mutex-rt.c
+new file mode 100644
+index 000000000000..2b849e6b9b4a
+--- /dev/null
++++ b/kernel/locking/mutex-rt.c
+@@ -0,0 +1,224 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Real-Time Preemption Support
++ *
++ * started by Ingo Molnar:
++ *
++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ *
++ * historic credit for proving that Linux spinlocks can be implemented via
++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
++ * and others) who prototyped it on 2.4 and did lots of comparative
++ * research and analysis; TimeSys, for proving that you can implement a
++ * fully preemptible kernel via the use of IRQ threading and mutexes;
++ * Bill Huey for persuasively arguing on lkml that the mutex model is the
++ * right one; and to MontaVista, who ported pmutexes to 2.6.
++ *
++ * This code is a from-scratch implementation and is not based on pmutexes,
++ * but the idea of converting spinlocks to mutexes is used here too.
++ *
++ * lock debugging, locking tree, deadlock detection:
++ *
++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
++ * Released under the General Public License (GPL).
++ *
++ * Includes portions of the generic R/W semaphore implementation from:
++ *
++ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
++ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
++ * - Derived also from comments by Linus
++ *
++ * Pending ownership of locks and ownership stealing:
++ *
++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
++ *
++ * (also by Steven Rostedt)
++ * - Converted single pi_lock to individual task locks.
++ *
++ * By Esben Nielsen:
++ * Doing priority inheritance with help of the scheduler.
++ *
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ * - major rework based on Esben Nielsens initial patch
++ * - replaced thread_info references by task_struct refs
++ * - removed task->pending_owner dependency
++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
++ * in the scheduler return path as discussed with Steven Rostedt
++ *
++ * Copyright (C) 2006, Kihon Technologies Inc.
++ * Steven Rostedt <rostedt@goodmis.org>
++ * - debugged and patched Thomas Gleixner's rework.
++ * - added back the cmpxchg to the rework.
++ * - turned atomic require back on for SMP.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/rtmutex.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/syscalls.h>
++#include <linux/interrupt.h>
++#include <linux/plist.h>
++#include <linux/fs.h>
++#include <linux/futex.h>
++#include <linux/hrtimer.h>
++#include <linux/blkdev.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * struct mutex functions
++ */
++void __mutex_do_init(struct mutex *mutex, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
++ lockdep_init_map(&mutex->dep_map, name, key, 0);
++#endif
++ mutex->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__mutex_do_init);
++
++static int _mutex_lock_blk_flush(struct mutex *lock, int state)
++{
++ /*
++ * Flush blk before ->pi_blocked_on is set. At schedule() time it is too
++ * late if one of the callbacks needs to acquire a sleeping lock.
++ */
++ if (blk_needs_flush_plug(current))
++ blk_schedule_flush_plug(current);
++ return __rt_mutex_lock_state(&lock->lock, state);
++}
++
++void __lockfunc _mutex_lock(struct mutex *lock)
++{
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ _mutex_lock_blk_flush(lock, TASK_UNINTERRUPTIBLE);
++}
++EXPORT_SYMBOL(_mutex_lock);
++
++void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass)
++{
++ int token;
++
++ token = io_schedule_prepare();
++
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ __rt_mutex_lock_state(&lock->lock, TASK_UNINTERRUPTIBLE);
++
++ io_schedule_finish(token);
++}
++EXPORT_SYMBOL_GPL(_mutex_lock_io_nested);
++
++int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = _mutex_lock_blk_flush(lock, TASK_INTERRUPTIBLE);
++ if (ret)
++ mutex_release(&lock->dep_map, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible);
++
++int __lockfunc _mutex_lock_killable(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = _mutex_lock_blk_flush(lock, TASK_KILLABLE);
++ if (ret)
++ mutex_release(&lock->dep_map, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
++{
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ _mutex_lock_blk_flush(lock, TASK_UNINTERRUPTIBLE);
++}
++EXPORT_SYMBOL(_mutex_lock_nested);
++
++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
++{
++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
++ _mutex_lock_blk_flush(lock, TASK_UNINTERRUPTIBLE);
++}
++EXPORT_SYMBOL(_mutex_lock_nest_lock);
++
++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ ret = _mutex_lock_blk_flush(lock, TASK_INTERRUPTIBLE);
++ if (ret)
++ mutex_release(&lock->dep_map, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
++
++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ ret = _mutex_lock_blk_flush(lock, TASK_KILLABLE);
++ if (ret)
++ mutex_release(&lock->dep_map, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable_nested);
++#endif
++
++int __lockfunc _mutex_trylock(struct mutex *lock)
++{
++ int ret = __rt_mutex_trylock(&lock->lock);
++
++ if (ret)
++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_trylock);
++
++void __lockfunc _mutex_unlock(struct mutex *lock)
++{
++ mutex_release(&lock->dep_map, _RET_IP_);
++ __rt_mutex_unlock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_unlock);
++
++/**
++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
++ * @cnt: the atomic which we are to dec
++ * @lock: the mutex to return holding if we dec to 0
++ *
++ * return true and hold lock if we dec to 0, return false otherwise
++ */
++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
++{
++ /* dec if we can't possibly hit 0 */
++ if (atomic_add_unless(cnt, -1, 1))
++ return 0;
++ /* we might hit 0, so take the lock */
++ mutex_lock(lock);
++ if (!atomic_dec_and_test(cnt)) {
++ /* when we actually did the dec, we didn't hit 0 */
++ mutex_unlock(lock);
++ return 0;
++ }
++ /* we hit 0, and we hold the lock */
++ return 1;
++}
++EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+--
+2.19.1
+
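atomic_dec_and_mutex_lock() at the end of the new file only takes the mutex when the counter might actually reach zero. Below is a standalone sketch of the same pattern using C11 atomics and a pthread mutex instead of the kernel primitives (build with -pthread); the function names are the demo's own.

/* Decrement lock-free while the counter cannot reach zero; take the
 * mutex only for the final decrement. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool atomic_add_unless(atomic_int *v, int a, int unless)
{
	int old = atomic_load(v);

	while (old != unless) {
		if (atomic_compare_exchange_weak(v, &old, old + a))
			return true;	/* changed without the lock */
	}
	return false;
}

/* returns true (holding *lock) if the count dropped to zero */
static bool dec_and_mutex_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
	if (atomic_add_unless(cnt, -1, 1))
		return false;		/* cannot have hit zero */

	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(cnt, 1) - 1 != 0) {
		pthread_mutex_unlock(lock);
		return false;
	}
	return true;			/* count is zero, lock is held */
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	atomic_int refs = 2;

	printf("first put : zero? %d\n", dec_and_mutex_lock(&refs, &lock));
	if (dec_and_mutex_lock(&refs, &lock)) {
		printf("second put: zero, tearing down under the lock\n");
		pthread_mutex_unlock(&lock);
	}
	return 0;
}
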
diff --git a/features/rt/locking-rtmutex-add-rwlock-implementation-based-on-r.patch b/features/rt/locking-rtmutex-add-rwlock-implementation-based-on-r.patch
new file mode 100644
index 00000000..c45a4242
--- /dev/null
+++ b/features/rt/locking-rtmutex-add-rwlock-implementation-based-on-r.patch
@@ -0,0 +1,557 @@
+From 157ad15dc4fba83f4959571c08ebc2654dbdacd6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 12 Oct 2017 17:18:06 +0200
+Subject: [PATCH 079/191] locking/rtmutex: add rwlock implementation based on
+ rtmutex
+
+The implementation is bias-based, similar to the rwsem implementation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rwlock_rt.h | 113 +++++++++++
+ include/linux/rwlock_types_rt.h | 56 ++++++
+ kernel/Kconfig.locks | 2 +-
+ kernel/locking/rwlock-rt.c | 334 ++++++++++++++++++++++++++++++++
+ 4 files changed, 504 insertions(+), 1 deletion(-)
+ create mode 100644 include/linux/rwlock_rt.h
+ create mode 100644 include/linux/rwlock_types_rt.h
+ create mode 100644 kernel/locking/rwlock-rt.c
+
+diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
+new file mode 100644
+index 000000000000..552b8d69cdb3
+--- /dev/null
++++ b/include/linux/rwlock_rt.h
+@@ -0,0 +1,113 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#ifndef __LINUX_RWLOCK_RT_H
++#define __LINUX_RWLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
++extern int __lockfunc rt_read_can_lock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_can_lock(rwlock_t *rwlock);
++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
++extern int __lockfunc rt_rwlock_is_contended(rwlock_t *rwlock);
++
++#define read_can_lock(rwlock) rt_read_can_lock(rwlock)
++#define write_can_lock(rwlock) rt_write_can_lock(rwlock)
++
++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
++
++static inline int __write_trylock_rt_irqsave(rwlock_t *lock, unsigned long *flags)
++{
++ *flags = 0;
++ return rt_write_trylock(lock);
++}
++
++#define write_trylock_irqsave(lock, flags) \
++ __cond_lock(lock, __write_trylock_rt_irqsave(lock, &(flags)))
++
++#define read_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ rt_read_lock(lock); \
++ flags = 0; \
++ } while (0)
++
++#define write_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ rt_write_lock(lock); \
++ flags = 0; \
++ } while (0)
++
++#define read_lock(lock) rt_read_lock(lock)
++
++#define read_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ rt_read_lock(lock); \
++ } while (0)
++
++#define read_lock_irq(lock) read_lock(lock)
++
++#define write_lock(lock) rt_write_lock(lock)
++
++#define write_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ rt_write_lock(lock); \
++ } while (0)
++
++#define write_lock_irq(lock) write_lock(lock)
++
++#define read_unlock(lock) rt_read_unlock(lock)
++
++#define read_unlock_bh(lock) \
++ do { \
++ rt_read_unlock(lock); \
++ local_bh_enable(); \
++ } while (0)
++
++#define read_unlock_irq(lock) read_unlock(lock)
++
++#define write_unlock(lock) rt_write_unlock(lock)
++
++#define write_unlock_bh(lock) \
++ do { \
++ rt_write_unlock(lock); \
++ local_bh_enable(); \
++ } while (0)
++
++#define write_unlock_irq(lock) write_unlock(lock)
++
++#define read_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_read_unlock(lock); \
++ } while (0)
++
++#define write_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_write_unlock(lock); \
++ } while (0)
++
++#define rwlock_init(rwl) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ __rt_rwlock_init(rwl, #rwl, &__key); \
++} while (0)
++
++#define rwlock_is_contended(lock) \
++ rt_rwlock_is_contended(lock)
++
++#endif
+diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h
+new file mode 100644
+index 000000000000..4762391d659b
+--- /dev/null
++++ b/include/linux/rwlock_types_rt.h
+@@ -0,0 +1,56 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#ifndef __LINUX_RWLOCK_TYPES_RT_H
++#define __LINUX_RWLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define RW_DEP_MAP_INIT(lockname)
++#endif
++
++typedef struct rt_rw_lock rwlock_t;
++
++#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
++
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name = __RW_LOCK_UNLOCKED(name)
++
++/*
++ * A reader biased implementation primarily for CPU pinning.
++ *
++ * Can be selected as a general replacement for the single reader RT rwlock
++ * variant
++ */
++struct rt_rw_lock {
++ struct rt_mutex rtmutex;
++ atomic_t readers;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define READER_BIAS (1U << 31)
++#define WRITER_BIAS (1U << 30)
++
++#define __RWLOCK_RT_INITIALIZER(name) \
++{ \
++ .readers = ATOMIC_INIT(READER_BIAS), \
++ .rtmutex = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.rtmutex), \
++ RW_DEP_MAP_INIT(name) \
++}
++
++void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name,
++ struct lock_class_key *key);
++
++#define rwlock_biased_rt_init(rwlock) \
++ do { \
++ static struct lock_class_key __key; \
++ \
++ __rwlock_biased_rt_init((rwlock), #rwlock, &__key); \
++ } while (0)
++
++#endif
+diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
+index 3de8fd11873b..4198f0273ecd 100644
+--- a/kernel/Kconfig.locks
++++ b/kernel/Kconfig.locks
+@@ -251,7 +251,7 @@ config ARCH_USE_QUEUED_RWLOCKS
+
+ config QUEUED_RWLOCKS
+ def_bool y if ARCH_USE_QUEUED_RWLOCKS
+- depends on SMP
++ depends on SMP && !PREEMPT_RT
+
+ config ARCH_HAS_MMIOWB
+ bool
+diff --git a/kernel/locking/rwlock-rt.c b/kernel/locking/rwlock-rt.c
+new file mode 100644
+index 000000000000..64c4f5728b11
+--- /dev/null
++++ b/kernel/locking/rwlock-rt.c
+@@ -0,0 +1,334 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#include <linux/sched/debug.h>
++#include <linux/export.h>
++
++#include "rtmutex_common.h"
++#include <linux/rwlock_types_rt.h>
++
++/*
++ * RT-specific reader/writer locks
++ *
++ * write_lock()
++ * 1) Lock lock->rtmutex
++ * 2) Remove the reader BIAS to force readers into the slow path
++ * 3) Wait until all readers have left the critical region
++ * 4) Mark it write locked
++ *
++ * write_unlock()
++ * 1) Remove the write locked marker
++ * 2) Set the reader BIAS so readers can use the fast path again
++ * 3) Unlock lock->rtmutex to release blocked readers
++ *
++ * read_lock()
++ * 1) Try fast path acquisition (reader BIAS is set)
++ * 2) Take lock->rtmutex.wait_lock which protects the writelocked flag
++ * 3) If !writelocked, acquire it for read
++ * 4) If writelocked, block on lock->rtmutex
++ * 5) unlock lock->rtmutex, goto 1)
++ *
++ * read_unlock()
++ * 1) Try fast path release (reader count != 1)
++ * 2) Wake the writer waiting in write_lock()#3
++ *
++ * read_lock()#3 has the consequence that rw locks on RT are not writer
++ * fair, but writers, which should be avoided in RT tasks (think tasklist
++ * lock), are subject to the rtmutex priority/DL inheritance mechanism.
++ *
++ * It's possible to make the rw locks writer fair by keeping a list of
++ * active readers. A blocked writer would force all newly incoming readers
++ * to block on the rtmutex, but the rtmutex would have to be proxy locked
++ * for one reader after the other. We can't use multi-reader inheritance
++ * because there is no way to support that with
++ * SCHED_DEADLINE. Implementing the one by one reader boosting/handover
++ * mechanism is a major surgery for a very dubious value.
++ *
++ * The risk of writer starvation is there, but the pathological use cases
++ * which trigger it are not necessarily the typical RT workloads.
++ */
++
++void __rwlock_biased_rt_init(struct rt_rw_lock *lock, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held semaphore:
++ */
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
++ lockdep_init_map(&lock->dep_map, name, key, 0);
++#endif
++ atomic_set(&lock->readers, READER_BIAS);
++ rt_mutex_init(&lock->rtmutex);
++ lock->rtmutex.save_state = 1;
++}
++
++static int __read_rt_trylock(struct rt_rw_lock *lock)
++{
++ int r, old;
++
++ /*
++ * Increment reader count, if lock->readers < 0, i.e. READER_BIAS is
++ * set.
++ */
++ for (r = atomic_read(&lock->readers); r < 0;) {
++ old = atomic_cmpxchg(&lock->readers, r, r + 1);
++ if (likely(old == r))
++ return 1;
++ r = old;
++ }
++ return 0;
++}
++
++static void __read_rt_lock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ struct rt_mutex_waiter waiter;
++ unsigned long flags;
++
++ if (__read_rt_trylock(lock))
++ return;
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ /*
++ * Allow readers as long as the writer has not completely
++ * acquired the semaphore for write.
++ */
++ if (atomic_read(&lock->readers) != WRITER_BIAS) {
++ atomic_inc(&lock->readers);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return;
++ }
++
++ /*
++ * Call into the slow lock path with the rtmutex->wait_lock
++ * held, so this can't result in the following race:
++ *
++ * Reader1 Reader2 Writer
++ * read_lock()
++ * write_lock()
++ * rtmutex_lock(m)
++ * swait()
++ * read_lock()
++ * unlock(m->wait_lock)
++ * read_unlock()
++ * swake()
++ * lock(m->wait_lock)
++ * lock->writelocked=true
++ * unlock(m->wait_lock)
++ *
++ * write_unlock()
++ * lock->writelocked=false
++ * rtmutex_unlock(m)
++ * read_lock()
++ * write_lock()
++ * rtmutex_lock(m)
++ * swait()
++ * rtmutex_lock(m)
++ *
++ * That would put Reader1 behind the writer waiting on
++ * Reader2 to call read_unlock() which might be unbound.
++ */
++ rt_mutex_init_waiter(&waiter, true);
++ rt_spin_lock_slowlock_locked(m, &waiter, flags);
++ /*
++ * The slowlock() above is guaranteed to return with the rtmutex
++ * now held, so there can't be a writer active. Increment the reader
++ * count and immediately drop the rtmutex again.
++ */
++ atomic_inc(&lock->readers);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ rt_spin_lock_slowunlock(m);
++
++ debug_rt_mutex_free_waiter(&waiter);
++}
++
++static void __read_rt_unlock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ struct task_struct *tsk;
++
++ /*
++ * sem->readers can only hit 0 when a writer is waiting for the
++ * active readers to leave the critical region.
++ */
++ if (!atomic_dec_and_test(&lock->readers))
++ return;
++
++ raw_spin_lock_irq(&m->wait_lock);
++ /*
++ * Wake the writer, i.e. the rtmutex owner. It might release the
++ * rtmutex concurrently in the fast path, but to clean up the rw
++ * lock it needs to acquire m->wait_lock. The worst case which can
++ * happen is a spurious wakeup.
++ */
++ tsk = rt_mutex_owner(m);
++ if (tsk)
++ wake_up_process(tsk);
++
++ raw_spin_unlock_irq(&m->wait_lock);
++}
++
++static void __write_unlock_common(struct rt_rw_lock *lock, int bias,
++ unsigned long flags)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++
++ atomic_add(READER_BIAS - bias, &lock->readers);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ rt_spin_lock_slowunlock(m);
++}
++
++static void __write_rt_lock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ struct task_struct *self = current;
++ unsigned long flags;
++
++ /* Take the rtmutex as a first step */
++ __rt_spin_lock(m);
++
++ /* Force readers into slow path */
++ atomic_sub(READER_BIAS, &lock->readers);
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++
++ raw_spin_lock(&self->pi_lock);
++ self->saved_state = self->state;
++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
++ raw_spin_unlock(&self->pi_lock);
++
++ for (;;) {
++ /* Have all readers left the critical region? */
++ if (!atomic_read(&lock->readers)) {
++ atomic_set(&lock->readers, WRITER_BIAS);
++ raw_spin_lock(&self->pi_lock);
++ __set_current_state_no_track(self->saved_state);
++ self->saved_state = TASK_RUNNING;
++ raw_spin_unlock(&self->pi_lock);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return;
++ }
++
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++
++ if (atomic_read(&lock->readers) != 0)
++ schedule();
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++
++ raw_spin_lock(&self->pi_lock);
++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
++ raw_spin_unlock(&self->pi_lock);
++ }
++}
++
++static int __write_rt_trylock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ unsigned long flags;
++
++ if (!__rt_mutex_trylock(m))
++ return 0;
++
++ atomic_sub(READER_BIAS, &lock->readers);
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ if (!atomic_read(&lock->readers)) {
++ atomic_set(&lock->readers, WRITER_BIAS);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return 1;
++ }
++ __write_unlock_common(lock, 0, flags);
++ return 0;
++}
++
++static void __write_rt_unlock(struct rt_rw_lock *lock)
++{
++ struct rt_mutex *m = &lock->rtmutex;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ __write_unlock_common(lock, WRITER_BIAS, flags);
++}
++
++int __lockfunc rt_read_can_lock(rwlock_t *rwlock)
++{
++ return atomic_read(&rwlock->readers) < 0;
++}
++
++int __lockfunc rt_write_can_lock(rwlock_t *rwlock)
++{
++ return atomic_read(&rwlock->readers) == READER_BIAS;
++}
++
++/*
++ * The common functions which get wrapped into the rwlock API.
++ */
++int __lockfunc rt_read_trylock(rwlock_t *rwlock)
++{
++ int ret;
++
++ ret = __read_rt_trylock(rwlock);
++ if (ret) {
++ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
++ migrate_disable();
++ }
++ return ret;
++}
++EXPORT_SYMBOL(rt_read_trylock);
++
++int __lockfunc rt_write_trylock(rwlock_t *rwlock)
++{
++ int ret;
++
++ ret = __write_rt_trylock(rwlock);
++ if (ret) {
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ migrate_disable();
++ }
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock);
++
++void __lockfunc rt_read_lock(rwlock_t *rwlock)
++{
++ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __read_rt_lock(rwlock);
++ migrate_disable();
++}
++EXPORT_SYMBOL(rt_read_lock);
++
++void __lockfunc rt_write_lock(rwlock_t *rwlock)
++{
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __write_rt_lock(rwlock);
++ migrate_disable();
++}
++EXPORT_SYMBOL(rt_write_lock);
++
++void __lockfunc rt_read_unlock(rwlock_t *rwlock)
++{
++ rwlock_release(&rwlock->dep_map, _RET_IP_);
++ migrate_enable();
++ __read_rt_unlock(rwlock);
++}
++EXPORT_SYMBOL(rt_read_unlock);
++
++void __lockfunc rt_write_unlock(rwlock_t *rwlock)
++{
++ rwlock_release(&rwlock->dep_map, _RET_IP_);
++ migrate_enable();
++ __write_rt_unlock(rwlock);
++}
++EXPORT_SYMBOL(rt_write_unlock);
++
++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
++{
++ __rwlock_biased_rt_init(rwlock, name, key);
++}
++EXPORT_SYMBOL(__rt_rwlock_init);
++
++int __lockfunc rt_rwlock_is_contended(rwlock_t *rwlock)
++{
++ return rt_mutex_has_waiters(&rwlock->rtmutex);
++}
++EXPORT_SYMBOL(rt_rwlock_is_contended);
+--
+2.19.1
+
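The reader bias scheme described in the comment at the top of rwlock-rt.c can
be illustrated with a small standalone (userspace) program that mimics
__read_rt_trylock(): the fast path only succeeds while the counter is
negative, i.e. while READER_BIAS (the sign bit of the signed counter) is still
present; once a writer has subtracted the bias, readers fall through to the
slow path. This is an illustration only, not kernel code:

	#include <assert.h>
	#include <stdatomic.h>

	#define READER_BIAS (1U << 31)

	static atomic_int readers;

	static int read_trylock_fastpath(void)
	{
		int r = atomic_load(&readers);

		while (r < 0) {	/* bias present: no writer has claimed the lock */
			/* on failure, r is reloaded and the loop re-checks it */
			if (atomic_compare_exchange_weak(&readers, &r, r + 1))
				return 1;	/* reader accounted for */
		}
		return 0;	/* writer active or pending: slow path needed */
	}

	int main(void)
	{
		/* mirrors the kernel's signed atomic_t; the bias is the sign bit */
		atomic_init(&readers, (int)READER_BIAS);

		assert(read_trylock_fastpath());		/* unlocked: fast path wins */
		atomic_fetch_sub(&readers, (int)READER_BIAS);	/* writer removes the bias */
		assert(!read_trylock_fastpath());		/* readers forced to slow path */
		return 0;
	}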
diff --git a/features/rt/locking-rtmutex-add-rwsem-implementation-based-on-rt.patch b/features/rt/locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
new file mode 100644
index 00000000..335f9ac9
--- /dev/null
+++ b/features/rt/locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
@@ -0,0 +1,454 @@
+From 94f118173e2b3760d995876193660096e0284c87 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 12 Oct 2017 17:28:34 +0200
+Subject: [PATCH 078/191] locking/rtmutex: add rwsem implementation based on
+ rtmutex
+
+The RT specific R/W semaphore implementation restricts the number of readers
+to one because a writer cannot block on multiple readers and inherit its
+priority or budget.
+
+The single reader restriction is painful in various ways:
+
+ - Performance bottleneck for multi-threaded applications in the page fault
+ path (mmap_sem)
+
+ - Progress blocker for drivers which are carefully crafted to avoid the
+ potential reader/writer deadlock in mainline.
+
+The analysis of the writer code paths shows that properly written RT tasks
+should not take them. Syscalls like mmap() and file accesses which take
+mmap_sem write locked have unbound latencies which are completely unrelated
+to mmap_sem. Other R/W sem users like graphics drivers are not suitable for
+RT tasks either.
+
+So there is little risk of hurting RT tasks when the RT rwsem implementation is
+changed in the following way:
+
+ - Allow concurrent readers
+
+ - Make writers block until the last reader has left the critical section. This
+ blocking is not subject to priority/budget inheritance.
+
+ - Readers blocked on a writer inherit their priority/budget in the normal
+ way.
+
+There is a drawback with this scheme. R/W semaphores become writer unfair,
+though the applications which have triggered writer starvation (mostly on
+mmap_sem) in the past are not really the typical workloads running on an RT
+system. So while it's unlikely to hit writer starvation, it's possible. If
+there are unexpected workloads on RT systems triggering it, we need to rethink
+the approach.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rwsem-rt.h | 70 +++++++++
+ kernel/locking/rwsem-rt.c | 318 ++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 388 insertions(+)
+ create mode 100644 include/linux/rwsem-rt.h
+ create mode 100644 kernel/locking/rwsem-rt.c
+
+diff --git a/include/linux/rwsem-rt.h b/include/linux/rwsem-rt.h
+new file mode 100644
+index 000000000000..0ba8aae9a198
+--- /dev/null
++++ b/include/linux/rwsem-rt.h
+@@ -0,0 +1,70 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#ifndef _LINUX_RWSEM_RT_H
++#define _LINUX_RWSEM_RT_H
++
++#ifndef _LINUX_RWSEM_H
++#error "Include rwsem.h"
++#endif
++
++#include <linux/rtmutex.h>
++#include <linux/swait.h>
++
++#define READER_BIAS (1U << 31)
++#define WRITER_BIAS (1U << 30)
++
++struct rw_semaphore {
++ atomic_t readers;
++ struct rt_mutex rtmutex;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __RWSEM_INITIALIZER(name) \
++{ \
++ .readers = ATOMIC_INIT(READER_BIAS), \
++ .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \
++ RW_DEP_MAP_INIT(name) \
++}
++
++#define DECLARE_RWSEM(lockname) \
++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
++
++extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name,
++ struct lock_class_key *key);
++
++#define __init_rwsem(sem, name, key) \
++do { \
++ rt_mutex_init(&(sem)->rtmutex); \
++ __rwsem_init((sem), (name), (key)); \
++} while (0)
++
++#define init_rwsem(sem) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ __init_rwsem((sem), #sem, &__key); \
++} while (0)
++
++static inline int rwsem_is_locked(struct rw_semaphore *sem)
++{
++ return atomic_read(&sem->readers) != READER_BIAS;
++}
++
++static inline int rwsem_is_contended(struct rw_semaphore *sem)
++{
++ return atomic_read(&sem->readers) > 0;
++}
++
++extern void __down_read(struct rw_semaphore *sem);
++extern int __down_read_interruptible(struct rw_semaphore *sem);
++extern int __down_read_killable(struct rw_semaphore *sem);
++extern int __down_read_trylock(struct rw_semaphore *sem);
++extern void __down_write(struct rw_semaphore *sem);
++extern int __must_check __down_write_killable(struct rw_semaphore *sem);
++extern int __down_write_trylock(struct rw_semaphore *sem);
++extern void __up_read(struct rw_semaphore *sem);
++extern void __up_write(struct rw_semaphore *sem);
++extern void __downgrade_write(struct rw_semaphore *sem);
++
++#endif
+diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c
+new file mode 100644
+index 000000000000..a0771c150041
+--- /dev/null
++++ b/kernel/locking/rwsem-rt.c
+@@ -0,0 +1,318 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#include <linux/rwsem.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/signal.h>
++#include <linux/export.h>
++#include <linux/blkdev.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * RT-specific reader/writer semaphores
++ *
++ * down_write()
++ * 1) Lock sem->rtmutex
++ * 2) Remove the reader BIAS to force readers into the slow path
++ * 3) Wait until all readers have left the critical region
++ * 4) Mark it write locked
++ *
++ * up_write()
++ * 1) Remove the write locked marker
++ * 2) Set the reader BIAS so readers can use the fast path again
++ * 3) Unlock sem->rtmutex to release blocked readers
++ *
++ * down_read()
++ * 1) Try fast path acquisition (reader BIAS is set)
++ * 2) Take sem->rtmutex.wait_lock which protects the writelocked flag
++ * 3) If !writelocked, acquire it for read
++ * 4) If writelocked, block on sem->rtmutex
++ * 5) unlock sem->rtmutex, goto 1)
++ *
++ * up_read()
++ * 1) Try fast path release (reader count != 1)
++ * 2) Wake the writer waiting in down_write()#3
++ *
++ * down_read()#3 has the consequence that rw semaphores on RT are not writer
++ * fair, but writers, which should be avoided in RT tasks (think mmap_sem),
++ * are subject to the rtmutex priority/DL inheritance mechanism.
++ *
++ * It's possible to make the rw semaphores writer fair by keeping a list of
++ * active readers. A blocked writer would force all newly incoming readers to
++ * block on the rtmutex, but the rtmutex would have to be proxy locked for one
++ * reader after the other. We can't use multi-reader inheritance because there
++ * is no way to support that with SCHED_DEADLINE. Implementing the one by one
++ * reader boosting/handover mechanism is a major surgery for a very dubious
++ * value.
++ *
++ * The risk of writer starvation is there, but the pathological use cases
++ * which trigger it are not necessarily the typical RT workloads.
++ */
++
++void __rwsem_init(struct rw_semaphore *sem, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held semaphore:
++ */
++ debug_check_no_locks_freed((void *)sem, sizeof(*sem));
++ lockdep_init_map(&sem->dep_map, name, key, 0);
++#endif
++ atomic_set(&sem->readers, READER_BIAS);
++}
++EXPORT_SYMBOL(__rwsem_init);
++
++int __down_read_trylock(struct rw_semaphore *sem)
++{
++ int r, old;
++
++ /*
++ * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
++ * set.
++ */
++ for (r = atomic_read(&sem->readers); r < 0;) {
++ old = atomic_cmpxchg(&sem->readers, r, r + 1);
++ if (likely(old == r))
++ return 1;
++ r = old;
++ }
++ return 0;
++}
++
++static int __sched __down_read_common(struct rw_semaphore *sem, int state)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ struct rt_mutex_waiter waiter;
++ int ret;
++
++ if (__down_read_trylock(sem))
++ return 0;
++
++ /*
++ * Flush blk before ->pi_blocked_on is set. At schedule() time it is too
++ * late if one of the callbacks needs to acquire a sleeping lock.
++ */
++ if (blk_needs_flush_plug(current))
++ blk_schedule_flush_plug(current);
++
++ might_sleep();
++ raw_spin_lock_irq(&m->wait_lock);
++ /*
++ * Allow readers as long as the writer has not completely
++ * acquired the semaphore for write.
++ */
++ if (atomic_read(&sem->readers) != WRITER_BIAS) {
++ atomic_inc(&sem->readers);
++ raw_spin_unlock_irq(&m->wait_lock);
++ return 0;
++ }
++
++ /*
++ * Call into the slow lock path with the rtmutex->wait_lock
++ * held, so this can't result in the following race:
++ *
++ * Reader1 Reader2 Writer
++ * down_read()
++ * down_write()
++ * rtmutex_lock(m)
++ * swait()
++ * down_read()
++ * unlock(m->wait_lock)
++ * up_read()
++ * swake()
++ * lock(m->wait_lock)
++ * sem->writelocked=true
++ * unlock(m->wait_lock)
++ *
++ * up_write()
++ * sem->writelocked=false
++ * rtmutex_unlock(m)
++ * down_read()
++ * down_write()
++ * rtmutex_lock(m)
++ * swait()
++ * rtmutex_lock(m)
++ *
++ * That would put Reader1 behind the writer waiting on
++ * Reader2 to call up_read() which might be unbound.
++ */
++ rt_mutex_init_waiter(&waiter, false);
++ ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK,
++ &waiter);
++ /*
++ * The slowlock() above is guaranteed to return with the rtmutex held
++ * (for ret = 0), so there can't be a writer active. Increment
++ * the reader count and immediately drop the rtmutex again.
++ * For ret != 0 we don't hold the rtmutex and need to unlock the wait_lock;
++ * we don't own the lock then.
++ */
++ if (!ret)
++ atomic_inc(&sem->readers);
++ raw_spin_unlock_irq(&m->wait_lock);
++ if (!ret)
++ __rt_mutex_unlock(m);
++
++ debug_rt_mutex_free_waiter(&waiter);
++ return ret;
++}
++
++void __down_read(struct rw_semaphore *sem)
++{
++ int ret;
++
++ ret = __down_read_common(sem, TASK_UNINTERRUPTIBLE);
++ WARN_ON_ONCE(ret);
++}
++
++int __down_read_interruptible(struct rw_semaphore *sem)
++{
++ int ret;
++
++ ret = __down_read_common(sem, TASK_INTERRUPTIBLE);
++ if (likely(!ret))
++ return ret;
++ WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret);
++ return -EINTR;
++}
++
++int __down_read_killable(struct rw_semaphore *sem)
++{
++ int ret;
++
++ ret = __down_read_common(sem, TASK_KILLABLE);
++ if (likely(!ret))
++ return ret;
++ WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret);
++ return -EINTR;
++}
++
++void __up_read(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ struct task_struct *tsk;
++
++ /*
++ * sem->readers can only hit 0 when a writer is waiting for the
++ * active readers to leave the critical region.
++ */
++ if (!atomic_dec_and_test(&sem->readers))
++ return;
++
++ might_sleep();
++ raw_spin_lock_irq(&m->wait_lock);
++ /*
++ * Wake the writer, i.e. the rtmutex owner. It might release the
++ * rtmutex concurrently in the fast path (due to a signal), but to
++ * clean up the rwsem it needs to acquire m->wait_lock. The worst
++ * case which can happen is a spurious wakeup.
++ */
++ tsk = rt_mutex_owner(m);
++ if (tsk)
++ wake_up_process(tsk);
++
++ raw_spin_unlock_irq(&m->wait_lock);
++}
++
++static void __up_write_unlock(struct rw_semaphore *sem, int bias,
++ unsigned long flags)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++
++ atomic_add(READER_BIAS - bias, &sem->readers);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ __rt_mutex_unlock(m);
++}
++
++static int __sched __down_write_common(struct rw_semaphore *sem, int state)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ unsigned long flags;
++
++ /*
++ * Flush blk before ->pi_blocked_on is set. At schedule() time it is too
++ * late if one of the callbacks needs to acquire a sleeping lock.
++ */
++ if (blk_needs_flush_plug(current))
++ blk_schedule_flush_plug(current);
++
++ /* Take the rtmutex as a first step */
++ if (__rt_mutex_lock_state(m, state))
++ return -EINTR;
++
++ /* Force readers into slow path */
++ atomic_sub(READER_BIAS, &sem->readers);
++ might_sleep();
++
++ set_current_state(state);
++ for (;;) {
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ /* Have all readers left the critical region? */
++ if (!atomic_read(&sem->readers)) {
++ atomic_set(&sem->readers, WRITER_BIAS);
++ __set_current_state(TASK_RUNNING);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return 0;
++ }
++
++ if (signal_pending_state(state, current)) {
++ __set_current_state(TASK_RUNNING);
++ __up_write_unlock(sem, 0, flags);
++ return -EINTR;
++ }
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++
++ if (atomic_read(&sem->readers) != 0) {
++ schedule();
++ set_current_state(state);
++ }
++ }
++}
++
++void __sched __down_write(struct rw_semaphore *sem)
++{
++ __down_write_common(sem, TASK_UNINTERRUPTIBLE);
++}
++
++int __sched __down_write_killable(struct rw_semaphore *sem)
++{
++ return __down_write_common(sem, TASK_KILLABLE);
++}
++
++int __down_write_trylock(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ unsigned long flags;
++
++ if (!__rt_mutex_trylock(m))
++ return 0;
++
++ atomic_sub(READER_BIAS, &sem->readers);
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ if (!atomic_read(&sem->readers)) {
++ atomic_set(&sem->readers, WRITER_BIAS);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return 1;
++ }
++ __up_write_unlock(sem, 0, flags);
++ return 0;
++}
++
++void __up_write(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ __up_write_unlock(sem, WRITER_BIAS, flags);
++}
++
++void __downgrade_write(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ /* Release it and account current as reader */
++ __up_write_unlock(sem, WRITER_BIAS - 1, flags);
++}
+--
+2.19.1
+
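The counter transitions described in the changelog above are plain bias
arithmetic on sem->readers; the following standalone (userspace) walk-through
models only the values, not the blocking, wakeups or priority inheritance:

	#include <assert.h>

	#define READER_BIAS (1U << 31)
	#define WRITER_BIAS (1U << 30)

	int main(void)
	{
		unsigned int readers = READER_BIAS;	/* unlocked */

		readers += 2;				/* two readers took the fast path */
		assert(readers == READER_BIAS + 2);

		readers -= READER_BIAS;			/* down_write() forces the slow path */
		assert(readers == 2);			/* writer waits for the count to hit 0 */

		readers -= 2;				/* both readers call up_read() */
		assert(readers == 0);
		readers = WRITER_BIAS;			/* writer marks it write locked */

		readers += READER_BIAS - WRITER_BIAS;	/* up_write() restores the bias */
		assert(readers == READER_BIAS);		/* unlocked again */
		return 0;
	}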
diff --git a/features/rt/locking-rtmutex-add-sleeping-lock-implementation.patch b/features/rt/locking-rtmutex-add-sleeping-lock-implementation.patch
new file mode 100644
index 00000000..91588d81
--- /dev/null
+++ b/features/rt/locking-rtmutex-add-sleeping-lock-implementation.patch
@@ -0,0 +1,1232 @@
+From 479dce6155bca792fbc3d2907d5d576e0d38c554 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 12 Oct 2017 17:11:19 +0200
+Subject: [PATCH 075/191] locking/rtmutex: add sleeping lock implementation
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/kernel.h | 5 +
+ include/linux/preempt.h | 4 +
+ include/linux/rtmutex.h | 19 +-
+ include/linux/sched.h | 7 +
+ include/linux/sched/wake_q.h | 13 +-
+ include/linux/spinlock_rt.h | 155 ++++++++++
+ include/linux/spinlock_types_rt.h | 38 +++
+ kernel/fork.c | 1 +
+ kernel/futex.c | 10 +-
+ kernel/locking/rtmutex.c | 451 +++++++++++++++++++++++++++---
+ kernel/locking/rtmutex_common.h | 14 +-
+ kernel/sched/core.c | 39 ++-
+ 12 files changed, 698 insertions(+), 58 deletions(-)
+ create mode 100644 include/linux/spinlock_rt.h
+ create mode 100644 include/linux/spinlock_types_rt.h
+
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 5b7ed6dc99ac..0f9315891c17 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -122,6 +122,10 @@ extern void __cant_migrate(const char *file, int line);
+ */
+ # define might_sleep() \
+ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
++
++# define might_sleep_no_state_check() \
++ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
++
+ /**
+ * cant_sleep - annotation for functions that cannot sleep
+ *
+@@ -165,6 +169,7 @@ extern void __cant_migrate(const char *file, int line);
+ static inline void __might_sleep(const char *file, int line,
+ int preempt_offset) { }
+ # define might_sleep() do { might_resched(); } while (0)
++# define might_sleep_no_state_check() do { might_resched(); } while (0)
+ # define cant_sleep() do { } while (0)
+ # define cant_migrate() do { } while (0)
+ # define sched_annotate_sleep() do { } while (0)
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 9881eac0698f..4d244e295e85 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -121,7 +121,11 @@
+ /*
+ * The preempt_count offset after spin_lock()
+ */
++#if !defined(CONFIG_PREEMPT_RT)
+ #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
++#else
++#define PREEMPT_LOCK_OFFSET 0
++#endif
+
+ /*
+ * The preempt_count offset needed for things like:
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index b828b938c876..b02009f53026 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -19,6 +19,10 @@
+
+ extern int max_lock_depth; /* for sysctl */
+
++#ifdef CONFIG_DEBUG_MUTEXES
++#include <linux/debug_locks.h>
++#endif
++
+ /**
+ * The rt_mutex structure
+ *
+@@ -31,6 +35,7 @@ struct rt_mutex {
+ raw_spinlock_t wait_lock;
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
++ int save_state;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ #endif
+@@ -67,11 +72,19 @@ do { \
+ #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
+ #endif
+
+-#define __RT_MUTEX_INITIALIZER(mutexname) \
+- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .waiters = RB_ROOT_CACHED \
+ , .owner = NULL \
+- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
++ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
++
++#define __RT_MUTEX_INITIALIZER(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ , .save_state = 0 }
++
++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ , .save_state = 1 }
+
+ #define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 3f05b8c29132..fb5350358bc8 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -141,6 +141,9 @@ struct task_group;
+ smp_store_mb(current->state, (state_value)); \
+ } while (0)
+
++#define __set_current_state_no_track(state_value) \
++ current->state = (state_value);
++
+ #define set_special_state(state_value) \
+ do { \
+ unsigned long flags; /* may shadow */ \
+@@ -194,6 +197,9 @@ struct task_group;
+ #define set_current_state(state_value) \
+ smp_store_mb(current->state, (state_value))
+
++#define __set_current_state_no_track(state_value) \
++ __set_current_state(state_value)
++
+ /*
+ * set_special_state() should be used for those states when the blocking task
+ * can not use the regular condition based wait-loop. In that case we must
+@@ -1018,6 +1024,7 @@ struct task_struct {
+ raw_spinlock_t pi_lock;
+
+ struct wake_q_node wake_q;
++ struct wake_q_node wake_q_sleeper;
+
+ #ifdef CONFIG_RT_MUTEXES
+ /* PI waiters blocked on a rt_mutex held by this task: */
+diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
+index 26a2013ac39c..6e2dff721547 100644
+--- a/include/linux/sched/wake_q.h
++++ b/include/linux/sched/wake_q.h
+@@ -58,6 +58,17 @@ static inline bool wake_q_empty(struct wake_q_head *head)
+
+ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
+ extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
+-extern void wake_up_q(struct wake_q_head *head);
++extern void wake_q_add_sleeper(struct wake_q_head *head, struct task_struct *task);
++extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
++
++static inline void wake_up_q(struct wake_q_head *head)
++{
++ __wake_up_q(head, false);
++}
++
++static inline void wake_up_q_sleeper(struct wake_q_head *head)
++{
++ __wake_up_q(head, true);
++}
+
+ #endif /* _LINUX_SCHED_WAKE_Q_H */
+diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
+new file mode 100644
+index 000000000000..3085132eae38
+--- /dev/null
++++ b/include/linux/spinlock_rt.h
+@@ -0,0 +1,155 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#ifndef __LINUX_SPINLOCK_RT_H
++#define __LINUX_SPINLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++#include <linux/bug.h>
++
++extern void
++__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key);
++
++#define spin_lock_init(slock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(slock)->lock); \
++ __rt_spin_lock_init(slock, #slock, &__key); \
++} while (0)
++
++extern void __lockfunc rt_spin_lock(spinlock_t *lock);
++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
++extern void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock);
++extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
++extern void __lockfunc rt_spin_lock_unlock(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
++
++/*
++ * lockdep-less calls, for derived types like rwlock:
++ * (for trylock they can use rt_mutex_trylock() directly).
++ * Migrate disable handling must be done at the call site.
++ */
++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
++extern void __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
++
++#define spin_lock(lock) rt_spin_lock(lock)
++
++#define spin_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ rt_spin_lock(lock); \
++ } while (0)
++
++#define spin_lock_irq(lock) spin_lock(lock)
++
++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
++
++#define spin_trylock(lock) \
++({ \
++ int __locked; \
++ __locked = spin_do_trylock(lock); \
++ __locked; \
++})
++
++#ifdef CONFIG_LOCKDEP
++# define spin_lock_nested(lock, subclass) \
++ do { \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++
++#define spin_lock_bh_nested(lock, subclass) \
++ do { \
++ local_bh_disable(); \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++
++# define spin_lock_nest_lock(lock, subclass) \
++ do { \
++ typecheck(struct lockdep_map *, &(subclass)->dep_map); \
++ rt_spin_lock_nest_lock(lock, &(subclass)->dep_map); \
++ } while (0)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++#else
++# define spin_lock_nested(lock, subclass) spin_lock(((void)(subclass), (lock)))
++# define spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock)))
++# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(((void)(subclass), (lock)))
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(((void)(subclass), (lock))); \
++ } while (0)
++#endif
++
++#define spin_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(lock); \
++ } while (0)
++
++#define spin_unlock(lock) rt_spin_unlock(lock)
++
++#define spin_unlock_bh(lock) \
++ do { \
++ rt_spin_unlock(lock); \
++ local_bh_enable(); \
++ } while (0)
++
++#define spin_unlock_irq(lock) spin_unlock(lock)
++
++#define spin_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ spin_unlock(lock); \
++ } while (0)
++
++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
++#define spin_trylock_irq(lock) spin_trylock(lock)
++
++#define spin_trylock_irqsave(lock, flags) \
++({ \
++ int __locked; \
++ \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ __locked = spin_trylock(lock); \
++ __locked; \
++})
++
++#ifdef CONFIG_GENERIC_LOCKBREAK
++# define spin_is_contended(lock) ((lock)->break_lock)
++#else
++# define spin_is_contended(lock) (((void)(lock), 0))
++#endif
++
++static inline int spin_can_lock(spinlock_t *lock)
++{
++ return !rt_mutex_is_locked(&lock->lock);
++}
++
++static inline int spin_is_locked(spinlock_t *lock)
++{
++ return rt_mutex_is_locked(&lock->lock);
++}
++
++static inline void assert_spin_locked(spinlock_t *lock)
++{
++ BUG_ON(!spin_is_locked(lock));
++}
++
++#endif
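Callers of the spinlock API above need no source changes on PREEMPT_RT; the
irqsave variants simply stop disabling interrupts and always hand back
flags == 0, and the lock itself may now sleep. A minimal sketch of what such a
caller looks like (demo_lock, demo_count and demo_inc() are hypothetical names
used only for illustration):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);
	static unsigned int demo_count;

	static void demo_inc(void)
	{
		unsigned long flags;

		/*
		 * On PREEMPT_RT this may block: spin_lock_irqsave() maps to
		 * rt_spin_lock(), interrupts stay enabled and flags is set
		 * to 0 by the macro above rather than saving any IRQ state.
		 */
		spin_lock_irqsave(&demo_lock, flags);
		demo_count++;
		spin_unlock_irqrestore(&demo_lock, flags);
	}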
+diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h
+new file mode 100644
+index 000000000000..446da786e5d5
+--- /dev/null
++++ b/include/linux/spinlock_types_rt.h
+@@ -0,0 +1,38 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#ifndef __LINUX_SPINLOCK_TYPES_RT_H
++#define __LINUX_SPINLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++#include <linux/cache.h>
++
++/*
++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
++ */
++typedef struct spinlock {
++ struct rt_mutex lock;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} spinlock_t;
++
++#define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ }
++/*
++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
++*/
++
++#define __SPIN_LOCK_UNLOCKED(name) \
++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
++ SPIN_DEP_MAP_INIT(name) }
++
++#define DEFINE_SPINLOCK(name) \
++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
++
++#endif
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 882c91a52aa5..c0cfae6e545c 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -927,6 +927,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
+ tsk->splice_pipe = NULL;
+ tsk->task_frag.page = NULL;
+ tsk->wake_q.next = NULL;
++ tsk->wake_q_sleeper.next = NULL;
+
+ account_kernel_stack(tsk, 1);
+
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 0315333c0587..93961549dc4c 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1497,6 +1497,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
+ struct task_struct *new_owner;
+ bool postunlock = false;
+ DEFINE_WAKE_Q(wake_q);
++ DEFINE_WAKE_Q(wake_sleeper_q);
+ int ret = 0;
+
+ new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
+@@ -1546,14 +1547,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
+ * not fail.
+ */
+ pi_state_update_owner(pi_state, new_owner);
+- postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
++ &wake_sleeper_q);
+ }
+
+ out_unlock:
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+
+ if (postunlock)
+- rt_mutex_postunlock(&wake_q);
++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+
+ return ret;
+ }
+@@ -2857,7 +2859,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
+ goto no_block;
+ }
+
+- rt_mutex_init_waiter(&rt_waiter);
++ rt_mutex_init_waiter(&rt_waiter, false);
+
+ /*
+ * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
+@@ -3202,7 +3204,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * The waiter is allocated on our stack, manipulated by the requeue
+ * code while we sleep on uaddr.
+ */
+- rt_mutex_init_waiter(&rt_waiter);
++ rt_mutex_init_waiter(&rt_waiter, false);
+
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE);
+ if (unlikely(ret != 0))
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 86012b16f0c2..03ffb955b286 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -8,6 +8,11 @@
+ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
+ * Copyright (C) 2006 Esben Nielsen
++ * Adaptive Spinlocks:
++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
++ * and Peter Morreale,
++ * Adaptive Spinlocks simplification:
++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
+ *
+ * See Documentation/locking/rt-mutex-design.rst for details.
+ */
+@@ -233,7 +238,7 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+ * Only use with rt_mutex_waiter_{less,equal}()
+ */
+ #define task_to_waiter(p) \
+- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
++ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
+
+ static inline int
+ rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+@@ -273,6 +278,27 @@ rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
+ return 1;
+ }
+
++#define STEAL_NORMAL 0
++#define STEAL_LATERAL 1
++
++static inline int
++rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
++{
++ struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
++
++ if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
++ return 1;
++
++ /*
++ * Note that RT tasks are excluded from lateral-steals
++ * to prevent the introduction of an unbounded latency.
++ */
++ if (mode == STEAL_NORMAL || rt_task(waiter->task))
++ return 0;
++
++ return rt_mutex_waiter_equal(waiter, top_waiter);
++}
++
+ #define __node_2_waiter(node) \
+ rb_entry((node), struct rt_mutex_waiter, tree_entry)
+
+@@ -359,6 +385,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+ return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+ }
+
++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
++{
++ if (waiter->savestate)
++ wake_up_lock_sleeper(waiter->task);
++ else
++ wake_up_process(waiter->task);
++}
++
+ /*
+ * Max number of times we'll walk the boosting chain:
+ */
+@@ -682,13 +716,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * follow here. This is the end of the chain we are walking.
+ */
+ if (!rt_mutex_owner(lock)) {
++ struct rt_mutex_waiter *lock_top_waiter;
++
+ /*
+ * If the requeue [7] above changed the top waiter,
+ * then we need to wake the new top waiter up to try
+ * to get the lock.
+ */
+- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+- wake_up_process(rt_mutex_top_waiter(lock)->task);
++ lock_top_waiter = rt_mutex_top_waiter(lock);
++ if (prerequeue_top_waiter != lock_top_waiter)
++ rt_mutex_wake_waiter(lock_top_waiter);
+ raw_spin_unlock_irq(&lock->wait_lock);
+ return 0;
+ }
+@@ -789,9 +826,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
+ * @task: The task which wants to acquire the lock
+ * @waiter: The waiter that is queued to the lock's wait tree if the
+ * callsite called task_blocked_on_lock(), otherwise NULL
++ * @mode: Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
+ */
+-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+- struct rt_mutex_waiter *waiter)
++static int __try_to_take_rt_mutex(struct rt_mutex *lock,
++ struct task_struct *task,
++ struct rt_mutex_waiter *waiter, int mode)
+ {
+ lockdep_assert_held(&lock->wait_lock);
+
+@@ -827,12 +866,11 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ */
+ if (waiter) {
+ /*
+- * If waiter is not the highest priority waiter of
+- * @lock, give up.
++ * If waiter is not the highest priority waiter of @lock,
++ * or its peer when lateral steal is allowed, give up.
+ */
+- if (waiter != rt_mutex_top_waiter(lock))
++ if (!rt_mutex_steal(lock, waiter, mode))
+ return 0;
+-
+ /*
+ * We can acquire the lock. Remove the waiter from the
+ * lock waiters tree.
+@@ -850,14 +888,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ */
+ if (rt_mutex_has_waiters(lock)) {
+ /*
+- * If @task->prio is greater than or equal to
+- * the top waiter priority (kernel view),
+- * @task lost.
++ * If @task->prio is greater than the top waiter
++ * priority (kernel view), or equal to it when a
++ * lateral steal is forbidden, @task lost.
+ */
+- if (!rt_mutex_waiter_less(task_to_waiter(task),
+- rt_mutex_top_waiter(lock)))
++ if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
+ return 0;
+-
+ /*
+ * The current top waiter stays enqueued. We
+ * don't have to change anything in the lock
+@@ -904,6 +940,289 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ return 1;
+ }
+
++#ifdef CONFIG_PREEMPT_RT
++/*
++ * preemptible spin_lock functions:
++ */
++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
++ void (*slowfn)(struct rt_mutex *lock))
++{
++ might_sleep_no_state_check();
++
++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
++ return;
++ else
++ slowfn(lock);
++}
++
++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
++ void (*slowfn)(struct rt_mutex *lock))
++{
++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
++ return;
++ else
++ slowfn(lock);
++}
++#ifdef CONFIG_SMP
++/*
++ * Note that owner is a speculative pointer and dereferencing relies
++ * on rcu_read_lock() and the check against the lock owner.
++ */
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *owner)
++{
++ int res = 0;
++
++ rcu_read_lock();
++ for (;;) {
++ if (owner != rt_mutex_owner(lock))
++ break;
++ /*
++ * Ensure that owner->on_cpu is dereferenced _after_
++ * checking the above to be valid.
++ */
++ barrier();
++ if (!owner->on_cpu) {
++ res = 1;
++ break;
++ }
++ cpu_relax();
++ }
++ rcu_read_unlock();
++ return res;
++}
++#else
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *orig_owner)
++{
++ return 1;
++}
++#endif
++
++static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ struct task_struct *task,
++ enum rtmutex_chainwalk chwalk);
++/*
++ * Slow path lock function spin_lock style: this variant is very
++ * careful not to miss any non-lock wakeups.
++ *
++ * We store the current state under p->pi_lock in p->saved_state and
++ * the try_to_wake_up() code handles this accordingly.
++ */
++void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ unsigned long flags)
++{
++ struct task_struct *lock_owner, *self = current;
++ struct rt_mutex_waiter *top_waiter;
++ int ret;
++
++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL))
++ return;
++
++ BUG_ON(rt_mutex_owner(lock) == self);
++
++ /*
++ * We save whatever state the task is in and we'll restore it
++ * after acquiring the lock taking real wakeups into account
++ * as well. We are serialized via pi_lock against wakeups. See
++ * try_to_wake_up().
++ */
++ raw_spin_lock(&self->pi_lock);
++ self->saved_state = self->state;
++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
++ raw_spin_unlock(&self->pi_lock);
++
++ ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK);
++ BUG_ON(ret);
++
++ for (;;) {
++ /* Try to acquire the lock again. */
++ if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL))
++ break;
++
++ top_waiter = rt_mutex_top_waiter(lock);
++ lock_owner = rt_mutex_owner(lock);
++
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++
++ if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
++ schedule();
++
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
++ raw_spin_lock(&self->pi_lock);
++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
++ raw_spin_unlock(&self->pi_lock);
++ }
++
++ /*
++ * Restore the task state to current->saved_state. We set it
++ * to the original state above and the try_to_wake_up() code
++ * has possibly updated it when a real (non-rtmutex) wakeup
++ * happened while we were blocked. Clear saved_state so
++ * try_to_wake_up() does not get confused.
++ */
++ raw_spin_lock(&self->pi_lock);
++ __set_current_state_no_track(self->saved_state);
++ self->saved_state = TASK_RUNNING;
++ raw_spin_unlock(&self->pi_lock);
++
++ /*
++ * try_to_take_rt_mutex() sets the waiter bit
++ * unconditionally. We might have to fix that up:
++ */
++ fixup_rt_mutex_waiters(lock);
++
++ BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock));
++ BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry));
++}
++
++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
++{
++ struct rt_mutex_waiter waiter;
++ unsigned long flags;
++
++ rt_mutex_init_waiter(&waiter, true);
++
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++ rt_spin_lock_slowlock_locked(lock, &waiter, flags);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++ debug_rt_mutex_free_waiter(&waiter);
++}
++
++static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
++ struct wake_q_head *wake_q,
++ struct wake_q_head *wq_sleeper);
++/*
++ * Slow path to release a rt_mutex spin_lock style
++ */
++void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
++{
++ unsigned long flags;
++ DEFINE_WAKE_Q(wake_q);
++ DEFINE_WAKE_Q(wake_sleeper_q);
++ bool postunlock;
++
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++ postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
++
++ if (postunlock)
++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
++}
++
++void __lockfunc rt_spin_lock(spinlock_t *lock)
++{
++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ migrate_disable();
++}
++EXPORT_SYMBOL(rt_spin_lock);
++
++void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++}
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
++{
++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ migrate_disable();
++}
++EXPORT_SYMBOL(rt_spin_lock_nested);
++
++void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock,
++ struct lockdep_map *nest_lock)
++{
++ spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ migrate_disable();
++}
++EXPORT_SYMBOL(rt_spin_lock_nest_lock);
++#endif
++
++void __lockfunc rt_spin_unlock(spinlock_t *lock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, _RET_IP_);
++ migrate_enable();
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(rt_spin_unlock);
++
++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(__rt_spin_unlock);
++
++/*
++ * Wait for the lock to get unlocked: instead of polling for an unlock
++ * (like raw spinlocks do), we lock and unlock, to force the kernel to
++ * schedule if there's contention:
++ */
++void __lockfunc rt_spin_lock_unlock(spinlock_t *lock)
++{
++ spin_lock(lock);
++ spin_unlock(lock);
++}
++EXPORT_SYMBOL(rt_spin_lock_unlock);
++
++int __lockfunc rt_spin_trylock(spinlock_t *lock)
++{
++ int ret;
++
++ ret = __rt_mutex_trylock(&lock->lock);
++ if (ret) {
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ migrate_disable();
++ }
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock);
++
++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
++{
++ int ret;
++
++ local_bh_disable();
++ ret = __rt_mutex_trylock(&lock->lock);
++ if (ret) {
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ migrate_disable();
++ } else {
++ local_bh_enable();
++ }
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_bh);
++
++void
++__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
++ lockdep_init_map(&lock->dep_map, name, key, 0);
++#endif
++}
++EXPORT_SYMBOL(__rt_spin_lock_init);
++
++#endif /* PREEMPT_RT */
++
++static inline int
++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
++ struct rt_mutex_waiter *waiter)
++{
++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
++}
++
+ /*
+ * Task blocks on lock.
+ *
+@@ -1017,6 +1336,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ * Called with lock->wait_lock held and interrupts disabled.
+ */
+ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q,
+ struct rt_mutex *lock)
+ {
+ struct rt_mutex_waiter *waiter;
+@@ -1056,7 +1376,10 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+ * Pairs with preempt_enable() in rt_mutex_postunlock();
+ */
+ preempt_disable();
+- wake_q_add(wake_q, waiter->task);
++ if (waiter->savestate)
++ wake_q_add_sleeper(wake_sleeper_q, waiter->task);
++ else
++ wake_q_add(wake_q, waiter->task);
+ raw_spin_unlock(&current->pi_lock);
+ }
+
+@@ -1140,21 +1463,22 @@ void rt_mutex_adjust_pi(struct task_struct *task)
+ return;
+ }
+ next_lock = waiter->lock;
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(task);
+
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+ next_lock, NULL, task);
+ }
+
+-void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
++void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
+ {
+ debug_rt_mutex_init_waiter(waiter);
+ RB_CLEAR_NODE(&waiter->pi_tree_entry);
+ RB_CLEAR_NODE(&waiter->tree_entry);
+ waiter->task = NULL;
++ waiter->savestate = savestate;
+ }
+
+ /**
+@@ -1265,7 +1589,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ unsigned long flags;
+ int ret = 0;
+
+- rt_mutex_init_waiter(&waiter);
++ rt_mutex_init_waiter(&waiter, false);
+
+ /*
+ * Technically we could use raw_spin_[un]lock_irq() here, but this can
+@@ -1338,7 +1662,8 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
+ * Return whether the current task needs to call rt_mutex_postunlock().
+ */
+ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+- struct wake_q_head *wake_q)
++ struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q)
+ {
+ unsigned long flags;
+
+@@ -1392,7 +1717,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+ *
+ * Queue the next waiter for wakeup once we release the wait_lock.
+ */
+- mark_wakeup_next_waiter(wake_q, lock);
++ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ return true; /* call rt_mutex_postunlock() */
+@@ -1429,9 +1754,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ /*
+ * Performs the wakeup of the top-waiter and re-enables preemption.
+ */
+-void rt_mutex_postunlock(struct wake_q_head *wake_q)
++void rt_mutex_postunlock(struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q)
+ {
+ wake_up_q(wake_q);
++ wake_up_q_sleeper(wake_sleeper_q);
+
+ /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+ preempt_enable();
+@@ -1440,15 +1767,17 @@ void rt_mutex_postunlock(struct wake_q_head *wake_q)
+ static inline void
+ rt_mutex_fastunlock(struct rt_mutex *lock,
+ bool (*slowfn)(struct rt_mutex *lock,
+- struct wake_q_head *wqh))
++ struct wake_q_head *wqh,
++ struct wake_q_head *wq_sleeper))
+ {
+ DEFINE_WAKE_Q(wake_q);
++ DEFINE_WAKE_Q(wake_sleeper_q);
+
+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+ return;
+
+- if (slowfn(lock, &wake_q))
+- rt_mutex_postunlock(&wake_q);
++ if (slowfn(lock, &wake_q, &wake_sleeper_q))
++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+ }
+
+ int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
+@@ -1579,19 +1908,13 @@ void __sched __rt_mutex_unlock(struct rt_mutex *lock)
+ void __sched rt_mutex_unlock(struct rt_mutex *lock)
+ {
+ mutex_release(&lock->dep_map, _RET_IP_);
+- rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
++ __rt_mutex_unlock(lock);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+
+-/**
+- * __rt_mutex_futex_unlock - Futex variant, that since futex variants
+- * do not use the fast-path, can be simple and will not need to retry.
+- *
+- * @lock: The rt_mutex to be unlocked
+- * @wake_q: The wake queue head from which to get the next lock waiter
+- */
+-bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wake_q)
++static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
++ struct wake_q_head *wake_q,
++ struct wake_q_head *wq_sleeper)
+ {
+ lockdep_assert_held(&lock->wait_lock);
+
+@@ -1608,23 +1931,39 @@ bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
+ * avoid inversion prior to the wakeup. preempt_disable()
+ * therein pairs with rt_mutex_postunlock().
+ */
+- mark_wakeup_next_waiter(wake_q, lock);
++ mark_wakeup_next_waiter(wake_q, wq_sleeper, lock);
+
+ return true; /* call postunlock() */
+ }
+
++/**
++ * __rt_mutex_futex_unlock - Futex variant, that since futex variants
++ * do not use the fast-path, can be simple and will not need to retry.
++ *
++ * @lock: The rt_mutex to be unlocked
++ * @wake_q: The wake queue head from which to get the next lock waiter
++ * @wq_sleeper: The sleeper wake queue head from which to get the next lock waiter
++ */
++bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
++ struct wake_q_head *wake_q,
++ struct wake_q_head *wq_sleeper)
++{
++ return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper);
++}
++
+ void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
+ {
+ DEFINE_WAKE_Q(wake_q);
++ DEFINE_WAKE_Q(wake_sleeper_q);
+ unsigned long flags;
+ bool postunlock;
+
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+- postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
++ postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ if (postunlock)
+- rt_mutex_postunlock(&wake_q);
++ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+ }
+
+ /**
+@@ -1662,7 +2001,7 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name,
+ if (name && key)
+ debug_rt_mutex_init(lock, name, key);
+ }
+-EXPORT_SYMBOL_GPL(__rt_mutex_init);
++EXPORT_SYMBOL(__rt_mutex_init);
+
+ /**
+ * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
+@@ -1682,6 +2021,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner)
+ {
+ __rt_mutex_init(lock, NULL, NULL);
++#ifdef CONFIG_DEBUG_SPINLOCK
++ /*
++ * Get another key class for the wait_lock. LOCK_PI and UNLOCK_PI
++ * hold the ->wait_lock of the proxy_lock while unlocking a sleeping
++ * lock.
++ */
++ raw_spin_lock_init(&lock->wait_lock);
++#endif
+ debug_rt_mutex_proxy_lock(lock, proxy_owner);
+ rt_mutex_set_owner(lock, proxy_owner);
+ }
+@@ -1704,6 +2051,26 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock)
+ rt_mutex_set_owner(lock, NULL);
+ }
+
++static void fixup_rt_mutex_blocked(struct rt_mutex *lock)
++{
++ struct task_struct *tsk = current;
++ /*
++ * RT has a problem here when the wait got interrupted by a timeout
++ * or a signal. task->pi_blocked_on is still set. The task must
++ * acquire the hash bucket lock when returning from this function.
++ *
++ * If the hash bucket lock is contended then the
++ * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
++ * task_blocks_on_rt_mutex() will trigger. This can be avoided by
++ * clearing task->pi_blocked_on which removes the task from the
++ * boosting chain of the rtmutex. That's correct because the task
++ * is no longer blocked on it.
++ */
++ raw_spin_lock(&tsk->pi_lock);
++ tsk->pi_blocked_on = NULL;
++ raw_spin_unlock(&tsk->pi_lock);
++}
++
+ /**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock: the rt_mutex to take
+@@ -1776,6 +2143,9 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ ret = 0;
+ }
+
++ if (ret)
++ fixup_rt_mutex_blocked(lock);
++
+ return ret;
+ }
+
+@@ -1865,6 +2235,9 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+ * have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);
++ if (ret)
++ fixup_rt_mutex_blocked(lock);
++
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ return ret;
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index 9d1e974ca9c3..c1a280167e3c 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -31,6 +31,7 @@ struct rt_mutex_waiter {
+ struct task_struct *task;
+ struct rt_mutex *lock;
+ int prio;
++ bool savestate;
+ u64 deadline;
+ };
+
+@@ -133,7 +134,7 @@ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+ extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
+-extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
++extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate);
+ extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ struct task_struct *task);
+@@ -151,9 +152,12 @@ extern int __rt_mutex_futex_trylock(struct rt_mutex *l);
+
+ extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
+ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wqh);
++ struct wake_q_head *wqh,
++ struct wake_q_head *wq_sleeper);
++
++extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q);
+
+-extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
+ /* RW semaphore special interface */
+
+ extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
+@@ -163,6 +167,10 @@ int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk,
+ struct rt_mutex_waiter *waiter);
++void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
++ struct rt_mutex_waiter *waiter,
++ unsigned long flags);
++void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock);
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # include "rtmutex-debug.h"
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 4efe6fd72719..46e7db92b343 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -502,9 +502,15 @@ static bool set_nr_if_polling(struct task_struct *p)
+ #endif
+ #endif
+
+-static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task,
++ bool sleeper)
+ {
+- struct wake_q_node *node = &task->wake_q;
++ struct wake_q_node *node;
++
++ if (sleeper)
++ node = &task->wake_q_sleeper;
++ else
++ node = &task->wake_q;
+
+ /*
+ * Atomically grab the task, if ->wake_q is !nil already it means
+@@ -540,7 +546,13 @@ static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
+ */
+ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+ {
+- if (__wake_q_add(head, task))
++ if (__wake_q_add(head, task, false))
++ get_task_struct(task);
++}
++
++void wake_q_add_sleeper(struct wake_q_head *head, struct task_struct *task)
++{
++ if (__wake_q_add(head, task, true))
+ get_task_struct(task);
+ }
+
+@@ -563,28 +575,39 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+ */
+ void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
+ {
+- if (!__wake_q_add(head, task))
++ if (!__wake_q_add(head, task, false))
+ put_task_struct(task);
+ }
+
+-void wake_up_q(struct wake_q_head *head)
++void __wake_up_q(struct wake_q_head *head, bool sleeper)
+ {
+ struct wake_q_node *node = head->first;
+
+ while (node != WAKE_Q_TAIL) {
+ struct task_struct *task;
+
+- task = container_of(node, struct task_struct, wake_q);
++ if (sleeper)
++ task = container_of(node, struct task_struct, wake_q_sleeper);
++ else
++ task = container_of(node, struct task_struct, wake_q);
++
+ BUG_ON(!task);
+ /* Task can safely be re-inserted now: */
+ node = node->next;
+- task->wake_q.next = NULL;
+
++ if (sleeper)
++ task->wake_q_sleeper.next = NULL;
++ else
++ task->wake_q.next = NULL;
+ /*
+ * wake_up_process() executes a full barrier, which pairs with
+ * the queueing in wake_q_add() so as not to miss wakeups.
+ */
+- wake_up_process(task);
++ if (sleeper)
++ wake_up_lock_sleeper(task);
++ else
++ wake_up_process(task);
++
+ put_task_struct(task);
+ }
+ }
+--
+2.19.1
+
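The patch above defers the wakeup of sleeping-lock waiters onto a separate wake queue that is only flushed after the wait_lock has been dropped. As a rough user-space sketch of that collect-then-wake pattern (hypothetical names; pthread condition variables stand in for the scheduler, so this is an analogy rather than the kernel API):

/* Build with: gcc -pthread wakeq_demo.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    bool woken;
    struct task *next;              /* wake queue linkage */
};

struct wake_q { struct task *first; };

/* Called while the lock protecting the wait list is held: only queue. */
static void wake_q_add(struct wake_q *q, struct task *t)
{
    t->next = q->first;
    q->first = t;
}

/* Called after that lock is dropped: perform the deferred wakeups. */
static void wake_up_q(struct wake_q *q)
{
    for (struct task *t = q->first, *n; t; t = n) {
        n = t->next;
        pthread_mutex_lock(&t->lock);
        t->woken = true;
        pthread_cond_signal(&t->cond);
        pthread_mutex_unlock(&t->lock);
    }
    q->first = NULL;
}

static void *waiter(void *arg)
{
    struct task *t = arg;

    pthread_mutex_lock(&t->lock);
    while (!t->woken)
        pthread_cond_wait(&t->cond, &t->lock);
    pthread_mutex_unlock(&t->lock);
    puts("waiter woken");
    return NULL;
}

int main(void)
{
    struct task t = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                      false, NULL };
    struct wake_q q = { NULL };
    pthread_t thr;

    pthread_create(&thr, NULL, waiter, &t);
    wake_q_add(&q, &t);             /* inside the critical section */
    wake_up_q(&q);                  /* after unlock: wake collected tasks */
    pthread_join(thr, NULL);
    return 0;
}

The kernel version additionally reference-counts each queued task and keeps a second queue for "sleeper" wakeups, but the ordering constraint is the same: nothing is woken while the wait_lock is still held.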
diff --git a/features/rt/locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/features/rt/locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
new file mode 100644
index 00000000..4453ee0a
--- /dev/null
+++ b/features/rt/locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
@@ -0,0 +1,455 @@
+From 301251ee29aa4e2fc60f83f4826f4c1d15ad6bdc Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 12 Oct 2017 17:34:38 +0200
+Subject: [PATCH 081/191] locking/rtmutex: add ww_mutex addon for mutex-rt
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/mutex.h | 8 -
+ include/linux/ww_mutex.h | 8 +
+ kernel/locking/rtmutex.c | 262 ++++++++++++++++++++++++++++++--
+ kernel/locking/rtmutex_common.h | 2 +
+ kernel/locking/rwsem-rt.c | 2 +-
+ 5 files changed, 262 insertions(+), 20 deletions(-)
+
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 50c873744ab0..d7cfc5dc817b 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -82,14 +82,6 @@ struct mutex {
+ struct ww_class;
+ struct ww_acquire_ctx;
+
+-struct ww_mutex {
+- struct mutex base;
+- struct ww_acquire_ctx *ctx;
+-#ifdef CONFIG_DEBUG_MUTEXES
+- struct ww_class *ww_class;
+-#endif
+-};
+-
+ /*
+ * This is the control structure for tasks blocked on mutex,
+ * which resides on the blocked task's kernel stack:
+diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
+index 850424e5d030..8ef2feb0d8fe 100644
+--- a/include/linux/ww_mutex.h
++++ b/include/linux/ww_mutex.h
+@@ -28,6 +28,14 @@ struct ww_class {
+ unsigned int is_wait_die;
+ };
+
++struct ww_mutex {
++ struct mutex base;
++ struct ww_acquire_ctx *ctx;
++#ifdef CONFIG_DEBUG_MUTEXES
++ struct ww_class *ww_class;
++#endif
++};
++
+ struct ww_acquire_ctx {
+ struct task_struct *task;
+ unsigned long stamp;
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index f0bc7fcae441..3fc947dd2e60 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -24,6 +24,7 @@
+ #include <linux/sched/wake_q.h>
+ #include <linux/sched/debug.h>
+ #include <linux/timer.h>
++#include <linux/ww_mutex.h>
+
+ #include "rtmutex_common.h"
+
+@@ -1216,6 +1217,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+
+ #endif /* PREEMPT_RT */
+
++#ifdef CONFIG_PREEMPT_RT
++ static inline int __sched
++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
++ struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
++
++ if (!hold_ctx)
++ return 0;
++
++ if (unlikely(ctx == hold_ctx))
++ return -EALREADY;
++
++ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
++ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
++#ifdef CONFIG_DEBUG_MUTEXES
++ DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
++ ctx->contending_lock = ww;
++#endif
++ return -EDEADLK;
++ }
++
++ return 0;
++}
++#else
++ static inline int __sched
++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++ BUG();
++ return 0;
++}
++
++#endif
++
+ static inline int
+ try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter)
+@@ -1494,7 +1529,8 @@ void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
+ static int __sched
+ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- struct rt_mutex_waiter *waiter)
++ struct rt_mutex_waiter *waiter,
++ struct ww_acquire_ctx *ww_ctx)
+ {
+ int ret = 0;
+
+@@ -1512,6 +1548,12 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ break;
+ }
+
++ if (ww_ctx && ww_ctx->acquired > 0) {
++ ret = __mutex_lock_check_stamp(lock, ww_ctx);
++ if (ret)
++ break;
++ }
++
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ schedule();
+@@ -1540,16 +1582,106 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+ }
+ }
+
++static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
++ struct ww_acquire_ctx *ww_ctx)
++{
++#ifdef CONFIG_DEBUG_MUTEXES
++ /*
++ * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
++ * but released with a normal mutex_unlock in this call.
++ *
++ * This should never happen, always use ww_mutex_unlock.
++ */
++ DEBUG_LOCKS_WARN_ON(ww->ctx);
++
++ /*
++ * Not quite done after calling ww_acquire_done() ?
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
++
++ if (ww_ctx->contending_lock) {
++ /*
++ * After -EDEADLK you tried to
++ * acquire a different ww_mutex? Bad!
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
++
++ /*
++ * You called ww_mutex_lock after receiving -EDEADLK,
++ * but 'forgot' to unlock everything else first?
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
++ ww_ctx->contending_lock = NULL;
++ }
++
++ /*
++ * Naughty, using a different class will lead to undefined behavior!
++ */
++ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
++#endif
++ ww_ctx->acquired++;
++}
++
++#ifdef CONFIG_PREEMPT_RT
++static void ww_mutex_account_lock(struct rt_mutex *lock,
++ struct ww_acquire_ctx *ww_ctx)
++{
++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
++ struct rt_mutex_waiter *waiter, *n;
++
++ /*
++ * This branch gets optimized out for the common case,
++ * and is only important for ww_mutex_lock.
++ */
++ ww_mutex_lock_acquired(ww, ww_ctx);
++ ww->ctx = ww_ctx;
++
++ /*
++ * Give any possible sleeping processes the chance to wake up,
++ * so they can recheck if they have to back off.
++ */
++ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root,
++ tree_entry) {
++ /* XXX debug rt mutex waiter wakeup */
++
++ BUG_ON(waiter->lock != lock);
++ rt_mutex_wake_waiter(waiter);
++ }
++}
++
++#else
++
++static void ww_mutex_account_lock(struct rt_mutex *lock,
++ struct ww_acquire_ctx *ww_ctx)
++{
++ BUG();
++}
++#endif
++
+ int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx,
+ struct rt_mutex_waiter *waiter)
+ {
+ int ret;
+
++#ifdef CONFIG_PREEMPT_RT
++ if (ww_ctx) {
++ struct ww_mutex *ww;
++
++ ww = container_of(lock, struct ww_mutex, base.lock);
++ if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
++ return -EALREADY;
++ }
++#endif
++
+ /* Try to acquire the lock again: */
+- if (try_to_take_rt_mutex(lock, current, NULL))
++ if (try_to_take_rt_mutex(lock, current, NULL)) {
++ if (ww_ctx)
++ ww_mutex_account_lock(lock, ww_ctx);
+ return 0;
++ }
+
+ set_current_state(state);
+
+@@ -1559,14 +1691,24 @@ int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
+
+ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
+
+- if (likely(!ret))
++ if (likely(!ret)) {
+ /* sleep on the mutex */
+- ret = __rt_mutex_slowlock(lock, state, timeout, waiter);
++ ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
++ ww_ctx);
++ } else if (ww_ctx) {
++ /* ww_mutex received EDEADLK, let it become EALREADY */
++ ret = __mutex_lock_check_stamp(lock, ww_ctx);
++ BUG_ON(!ret);
++ }
+
+ if (unlikely(ret)) {
+ __set_current_state(TASK_RUNNING);
+ remove_waiter(lock, waiter);
+- rt_mutex_handle_deadlock(ret, chwalk, waiter);
++ /* ww_mutex wants to report EDEADLK/EALREADY, let it */
++ if (!ww_ctx)
++ rt_mutex_handle_deadlock(ret, chwalk, waiter);
++ } else if (ww_ctx) {
++ ww_mutex_account_lock(lock, ww_ctx);
+ }
+
+ /*
+@@ -1583,7 +1725,8 @@ int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
+ static int __sched
+ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk)
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx)
+ {
+ struct rt_mutex_waiter waiter;
+ unsigned long flags;
+@@ -1601,7 +1744,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ */
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+- ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, &waiter);
++ ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
++ &waiter);
+
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+@@ -1731,14 +1875,16 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+ */
+ static inline int
+ rt_mutex_fastlock(struct rt_mutex *lock, int state,
++ struct ww_acquire_ctx *ww_ctx,
+ int (*slowfn)(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk))
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx))
+ {
+ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ return 0;
+
+- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
+ }
+
+ static inline int
+@@ -1783,7 +1929,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
+ int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
+ {
+ might_sleep();
+- return rt_mutex_fastlock(lock, state, rt_mutex_slowlock);
++ return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
+ }
+
+ /**
+@@ -2233,7 +2379,7 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+ raw_spin_lock_irq(&lock->wait_lock);
+ /* sleep on the mutex */
+ set_current_state(TASK_INTERRUPTIBLE);
+- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
++ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+ * have to fix that up.
+@@ -2303,3 +2449,97 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
+
+ return cleanup;
+ }
++
++static inline int
++ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
++ unsigned int tmp;
++
++ if (ctx->deadlock_inject_countdown-- == 0) {
++ tmp = ctx->deadlock_inject_interval;
++ if (tmp > UINT_MAX/4)
++ tmp = UINT_MAX;
++ else
++ tmp = tmp*2 + tmp + tmp/2;
++
++ ctx->deadlock_inject_interval = tmp;
++ ctx->deadlock_inject_countdown = tmp;
++ ctx->contending_lock = lock;
++
++ ww_mutex_unlock(lock);
++
++ return -EDEADLK;
++ }
++#endif
++
++ return 0;
++}
++
++#ifdef CONFIG_PREEMPT_RT
++int __sched
++ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++ int ret;
++
++ might_sleep();
++
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0,
++ ctx ? &ctx->dep_map : NULL, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
++ ctx);
++ if (ret)
++ mutex_release(&lock->base.dep_map, _RET_IP_);
++ else if (!ret && ctx && ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ctx);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
++
++int __sched
++ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
++{
++ int ret;
++
++ might_sleep();
++
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0,
++ ctx ? &ctx->dep_map : NULL, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
++ ctx);
++ if (ret)
++ mutex_release(&lock->base.dep_map, _RET_IP_);
++ else if (!ret && ctx && ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ctx);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(ww_mutex_lock);
++
++void __sched ww_mutex_unlock(struct ww_mutex *lock)
++{
++ /*
++ * The unlocking fastpath is the 0->1 transition from 'locked'
++ * into 'unlocked' state:
++ */
++ if (lock->ctx) {
++#ifdef CONFIG_DEBUG_MUTEXES
++ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
++#endif
++ if (lock->ctx->acquired > 0)
++ lock->ctx->acquired--;
++ lock->ctx = NULL;
++ }
++
++ mutex_release(&lock->base.dep_map, _RET_IP_);
++ __rt_mutex_unlock(&lock->base.lock);
++}
++EXPORT_SYMBOL(ww_mutex_unlock);
++
++int __rt_mutex_owner_current(struct rt_mutex *lock)
++{
++ return rt_mutex_owner(lock) == current;
++}
++EXPORT_SYMBOL(__rt_mutex_owner_current);
++#endif
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index c1a280167e3c..248a7d91583b 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -159,6 +159,7 @@ extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
+ struct wake_q_head *wake_sleeper_q);
+
+ /* RW semaphore special interface */
++struct ww_acquire_ctx;
+
+ extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
+ extern int __rt_mutex_trylock(struct rt_mutex *lock);
+@@ -166,6 +167,7 @@ extern void __rt_mutex_unlock(struct rt_mutex *lock);
+ int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx,
+ struct rt_mutex_waiter *waiter);
+ void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c
+index a0771c150041..274172d5bb3a 100644
+--- a/kernel/locking/rwsem-rt.c
++++ b/kernel/locking/rwsem-rt.c
+@@ -138,7 +138,7 @@ static int __sched __down_read_common(struct rw_semaphore *sem, int state)
+ */
+ rt_mutex_init_waiter(&waiter, false);
+ ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK,
+- &waiter);
++ NULL, &waiter);
+ /*
+ * The slowlock() above is guaranteed to return with the rtmutex (for
+ * ret = 0) is now held, so there can't be a writer active. Increment
+--
+2.19.1
+
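The heart of the ww_mutex addon is the stamp comparison in __mutex_lock_check_stamp(): a context colliding with a lock it already holds gets -EALREADY, and a context younger than the current holder backs off with -EDEADLK. A minimal stand-alone sketch of that decision, with invented error constants and a user-space acquire-context type:

/* Sketch of the wait/wound stamp decision (hypothetical names). */
#include <limits.h>
#include <stdio.h>

#define ERR_ALREADY 1
#define ERR_DEADLK  2

struct acquire_ctx {
    unsigned long stamp;            /* lower stamp == older transaction */
};

/*
 * 0: the requester may keep waiting.
 * -ERR_ALREADY: the requester already holds the lock through this context.
 * -ERR_DEADLK: the requester is younger than the holder and must back off.
 */
static int check_stamp(const struct acquire_ctx *req,
                       const struct acquire_ctx *holder)
{
    if (!holder)
        return 0;
    if (req == holder)
        return -ERR_ALREADY;
    /* wraparound-safe test for "req's stamp was issued after holder's" */
    if (req->stamp - holder->stamp <= LONG_MAX && req->stamp != holder->stamp)
        return -ERR_DEADLK;
    return 0;
}

int main(void)
{
    struct acquire_ctx older = { .stamp = 10 }, younger = { .stamp = 20 };

    printf("older vs younger holder: %d\n", check_stamp(&older, &younger));
    printf("younger vs older holder: %d\n", check_stamp(&younger, &older));
    printf("same context:            %d\n", check_stamp(&older, &older));
    return 0;
}

Running it shows the older transaction keeps waiting (0) while the younger one is told to back off, which mirrors the back-off behaviour the patch wires into __rt_mutex_slowlock().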
diff --git a/features/rt/locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch b/features/rt/locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
new file mode 100644
index 00000000..af6400f3
--- /dev/null
+++ b/features/rt/locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
@@ -0,0 +1,129 @@
+From 9d29157866e3702ace5facd72443ded3418db861 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 12 Oct 2017 16:36:39 +0200
+Subject: [PATCH 073/191] locking/rtmutex: export lockdep-less version of
+ rt_mutex's lock, trylock and unlock
+
+Required for the lock implementation on top of rtmutex.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 54 +++++++++++++++++++++++----------
+ kernel/locking/rtmutex_common.h | 3 ++
+ 2 files changed, 41 insertions(+), 16 deletions(-)
+
+diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
+index 670c4a577322..86012b16f0c2 100644
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1451,12 +1451,33 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
+ rt_mutex_postunlock(&wake_q);
+ }
+
+-static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
++int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
+ {
+ might_sleep();
++ return rt_mutex_fastlock(lock, state, rt_mutex_slowlock);
++}
++
++/**
++ * rt_mutex_lock_state - lock a rt_mutex with a given state
++ *
++ * @lock: The rt_mutex to be locked
++ * @state: The state to set when blocking on the rt_mutex
++ */
++static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock,
++ unsigned int subclass, int state)
++{
++ int ret;
+
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
++ ret = __rt_mutex_lock_state(lock, state);
++ if (ret)
++ mutex_release(&lock->dep_map, _RET_IP_);
++ return ret;
++}
++
++static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
++{
++ rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE);
+ }
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+@@ -1497,16 +1518,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+ */
+ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
+ {
+- int ret;
+-
+- might_sleep();
+-
+- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+- ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+- if (ret)
+- mutex_release(&lock->dep_map, _RET_IP_);
+-
+- return ret;
++ return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+@@ -1523,6 +1535,14 @@ int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+ return __rt_mutex_slowtrylock(lock);
+ }
+
++int __sched __rt_mutex_trylock(struct rt_mutex *lock)
++{
++ if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
++ return 0;
++
++ return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
++}
++
+ /**
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+@@ -1538,10 +1558,7 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
+ {
+ int ret;
+
+- if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
+- return 0;
+-
+- ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
++ ret = __rt_mutex_trylock(lock);
+ if (ret)
+ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+@@ -1549,6 +1566,11 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
+
++void __sched __rt_mutex_unlock(struct rt_mutex *lock)
++{
++ rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
++}
++
+ /**
+ * rt_mutex_unlock - unlock a rt_mutex
+ *
+diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
+index b5a2affa59d5..9d1e974ca9c3 100644
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -156,6 +156,9 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
+ extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
+ /* RW semaphore special interface */
+
++extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
++extern int __rt_mutex_trylock(struct rt_mutex *lock);
++extern void __rt_mutex_unlock(struct rt_mutex *lock);
+ int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ enum rtmutex_chainwalk chwalk,
+--
+2.19.1
+
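The split above keeps the lockdep annotations in the public rt_mutex_lock()/rt_mutex_trylock()/rt_mutex_unlock() wrappers while exporting bare __rt_mutex_*() variants underneath, so locks built on top of the rtmutex can supply their own annotations. A small user-space sketch of that wrapper/raw split, with invented names and a per-thread counter standing in for lockdep:

/* Annotated wrapper around a raw locking primitive (hypothetical names). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t raw_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int held_locks;     /* stand-in for lockdep's per-task state */

/* Lockdep-less core: for callers that do their own annotation. */
static void __my_lock(void)   { pthread_mutex_lock(&raw_lock); }
static void __my_unlock(void) { pthread_mutex_unlock(&raw_lock); }

/* Annotated wrappers: account the acquisition around the raw calls. */
static void my_lock(void)
{
    held_locks++;                   /* "mutex_acquire()" */
    __my_lock();
}

static void my_unlock(void)
{
    held_locks--;                   /* "mutex_release()" */
    __my_unlock();
}

int main(void)
{
    my_lock();
    printf("held: %d\n", held_locks);   /* 1 */
    my_unlock();

    /* A higher-level lock built on the raw variant annotates itself. */
    __my_lock();
    __my_unlock();
    printf("held: %d\n", held_locks);   /* still 0 */
    return 0;
}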
diff --git a/features/rt/locking-rtmutex-wire-up-RT-s-locking.patch b/features/rt/locking-rtmutex-wire-up-RT-s-locking.patch
new file mode 100644
index 00000000..c6b001b5
--- /dev/null
+++ b/features/rt/locking-rtmutex-wire-up-RT-s-locking.patch
@@ -0,0 +1,346 @@
+From cd8326d4ea23e407e1168cc2c6f1005d5c0ec743 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 12 Oct 2017 17:31:14 +0200
+Subject: [PATCH 080/191] locking/rtmutex: wire up RT's locking
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/mutex.h | 25 +++++++++++++++----------
+ include/linux/rwsem.h | 12 ++++++++++++
+ include/linux/spinlock.h | 12 +++++++++++-
+ include/linux/spinlock_api_smp.h | 4 +++-
+ include/linux/spinlock_types.h | 11 ++++++++---
+ include/linux/spinlock_types_up.h | 2 +-
+ kernel/Kconfig.preempt | 1 +
+ kernel/locking/Makefile | 10 +++++++---
+ kernel/locking/rwsem.c | 6 ++++++
+ kernel/locking/spinlock.c | 7 +++++++
+ kernel/locking/spinlock_debug.c | 5 +++++
+ 11 files changed, 76 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 0cd631a19727..50c873744ab0 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -22,6 +22,20 @@
+
+ struct ww_acquire_ctx;
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
++ , .dep_map = { \
++ .name = #lockname, \
++ .wait_type_inner = LD_WAIT_SLEEP, \
++ }
++#else
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT
++# include <linux/mutex_rt.h>
++#else
++
+ /*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+@@ -119,16 +133,6 @@ do { \
+ __mutex_init((mutex), #mutex, &__key); \
+ } while (0)
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+- , .dep_map = { \
+- .name = #lockname, \
+- .wait_type_inner = LD_WAIT_SLEEP, \
+- }
+-#else
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+-#endif
+-
+ #define __MUTEX_INITIALIZER(lockname) \
+ { .owner = ATOMIC_LONG_INIT(0) \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+@@ -199,4 +203,5 @@ extern void mutex_unlock(struct mutex *lock);
+
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
++#endif /* !PREEMPT_RT */
+ #endif /* __LINUX_MUTEX_H */
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index 4c715be48717..9323af8a9244 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -16,6 +16,11 @@
+ #include <linux/spinlock.h>
+ #include <linux/atomic.h>
+ #include <linux/err.h>
++
++#ifdef CONFIG_PREEMPT_RT
++#include <linux/rwsem-rt.h>
++#else /* PREEMPT_RT */
++
+ #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ #include <linux/osq_lock.h>
+ #endif
+@@ -119,6 +124,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
+ return !list_empty(&sem->wait_list);
+ }
+
++#endif /* !PREEMPT_RT */
++
++/*
++ * The functions below are the same for all rwsem implementations including
++ * the RT-specific variant.
++ */
++
+ /*
+ * lock for reading
+ */
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 79897841a2cc..c3c70291b46c 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -309,7 +309,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ })
+
+ /* Include rwlock functions */
+-#include <linux/rwlock.h>
++#ifdef CONFIG_PREEMPT_RT
++# include <linux/rwlock_rt.h>
++#else
++# include <linux/rwlock.h>
++#endif
+
+ /*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+@@ -320,6 +324,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ # include <linux/spinlock_api_up.h>
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT
++# include <linux/spinlock_rt.h>
++#else /* PREEMPT_RT */
++
+ /*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+@@ -454,6 +462,8 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
+
+ #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
+
++#endif /* !PREEMPT_RT */
++
+ /*
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
+diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
+index 19a9be9d97ee..da38149f2843 100644
+--- a/include/linux/spinlock_api_smp.h
++++ b/include/linux/spinlock_api_smp.h
+@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ return 0;
+ }
+
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT
++# include <linux/rwlock_api_smp.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
+index 5c8664d57fb8..8d896d3e1a01 100644
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
+@@ -11,8 +11,13 @@
+
+ #include <linux/spinlock_types_raw.h>
+
+-#include <linux/spinlock_types_nort.h>
+-
+-#include <linux/rwlock_types.h>
++#ifndef CONFIG_PREEMPT_RT
++# include <linux/spinlock_types_nort.h>
++# include <linux/rwlock_types.h>
++#else
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_TYPES_H */
+diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
+index c09b6407ae1b..d9b371fa13e0 100644
+--- a/include/linux/spinlock_types_up.h
++++ b/include/linux/spinlock_types_up.h
+@@ -1,7 +1,7 @@
+ #ifndef __LINUX_SPINLOCK_TYPES_UP_H
+ #define __LINUX_SPINLOCK_TYPES_UP_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
++#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__LINUX_RT_MUTEX_H)
+ # error "please don't include this file directly"
+ #endif
+
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 416017301660..c8e516f6db20 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -60,6 +60,7 @@ config PREEMPT_RT
+ bool "Fully Preemptible Kernel (Real-Time)"
+ depends on EXPERT && ARCH_SUPPORTS_RT
+ select PREEMPTION
++ select RT_MUTEXES
+ help
+ This option turns the kernel into a real-time kernel by replacing
+ various locking primitives (spinlocks, rwlocks, etc.) with
+diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
+index 8838f1d7c4a2..b4bdab93e2fc 100644
+--- a/kernel/locking/Makefile
++++ b/kernel/locking/Makefile
+@@ -3,7 +3,7 @@
+ # and is generally not a function of system call inputs.
+ KCOV_INSTRUMENT := n
+
+-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
++obj-y += semaphore.o rwsem.o percpu-rwsem.o
+
+ # Avoid recursion lockdep -> KCSAN -> ... -> lockdep.
+ KCSAN_SANITIZE_lockdep.o := n
+@@ -16,19 +16,23 @@ CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
+ endif
+
+ obj-$(CONFIG_DEBUG_IRQFLAGS) += irqflag-debug.o
+-obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+ endif
+ obj-$(CONFIG_SMP) += spinlock.o
+-obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
+ obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+ obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
+ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
++ifneq ($(CONFIG_PREEMPT_RT),y)
++obj-y += mutex.o
++obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
++obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
++endif
++obj-$(CONFIG_PREEMPT_RT) += mutex-rt.o rwsem-rt.o rwlock-rt.o
+ obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
+ obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+ obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index abba5df50006..bed9a5e32a16 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -28,6 +28,7 @@
+ #include <linux/rwsem.h>
+ #include <linux/atomic.h>
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "lock_events.h"
+
+ /*
+@@ -1343,6 +1344,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ if (tmp & RWSEM_FLAG_WAITERS)
+ rwsem_downgrade_wake(sem);
+ }
++#endif
+
+ /*
+ * lock for reading
+@@ -1506,7 +1508,9 @@ void down_read_non_owner(struct rw_semaphore *sem)
+ {
+ might_sleep();
+ __down_read(sem);
++#ifndef CONFIG_PREEMPT_RT
+ __rwsem_set_reader_owned(sem, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(down_read_non_owner);
+
+@@ -1535,7 +1539,9 @@ EXPORT_SYMBOL(down_write_killable_nested);
+
+ void up_read_non_owner(struct rw_semaphore *sem)
+ {
++#ifndef CONFIG_PREEMPT_RT
+ DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
++#endif
+ __up_read(sem);
+ }
+ EXPORT_SYMBOL(up_read_non_owner);
+diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
+index 0ff08380f531..45445a2f1799 100644
+--- a/kernel/locking/spinlock.c
++++ b/kernel/locking/spinlock.c
+@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
+ * __[spin|read|write]_lock_bh()
+ */
+ BUILD_LOCK_OPS(spin, raw_spinlock);
++
++#ifndef CONFIG_PREEMPT_RT
+ BUILD_LOCK_OPS(read, rwlock);
+ BUILD_LOCK_OPS(write, rwlock);
++#endif
+
+ #endif
+
+@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
+ EXPORT_SYMBOL(_raw_spin_unlock_bh);
+ #endif
+
++#ifndef CONFIG_PREEMPT_RT
++
+ #ifndef CONFIG_INLINE_READ_TRYLOCK
+ int __lockfunc _raw_read_trylock(rwlock_t *lock)
+ {
+@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
+ EXPORT_SYMBOL(_raw_write_unlock_bh);
+ #endif
+
++#endif /* !PREEMPT_RT */
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
+index b9d93087ee66..72e306e0e8a3 100644
+--- a/kernel/locking/spinlock_debug.c
++++ b/kernel/locking/spinlock_debug.c
+@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+
+ EXPORT_SYMBOL(__raw_spin_lock_init);
+
++#ifndef CONFIG_PREEMPT_RT
+ void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key)
+ {
+@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
+ }
+
+ EXPORT_SYMBOL(__rwlock_init);
++#endif
+
+ static void spin_dump(raw_spinlock_t *lock, const char *msg)
+ {
+@@ -139,6 +141,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
+ arch_spin_unlock(&lock->raw_lock);
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ static void rwlock_bug(rwlock_t *lock, const char *msg)
+ {
+ if (!debug_locks_off())
+@@ -228,3 +231,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+ debug_write_unlock(lock);
+ arch_write_unlock(&lock->raw_lock);
+ }
++
++#endif
+--
+2.19.1
+
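Wiring up RT's locking boils down to selecting, at build time, which implementation sits behind the unchanged spinlock/rwlock/rwsem/mutex APIs. A small user-space sketch of the same compile-time selection, assuming an invented MY_PREEMPT_RT switch (build with or without -DMY_PREEMPT_RT, plus -pthread):

#include <pthread.h>
#include <stdio.h>

#ifdef MY_PREEMPT_RT
/* "RT" build: the lock may sleep (a pthread mutex as stand-in). */
typedef pthread_mutex_t my_spinlock_t;
static void my_spin_init(my_spinlock_t *l)   { pthread_mutex_init(l, NULL); }
static void my_spin_lock(my_spinlock_t *l)   { pthread_mutex_lock(l); }
static void my_spin_unlock(my_spinlock_t *l) { pthread_mutex_unlock(l); }
#else
/* non-RT build: a true busy-waiting spinlock. */
typedef pthread_spinlock_t my_spinlock_t;
static void my_spin_init(my_spinlock_t *l)
{
    pthread_spin_init(l, PTHREAD_PROCESS_PRIVATE);
}
static void my_spin_lock(my_spinlock_t *l)   { pthread_spin_lock(l); }
static void my_spin_unlock(my_spinlock_t *l) { pthread_spin_unlock(l); }
#endif

int main(void)
{
    my_spinlock_t lock;

    my_spin_init(&lock);
    my_spin_lock(&lock);
    puts("critical section");
    my_spin_unlock(&lock);
    return 0;
}

Callers see one my_spin_lock()/my_spin_unlock() API; only the configuration decides whether it spins or sleeps, which mirrors how the headers above route spinlock_t to either the raw or the rtmutex-based implementation.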
diff --git a/features/rt/locking-spinlock-Split-the-lock-types-header.patch b/features/rt/locking-spinlock-Split-the-lock-types-header.patch
new file mode 100644
index 00000000..6dccd49d
--- /dev/null
+++ b/features/rt/locking-spinlock-Split-the-lock-types-header.patch
@@ -0,0 +1,252 @@
+From e51cfeec84cee412a53528dc1069ace2ab24ae44 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 29 Jun 2011 19:34:01 +0200
+Subject: [PATCH 068/191] locking/spinlock: Split the lock types header
+
+Split raw_spinlock into its own file and the remaining spinlock_t into
+its own non-RT header. The non-RT header will be replaced later by sleeping
+spinlocks.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rwlock_types.h | 4 ++
+ include/linux/spinlock_types.h | 87 +----------------------------
+ include/linux/spinlock_types_nort.h | 39 +++++++++++++
+ include/linux/spinlock_types_raw.h | 65 +++++++++++++++++++++
+ 4 files changed, 110 insertions(+), 85 deletions(-)
+ create mode 100644 include/linux/spinlock_types_nort.h
+ create mode 100644 include/linux/spinlock_types_raw.h
+
+diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
+index 3bd03e18061c..0ad226b5d8fd 100644
+--- a/include/linux/rwlock_types.h
++++ b/include/linux/rwlock_types.h
+@@ -1,6 +1,10 @@
+ #ifndef __LINUX_RWLOCK_TYPES_H
+ #define __LINUX_RWLOCK_TYPES_H
+
++#if !defined(__LINUX_SPINLOCK_TYPES_H)
++# error "Do not include directly, include spinlock_types.h"
++#endif
++
+ /*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
+index b981caafe8bf..5c8664d57fb8 100644
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
+@@ -9,92 +9,9 @@
+ * Released under the General Public License (GPL).
+ */
+
+-#if defined(CONFIG_SMP)
+-# include <asm/spinlock_types.h>
+-#else
+-# include <linux/spinlock_types_up.h>
+-#endif
++#include <linux/spinlock_types_raw.h>
+
+-#include <linux/lockdep_types.h>
+-
+-typedef struct raw_spinlock {
+- arch_spinlock_t raw_lock;
+-#ifdef CONFIG_DEBUG_SPINLOCK
+- unsigned int magic, owner_cpu;
+- void *owner;
+-#endif
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- struct lockdep_map dep_map;
+-#endif
+-} raw_spinlock_t;
+-
+-#define SPINLOCK_MAGIC 0xdead4ead
+-
+-#define SPINLOCK_OWNER_INIT ((void *)-1L)
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define RAW_SPIN_DEP_MAP_INIT(lockname) \
+- .dep_map = { \
+- .name = #lockname, \
+- .wait_type_inner = LD_WAIT_SPIN, \
+- }
+-# define SPIN_DEP_MAP_INIT(lockname) \
+- .dep_map = { \
+- .name = #lockname, \
+- .wait_type_inner = LD_WAIT_CONFIG, \
+- }
+-#else
+-# define RAW_SPIN_DEP_MAP_INIT(lockname)
+-# define SPIN_DEP_MAP_INIT(lockname)
+-#endif
+-
+-#ifdef CONFIG_DEBUG_SPINLOCK
+-# define SPIN_DEBUG_INIT(lockname) \
+- .magic = SPINLOCK_MAGIC, \
+- .owner_cpu = -1, \
+- .owner = SPINLOCK_OWNER_INIT,
+-#else
+-# define SPIN_DEBUG_INIT(lockname)
+-#endif
+-
+-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+- { \
+- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+- SPIN_DEBUG_INIT(lockname) \
+- RAW_SPIN_DEP_MAP_INIT(lockname) }
+-
+-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+-
+-typedef struct spinlock {
+- union {
+- struct raw_spinlock rlock;
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+- struct {
+- u8 __padding[LOCK_PADSIZE];
+- struct lockdep_map dep_map;
+- };
+-#endif
+- };
+-} spinlock_t;
+-
+-#define ___SPIN_LOCK_INITIALIZER(lockname) \
+- { \
+- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+- SPIN_DEBUG_INIT(lockname) \
+- SPIN_DEP_MAP_INIT(lockname) }
+-
+-#define __SPIN_LOCK_INITIALIZER(lockname) \
+- { { .rlock = ___SPIN_LOCK_INITIALIZER(lockname) } }
+-
+-#define __SPIN_LOCK_UNLOCKED(lockname) \
+- (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++#include <linux/spinlock_types_nort.h>
+
+ #include <linux/rwlock_types.h>
+
+diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h
+new file mode 100644
+index 000000000000..e4549f0dd197
+--- /dev/null
++++ b/include/linux/spinlock_types_nort.h
+@@ -0,0 +1,39 @@
++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
++#define __LINUX_SPINLOCK_TYPES_NORT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * The non-RT version maps spinlocks to raw_spinlocks
++ */
++typedef struct spinlock {
++ union {
++ struct raw_spinlock rlock;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
++ struct {
++ u8 __padding[LOCK_PADSIZE];
++ struct lockdep_map dep_map;
++ };
++#endif
++ };
++} spinlock_t;
++
++#define ___SPIN_LOCK_INITIALIZER(lockname) \
++{ \
++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
++ SPIN_DEBUG_INIT(lockname) \
++ SPIN_DEP_MAP_INIT(lockname) }
++
++#define __SPIN_LOCK_INITIALIZER(lockname) \
++ { { .rlock = ___SPIN_LOCK_INITIALIZER(lockname) } }
++
++#define __SPIN_LOCK_UNLOCKED(lockname) \
++ (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++
++#endif
+diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h
+new file mode 100644
+index 000000000000..1d4a180e983d
+--- /dev/null
++++ b/include/linux/spinlock_types_raw.h
+@@ -0,0 +1,65 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
++#define __LINUX_SPINLOCK_TYPES_RAW_H
++
++#include <linux/types.h>
++
++#if defined(CONFIG_SMP)
++# include <asm/spinlock_types.h>
++#else
++# include <linux/spinlock_types_up.h>
++#endif
++
++#include <linux/lockdep_types.h>
++
++typedef struct raw_spinlock {
++ arch_spinlock_t raw_lock;
++#ifdef CONFIG_DEBUG_SPINLOCK
++ unsigned int magic, owner_cpu;
++ void *owner;
++#endif
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} raw_spinlock_t;
++
++#define SPINLOCK_MAGIC 0xdead4ead
++
++#define SPINLOCK_OWNER_INIT ((void *)-1L)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define RAW_SPIN_DEP_MAP_INIT(lockname) \
++ .dep_map = { \
++ .name = #lockname, \
++ .wait_type_inner = LD_WAIT_SPIN, \
++ }
++# define SPIN_DEP_MAP_INIT(lockname) \
++ .dep_map = { \
++ .name = #lockname, \
++ .wait_type_inner = LD_WAIT_CONFIG, \
++ }
++#else
++# define RAW_SPIN_DEP_MAP_INIT(lockname)
++# define SPIN_DEP_MAP_INIT(lockname)
++#endif
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define SPIN_DEBUG_INIT(lockname) \
++ .magic = SPINLOCK_MAGIC, \
++ .owner_cpu = -1, \
++ .owner = SPINLOCK_OWNER_INIT,
++#else
++# define SPIN_DEBUG_INIT(lockname)
++#endif
++
++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
++{ \
++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
++ SPIN_DEBUG_INIT(lockname) \
++ RAW_SPIN_DEP_MAP_INIT(lockname) }
++
++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
++
++#endif
+--
+2.19.1
+
diff --git a/features/rt/locking-split-out-the-rbtree-definition.patch b/features/rt/locking-split-out-the-rbtree-definition.patch
new file mode 100644
index 00000000..8af42b5d
--- /dev/null
+++ b/features/rt/locking-split-out-the-rbtree-definition.patch
@@ -0,0 +1,119 @@
+From e13218659647185acc44046be640f023fd392f7b Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 14 Aug 2020 17:08:41 +0200
+Subject: [PATCH 071/191] locking: split out the rbtree definition
+
+rtmutex.h needs the definition of rb_root_cached. Including kernel.h
+pulls in spinlock.h, which requires rtmutex.h again.
+
+Split out the required struct definition and move it into its own header
+file, which can be included by rtmutex.h.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rbtree.h | 27 +--------------------------
+ include/linux/rbtree_type.h | 31 +++++++++++++++++++++++++++++++
+ include/linux/rtmutex.h | 2 +-
+ 3 files changed, 33 insertions(+), 27 deletions(-)
+ create mode 100644 include/linux/rbtree_type.h
+
+diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
+index d31ecaf4fdd3..e711efc2e009 100644
+--- a/include/linux/rbtree.h
++++ b/include/linux/rbtree.h
+@@ -19,19 +19,9 @@
+
+ #include <linux/kernel.h>
+ #include <linux/stddef.h>
++#include <linux/rbtree_type.h>
+ #include <linux/rcupdate.h>
+
+-struct rb_node {
+- unsigned long __rb_parent_color;
+- struct rb_node *rb_right;
+- struct rb_node *rb_left;
+-} __attribute__((aligned(sizeof(long))));
+- /* The alignment might seem pointless, but allegedly CRIS needs it */
+-
+-struct rb_root {
+- struct rb_node *rb_node;
+-};
+-
+ #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
+
+ #define RB_ROOT (struct rb_root) { NULL, }
+@@ -112,21 +102,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent
+ typeof(*pos), field); 1; }); \
+ pos = n)
+
+-/*
+- * Leftmost-cached rbtrees.
+- *
+- * We do not cache the rightmost node based on footprint
+- * size vs number of potential users that could benefit
+- * from O(1) rb_last(). Just not worth it, users that want
+- * this feature can always implement the logic explicitly.
+- * Furthermore, users that want to cache both pointers may
+- * find it a bit asymmetric, but that's ok.
+- */
+-struct rb_root_cached {
+- struct rb_root rb_root;
+- struct rb_node *rb_leftmost;
+-};
+-
+ #define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
+
+ /* Same as rb_first(), but O(1) */
+diff --git a/include/linux/rbtree_type.h b/include/linux/rbtree_type.h
+new file mode 100644
+index 000000000000..77a89dd2c7c6
+--- /dev/null
++++ b/include/linux/rbtree_type.h
+@@ -0,0 +1,31 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++#ifndef _LINUX_RBTREE_TYPE_H
++#define _LINUX_RBTREE_TYPE_H
++
++struct rb_node {
++ unsigned long __rb_parent_color;
++ struct rb_node *rb_right;
++ struct rb_node *rb_left;
++} __attribute__((aligned(sizeof(long))));
++/* The alignment might seem pointless, but allegedly CRIS needs it */
++
++struct rb_root {
++ struct rb_node *rb_node;
++};
++
++/*
++ * Leftmost-cached rbtrees.
++ *
++ * We do not cache the rightmost node based on footprint
++ * size vs number of potential users that could benefit
++ * from O(1) rb_last(). Just not worth it, users that want
++ * this feature can always implement the logic explicitly.
++ * Furthermore, users that want to cache both pointers may
++ * find it a bit asymmetric, but that's ok.
++ */
++struct rb_root_cached {
++ struct rb_root rb_root;
++ struct rb_node *rb_leftmost;
++};
++
++#endif
+diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
+index add1dab27df5..b828b938c876 100644
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -14,7 +14,7 @@
+ #define __LINUX_RT_MUTEX_H
+
+ #include <linux/linkage.h>
+-#include <linux/rbtree.h>
++#include <linux/rbtree_type.h>
+ #include <linux/spinlock_types_raw.h>
+
+ extern int max_lock_depth; /* for sysctl */
+--
+2.19.1
+
diff --git a/features/rt/md-raid5-Make-raid5_percpu-handling-RT-aware.patch b/features/rt/md-raid5-Make-raid5_percpu-handling-RT-aware.patch
new file mode 100644
index 00000000..43380924
--- /dev/null
+++ b/features/rt/md-raid5-Make-raid5_percpu-handling-RT-aware.patch
@@ -0,0 +1,68 @@
+From 28893cffb6465718d2196b201ec4dc9e41aca776 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 6 Apr 2010 16:51:31 +0200
+Subject: [PATCH 135/191] md: raid5: Make raid5_percpu handling RT aware
+
+__raid_run_ops() disables preemption with get_cpu() around the access
+to the raid5_percpu variables. That causes scheduling while atomic
+spews on RT.
+
+Serialize the access to the percpu data with a lock and keep the code
+preemptible.
+
+Reported-by: Udo van den Heuvel <udovdh@xs4all.nl>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
+---
+ drivers/md/raid5.c | 7 +++++--
+ drivers/md/raid5.h | 1 +
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 5d57a5bd171f..86d6a676a509 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2216,8 +2216,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+ struct raid5_percpu *percpu;
+ unsigned long cpu;
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ percpu = per_cpu_ptr(conf->percpu, cpu);
++ spin_lock(&percpu->lock);
+ if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
+ ops_run_biofill(sh);
+ overlap_clear++;
+@@ -2276,7 +2277,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
+ if (test_and_clear_bit(R5_Overlap, &dev->flags))
+ wake_up(&sh->raid_conf->wait_for_overlap);
+ }
+- put_cpu();
++ spin_unlock(&percpu->lock);
++ put_cpu_light();
+ }
+
+ static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
+@@ -7079,6 +7081,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
+ __func__, cpu);
+ return -ENOMEM;
+ }
++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
+ return 0;
+ }
+
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index 5c05acf20e1f..665fe138ab4f 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -635,6 +635,7 @@ struct r5conf {
+ int recovery_disabled;
+ /* per cpu variables */
+ struct raid5_percpu {
++ spinlock_t lock; /* Protection for -RT */
+ struct page *spare_page; /* Used when checking P/Q in raid6 */
+ void *scribble; /* space for constructing buffer
+ * lists and performing address
+--
+2.19.1
+
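The raid5 change replaces "stability by disabled preemption" with "stability by a per-CPU lock", so the scratch data stays consistent while the long-running work remains preemptible. A user-space sketch of that idea, with invented names and sched_getcpu() used only as a slot hint (correctness comes from the lock, not from staying on one CPU):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_SLOTS 8

struct percpu_scratch {
    pthread_mutex_t lock;           /* protection for the scratch area */
    long scribble;
};

static struct percpu_scratch slots[NR_SLOTS];

static void run_ops(long work)
{
    /* Pick a slot by current CPU; the lock makes the access safe even
     * if the thread migrates while holding it. */
    int cpu = sched_getcpu();
    struct percpu_scratch *s = &slots[(cpu < 0 ? 0 : cpu) % NR_SLOTS];

    pthread_mutex_lock(&s->lock);
    s->scribble += work;            /* potentially long, still preemptible */
    pthread_mutex_unlock(&s->lock);
}

int main(void)
{
    for (int i = 0; i < NR_SLOTS; i++)
        pthread_mutex_init(&slots[i].lock, NULL);
    run_ops(42);
    puts("done");
    return 0;
}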
diff --git a/features/rt/mm-Allow-only-SLUB-on-RT.patch b/features/rt/mm-Allow-only-SLUB-on-RT.patch
new file mode 100644
index 00000000..54012244
--- /dev/null
+++ b/features/rt/mm-Allow-only-SLUB-on-RT.patch
@@ -0,0 +1,46 @@
+From 3049abb997a5dbe4e0c89c1fd51cb372c85ce15a Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:44:03 -0500
+Subject: [PATCH 094/191] mm: Allow only SLUB on RT
+
+Memory allocation disables interrupts as part of the allocation and freeing
+process. For -RT it is important that this section remain short and don't
+depend on the size of the request or an internal state of the memory allocator.
+At the beginning the SLAB memory allocator was adopted for RT's needs and it
+required substantial changes. Later, with the addition of the SLUB memory
+allocator we adopted this one as well and the changes were smaller. More
+important, due to the design of the SLUB allocator it performs better and its
+worst case latency was smaller. In the end only SLUB remained supported.
+
+Disable SLAB and SLOB on -RT. Only SLUB is adopted to -RT needs.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ init/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 5f5c776ef192..d51c16a3f355 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1886,6 +1886,7 @@ choice
+
+ config SLAB
+ bool "SLAB"
++ depends on !PREEMPT_RT
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
+ help
+ The regular slab allocator that is established and known to work
+@@ -1906,6 +1907,7 @@ config SLUB
+ config SLOB
+ depends on EXPERT
+ bool "SLOB (Simple Allocator)"
++ depends on !PREEMPT_RT
+ help
+ SLOB replaces the stock allocator with a drastically simpler
+ allocator. SLOB is generally more space efficient but
+--
+2.19.1
+
diff --git a/features/rt/mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch b/features/rt/mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch
new file mode 100644
index 00000000..077738b2
--- /dev/null
+++ b/features/rt/mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch
@@ -0,0 +1,43 @@
+From 96e36ffab463f8f5823778279568a9cdd349c6c3 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Oct 2020 18:15:32 +0100
+Subject: [PATCH 086/191] mm/memcontrol: Disable preemption in
+ __mod_memcg_lruvec_state()
+
+The callers expect disabled preemption/interrupts while invoking
+__mod_memcg_lruvec_state(). This works in mainline because a lock of
+some kind is acquired.
+
+Use preempt_disable_rt() where per-CPU variables are accessed and a
+stable pointer is expected. This is also done in __mod_zone_page_state()
+for the same reason.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/memcontrol.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index e064ac0d850a..24091f7a64e2 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -809,6 +809,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ memcg = pn->memcg;
+
++ preempt_disable_rt();
+ /* Update memcg */
+ __mod_memcg_state(memcg, idx, val);
+
+@@ -828,6 +829,7 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+ x = 0;
+ }
+ __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
++ preempt_enable_rt();
+ }
+
+ /**
+--
+2.19.1
+
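The function touched above accumulates updates in a per-CPU counter and folds them into the shared counters once a batch threshold is crossed, which is why the per-CPU slot must not change under the updater. The batching pattern itself, sketched in user space with a per-thread counter and an invented threshold:

/* Batched per-thread counter folded into a shared atomic (sketch). */
#include <stdatomic.h>
#include <stdio.h>

#define BATCH 32

static _Atomic long global_count;
static __thread long local_count;   /* per-thread stand-in for per-CPU */

static void mod_state(long val)
{
    long x = local_count + val;

    if (x > BATCH || x < -BATCH) {
        atomic_fetch_add(&global_count, x);     /* fold the batch */
        x = 0;
    }
    local_count = x;
}

int main(void)
{
    for (int i = 0; i < 100; i++)
        mod_state(1);
    /* whatever is still batched locally has not been folded yet */
    printf("global=%ld local=%ld\n",
           (long)atomic_load(&global_count), local_count);
    return 0;
}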
diff --git a/features/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/features/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
new file mode 100644
index 00000000..bc68ff78
--- /dev/null
+++ b/features/rt/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -0,0 +1,73 @@
+From 0951cba216f7340f7bdb138df663828817b3261a Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@windriver.com>
+Date: Wed, 30 Oct 2013 11:48:33 -0700
+Subject: [PATCH 114/191] mm/memcontrol: Don't call schedule_work_on in
+ preemption disabled context
+
+The following trace is triggered when running ltp oom test cases:
+
+BUG: sleeping function called from invalid context at kernel/rtmutex.c:659
+in_atomic(): 1, irqs_disabled(): 0, pid: 17188, name: oom03
+Preemption disabled at:[<ffffffff8112ba70>] mem_cgroup_reclaim+0x90/0xe0
+
+CPU: 2 PID: 17188 Comm: oom03 Not tainted 3.10.10-rt3 #2
+Hardware name: Intel Corporation Calpella platform/MATXM-CORE-411-B, BIOS 4.6.3 08/18/2010
+ffff88007684d730 ffff880070df9b58 ffffffff8169918d ffff880070df9b70
+ffffffff8106db31 ffff88007688b4a0 ffff880070df9b88 ffffffff8169d9c0
+ffff88007688b4a0 ffff880070df9bc8 ffffffff81059da1 0000000170df9bb0
+Call Trace:
+[<ffffffff8169918d>] dump_stack+0x19/0x1b
+[<ffffffff8106db31>] __might_sleep+0xf1/0x170
+[<ffffffff8169d9c0>] rt_spin_lock+0x20/0x50
+[<ffffffff81059da1>] queue_work_on+0x61/0x100
+[<ffffffff8112b361>] drain_all_stock+0xe1/0x1c0
+[<ffffffff8112ba70>] mem_cgroup_reclaim+0x90/0xe0
+[<ffffffff8112beda>] __mem_cgroup_try_charge+0x41a/0xc40
+[<ffffffff810f1c91>] ? release_pages+0x1b1/0x1f0
+[<ffffffff8106f200>] ? sched_exec+0x40/0xb0
+[<ffffffff8112cc87>] mem_cgroup_charge_common+0x37/0x70
+[<ffffffff8112e2c6>] mem_cgroup_newpage_charge+0x26/0x30
+[<ffffffff8110af68>] handle_pte_fault+0x618/0x840
+[<ffffffff8103ecf6>] ? unpin_current_cpu+0x16/0x70
+[<ffffffff81070f94>] ? migrate_enable+0xd4/0x200
+[<ffffffff8110cde5>] handle_mm_fault+0x145/0x1e0
+[<ffffffff810301e1>] __do_page_fault+0x1a1/0x4c0
+[<ffffffff8169c9eb>] ? preempt_schedule_irq+0x4b/0x70
+[<ffffffff8169e3b7>] ? retint_kernel+0x37/0x40
+[<ffffffff8103053e>] do_page_fault+0xe/0x10
+[<ffffffff8169e4c2>] page_fault+0x22/0x30
+
+So, to prevent schedule_work_on() from being called in a preempt-disabled context,
+replace the get/put_cpu() pair with get/put_cpu_light().
+
+Signed-off-by: Yang Shi <yang.shi@windriver.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/memcontrol.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 74a65d87866c..0d741fcf9f53 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2357,7 +2357,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
+ * as well as workers from this path always operate on the local
+ * per-cpu data. CPU up doesn't touch memcg_stock at all.
+ */
+- curcpu = get_cpu();
++ curcpu = get_cpu_light();
+ for_each_online_cpu(cpu) {
+ struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+ struct mem_cgroup *memcg;
+@@ -2380,7 +2380,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
+ schedule_work_on(cpu, &stock->work);
+ }
+ }
+- put_cpu();
++ put_cpu_light();
+ mutex_unlock(&percpu_charge_mutex);
+ }
+
+--
+2.19.1
+
diff --git a/features/rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch b/features/rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
new file mode 100644
index 00000000..d85633ab
--- /dev/null
+++ b/features/rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
@@ -0,0 +1,143 @@
+From a597d1d521af35edd6fbb86d1934a0f92bc9d54e Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 18 Aug 2020 10:30:00 +0200
+Subject: [PATCH 113/191] mm: memcontrol: Provide a local_lock for per-CPU
+ memcg_stock
+
+Interrupts are disabled to ensure CPU-local access to the per-CPU
+variable `memcg_stock'.
+As the code inside the interrupt-disabled section acquires regular
+spinlocks, which are converted to 'sleeping' spinlocks on a PREEMPT_RT
+kernel, this conflicts with the RT semantics.
+
+Convert it to a local_lock, which allows RT kernels to substitute it with
+a real per-CPU lock. On non-RT kernels this maps to local_irq_save() as
+before, but also provides lockdep coverage of the critical region.
+No functional change.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/memcontrol.c | 31 ++++++++++++++++++-------------
+ 1 file changed, 18 insertions(+), 13 deletions(-)
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 24091f7a64e2..74a65d87866c 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2210,6 +2210,7 @@ void unlock_page_memcg(struct page *page)
+ EXPORT_SYMBOL(unlock_page_memcg);
+
+ struct memcg_stock_pcp {
++ local_lock_t lock;
+ struct mem_cgroup *cached; /* this never be root cgroup */
+ unsigned int nr_pages;
+
+@@ -2261,7 +2262,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ if (nr_pages > MEMCG_CHARGE_BATCH)
+ return ret;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
+@@ -2269,7 +2270,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+
+ return ret;
+ }
+@@ -2304,14 +2305,14 @@ static void drain_local_stock(struct work_struct *dummy)
+ * The only protection from memory hotplug vs. drain_stock races is
+ * that we always operate on local CPU stock here with IRQ disabled
+ */
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ drain_obj_stock(stock);
+ drain_stock(stock);
+ clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+ }
+
+ /*
+@@ -2323,7 +2324,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (stock->cached != memcg) { /* reset if necessary */
+@@ -2336,7 +2337,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ if (stock->nr_pages > MEMCG_CHARGE_BATCH)
+ drain_stock(stock);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+ }
+
+ /*
+@@ -3158,7 +3159,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ unsigned long flags;
+ bool ret = false;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
+@@ -3166,7 +3167,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+
+ return ret;
+ }
+@@ -3225,7 +3226,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (stock->cached_objcg != objcg) { /* reset if necessary */
+@@ -3239,7 +3240,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ if (stock->nr_bytes > PAGE_SIZE)
+ drain_obj_stock(stock);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+ }
+
+ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
+@@ -7065,9 +7066,13 @@ static int __init mem_cgroup_init(void)
+ cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
+ memcg_hotplug_cpu_dead);
+
+- for_each_possible_cpu(cpu)
+- INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+- drain_local_stock);
++ for_each_possible_cpu(cpu) {
++ struct memcg_stock_pcp *stock;
++
++ stock = per_cpu_ptr(&memcg_stock, cpu);
++ INIT_WORK(&stock->work, drain_local_stock);
++ local_lock_init(&stock->lock);
++ }
+
+ for_each_node(node) {
+ struct mem_cgroup_tree_per_node *rtpn;
+--
+2.19.1
+
diff --git a/features/rt/mm-memcontrol-Replace-local_irq_disable-with-local-l.patch b/features/rt/mm-memcontrol-Replace-local_irq_disable-with-local-l.patch
new file mode 100644
index 00000000..0fd436ea
--- /dev/null
+++ b/features/rt/mm-memcontrol-Replace-local_irq_disable-with-local-l.patch
@@ -0,0 +1,122 @@
+From d4e7b31f217ee00026ee090133e88a9bcc33826b Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 28 Jan 2015 17:14:16 +0100
+Subject: [PATCH 115/191] mm/memcontrol: Replace local_irq_disable with local
+ locks
+
+There are a few local_irq_disable() sections which then take sleeping locks.
+This patch converts them to local locks.
+
+[bigeasy: Move unlock after memcg_check_events() in mem_cgroup_swapout(),
+ pointed out by Matt Fleming <matt@codeblueprint.co.uk>]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/memcontrol.c | 29 +++++++++++++++++++++--------
+ 1 file changed, 21 insertions(+), 8 deletions(-)
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 0d741fcf9f53..ae52cebfecfc 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -66,6 +66,7 @@
+ #include <net/sock.h>
+ #include <net/ip.h>
+ #include "slab.h"
++#include <linux/local_lock.h>
+
+ #include <linux/uaccess.h>
+
+@@ -96,6 +97,13 @@ bool cgroup_memory_noswap __read_mostly;
+ static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
+ #endif
+
++struct event_lock {
++ local_lock_t l;
++};
++static DEFINE_PER_CPU(struct event_lock, event_lock) = {
++ .l = INIT_LOCAL_LOCK(l),
++};
++
+ /* Whether legacy memory+swap accounting is active */
+ static bool do_memsw_account(void)
+ {
+@@ -5677,12 +5685,12 @@ static int mem_cgroup_move_account(struct page *page,
+
+ ret = 0;
+
+- local_irq_disable();
++ local_lock_irq(&event_lock.l);
+ mem_cgroup_charge_statistics(to, page, nr_pages);
+ memcg_check_events(to, page);
+ mem_cgroup_charge_statistics(from, page, -nr_pages);
+ memcg_check_events(from, page);
+- local_irq_enable();
++ local_unlock_irq(&event_lock.l);
+ out_unlock:
+ unlock_page(page);
+ out:
+@@ -6739,10 +6747,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+ css_get(&memcg->css);
+ commit_charge(page, memcg);
+
+- local_irq_disable();
++ local_lock_irq(&event_lock.l);
+ mem_cgroup_charge_statistics(memcg, page, nr_pages);
+ memcg_check_events(memcg, page);
+- local_irq_enable();
++ local_unlock_irq(&event_lock.l);
+
+ /*
+ * Cgroup1's unified memory+swap counter has been charged with the
+@@ -6798,11 +6806,11 @@ static void uncharge_batch(const struct uncharge_gather *ug)
+ memcg_oom_recover(ug->memcg);
+ }
+
+- local_irq_save(flags);
++ local_lock_irqsave(&event_lock.l, flags);
+ __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
+ __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
+ memcg_check_events(ug->memcg, ug->dummy_page);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&event_lock.l, flags);
+
+ /* drop reference from uncharge_page */
+ css_put(&ug->memcg->css);
+@@ -6935,10 +6943,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
+ css_get(&memcg->css);
+ commit_charge(newpage, memcg);
+
+- local_irq_save(flags);
++ local_lock_irqsave(&event_lock.l, flags);
+ mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
+ memcg_check_events(memcg, newpage);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&event_lock.l, flags);
+ }
+
+ DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
+@@ -7121,6 +7129,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+ struct mem_cgroup *memcg, *swap_memcg;
+ unsigned int nr_entries;
+ unsigned short oldid;
++ unsigned long flags;
+
+ VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_PAGE(page_count(page), page);
+@@ -7169,9 +7178,13 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+ * important here to have the interrupts disabled because it is the
+ * only synchronisation we have for updating the per-CPU variables.
+ */
++ local_lock_irqsave(&event_lock.l, flags);
++#ifndef CONFIG_PREEMPT_RT
+ VM_BUG_ON(!irqs_disabled());
++#endif
+ mem_cgroup_charge_statistics(memcg, page, -nr_entries);
+ memcg_check_events(memcg, page);
++ local_unlock_irqrestore(&event_lock.l, flags);
+
+ css_put(&memcg->css);
+ }
+--
+2.19.1
+
diff --git a/features/rt/mm-page_alloc-Use-a-local_lock-instead-of-explicit-l.patch b/features/rt/mm-page_alloc-Use-a-local_lock-instead-of-explicit-l.patch
new file mode 100644
index 00000000..619ba657
--- /dev/null
+++ b/features/rt/mm-page_alloc-Use-a-local_lock-instead-of-explicit-l.patch
@@ -0,0 +1,210 @@
+From 1398d3c431f6fd53914f1b71dd30732092e6baa7 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:37 -0500
+Subject: [PATCH 111/191] mm: page_alloc: Use a local_lock instead of explicit
+ local_irq_save().
+
+The page-allocator disables interrupts for a few reasons:
+- Decouple the irqsave operation from spin_lock() so it can be
+  extended over the actual lock region and cover other areas, like
+  counter increments, where the preemptible version can be avoided.
+
+- Access to the per-CPU pcp from struct zone.
+
+Replace the irqsave with a local-lock. The counters are expected to be
+always modified with disabled preemption and no access from interrupt
+context.
+
+Contains fixes from:
+ Peter Zijlstra <a.p.zijlstra@chello.nl>
+ Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/page_alloc.c | 49 ++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 30 insertions(+), 19 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index db713dd3e08e..72993fb19c99 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -62,6 +62,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/sched/rt.h>
+ #include <linux/sched/mm.h>
++#include <linux/local_lock.h>
+ #include <linux/page_owner.h>
+ #include <linux/kthread.h>
+ #include <linux/memcontrol.h>
+@@ -363,6 +364,13 @@ EXPORT_SYMBOL(nr_online_nodes);
+
+ int page_group_by_mobility_disabled __read_mostly;
+
++struct pa_lock {
++ local_lock_t l;
++};
++static DEFINE_PER_CPU(struct pa_lock, pa_lock) = {
++ .l = INIT_LOCAL_LOCK(l),
++};
++
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+ /*
+ * During boot we initialize deferred pages on-demand, as needed, but once
+@@ -1541,11 +1549,11 @@ static void __free_pages_ok(struct page *page, unsigned int order,
+ return;
+
+ migratetype = get_pfnblock_migratetype(page, pfn);
+- local_irq_save(flags);
++ local_lock_irqsave(&pa_lock.l, flags);
+ __count_vm_events(PGFREE, 1 << order);
+ free_one_page(page_zone(page), page, pfn, order, migratetype,
+ fpi_flags);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+ }
+
+ void __free_pages_core(struct page *page, unsigned int order)
+@@ -2962,12 +2970,12 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ unsigned long flags;
+ int to_drain, batch;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&pa_lock.l, flags);
+ batch = READ_ONCE(pcp->batch);
+ to_drain = min(pcp->count, batch);
+ if (to_drain > 0)
+ free_pcppages_bulk(zone, to_drain, pcp);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+ }
+ #endif
+
+@@ -2984,13 +2992,13 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&pa_lock.l, flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+
+ pcp = &pset->pcp;
+ if (pcp->count)
+ free_pcppages_bulk(zone, pcp->count, pcp);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+ }
+
+ /*
+@@ -3253,9 +3261,9 @@ void free_unref_page(struct page *page)
+ if (!free_unref_page_prepare(page, pfn))
+ return;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&pa_lock.l, flags);
+ free_unref_page_commit(page, pfn);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+ }
+
+ /*
+@@ -3275,7 +3283,7 @@ void free_unref_page_list(struct list_head *list)
+ set_page_private(page, pfn);
+ }
+
+- local_irq_save(flags);
++ local_lock_irqsave(&pa_lock.l, flags);
+ list_for_each_entry_safe(page, next, list, lru) {
+ unsigned long pfn = page_private(page);
+
+@@ -3288,12 +3296,12 @@ void free_unref_page_list(struct list_head *list)
+ * a large list of pages to free.
+ */
+ if (++batch_count == SWAP_CLUSTER_MAX) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+ batch_count = 0;
+- local_irq_save(flags);
++ local_lock_irqsave(&pa_lock.l, flags);
+ }
+ }
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+ }
+
+ /*
+@@ -3449,7 +3457,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ struct page *page;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&pa_lock.l, flags);
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
+ page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
+@@ -3457,7 +3465,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
+ zone_statistics(preferred_zone, zone);
+ }
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+ return page;
+ }
+
+@@ -3491,7 +3499,9 @@ struct page *rmqueue(struct zone *preferred_zone,
+ * allocate greater than order-1 page units with __GFP_NOFAIL.
+ */
+ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+- spin_lock_irqsave(&zone->lock, flags);
++
++ local_lock_irqsave(&pa_lock.l, flags);
++ spin_lock(&zone->lock);
+
+ do {
+ page = NULL;
+@@ -3512,12 +3522,13 @@ struct page *rmqueue(struct zone *preferred_zone,
+ spin_unlock(&zone->lock);
+ if (!page)
+ goto failed;
++
+ __mod_zone_freepage_state(zone, -(1 << order),
+ get_pcppage_migratetype(page));
+
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ zone_statistics(preferred_zone, zone);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+
+ out:
+ /* Separate test+clear to avoid unnecessary atomics */
+@@ -3530,7 +3541,7 @@ struct page *rmqueue(struct zone *preferred_zone,
+ return page;
+
+ failed:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+ return NULL;
+ }
+
+@@ -8810,7 +8821,7 @@ void zone_pcp_reset(struct zone *zone)
+ struct per_cpu_pageset *pset;
+
+ /* avoid races with drain_pages() */
+- local_irq_save(flags);
++ local_lock_irqsave(&pa_lock.l, flags);
+ if (zone->pageset != &boot_pageset) {
+ for_each_online_cpu(cpu) {
+ pset = per_cpu_ptr(zone->pageset, cpu);
+@@ -8819,7 +8830,7 @@ void zone_pcp_reset(struct zone *zone)
+ free_percpu(zone->pageset);
+ zone->pageset = &boot_pageset;
+ }
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&pa_lock.l, flags);
+ }
+
+ #ifdef CONFIG_MEMORY_HOTREMOVE
+--
+2.19.1
+
diff --git a/features/rt/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch b/features/rt/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
new file mode 100644
index 00000000..3d068182
--- /dev/null
+++ b/features/rt/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
@@ -0,0 +1,38 @@
+From 6e618c2aeb0d445978c2cbdec84fc15eea6edb82 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 2 Jul 2020 14:27:23 +0200
+Subject: [PATCH 110/191] mm: page_alloc: Use migrate_disable() in
+ drain_local_pages_wq()
+
+drain_local_pages_wq() disables preemption to avoid CPU migration during
+CPU hotplug and can't use cpus_read_lock().
+
+Using migrate_disable() works here, too. The scheduler won't take the
+CPU offline until the task has left the migrate-disable section.
+
+Use migrate_disable() in drain_local_pages_wq().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/page_alloc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index cfc72873961d..db713dd3e08e 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3038,9 +3038,9 @@ static void drain_local_pages_wq(struct work_struct *work)
+ * cpu which is allright but we also have to make sure to not move to
+ * a different one.
+ */
+- preempt_disable();
++ migrate_disable();
+ drain_local_pages(drain->zone);
+- preempt_enable();
++ migrate_enable();
+ }
+
+ /*
+--
+2.19.1
+
diff --git a/features/rt/mm-scatterlist-Do-not-disable-irqs-on-RT.patch b/features/rt/mm-scatterlist-Do-not-disable-irqs-on-RT.patch
new file mode 100644
index 00000000..de9d9047
--- /dev/null
+++ b/features/rt/mm-scatterlist-Do-not-disable-irqs-on-RT.patch
@@ -0,0 +1,29 @@
+From e548a9846277b0cf05cd81fc6441d578728d20f3 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:34 -0500
+Subject: [PATCH 162/191] mm/scatterlist: Do not disable irqs on RT
+
+For -RT it is enough to keep pagefaults disabled (which is currently handled by
+kmap_atomic()).
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ lib/scatterlist.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/scatterlist.c b/lib/scatterlist.c
+index a59778946404..907f59045998 100644
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -892,7 +892,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
+ flush_kernel_dcache_page(miter->page);
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+- WARN_ON_ONCE(preemptible());
++ WARN_ON_ONCE(!pagefault_disabled());
+ kunmap_atomic(miter->addr);
+ } else
+ kunmap(miter->page);
+--
+2.19.1
+
diff --git a/features/rt/mm-sl-au-b-Change-list_lock-to-raw_spinlock_t.patch b/features/rt/mm-sl-au-b-Change-list_lock-to-raw_spinlock_t.patch
new file mode 100644
index 00000000..c6c7ab42
--- /dev/null
+++ b/features/rt/mm-sl-au-b-Change-list_lock-to-raw_spinlock_t.patch
@@ -0,0 +1,602 @@
+From 8583607fe6b30d9952332612f6093f40282d0906 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 28 May 2018 15:24:22 +0200
+Subject: [PATCH 104/191] mm: sl[au]b: Change list_lock to raw_spinlock_t
+
+The list_lock is used with IRQs off on PREEMPT_RT. Make it a
+raw_spinlock_t, otherwise interrupts won't be disabled on PREEMPT_RT.
+The locking rules remain unchanged.
+The lock is updated for SLAB and SLUB since both share the same header
+file for the struct kmem_cache_node definition.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/slab.c | 90 +++++++++++++++++++++++++++----------------------------
+ mm/slab.h | 2 +-
+ mm/slub.c | 50 +++++++++++++++----------------
+ 3 files changed, 71 insertions(+), 71 deletions(-)
+
+diff --git a/mm/slab.c b/mm/slab.c
+index ae651bf540b7..afe196fcf579 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -234,7 +234,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
+ parent->shared = NULL;
+ parent->alien = NULL;
+ parent->colour_next = 0;
+- spin_lock_init(&parent->list_lock);
++ raw_spin_lock_init(&parent->list_lock);
+ parent->free_objects = 0;
+ parent->free_touched = 0;
+ }
+@@ -559,9 +559,9 @@ static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
+ page_node = page_to_nid(page);
+ n = get_node(cachep, page_node);
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ free_block(cachep, &objp, 1, page_node, &list);
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ slabs_destroy(cachep, &list);
+ }
+@@ -699,7 +699,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
+ struct kmem_cache_node *n = get_node(cachep, node);
+
+ if (ac->avail) {
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ /*
+ * Stuff objects into the remote nodes shared array first.
+ * That way we could avoid the overhead of putting the objects
+@@ -710,7 +710,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
+
+ free_block(cachep, ac->entry, ac->avail, node, list);
+ ac->avail = 0;
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ }
+ }
+
+@@ -783,9 +783,9 @@ static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+ slabs_destroy(cachep, &list);
+ } else {
+ n = get_node(cachep, page_node);
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ free_block(cachep, &objp, 1, page_node, &list);
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ slabs_destroy(cachep, &list);
+ }
+ return 1;
+@@ -826,10 +826,10 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
+ */
+ n = get_node(cachep, node);
+ if (n) {
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
+ cachep->num;
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ return 0;
+ }
+@@ -908,7 +908,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
+ goto fail;
+
+ n = get_node(cachep, node);
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ if (n->shared && force_change) {
+ free_block(cachep, n->shared->entry,
+ n->shared->avail, node, &list);
+@@ -926,7 +926,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
+ new_alien = NULL;
+ }
+
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ slabs_destroy(cachep, &list);
+
+ /*
+@@ -965,7 +965,7 @@ static void cpuup_canceled(long cpu)
+ if (!n)
+ continue;
+
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+
+ /* Free limit for this kmem_cache_node */
+ n->free_limit -= cachep->batchcount;
+@@ -976,7 +976,7 @@ static void cpuup_canceled(long cpu)
+ nc->avail = 0;
+
+ if (!cpumask_empty(mask)) {
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ goto free_slab;
+ }
+
+@@ -990,7 +990,7 @@ static void cpuup_canceled(long cpu)
+ alien = n->alien;
+ n->alien = NULL;
+
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ kfree(shared);
+ if (alien) {
+@@ -1174,7 +1174,7 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
+ /*
+ * Do not assume that spinlocks can be initialized via memcpy:
+ */
+- spin_lock_init(&ptr->list_lock);
++ raw_spin_lock_init(&ptr->list_lock);
+
+ MAKE_ALL_LISTS(cachep, ptr, nodeid);
+ cachep->node[nodeid] = ptr;
+@@ -1345,11 +1345,11 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
+ for_each_kmem_cache_node(cachep, node, n) {
+ unsigned long total_slabs, free_slabs, free_objs;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ total_slabs = n->total_slabs;
+ free_slabs = n->free_slabs;
+ free_objs = n->free_objects;
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+
+ pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
+ node, total_slabs - free_slabs, total_slabs,
+@@ -2107,7 +2107,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
+ {
+ #ifdef CONFIG_SMP
+ check_irq_off();
+- assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
++ assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+ #endif
+ }
+
+@@ -2115,7 +2115,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
+ {
+ #ifdef CONFIG_SMP
+ check_irq_off();
+- assert_spin_locked(&get_node(cachep, node)->list_lock);
++ assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
+ #endif
+ }
+
+@@ -2155,9 +2155,9 @@ static void do_drain(void *arg)
+ check_irq_off();
+ ac = cpu_cache_get(cachep);
+ n = get_node(cachep, node);
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ free_block(cachep, ac->entry, ac->avail, node, &list);
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ ac->avail = 0;
+ slabs_destroy(cachep, &list);
+ }
+@@ -2175,9 +2175,9 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
+ drain_alien_cache(cachep, n->alien);
+
+ for_each_kmem_cache_node(cachep, node, n) {
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ drain_array_locked(cachep, n->shared, node, true, &list);
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ slabs_destroy(cachep, &list);
+ }
+@@ -2199,10 +2199,10 @@ static int drain_freelist(struct kmem_cache *cache,
+ nr_freed = 0;
+ while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
+
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ p = n->slabs_free.prev;
+ if (p == &n->slabs_free) {
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ goto out;
+ }
+
+@@ -2215,7 +2215,7 @@ static int drain_freelist(struct kmem_cache *cache,
+ * to the cache.
+ */
+ n->free_objects -= cache->num;
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ slab_destroy(cache, page);
+ nr_freed++;
+ }
+@@ -2651,7 +2651,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
+ INIT_LIST_HEAD(&page->slab_list);
+ n = get_node(cachep, page_to_nid(page));
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ n->total_slabs++;
+ if (!page->active) {
+ list_add_tail(&page->slab_list, &n->slabs_free);
+@@ -2661,7 +2661,7 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
+
+ STATS_INC_GROWN(cachep);
+ n->free_objects += cachep->num - page->active;
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ fixup_objfreelist_debug(cachep, &list);
+ }
+@@ -2827,7 +2827,7 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
+ {
+ struct page *page;
+
+- assert_spin_locked(&n->list_lock);
++ assert_raw_spin_locked(&n->list_lock);
+ page = list_first_entry_or_null(&n->slabs_partial, struct page,
+ slab_list);
+ if (!page) {
+@@ -2854,10 +2854,10 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
+ if (!gfp_pfmemalloc_allowed(flags))
+ return NULL;
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ page = get_first_slab(n, true);
+ if (!page) {
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ return NULL;
+ }
+
+@@ -2866,7 +2866,7 @@ static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep,
+
+ fixup_slab_list(cachep, n, page, &list);
+
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ fixup_objfreelist_debug(cachep, &list);
+
+ return obj;
+@@ -2925,7 +2925,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
+ if (!n->free_objects && (!shared || !shared->avail))
+ goto direct_grow;
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ shared = READ_ONCE(n->shared);
+
+ /* See if we can refill from the shared array */
+@@ -2949,7 +2949,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
+ must_grow:
+ n->free_objects -= ac->avail;
+ alloc_done:
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ fixup_objfreelist_debug(cachep, &list);
+
+ direct_grow:
+@@ -3174,7 +3174,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+ BUG_ON(!n);
+
+ check_irq_off();
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ page = get_first_slab(n, false);
+ if (!page)
+ goto must_grow;
+@@ -3192,12 +3192,12 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
+
+ fixup_slab_list(cachep, n, page, &list);
+
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ fixup_objfreelist_debug(cachep, &list);
+ return obj;
+
+ must_grow:
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
+ if (page) {
+ /* This slab isn't counted yet so don't update free_objects */
+@@ -3385,7 +3385,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+
+ check_irq_off();
+ n = get_node(cachep, node);
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ if (n->shared) {
+ struct array_cache *shared_array = n->shared;
+ int max = shared_array->limit - shared_array->avail;
+@@ -3414,7 +3414,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
+ STATS_SET_FREEABLE(cachep, i);
+ }
+ #endif
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ ac->avail -= batchcount;
+ memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
+ slabs_destroy(cachep, &list);
+@@ -3870,9 +3870,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+
+ node = cpu_to_mem(cpu);
+ n = get_node(cachep, node);
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ free_block(cachep, ac->entry, ac->avail, node, &list);
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ slabs_destroy(cachep, &list);
+ }
+ free_percpu(prev);
+@@ -3967,9 +3967,9 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
+ return;
+ }
+
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ drain_array_locked(cachep, ac, node, false, &list);
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ slabs_destroy(cachep, &list);
+ }
+@@ -4053,7 +4053,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
+
+ for_each_kmem_cache_node(cachep, node, n) {
+ check_irq_on();
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+
+ total_slabs += n->total_slabs;
+ free_slabs += n->free_slabs;
+@@ -4062,7 +4062,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
+ if (n->shared)
+ shared_avail += n->shared->avail;
+
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+ }
+ num_objs = total_slabs * cachep->num;
+ active_slabs = total_slabs - free_slabs;
+diff --git a/mm/slab.h b/mm/slab.h
+index 076582f58f68..21c670da13ac 100644
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -527,7 +527,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
+ * The slab lists for all objects.
+ */
+ struct kmem_cache_node {
+- spinlock_t list_lock;
++ raw_spinlock_t list_lock;
+
+ #ifdef CONFIG_SLAB
+ struct list_head slabs_partial; /* partial list first, better asm code */
+diff --git a/mm/slub.c b/mm/slub.c
+index 3021ce9bf1b3..07e662d2aef0 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1225,7 +1225,7 @@ static noinline int free_debug_processing(
+ unsigned long flags;
+ int ret = 0;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ slab_lock(page);
+
+ if (s->flags & SLAB_CONSISTENCY_CHECKS) {
+@@ -1260,7 +1260,7 @@ static noinline int free_debug_processing(
+ bulk_cnt, cnt);
+
+ slab_unlock(page);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ if (!ret)
+ slab_fix(s, "Object at 0x%p not freed", object);
+ return ret;
+@@ -1984,7 +1984,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+ if (!n || !n->nr_partial)
+ return NULL;
+
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
+ void *t;
+
+@@ -2009,7 +2009,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+ break;
+
+ }
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+ return object;
+ }
+
+@@ -2252,7 +2252,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ * that acquire_slab() will see a slab page that
+ * is frozen
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ } else {
+ m = M_FULL;
+@@ -2263,7 +2263,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ * slabs from diagnostic functions will not see
+ * any frozen slabs.
+ */
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+ }
+
+@@ -2287,7 +2287,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ goto redo;
+
+ if (lock)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ if (m == M_PARTIAL)
+ stat(s, tail);
+@@ -2326,10 +2326,10 @@ static void unfreeze_partials(struct kmem_cache *s,
+ n2 = get_node(s, page_to_nid(page));
+ if (n != n2) {
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ n = n2;
+- spin_lock(&n->list_lock);
++ raw_spin_lock(&n->list_lock);
+ }
+
+ do {
+@@ -2358,7 +2358,7 @@ static void unfreeze_partials(struct kmem_cache *s,
+ }
+
+ if (n)
+- spin_unlock(&n->list_lock);
++ raw_spin_unlock(&n->list_lock);
+
+ while (discard_page) {
+ page = discard_page;
+@@ -2525,10 +2525,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
+ unsigned long x = 0;
+ struct page *page;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, slab_list)
+ x += get_count(page);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return x;
+ }
+ #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
+@@ -2997,7 +2997,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+
+ do {
+ if (unlikely(n)) {
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ n = NULL;
+ }
+ prior = page->freelist;
+@@ -3029,7 +3029,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ * Otherwise the list_lock will synchronize with
+ * other processors updating the list of slabs.
+ */
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ }
+ }
+@@ -3071,7 +3071,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ add_partial(n, page, DEACTIVATE_TO_TAIL);
+ stat(s, FREE_ADD_PARTIAL);
+ }
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return;
+
+ slab_empty:
+@@ -3086,7 +3086,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ remove_full(s, n, page);
+ }
+
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ stat(s, FREE_SLAB);
+ discard_slab(s, page);
+ }
+@@ -3518,7 +3518,7 @@ static void
+ init_kmem_cache_node(struct kmem_cache_node *n)
+ {
+ n->nr_partial = 0;
+- spin_lock_init(&n->list_lock);
++ raw_spin_lock_init(&n->list_lock);
+ INIT_LIST_HEAD(&n->partial);
+ #ifdef CONFIG_SLUB_DEBUG
+ atomic_long_set(&n->nr_slabs, 0);
+@@ -3918,7 +3918,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+ struct page *page, *h;
+
+ BUG_ON(irqs_disabled());
+- spin_lock_irq(&n->list_lock);
++ raw_spin_lock_irq(&n->list_lock);
+ list_for_each_entry_safe(page, h, &n->partial, slab_list) {
+ if (!page->inuse) {
+ remove_partial(n, page);
+@@ -3928,7 +3928,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
+ "Objects remaining in %s on __kmem_cache_shutdown()");
+ }
+ }
+- spin_unlock_irq(&n->list_lock);
++ raw_spin_unlock_irq(&n->list_lock);
+
+ list_for_each_entry_safe(page, h, &discard, slab_list)
+ discard_slab(s, page);
+@@ -4243,7 +4243,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
+ for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
+ INIT_LIST_HEAD(promote + i);
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ /*
+ * Build lists of slabs to discard or promote.
+@@ -4274,7 +4274,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
+ for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
+ list_splice(promote + i, &n->partial);
+
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+
+ /* Release empty slabs */
+ list_for_each_entry_safe(page, t, &discard, slab_list)
+@@ -4644,7 +4644,7 @@ static int validate_slab_node(struct kmem_cache *s,
+ struct page *page;
+ unsigned long flags;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+
+ list_for_each_entry(page, &n->partial, slab_list) {
+ validate_slab(s, page);
+@@ -4666,7 +4666,7 @@ static int validate_slab_node(struct kmem_cache *s,
+ s->name, count, atomic_long_read(&n->nr_slabs));
+
+ out:
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ return count;
+ }
+
+@@ -4845,12 +4845,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
+ if (!atomic_long_read(&n->nr_slabs))
+ continue;
+
+- spin_lock_irqsave(&n->list_lock, flags);
++ raw_spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(page, &n->partial, slab_list)
+ process_slab(&t, s, page, alloc);
+ list_for_each_entry(page, &n->full, slab_list)
+ process_slab(&t, s, page, alloc);
+- spin_unlock_irqrestore(&n->list_lock, flags);
++ raw_spin_unlock_irqrestore(&n->list_lock, flags);
+ }
+
+ for (i = 0; i < t.count; i++) {
+--
+2.19.1
+
diff --git a/features/rt/mm-slub-Don-t-enable-partial-CPU-caches-on-PREEMPT_R.patch b/features/rt/mm-slub-Don-t-enable-partial-CPU-caches-on-PREEMPT_R.patch
new file mode 100644
index 00000000..d3c75a7e
--- /dev/null
+++ b/features/rt/mm-slub-Don-t-enable-partial-CPU-caches-on-PREEMPT_R.patch
@@ -0,0 +1,32 @@
+From b49dbaa0391f05adefc6384ea61cb9e60c4a452a Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 2 Mar 2021 18:58:04 +0100
+Subject: [PATCH 112/191] mm: slub: Don't enable partial CPU caches on
+ PREEMPT_RT by default
+
+SLUB's partial CPU caches lead to higher latencies in a hackbench
+benchmark.
+
+Don't enable partial CPU caches by default on PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ init/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index 37686a22a769..45f2231c7131 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1974,7 +1974,7 @@ config SHUFFLE_PAGE_ALLOCATOR
+ Say Y if unsure.
+
+ config SLUB_CPU_PARTIAL
+- default y
++ default y if !PREEMPT_RT
+ depends on SLUB && SMP
+ bool "SLUB per cpu partial cache"
+ help
+--
+2.19.1
+
diff --git a/features/rt/mm-slub-Don-t-resize-the-location-tracking-cache-on-.patch b/features/rt/mm-slub-Don-t-resize-the-location-tracking-cache-on-.patch
new file mode 100644
index 00000000..601403f6
--- /dev/null
+++ b/features/rt/mm-slub-Don-t-resize-the-location-tracking-cache-on-.patch
@@ -0,0 +1,35 @@
+From cc489539ca4480363e75c1f71bf9e47f835fb2ef Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 26 Feb 2021 17:26:04 +0100
+Subject: [PATCH 109/191] mm: slub: Don't resize the location tracking cache on
+ PREEMPT_RT
+
+The location tracking cache has a size of a page and is resized if its
+current size is too small.
+This allocation happens with interrupts disabled, which is not possible on
+PREEMPT_RT.
+Should one page be too small, then we have to allocate more at the
+beginning. The only downside is that fewer callers will be visible.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/slub.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index ec608c1d5fdb..26cf2872a7ff 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -4819,6 +4819,9 @@ static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
+ struct location *l;
+ int order;
+
++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && flags == GFP_ATOMIC)
++ return 0;
++
+ order = get_order(sizeof(struct location) * max);
+
+ l = (void *)__get_free_pages(flags, order);
+--
+2.19.1
+
diff --git a/features/rt/mm-slub-Enable-irqs-for-__GFP_WAIT.patch b/features/rt/mm-slub-Enable-irqs-for-__GFP_WAIT.patch
new file mode 100644
index 00000000..82d38ebf
--- /dev/null
+++ b/features/rt/mm-slub-Enable-irqs-for-__GFP_WAIT.patch
@@ -0,0 +1,76 @@
+From 9a1bfa7cf125b54a5ad856c32adf4a21359db777 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 9 Jan 2013 12:08:15 +0100
+Subject: [PATCH 106/191] mm: slub: Enable irqs for __GFP_WAIT
+
+SYSTEM_RUNNING might be too late for enabling interrupts. Allocations
+with GFP_WAIT can happen before that. So use this as an indicator.
+
+[bigeasy: Add warning on RT for allocations in atomic context.
+ Don't enable interrupts on allocations during SYSTEM_SUSPEND. This is done
+ during suspend by ACPI, noticed by Liwei Song <liwei.song@windriver.com>
+]
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/slub.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 863b0304b229..1382845c3802 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1759,10 +1759,18 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+ void *start, *p, *next;
+ int idx;
+ bool shuffle;
++ bool enableirqs = false;
+
+ flags &= gfp_allowed_mask;
+
+ if (gfpflags_allow_blocking(flags))
++ enableirqs = true;
++
++#ifdef CONFIG_PREEMPT_RT
++ if (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND)
++ enableirqs = true;
++#endif
++ if (enableirqs)
+ local_irq_enable();
+
+ flags |= s->allocflags;
+@@ -1823,7 +1831,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
+ page->frozen = 1;
+
+ out:
+- if (gfpflags_allow_blocking(flags))
++ if (enableirqs)
+ local_irq_disable();
+ if (!page)
+ return NULL;
+@@ -2823,6 +2831,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
+ unsigned long tid;
+ struct obj_cgroup *objcg = NULL;
+
++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
++ WARN_ON_ONCE(!preemptible() &&
++ (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
++
+ s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
+ if (!s)
+ return NULL;
+@@ -3304,6 +3316,10 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ int i;
+ struct obj_cgroup *objcg = NULL;
+
++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
++ WARN_ON_ONCE(!preemptible() &&
++ (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
++
+ /* memcg and kmem_cache debug support */
+ s = slab_pre_alloc_hook(s, &objcg, size, flags);
+ if (unlikely(!s))
+--
+2.19.1
+
diff --git a/features/rt/mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch b/features/rt/mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch
new file mode 100644
index 00000000..c2597842
--- /dev/null
+++ b/features/rt/mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch
@@ -0,0 +1,49 @@
+From ebdeb55b60cffd57cc349a1a5c657c5905bc01ab Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 16 Jul 2020 18:47:50 +0200
+Subject: [PATCH 105/191] mm: slub: Make object_map_lock a raw_spinlock_t
+
+The variable object_map is protected by object_map_lock. The lock is always
+acquired in debug code and within an already atomic context.
+
+Make object_map_lock a raw_spinlock_t.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/slub.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 07e662d2aef0..863b0304b229 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -445,7 +445,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+
+ #ifdef CONFIG_SLUB_DEBUG
+ static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
+-static DEFINE_SPINLOCK(object_map_lock);
++static DEFINE_RAW_SPINLOCK(object_map_lock);
+
+ /*
+ * Determine a map of object in use on a page.
+@@ -461,7 +461,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
+
+ VM_BUG_ON(!irqs_disabled());
+
+- spin_lock(&object_map_lock);
++ raw_spin_lock(&object_map_lock);
+
+ bitmap_zero(object_map, page->objects);
+
+@@ -474,7 +474,7 @@ static unsigned long *get_map(struct kmem_cache *s, struct page *page)
+ static void put_map(unsigned long *map) __releases(&object_map_lock)
+ {
+ VM_BUG_ON(map != object_map);
+- spin_unlock(&object_map_lock);
++ raw_spin_unlock(&object_map_lock);
+ }
+
+ static inline unsigned int size_from_object(struct kmem_cache *s)
+--
+2.19.1
+
diff --git a/features/rt/mm-slub-Move-discard_slab-invocations-out-of-IRQ-off.patch b/features/rt/mm-slub-Move-discard_slab-invocations-out-of-IRQ-off.patch
new file mode 100644
index 00000000..9f53a2dd
--- /dev/null
+++ b/features/rt/mm-slub-Move-discard_slab-invocations-out-of-IRQ-off.patch
@@ -0,0 +1,416 @@
+From a554a721d714cba4bf3c8eb17e25913fa593a6bf Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 26 Feb 2021 15:14:15 +0100
+Subject: [PATCH 107/191] mm: slub: Move discard_slab() invocations out of
+ IRQ-off sections
+
+discard_slab() gives the memory back to the page-allocator. Some of its
+invocations occur from IRQ-disabled sections which were disabled by SLUB.
+An example is the deactivate_slab() invocation from within
+___slab_alloc() or put_cpu_partial().
+
+Instead of giving the memory back directly, put the pages on a list and
+process it once the caller is out of the known IRQ-off region.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/slub.c | 114 +++++++++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 78 insertions(+), 36 deletions(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index 1382845c3802..af9c0fbe2cf5 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1889,12 +1889,29 @@ static void free_slab(struct kmem_cache *s, struct page *page)
+ __free_slab(s, page);
+ }
+
++static void discard_slab_delayed(struct kmem_cache *s, struct page *page,
++ struct list_head *delayed_free)
++{
++ dec_slabs_node(s, page_to_nid(page), page->objects);
++ list_add(&page->lru, delayed_free);
++}
++
+ static void discard_slab(struct kmem_cache *s, struct page *page)
+ {
+ dec_slabs_node(s, page_to_nid(page), page->objects);
+ free_slab(s, page);
+ }
+
++static void discard_delayed(struct list_head *l)
++{
++ while (!list_empty(l)) {
++ struct page *page = list_first_entry(l, struct page, lru);
++
++ list_del(&page->lru);
++ __free_slab(page->slab_cache, page);
++ }
++}
++
+ /*
+ * Management of partially allocated slabs.
+ */
+@@ -1968,15 +1985,16 @@ static inline void *acquire_slab(struct kmem_cache *s,
+ WARN_ON(!freelist);
+ return freelist;
+ }
+-
+-static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
++static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain,
++ struct list_head *delayed_free);
+ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
+
+ /*
+ * Try to allocate a partial slab from a specific node.
+ */
+ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+- struct kmem_cache_cpu *c, gfp_t flags)
++ struct kmem_cache_cpu *c, gfp_t flags,
++ struct list_head *delayed_free)
+ {
+ struct page *page, *page2;
+ void *object = NULL;
+@@ -2009,7 +2027,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+ stat(s, ALLOC_FROM_PARTIAL);
+ object = t;
+ } else {
+- put_cpu_partial(s, page, 0);
++ put_cpu_partial(s, page, 0, delayed_free);
+ stat(s, CPU_PARTIAL_NODE);
+ }
+ if (!kmem_cache_has_cpu_partial(s)
+@@ -2025,7 +2043,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
+ * Get a page from somewhere. Search in increasing NUMA distances.
+ */
+ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
+- struct kmem_cache_cpu *c)
++ struct kmem_cache_cpu *c,
++ struct list_head *delayed_free)
+ {
+ #ifdef CONFIG_NUMA
+ struct zonelist *zonelist;
+@@ -2067,7 +2086,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
+
+ if (n && cpuset_zone_allowed(zone, flags) &&
+ n->nr_partial > s->min_partial) {
+- object = get_partial_node(s, n, c, flags);
++ object = get_partial_node(s, n, c, flags, delayed_free);
+ if (object) {
+ /*
+ * Don't check read_mems_allowed_retry()
+@@ -2089,7 +2108,8 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
+ * Get a partial page, lock it and return it.
+ */
+ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+- struct kmem_cache_cpu *c)
++ struct kmem_cache_cpu *c,
++ struct list_head *delayed_free)
+ {
+ void *object;
+ int searchnode = node;
+@@ -2097,11 +2117,12 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+ if (node == NUMA_NO_NODE)
+ searchnode = numa_mem_id();
+
+- object = get_partial_node(s, get_node(s, searchnode), c, flags);
++ object = get_partial_node(s, get_node(s, searchnode), c, flags,
++ delayed_free);
+ if (object || node != NUMA_NO_NODE)
+ return object;
+
+- return get_any_partial(s, flags, c);
++ return get_any_partial(s, flags, c, delayed_free);
+ }
+
+ #ifdef CONFIG_PREEMPTION
+@@ -2177,7 +2198,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
+ * Remove the cpu slab
+ */
+ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+- void *freelist, struct kmem_cache_cpu *c)
++ void *freelist, struct kmem_cache_cpu *c,
++ struct list_head *delayed_free)
+ {
+ enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
+ struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+@@ -2303,7 +2325,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ stat(s, DEACTIVATE_FULL);
+ else if (m == M_FREE) {
+ stat(s, DEACTIVATE_EMPTY);
+- discard_slab(s, page);
++ discard_slab_delayed(s, page, delayed_free);
+ stat(s, FREE_SLAB);
+ }
+
+@@ -2318,8 +2340,8 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
+ * for the cpu using c (or some other guarantee must be there
+ * to guarantee no concurrent accesses).
+ */
+-static void unfreeze_partials(struct kmem_cache *s,
+- struct kmem_cache_cpu *c)
++static void unfreeze_partials(struct kmem_cache *s, struct kmem_cache_cpu *c,
++ struct list_head *delayed_free)
+ {
+ #ifdef CONFIG_SLUB_CPU_PARTIAL
+ struct kmem_cache_node *n = NULL, *n2 = NULL;
+@@ -2373,7 +2395,7 @@ static void unfreeze_partials(struct kmem_cache *s,
+ discard_page = discard_page->next;
+
+ stat(s, DEACTIVATE_EMPTY);
+- discard_slab(s, page);
++ discard_slab_delayed(s, page, delayed_free);
+ stat(s, FREE_SLAB);
+ }
+ #endif /* CONFIG_SLUB_CPU_PARTIAL */
+@@ -2386,7 +2408,8 @@ static void unfreeze_partials(struct kmem_cache *s,
+ * If we did not find a slot then simply move all the partials to the
+ * per node partial list.
+ */
+-static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
++static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain,
++ struct list_head *delayed_free)
+ {
+ #ifdef CONFIG_SLUB_CPU_PARTIAL
+ struct page *oldpage;
+@@ -2409,7 +2432,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+ * set to the per node partial list.
+ */
+ local_irq_save(flags);
+- unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
++ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab),
++ delayed_free);
+ local_irq_restore(flags);
+ oldpage = NULL;
+ pobjects = 0;
+@@ -2431,17 +2455,18 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+ unsigned long flags;
+
+ local_irq_save(flags);
+- unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
++ unfreeze_partials(s, this_cpu_ptr(s->cpu_slab), delayed_free);
+ local_irq_restore(flags);
+ }
+ preempt_enable();
+ #endif /* CONFIG_SLUB_CPU_PARTIAL */
+ }
+
+-static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
++static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c,
++ struct list_head *delayed_free)
+ {
+ stat(s, CPUSLAB_FLUSH);
+- deactivate_slab(s, c->page, c->freelist, c);
++ deactivate_slab(s, c->page, c->freelist, c, delayed_free);
+
+ c->tid = next_tid(c->tid);
+ }
+@@ -2451,21 +2476,24 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
+ *
+ * Called from IPI handler with interrupts disabled.
+ */
+-static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
++static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu,
++ struct list_head *delayed_free)
+ {
+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+
+ if (c->page)
+- flush_slab(s, c);
++ flush_slab(s, c, delayed_free);
+
+- unfreeze_partials(s, c);
++ unfreeze_partials(s, c, delayed_free);
+ }
+
+ static void flush_cpu_slab(void *d)
+ {
+ struct kmem_cache *s = d;
++ LIST_HEAD(delayed_free);
+
+- __flush_cpu_slab(s, smp_processor_id());
++ __flush_cpu_slab(s, smp_processor_id(), &delayed_free);
++ discard_delayed(&delayed_free);
+ }
+
+ static bool has_cpu_slab(int cpu, void *info)
+@@ -2489,13 +2517,15 @@ static int slub_cpu_dead(unsigned int cpu)
+ {
+ struct kmem_cache *s;
+ unsigned long flags;
++ LIST_HEAD(delayed_free);
+
+ mutex_lock(&slab_mutex);
+ list_for_each_entry(s, &slab_caches, list) {
+ local_irq_save(flags);
+- __flush_cpu_slab(s, cpu);
++ __flush_cpu_slab(s, cpu, &delayed_free);
+ local_irq_restore(flags);
+ }
++ discard_delayed(&delayed_free);
+ mutex_unlock(&slab_mutex);
+ return 0;
+ }
+@@ -2579,7 +2609,8 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
+ }
+
+ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
+- int node, struct kmem_cache_cpu **pc)
++ int node, struct kmem_cache_cpu **pc,
++ struct list_head *delayed_free)
+ {
+ void *freelist;
+ struct kmem_cache_cpu *c = *pc;
+@@ -2587,7 +2618,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
+
+ WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
+
+- freelist = get_partial(s, flags, node, c);
++ freelist = get_partial(s, flags, node, c, delayed_free);
+
+ if (freelist)
+ return freelist;
+@@ -2596,7 +2627,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
+ if (page) {
+ c = raw_cpu_ptr(s->cpu_slab);
+ if (c->page)
+- flush_slab(s, c);
++ flush_slab(s, c, delayed_free);
+
+ /*
+ * No other reference to the page yet so we can
+@@ -2675,7 +2706,8 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+ * already disabled (which is the case for bulk allocation).
+ */
+ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+- unsigned long addr, struct kmem_cache_cpu *c)
++ unsigned long addr, struct kmem_cache_cpu *c,
++ struct list_head *delayed_free)
+ {
+ void *freelist;
+ struct page *page;
+@@ -2705,7 +2737,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ goto redo;
+ } else {
+ stat(s, ALLOC_NODE_MISMATCH);
+- deactivate_slab(s, page, c->freelist, c);
++ deactivate_slab(s, page, c->freelist, c, delayed_free);
+ goto new_slab;
+ }
+ }
+@@ -2716,7 +2748,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ * information when the page leaves the per-cpu allocator
+ */
+ if (unlikely(!pfmemalloc_match(page, gfpflags))) {
+- deactivate_slab(s, page, c->freelist, c);
++ deactivate_slab(s, page, c->freelist, c, delayed_free);
+ goto new_slab;
+ }
+
+@@ -2755,7 +2787,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ goto redo;
+ }
+
+- freelist = new_slab_objects(s, gfpflags, node, &c);
++ freelist = new_slab_objects(s, gfpflags, node, &c, delayed_free);
+
+ if (unlikely(!freelist)) {
+ slab_out_of_memory(s, gfpflags, node);
+@@ -2771,7 +2803,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ !alloc_debug_processing(s, page, freelist, addr))
+ goto new_slab; /* Slab failed checks. Next slab needed */
+
+- deactivate_slab(s, page, get_freepointer(s, freelist), c);
++ deactivate_slab(s, page, get_freepointer(s, freelist), c, delayed_free);
+ return freelist;
+ }
+
+@@ -2784,6 +2816,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ {
+ void *p;
+ unsigned long flags;
++ LIST_HEAD(delayed_free);
+
+ local_irq_save(flags);
+ #ifdef CONFIG_PREEMPTION
+@@ -2795,8 +2828,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+ c = this_cpu_ptr(s->cpu_slab);
+ #endif
+
+- p = ___slab_alloc(s, gfpflags, node, addr, c);
++ p = ___slab_alloc(s, gfpflags, node, addr, c, &delayed_free);
+ local_irq_restore(flags);
++ discard_delayed(&delayed_free);
+ return p;
+ }
+
+@@ -3060,11 +3094,13 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
+ */
+ stat(s, FREE_FROZEN);
+ } else if (new.frozen) {
++ LIST_HEAD(delayed_free);
+ /*
+ * If we just froze the page then put it onto the
+ * per cpu partial list.
+ */
+- put_cpu_partial(s, page, 1);
++ put_cpu_partial(s, page, 1, &delayed_free);
++ discard_delayed(&delayed_free);
+ stat(s, CPU_PARTIAL_FREE);
+ }
+
+@@ -3315,6 +3351,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ struct kmem_cache_cpu *c;
+ int i;
+ struct obj_cgroup *objcg = NULL;
++ LIST_HEAD(delayed_free);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
+ WARN_ON_ONCE(!preemptible() &&
+@@ -3356,7 +3393,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ * of re-populating per CPU c->freelist
+ */
+ p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
+- _RET_IP_, c);
++ _RET_IP_, c, &delayed_free);
+ if (unlikely(!p[i]))
+ goto error;
+
+@@ -3372,6 +3409,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ c->tid = next_tid(c->tid);
+ local_irq_enable();
+
++ discard_delayed(&delayed_free);
++
+ /* Clear memory outside IRQ disabled fastpath loop */
+ if (unlikely(slab_want_init_on_alloc(flags, s))) {
+ int j;
+@@ -3385,6 +3424,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+ return i;
+ error:
+ local_irq_enable();
++ discard_delayed(&delayed_free);
+ slab_post_alloc_hook(s, objcg, flags, i, p);
+ __kmem_cache_free_bulk(s, i, p);
+ return 0;
+@@ -4437,6 +4477,7 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
+ int node;
+ struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+ struct kmem_cache_node *n;
++ LIST_HEAD(delayed_free);
+
+ memcpy(s, static_cache, kmem_cache->object_size);
+
+@@ -4445,7 +4486,8 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
+ * up. Even if it weren't true, IRQs are not up so we couldn't fire
+ * IPIs around.
+ */
+- __flush_cpu_slab(s, smp_processor_id());
++ __flush_cpu_slab(s, smp_processor_id(), &delayed_free);
++ discard_delayed(&delayed_free);
+ for_each_kmem_cache_node(s, node, n) {
+ struct page *p;
+
+--
+2.19.1
+
diff --git a/features/rt/mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch b/features/rt/mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch
new file mode 100644
index 00000000..119f020a
--- /dev/null
+++ b/features/rt/mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch
@@ -0,0 +1,120 @@
+From 3bd87df897f0a51299da79bf24091cc937f1706e Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 26 Feb 2021 17:11:55 +0100
+Subject: [PATCH 108/191] mm: slub: Move flush_cpu_slab() invocations
+ __free_slab() invocations out of IRQ context
+
+flush_all() flushes a specific SLAB cache on each CPU (where the cache
+is present). The discard_delayed()/__free_slab() invocation happens
+within the IPI handler and is problematic for PREEMPT_RT.
+
+The flush operation is neither a frequent operation nor a hot path. The
+per-CPU flush operation can be moved into a workqueue.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/slub.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 52 insertions(+), 8 deletions(-)
+
+diff --git a/mm/slub.c b/mm/slub.c
+index af9c0fbe2cf5..ec608c1d5fdb 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2487,26 +2487,70 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu,
+ unfreeze_partials(s, c, delayed_free);
+ }
+
+-static void flush_cpu_slab(void *d)
++struct slub_flush_work {
++ struct work_struct work;
++ struct kmem_cache *s;
++ bool skip;
++};
++
++static void flush_cpu_slab(struct work_struct *w)
+ {
+- struct kmem_cache *s = d;
++ struct slub_flush_work *sfw;
+ LIST_HEAD(delayed_free);
+
+- __flush_cpu_slab(s, smp_processor_id(), &delayed_free);
++ sfw = container_of(w, struct slub_flush_work, work);
++
++ local_irq_disable();
++ __flush_cpu_slab(sfw->s, smp_processor_id(), &delayed_free);
++ local_irq_enable();
++
+ discard_delayed(&delayed_free);
+ }
+
+-static bool has_cpu_slab(int cpu, void *info)
++static bool has_cpu_slab(int cpu, struct kmem_cache *s)
+ {
+- struct kmem_cache *s = info;
+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+
+ return c->page || slub_percpu_partial(c);
+ }
+
++static DEFINE_MUTEX(flush_lock);
++static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
++
++static void flush_all_locked(struct kmem_cache *s)
++{
++ struct slub_flush_work *sfw;
++ unsigned int cpu;
++
++ mutex_lock(&flush_lock);
++
++ for_each_online_cpu(cpu) {
++ sfw = &per_cpu(slub_flush, cpu);
++ if (!has_cpu_slab(cpu, s)) {
++ sfw->skip = true;
++ continue;
++ }
++ INIT_WORK(&sfw->work, flush_cpu_slab);
++ sfw->skip = false;
++ sfw->s = s;
++ schedule_work_on(cpu, &sfw->work);
++ }
++
++ for_each_online_cpu(cpu) {
++ sfw = &per_cpu(slub_flush, cpu);
++ if (sfw->skip)
++ continue;
++ flush_work(&sfw->work);
++ }
++
++ mutex_unlock(&flush_lock);
++}
++
+ static void flush_all(struct kmem_cache *s)
+ {
+- on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
++ cpus_read_lock();
++ flush_all_locked(s);
++ cpus_read_unlock();
+ }
+
+ /*
+@@ -4009,7 +4053,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
+ int node;
+ struct kmem_cache_node *n;
+
+- flush_all(s);
++ flush_all_locked(s);
+ /* Attempt to free all objects */
+ for_each_kmem_cache_node(s, node, n) {
+ free_partial(s, n);
+@@ -4293,7 +4337,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
+ unsigned long flags;
+ int ret = 0;
+
+- flush_all(s);
++ flush_all_locked(s);
+ for_each_kmem_cache_node(s, node, n) {
+ INIT_LIST_HEAD(&discard);
+ for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
+--
+2.19.1
+
diff --git a/features/rt/mm-vmalloc-Another-preempt-disable-region-which-suck.patch b/features/rt/mm-vmalloc-Another-preempt-disable-region-which-suck.patch
new file mode 100644
index 00000000..18978e06
--- /dev/null
+++ b/features/rt/mm-vmalloc-Another-preempt-disable-region-which-suck.patch
@@ -0,0 +1,72 @@
+From 9834be9d8d752c94f6c88a3a380898da34cf003b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Jul 2011 11:39:36 +0200
+Subject: [PATCH 133/191] mm/vmalloc: Another preempt disable region which
+ sucks
+
+Avoid the preempt-disabling version of get_cpu_var(). The inner lock should
+provide enough serialisation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/vmalloc.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 4f5f8c907897..e6cd482a11b9 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1558,7 +1558,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ struct vmap_block *vb;
+ struct vmap_area *va;
+ unsigned long vb_idx;
+- int node, err;
++ int node, err, cpu;
+ void *vaddr;
+
+ node = numa_node_id();
+@@ -1595,11 +1595,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
+ return ERR_PTR(err);
+ }
+
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = this_cpu_ptr(&vmap_block_queue);
+ spin_lock(&vbq->lock);
+ list_add_tail_rcu(&vb->free_list, &vbq->free);
+ spin_unlock(&vbq->lock);
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+
+ return vaddr;
+ }
+@@ -1664,6 +1665,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ struct vmap_block *vb;
+ void *vaddr = NULL;
+ unsigned int order;
++ int cpu;
+
+ BUG_ON(offset_in_page(size));
+ BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+@@ -1678,7 +1680,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ order = get_order(size);
+
+ rcu_read_lock();
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = this_cpu_ptr(&vmap_block_queue);
+ list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+ unsigned long pages_off;
+
+@@ -1701,7 +1704,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
+ break;
+ }
+
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+ rcu_read_unlock();
+
+ /* Allocate new block if nothing was found */
+--
+2.19.1
+
diff --git a/features/rt/mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch b/features/rt/mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch
new file mode 100644
index 00000000..cdffd95d
--- /dev/null
+++ b/features/rt/mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch
@@ -0,0 +1,144 @@
+From 3032c5b7a5a91389d663bd54367df7ed1a92a7a6 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:13 -0500
+Subject: [PATCH 085/191] mm/vmstat: Protect per cpu variables with preempt
+ disable on RT
+
+Disable preemption on -RT for the vmstat code. On vanilla the code runs in
+IRQ-off regions while on -RT it does not. "preempt_disable" ensures that the
+same resource is not updated in parallel due to preemption.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/vmstat.h | 4 ++++
+ mm/vmstat.c | 12 ++++++++++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
+index 506d625163a1..09a7e4c714b8 100644
+--- a/include/linux/vmstat.h
++++ b/include/linux/vmstat.h
+@@ -63,7 +63,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
+ */
+ static inline void __count_vm_event(enum vm_event_item item)
+ {
++ preempt_disable_rt();
+ raw_cpu_inc(vm_event_states.event[item]);
++ preempt_enable_rt();
+ }
+
+ static inline void count_vm_event(enum vm_event_item item)
+@@ -73,7 +75,9 @@ static inline void count_vm_event(enum vm_event_item item)
+
+ static inline void __count_vm_events(enum vm_event_item item, long delta)
+ {
++ preempt_disable_rt();
+ raw_cpu_add(vm_event_states.event[item], delta);
++ preempt_enable_rt();
+ }
+
+ static inline void count_vm_events(enum vm_event_item item, long delta)
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 74b2c374b86c..4ef7b8a8f5ce 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -321,6 +321,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+ long x;
+ long t;
+
++ preempt_disable_rt();
+ x = delta + __this_cpu_read(*p);
+
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -330,6 +331,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+ x = 0;
+ }
+ __this_cpu_write(*p, x);
++ preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_zone_page_state);
+
+@@ -352,6 +354,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
+ delta >>= PAGE_SHIFT;
+ }
+
++ preempt_disable_rt();
+ x = delta + __this_cpu_read(*p);
+
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -361,6 +364,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
+ x = 0;
+ }
+ __this_cpu_write(*p, x);
++ preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_node_page_state);
+
+@@ -393,6 +397,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ s8 __percpu *p = pcp->vm_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_inc_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v > t)) {
+@@ -401,6 +406,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
+ zone_page_state_add(v + overstep, zone, item);
+ __this_cpu_write(*p, -overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -411,6 +417,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+
+ VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
++ preempt_disable_rt();
+ v = __this_cpu_inc_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v > t)) {
+@@ -419,6 +426,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ node_page_state_add(v + overstep, pgdat, item);
+ __this_cpu_write(*p, -overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+@@ -439,6 +447,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ s8 __percpu *p = pcp->vm_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_dec_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v < - t)) {
+@@ -447,6 +456,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+ zone_page_state_add(v - overstep, zone, item);
+ __this_cpu_write(*p, overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+@@ -457,6 +467,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+
+ VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
+
++ preempt_disable_rt();
+ v = __this_cpu_dec_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v < - t)) {
+@@ -465,6 +476,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
+ node_page_state_add(v - overstep, pgdat, item);
+ __this_cpu_write(*p, overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
+--
+2.19.1
+
diff --git a/features/rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch b/features/rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
new file mode 100644
index 00000000..d2cd6abd
--- /dev/null
+++ b/features/rt/mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
@@ -0,0 +1,48 @@
+From f8847b3d3ebc1ea59be319e5833c585bacd2d5f2 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 11 Feb 2019 10:40:46 +0100
+Subject: [PATCH 036/191] mm: workingset: replace IRQ-off check with a lockdep
+ assert.
+
+Commit
+
+ 68d48e6a2df57 ("mm: workingset: add vmstat counter for shadow nodes")
+
+introduced an IRQ-off check to ensure that a lock is held which also
+disables interrupts. This does not work the same way on -RT because none
+of the held locks disable interrupts.
+Replace this check with a lockdep assert which ensures that the lock is
+held.
+
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/workingset.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/mm/workingset.c b/mm/workingset.c
+index cd39902c1062..3d34c1309f54 100644
+--- a/mm/workingset.c
++++ b/mm/workingset.c
+@@ -430,6 +430,8 @@ static struct list_lru shadow_nodes;
+
+ void workingset_update_node(struct xa_node *node)
+ {
++ struct address_space *mapping;
++
+ /*
+ * Track non-empty nodes that contain only shadow entries;
+ * unlink those that contain pages or are being freed.
+@@ -438,7 +440,8 @@ void workingset_update_node(struct xa_node *node)
+ * already where they should be. The list_empty() test is safe
+ * as node->private_list is protected by the i_pages lock.
+ */
+- VM_WARN_ON_ONCE(!irqs_disabled()); /* For __inc_lruvec_page_state */
++ mapping = container_of(node->array, struct address_space, i_pages);
++ lockdep_assert_held(&mapping->i_pages.xa_lock);
+
+ if (node->count && node->count == node->nr_values) {
+ if (list_empty(&node->private_list)) {
+--
+2.19.1
+
diff --git a/features/rt/mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch b/features/rt/mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch
new file mode 100644
index 00000000..20da73a1
--- /dev/null
+++ b/features/rt/mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch
@@ -0,0 +1,211 @@
+From 58041154aa39fc127b4c5d0f6179c606403ced5d Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Tue, 22 Mar 2016 11:16:09 +0100
+Subject: [PATCH 116/191] mm/zsmalloc: copy with get_cpu_var() and locking
+
+get_cpu_var() disables preemption and triggers a might_sleep() splat later.
+This is replaced with get_locked_var().
+The bit spinlocks are replaced with a proper mutex which requires a slightly
+larger struct to allocate.
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+[bigeasy: replace the bitspin_lock() with a mutex, get_locked_var(). Mike then
+fixed the size magic]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/zsmalloc.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 79 insertions(+), 6 deletions(-)
+
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index 30c358b72025..6177b736d018 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -57,6 +57,7 @@
+ #include <linux/wait.h>
+ #include <linux/pagemap.h>
+ #include <linux/fs.h>
++#include <linux/local_lock.h>
+
+ #define ZSPAGE_MAGIC 0x58
+
+@@ -77,6 +78,20 @@
+
+ #define ZS_HANDLE_SIZE (sizeof(unsigned long))
+
++#ifdef CONFIG_PREEMPT_RT
++
++struct zsmalloc_handle {
++ unsigned long addr;
++ struct mutex lock;
++};
++
++#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
++
++#else
++
++#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
++#endif
++
+ /*
+ * Object location (<PFN>, <obj_idx>) is encoded as
+ * a single (unsigned long) handle value.
+@@ -293,6 +308,7 @@ struct zspage {
+ };
+
+ struct mapping_area {
++ local_lock_t lock;
+ char *vm_buf; /* copy buffer for objects that span pages */
+ char *vm_addr; /* address of kmap_atomic()'ed pages */
+ enum zs_mapmode vm_mm; /* mapping mode */
+@@ -322,7 +338,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
+
+ static int create_cache(struct zs_pool *pool)
+ {
+- pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
++ pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
+ 0, 0, NULL);
+ if (!pool->handle_cachep)
+ return 1;
+@@ -346,9 +362,26 @@ static void destroy_cache(struct zs_pool *pool)
+
+ static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
+ {
+- return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
+- gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++ void *p;
++
++ p = kmem_cache_alloc(pool->handle_cachep,
++ gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
++#ifdef CONFIG_PREEMPT_RT
++ if (p) {
++ struct zsmalloc_handle *zh = p;
++
++ mutex_init(&zh->lock);
++ }
++#endif
++ return (unsigned long)p;
++}
++
++#ifdef CONFIG_PREEMPT_RT
++static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
++{
++ return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1));
+ }
++#endif
+
+ static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
+ {
+@@ -368,12 +401,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+
+ static void record_obj(unsigned long handle, unsigned long obj)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ WRITE_ONCE(zh->addr, obj);
++#else
+ /*
+ * lsb of @obj represents handle lock while other bits
+ * represent object value the handle is pointing so
+ * updating shouldn't do store tearing.
+ */
+ WRITE_ONCE(*(unsigned long *)handle, obj);
++#endif
+ }
+
+ /* zpool driver */
+@@ -455,7 +494,10 @@ MODULE_ALIAS("zpool-zsmalloc");
+ #endif /* CONFIG_ZPOOL */
+
+ /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
+-static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
++static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
++ /* XXX remove this and use a spin_lock_t in pin_tag() */
++ .lock = INIT_LOCAL_LOCK(lock),
++};
+
+ static bool is_zspage_isolated(struct zspage *zspage)
+ {
+@@ -862,7 +904,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
+
+ static unsigned long handle_to_obj(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return zh->addr;
++#else
+ return *(unsigned long *)handle;
++#endif
+ }
+
+ static unsigned long obj_to_head(struct page *page, void *obj)
+@@ -876,22 +924,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
+
+ static inline int testpin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_is_locked(&zh->lock);
++#else
+ return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static inline int trypin_tag(unsigned long handle)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_trylock(&zh->lock);
++#else
+ return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static void pin_tag(unsigned long handle) __acquires(bitlock)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_lock(&zh->lock);
++#else
+ bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static void unpin_tag(unsigned long handle) __releases(bitlock)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
++
++ return mutex_unlock(&zh->lock);
++#else
+ bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
++#endif
+ }
+
+ static void reset_page(struct page *page)
+@@ -1274,7 +1346,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ class = pool->size_class[class_idx];
+ off = (class->size * obj_idx) & ~PAGE_MASK;
+
+- area = &get_cpu_var(zs_map_area);
++ local_lock(&zs_map_area.lock);
++ area = this_cpu_ptr(&zs_map_area);
+ area->vm_mm = mm;
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+@@ -1328,7 +1401,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
+
+ __zs_unmap_object(area, pages, off, class->size);
+ }
+- put_cpu_var(zs_map_area);
++ local_unlock(&zs_map_area.lock);
+
+ migrate_read_unlock(zspage);
+ unpin_tag(handle);
+--
+2.19.1
+
diff --git a/features/rt/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch b/features/rt/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch
new file mode 100644
index 00000000..81ff8d7a
--- /dev/null
+++ b/features/rt/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch
@@ -0,0 +1,36 @@
+From 792214a4ddc6c2bca52779e1665f52c5587868f9 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 16 Sep 2020 16:15:39 +0200
+Subject: [PATCH 142/191] net: Dequeue in dev_cpu_dead() without the lock
+
+Upstream uses skb_dequeue() to acquire the lock of `input_pkt_queue'. The reason
+is to synchronize against a remote CPU which still thinks that this CPU is
+online and enqueues packets to it.
+There is no guarantee that the packet is enqueued before the callback is run;
+it is just hoped for.
+RT however complains about an uninitialized lock because it uses another lock
+for `input_pkt_queue' due to the IRQ-off nature of the context.
+
+Use the unlocked dequeue version for `input_pkt_queue'.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 16c9aa19ede2..aab963be5655 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -11162,7 +11162,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
+ netif_rx_ni(skb);
+ input_queue_head_incr(oldsd);
+ }
+- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
++ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ netif_rx_ni(skb);
+ input_queue_head_incr(oldsd);
+ }
+--
+2.19.1
+
diff --git a/features/rt/net-Move-lockdep-where-it-belongs.patch b/features/rt/net-Move-lockdep-where-it-belongs.patch
new file mode 100644
index 00000000..d6d1ff0d
--- /dev/null
+++ b/features/rt/net-Move-lockdep-where-it-belongs.patch
@@ -0,0 +1,46 @@
+From abf7b145f704064b5de40759c313fe711a89234f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 8 Sep 2020 07:32:20 +0200
+Subject: [PATCH 038/191] net: Move lockdep where it belongs
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ net/core/sock.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 0ed98f20448a..f1d1cee0dddd 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3064,12 +3064,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
+ if (sk->sk_lock.owned)
+ __lock_sock(sk);
+ sk->sk_lock.owned = 1;
+- spin_unlock(&sk->sk_lock.slock);
++ spin_unlock_bh(&sk->sk_lock.slock);
+ /*
+ * The sk_lock has mutex_lock() semantics here:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+- local_bh_enable();
+ }
+ EXPORT_SYMBOL(lock_sock_nested);
+
+@@ -3118,13 +3117,12 @@ bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
+
+ __lock_sock(sk);
+ sk->sk_lock.owned = 1;
+- spin_unlock(&sk->sk_lock.slock);
++ spin_unlock_bh(&sk->sk_lock.slock);
+ /*
+ * The sk_lock has mutex_lock() semantics here:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+ __acquire(&sk->sk_lock.slock);
+- local_bh_enable();
+ return true;
+ }
+ EXPORT_SYMBOL(lock_sock_fast);
+--
+2.19.1
+
diff --git a/features/rt/net-Properly-annotate-the-try-lock-for-the-seqlock.patch b/features/rt/net-Properly-annotate-the-try-lock-for-the-seqlock.patch
new file mode 100644
index 00000000..59f37318
--- /dev/null
+++ b/features/rt/net-Properly-annotate-the-try-lock-for-the-seqlock.patch
@@ -0,0 +1,69 @@
+From 363d9082c806cbde11d4468cfaaac47e4b529c82 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 8 Sep 2020 16:57:11 +0200
+Subject: [PATCH 092/191] net: Properly annotate the try-lock for the seqlock
+
+In patch
+ ("net/Qdisc: use a seqlock instead seqcount")
+
+the seqcount has been replaced with a seqlock to allow the reader to
+boost the preempted writer.
+The try_write_seqlock() acquired the lock with a try-lock but the
+seqcount annotation was "lock".
+
+Opencode write_seqcount_t_begin() and use the try-lock annotation for
+lockdep.
+
+Reported-by: Mike Galbraith <efault@gmx.de>
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/net_seq_lock.h | 9 ---------
+ include/net/sch_generic.h | 10 +++++++++-
+ 2 files changed, 9 insertions(+), 10 deletions(-)
+
+diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h
+index 95a497a72e51..67710bace741 100644
+--- a/include/net/net_seq_lock.h
++++ b/include/net/net_seq_lock.h
+@@ -6,15 +6,6 @@
+ # define net_seq_begin(__r) read_seqbegin(__r)
+ # define net_seq_retry(__r, __s) read_seqretry(__r, __s)
+
+-static inline int try_write_seqlock(seqlock_t *sl)
+-{
+- if (spin_trylock(&sl->lock)) {
+- write_seqcount_begin(&sl->seqcount);
+- return 1;
+- }
+- return 0;
+-}
+-
+ #else
+ # define net_seqlock_t seqcount_t
+ # define net_seq_begin(__r) read_seqcount_begin(__r)
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 1650446d3bfa..bf0865d642f5 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -171,8 +171,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ return false;
+ }
+ #ifdef CONFIG_PREEMPT_RT
+- if (try_write_seqlock(&qdisc->running))
++ if (spin_trylock(&qdisc->running.lock)) {
++ seqcount_t *s = &qdisc->running.seqcount.seqcount;
++ /*
++ * Variant of write_seqcount_t_begin() telling lockdep that a
++ * trylock was attempted.
++ */
++ do_raw_write_seqcount_begin(s);
++ seqcount_acquire(&s->dep_map, 0, 1, _RET_IP_);
+ return true;
++ }
+ return false;
+ #else
+ /* Variant of write_seqcount_begin() telling lockdep a trylock
+--
+2.19.1
+
diff --git a/features/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/features/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
new file mode 100644
index 00000000..7ef9f7ad
--- /dev/null
+++ b/features/rt/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -0,0 +1,298 @@
+From 0532faba565e7c8cf62950d7f1a9a7bc7ab9840b Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Sep 2016 17:36:35 +0200
+Subject: [PATCH 091/191] net/Qdisc: use a seqlock instead seqcount
+
+The seqcount disables preemption on -RT while it is held, which can't be
+removed. Also we don't want the reader to spin for ages if the writer is
+scheduled out. The seqlock on the other hand will serialize / sleep on
+the lock while the writer is active.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/gen_stats.h | 11 ++++++-----
+ include/net/net_seq_lock.h | 24 ++++++++++++++++++++++++
+ include/net/sch_generic.h | 19 +++++++++++++++++--
+ net/core/gen_estimator.c | 6 +++---
+ net/core/gen_stats.c | 12 ++++++------
+ net/sched/sch_api.c | 2 +-
+ net/sched/sch_generic.c | 10 ++++++++++
+ 7 files changed, 67 insertions(+), 17 deletions(-)
+ create mode 100644 include/net/net_seq_lock.h
+
+diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
+index 1424e02cef90..163f8415e5db 100644
+--- a/include/net/gen_stats.h
++++ b/include/net/gen_stats.h
+@@ -6,6 +6,7 @@
+ #include <linux/socket.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/pkt_sched.h>
++#include <net/net_seq_lock.h>
+
+ /* Note: this used to be in include/uapi/linux/gen_stats.h */
+ struct gnet_stats_basic_packed {
+@@ -42,15 +43,15 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+ spinlock_t *lock, struct gnet_dump *d,
+ int padattr);
+
+-int gnet_stats_copy_basic(const seqcount_t *running,
++int gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
+-void __gnet_stats_copy_basic(const seqcount_t *running,
++void __gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
+-int gnet_stats_copy_basic_hw(const seqcount_t *running,
++int gnet_stats_copy_basic_hw(net_seqlock_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b);
+@@ -70,13 +71,13 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct net_rate_estimator __rcu **rate_est,
+ spinlock_t *lock,
+- seqcount_t *running, struct nlattr *opt);
++ net_seqlock_t *running, struct nlattr *opt);
+ void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
+ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct net_rate_estimator __rcu **ptr,
+ spinlock_t *lock,
+- seqcount_t *running, struct nlattr *opt);
++ net_seqlock_t *running, struct nlattr *opt);
+ bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
+ bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
+ struct gnet_stats_rate_est64 *sample);
+diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h
+new file mode 100644
+index 000000000000..95a497a72e51
+--- /dev/null
++++ b/include/net/net_seq_lock.h
+@@ -0,0 +1,24 @@
++#ifndef __NET_NET_SEQ_LOCK_H__
++#define __NET_NET_SEQ_LOCK_H__
++
++#ifdef CONFIG_PREEMPT_RT
++# define net_seqlock_t seqlock_t
++# define net_seq_begin(__r) read_seqbegin(__r)
++# define net_seq_retry(__r, __s) read_seqretry(__r, __s)
++
++static inline int try_write_seqlock(seqlock_t *sl)
++{
++ if (spin_trylock(&sl->lock)) {
++ write_seqcount_begin(&sl->seqcount);
++ return 1;
++ }
++ return 0;
++}
++
++#else
++# define net_seqlock_t seqcount_t
++# define net_seq_begin(__r) read_seqcount_begin(__r)
++# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s)
++#endif
++
++#endif
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 2d6eb60c58c8..1650446d3bfa 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -10,6 +10,7 @@
+ #include <linux/percpu.h>
+ #include <linux/dynamic_queue_limits.h>
+ #include <linux/list.h>
++#include <net/net_seq_lock.h>
+ #include <linux/refcount.h>
+ #include <linux/workqueue.h>
+ #include <linux/mutex.h>
+@@ -100,7 +101,7 @@ struct Qdisc {
+ struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
+ struct qdisc_skb_head q;
+ struct gnet_stats_basic_packed bstats;
+- seqcount_t running;
++ net_seqlock_t running;
+ struct gnet_stats_queue qstats;
+ unsigned long state;
+ struct Qdisc *next_sched;
+@@ -141,7 +142,11 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
+ {
+ if (qdisc->flags & TCQ_F_NOLOCK)
+ return spin_is_locked(&qdisc->seqlock);
++#ifdef CONFIG_PREEMPT_RT
++ return spin_is_locked(&qdisc->running.lock) ? true : false;
++#else
+ return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
++#endif
+ }
+
+ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+@@ -165,17 +170,27 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
+ } else if (qdisc_is_running(qdisc)) {
+ return false;
+ }
++#ifdef CONFIG_PREEMPT_RT
++ if (try_write_seqlock(&qdisc->running))
++ return true;
++ return false;
++#else
+ /* Variant of write_seqcount_begin() telling lockdep a trylock
+ * was attempted.
+ */
+ raw_write_seqcount_begin(&qdisc->running);
+ seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
+ return true;
++#endif
+ }
+
+ static inline void qdisc_run_end(struct Qdisc *qdisc)
+ {
++#ifdef CONFIG_PREEMPT_RT
++ write_sequnlock(&qdisc->running);
++#else
+ write_seqcount_end(&qdisc->running);
++#endif
+ if (qdisc->flags & TCQ_F_NOLOCK)
+ spin_unlock(&qdisc->seqlock);
+ }
+@@ -540,7 +555,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
+ return qdisc_lock(root);
+ }
+
+-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
++static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+ {
+ struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
+index 8e582e29a41e..e51f4854d8b2 100644
+--- a/net/core/gen_estimator.c
++++ b/net/core/gen_estimator.c
+@@ -42,7 +42,7 @@
+ struct net_rate_estimator {
+ struct gnet_stats_basic_packed *bstats;
+ spinlock_t *stats_lock;
+- seqcount_t *running;
++ net_seqlock_t *running;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ u8 ewma_log;
+ u8 intvl_log; /* period : (250ms << intvl_log) */
+@@ -125,7 +125,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct net_rate_estimator __rcu **rate_est,
+ spinlock_t *lock,
+- seqcount_t *running,
++ net_seqlock_t *running,
+ struct nlattr *opt)
+ {
+ struct gnet_estimator *parm = nla_data(opt);
+@@ -226,7 +226,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+ struct net_rate_estimator __rcu **rate_est,
+ spinlock_t *lock,
+- seqcount_t *running, struct nlattr *opt)
++ net_seqlock_t *running, struct nlattr *opt)
+ {
+ return gen_new_estimator(bstats, cpu_bstats, rate_est,
+ lock, running, opt);
+diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
+index e491b083b348..ef432cea2e10 100644
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -137,7 +137,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
+ }
+
+ void
+-__gnet_stats_copy_basic(const seqcount_t *running,
++__gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
+@@ -150,15 +150,15 @@ __gnet_stats_copy_basic(const seqcount_t *running,
+ }
+ do {
+ if (running)
+- seq = read_seqcount_begin(running);
++ seq = net_seq_begin(running);
+ bstats->bytes = b->bytes;
+ bstats->packets = b->packets;
+- } while (running && read_seqcount_retry(running, seq));
++ } while (running && net_seq_retry(running, seq));
+ }
+ EXPORT_SYMBOL(__gnet_stats_copy_basic);
+
+ static int
+-___gnet_stats_copy_basic(const seqcount_t *running,
++___gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b,
+@@ -204,7 +204,7 @@ ___gnet_stats_copy_basic(const seqcount_t *running,
+ * if the room in the socket buffer was not sufficient.
+ */
+ int
+-gnet_stats_copy_basic(const seqcount_t *running,
++gnet_stats_copy_basic(net_seqlock_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
+@@ -228,7 +228,7 @@ EXPORT_SYMBOL(gnet_stats_copy_basic);
+ * if the room in the socket buffer was not sufficient.
+ */
+ int
+-gnet_stats_copy_basic_hw(const seqcount_t *running,
++gnet_stats_copy_basic_hw(net_seqlock_t *running,
+ struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index f87d07736a14..7a627b208393 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1258,7 +1258,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
+ rcu_assign_pointer(sch->stab, stab);
+ }
+ if (tca[TCA_RATE]) {
+- seqcount_t *running;
++ net_seqlock_t *running;
+
+ err = -EOPNOTSUPP;
+ if (sch->flags & TCQ_F_MQROOT) {
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 49eae93d1489..512a39d6edec 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -553,7 +553,11 @@ struct Qdisc noop_qdisc = {
+ .ops = &noop_qdisc_ops,
+ .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
+ .dev_queue = &noop_netdev_queue,
++#ifdef CONFIG_PREEMPT_RT
++ .running = __SEQLOCK_UNLOCKED(noop_qdisc.running),
++#else
+ .running = SEQCNT_ZERO(noop_qdisc.running),
++#endif
+ .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
+ .gso_skb = {
+ .next = (struct sk_buff *)&noop_qdisc.gso_skb,
+@@ -845,9 +849,15 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ lockdep_set_class(&sch->busylock,
+ dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
+
++#ifdef CONFIG_PREEMPT_RT
++ seqlock_init(&sch->running);
++ lockdep_set_class(&sch->running.lock,
++ dev->qdisc_running_key ?: &qdisc_running_key);
++#else
+ seqcount_init(&sch->running);
+ lockdep_set_class(&sch->running,
+ dev->qdisc_running_key ?: &qdisc_running_key);
++#endif
+
+ sch->ops = ops;
+ sch->flags = ops->static_flags;
+--
+2.19.1
+
diff --git a/features/rt/net-Remove-preemption-disabling-in-netif_rx.patch b/features/rt/net-Remove-preemption-disabling-in-netif_rx.patch
new file mode 100644
index 00000000..b92de5fc
--- /dev/null
+++ b/features/rt/net-Remove-preemption-disabling-in-netif_rx.patch
@@ -0,0 +1,67 @@
+From efa7c96f4b8dec2a51711a6eef658093b42a6d91 Mon Sep 17 00:00:00 2001
+From: Priyanka Jain <Priyanka.Jain@freescale.com>
+Date: Thu, 17 May 2012 09:35:11 +0530
+Subject: [PATCH 150/191] net: Remove preemption disabling in netif_rx()
+
+1)enqueue_to_backlog() (called from netif_rx) should be
+  bound to a particular CPU. This can be achieved by
+  disabling migration. No need to disable preemption.
+
+2)Fixes crash "BUG: scheduling while atomic: ksoftirqd"
+  in case of RT.
+  If preemption is disabled, enqueue_to_backlog() is called
+  in atomic context. And if the backlog exceeds its count,
+  kfree_skb() is called. But in RT, kfree_skb() might
+  get scheduled out, so it expects a non-atomic context.
+
+-Replace preempt_enable(), preempt_disable() with
+ migrate_enable(), migrate_disable() respectively
+-Replace get_cpu(), put_cpu() with get_cpu_light(),
+ put_cpu_light() respectively
+
+Signed-off-by: Priyanka Jain <Priyanka.Jain@freescale.com>
+Acked-by: Rajan Srivastava <Rajan.Srivastava@freescale.com>
+Cc: <rostedt@goodmis.orgn>
+Link: http://lkml.kernel.org/r/1337227511-2271-1-git-send-email-Priyanka.Jain@freescale.com
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[bigeasy: Remove assumption about migrate_disable() from the description.]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 52c928940167..ca150f02eed1 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4837,7 +4837,7 @@ static int netif_rx_internal(struct sk_buff *skb)
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu;
+
+- preempt_disable();
++ migrate_disable();
+ rcu_read_lock();
+
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+@@ -4847,14 +4847,14 @@ static int netif_rx_internal(struct sk_buff *skb)
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+
+ rcu_read_unlock();
+- preempt_enable();
++ migrate_enable();
+ } else
+ #endif
+ {
+ unsigned int qtail;
+
+- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+- put_cpu();
++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
++ put_cpu_light();
+ }
+ return ret;
+ }
+--
+2.19.1
+
diff --git a/features/rt/net-Use-skbufhead-with-raw-lock.patch b/features/rt/net-Use-skbufhead-with-raw-lock.patch
new file mode 100644
index 00000000..3309dda6
--- /dev/null
+++ b/features/rt/net-Use-skbufhead-with-raw-lock.patch
@@ -0,0 +1,73 @@
+From d7ca8b65392672151fbaaa0c0ec93b3be0dd2d51 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Jul 2011 15:38:34 +0200
+Subject: [PATCH 141/191] net: Use skbufhead with raw lock
+
+Use the rps lock as a raw lock so we can keep the irq-off regions. It looks
+low-latency. However we can't kfree() from this context, therefore we defer this
+to the softirq and use the tofree_queue list for it (similar to process_queue).
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/skbuff.h | 7 +++++++
+ net/core/dev.c | 6 +++---
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 6d0a33d1c0db..3402903b5cf0 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -295,6 +295,7 @@ struct sk_buff_head {
+
+ __u32 qlen;
+ spinlock_t lock;
++ raw_spinlock_t raw_lock;
+ };
+
+ struct sk_buff;
+@@ -1902,6 +1903,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
+ __skb_queue_head_init(list);
+ }
+
++static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
++{
++ raw_spin_lock_init(&list->raw_lock);
++ __skb_queue_head_init(list);
++}
++
+ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+ struct lock_class_key *class)
+ {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2d6ee43e02b8..16c9aa19ede2 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -223,14 +223,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
+ static inline void rps_lock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_lock(&sd->input_pkt_queue.lock);
++ raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+ static inline void rps_unlock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_unlock(&sd->input_pkt_queue.lock);
++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+@@ -11478,7 +11478,7 @@ static int __init net_dev_init(void)
+
+ INIT_WORK(flush, flush_backlog);
+
+- skb_queue_head_init(&sd->input_pkt_queue);
++ skb_queue_head_init_raw(&sd->input_pkt_queue);
+ skb_queue_head_init(&sd->process_queue);
+ #ifdef CONFIG_XFRM_OFFLOAD
+ skb_queue_head_init(&sd->xfrm_backlog);
+--
+2.19.1
+
diff --git a/features/rt/net-core-disable-NET_RX_BUSY_POLL-on-RT.patch b/features/rt/net-core-disable-NET_RX_BUSY_POLL-on-RT.patch
new file mode 100644
index 00000000..4612f762
--- /dev/null
+++ b/features/rt/net-core-disable-NET_RX_BUSY_POLL-on-RT.patch
@@ -0,0 +1,43 @@
+From 116e028b98d7a297eccf815f0a734e80154b53c3 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Sat, 27 May 2017 19:02:06 +0200
+Subject: [PATCH 096/191] net/core: disable NET_RX_BUSY_POLL on RT
+
+napi_busy_loop() disables preemption and performs a NAPI poll. We can't acquire
+sleeping locks with disabled preemption so we would have to work around this
+and add explicit locking for synchronisation against ksoftirqd.
+Without explicit synchronisation a low priority process would "own" the NAPI
+state (by setting NAPIF_STATE_SCHED) and could be scheduled out (no
+preempt_disable() and BH is preemptible on RT).
+In case a network packet arrives, the interrupt handler would set
+NAPIF_STATE_MISSED and the system would wait until the task owning the NAPI
+state is scheduled in again.
+Should a task with RT priority busy poll, it would consume the CPU instead
+of allowing tasks with lower priority to run.
+
+NET_RX_BUSY_POLL is disabled by default (the system-wide sysctls for
+poll/read are set to zero), so disable NET_RX_BUSY_POLL on RT to avoid a wrong
+locking context on RT. Should this feature be considered useful on RT systems
+then it could be enabled again with proper locking and synchronisation.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/Kconfig b/net/Kconfig
+index 8cea808ad9e8..32f101108361 100644
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -286,7 +286,7 @@ config CGROUP_NET_CLASSID
+
+ config NET_RX_BUSY_POLL
+ bool
+- default y
++ default y if !PREEMPT_RT
+
+ config BQL
+ bool
+--
+2.19.1
+
diff --git a/features/rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch b/features/rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch
new file mode 100644
index 00000000..ba6256a3
--- /dev/null
+++ b/features/rt/net-core-use-local_bh_disable-in-netif_rx_ni.patch
@@ -0,0 +1,40 @@
+From 44a59612611dac7147b1bc47494ed5e8aa95855e Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 16 Jun 2017 19:03:16 +0200
+Subject: [PATCH 126/191] net/core: use local_bh_disable() in netif_rx_ni()
+
+In 2004 netif_rx_ni() gained a preempt_disable() section around
+netif_rx() and its do_softirq() + testing for it. The do_softirq() part
+is required because netif_rx() raises the softirq but does not invoke
+it. The preempt_disable() is required to remain on the same CPU which added the
+skb to the per-CPU list.
+All this can be avoided by putting this into a local_bh_disable()ed
+section. The local_bh_enable() part will invoke do_softirq() if
+required.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/dev.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 86a599a41062..2d6ee43e02b8 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4889,11 +4889,9 @@ int netif_rx_ni(struct sk_buff *skb)
+
+ trace_netif_rx_ni_entry(skb);
+
+- preempt_disable();
++ local_bh_disable();
+ err = netif_rx_internal(skb);
+- if (local_softirq_pending())
+- do_softirq();
+- preempt_enable();
++ local_bh_enable();
+ trace_netif_rx_ni_exit(err);
+
+ return err;
+--
+2.19.1
+
diff --git a/features/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/features/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
new file mode 100644
index 00000000..75ccf34f
--- /dev/null
+++ b/features/rt/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -0,0 +1,41 @@
+From 2e3722be67eda134ba0ed9bc8507e6f3ca3e0011 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 30 Mar 2016 13:36:29 +0200
+Subject: [PATCH 143/191] net: dev: always take qdisc's busylock in
+ __dev_xmit_skb()
+
+The root-lock is dropped before dev_hard_start_xmit() is invoked and after
+setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away
+by a task with a higher priority then the task with the higher priority
+won't be able to submit packets to the NIC directly; instead they will be
+enqueued into the Qdisc. The NIC will remain idle until the task(s) with
+higher priority leave the CPU and the task with lower priority gets back
+and finishes the job.
+
+If we always take the busylock we ensure that the RT task can boost the
+low-prio task and submit the packet.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/dev.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index aab963be5655..52c928940167 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3807,7 +3807,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+ * This permits qdisc->running owner to get the lock more
+ * often and dequeue packets faster.
+ */
++#ifdef CONFIG_PREEMPT_RT
++ contended = true;
++#else
+ contended = qdisc_is_running(q);
++#endif
+ if (unlikely(contended))
+ spin_lock(&q->busylock);
+
+--
+2.19.1
+
diff --git a/features/rt/net-jme-Replace-link-change-tasklet-with-work.patch b/features/rt/net-jme-Replace-link-change-tasklet-with-work.patch
new file mode 100644
index 00000000..8373eb7f
--- /dev/null
+++ b/features/rt/net-jme-Replace-link-change-tasklet-with-work.patch
@@ -0,0 +1,87 @@
+From f5a683610df7d5b147cf18ed631f98f1813afec5 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:11 +0100
+Subject: [PATCH 048/191] net: jme: Replace link-change tasklet with work
+
+The link change tasklet disables the tasklets for tx/rx processing while
+updating hw parameters and then enables the tasklets again.
+
+This update can also be pushed into a workqueue where it can be performed
+in preemptible context. This allows tasklet_disable() to become sleeping.
+
+Replace the linkch_task tasklet with a work.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/jme.c | 10 +++++-----
+ drivers/net/ethernet/jme.h | 2 +-
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
+index e9efe074edc1..f1b9284e0bea 100644
+--- a/drivers/net/ethernet/jme.c
++++ b/drivers/net/ethernet/jme.c
+@@ -1265,9 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapter *jme)
+ jwrite32f(jme, JME_APMC, apmc);
+ }
+
+-static void jme_link_change_tasklet(struct tasklet_struct *t)
++static void jme_link_change_work(struct work_struct *work)
+ {
+- struct jme_adapter *jme = from_tasklet(jme, t, linkch_task);
++ struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task);
+ struct net_device *netdev = jme->dev;
+ int rc;
+
+@@ -1510,7 +1510,7 @@ jme_intr_msi(struct jme_adapter *jme, u32 intrstat)
+ * all other events are ignored
+ */
+ jwrite32(jme, JME_IEVE, intrstat);
+- tasklet_schedule(&jme->linkch_task);
++ schedule_work(&jme->linkch_task);
+ goto out_reenable;
+ }
+
+@@ -1832,7 +1832,6 @@ jme_open(struct net_device *netdev)
+ jme_clear_pm_disable_wol(jme);
+ JME_NAPI_ENABLE(jme);
+
+- tasklet_setup(&jme->linkch_task, jme_link_change_tasklet);
+ tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
+ tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
+ tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+@@ -1920,7 +1919,7 @@ jme_close(struct net_device *netdev)
+
+ JME_NAPI_DISABLE(jme);
+
+- tasklet_kill(&jme->linkch_task);
++ cancel_work_sync(&jme->linkch_task);
+ tasklet_kill(&jme->txclean_task);
+ tasklet_kill(&jme->rxclean_task);
+ tasklet_kill(&jme->rxempty_task);
+@@ -3035,6 +3034,7 @@ jme_init_one(struct pci_dev *pdev,
+ atomic_set(&jme->rx_empty, 1);
+
+ tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
++ INIT_WORK(&jme->linkch_task, jme_link_change_work);
+ jme->dpi.cur = PCC_P1;
+
+ jme->reg_ghc = 0;
+diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
+index a2c3b00d939d..2af76329b4a2 100644
+--- a/drivers/net/ethernet/jme.h
++++ b/drivers/net/ethernet/jme.h
+@@ -411,7 +411,7 @@ struct jme_adapter {
+ struct tasklet_struct rxempty_task;
+ struct tasklet_struct rxclean_task;
+ struct tasklet_struct txclean_task;
+- struct tasklet_struct linkch_task;
++ struct work_struct linkch_task;
+ struct tasklet_struct pcc_task;
+ unsigned long flags;
+ u32 reg_txcs;
+--
+2.19.1
+
diff --git a/features/rt/net-sundance-Use-tasklet_disable_in_atomic.patch b/features/rt/net-sundance-Use-tasklet_disable_in_atomic.patch
new file mode 100644
index 00000000..93713951
--- /dev/null
+++ b/features/rt/net-sundance-Use-tasklet_disable_in_atomic.patch
@@ -0,0 +1,38 @@
+From 8281edf5e8fa71507a09c7511ea11da232e373af Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:12 +0100
+Subject: [PATCH 049/191] net: sundance: Use tasklet_disable_in_atomic().
+
+tasklet_disable() is used in the timer callback. This might be disentangled,
+but without access to the hardware that's a bit risky.
+
+Replace it with tasklet_disable_in_atomic() so tasklet_disable() can be
+changed to a sleep wait once all remaining atomic users are converted.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Denis Kirjanov <kda@linux-powerpc.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Jakub Kicinski <kuba@kernel.org>
+Cc: netdev@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/dlink/sundance.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
+index e3a8858915b3..df0eab479d51 100644
+--- a/drivers/net/ethernet/dlink/sundance.c
++++ b/drivers/net/ethernet/dlink/sundance.c
+@@ -963,7 +963,7 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
+ unsigned long flag;
+
+ netif_stop_queue(dev);
+- tasklet_disable(&np->tx_tasklet);
++ tasklet_disable_in_atomic(&np->tx_tasklet);
+ iowrite16(0, ioaddr + IntrEnable);
+ printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
+ "TxFrameId %2.2x,"
+--
+2.19.1
+
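For readers who have not seen the new helper: tasklet_disable_in_atomic() keeps the old busy-waiting semantics for callers that cannot sleep, which is exactly the situation in the timer callback above. A minimal usage sketch, with invented foo_* names:

  #include <linux/interrupt.h>
  #include <linux/timer.h>

  struct foo_priv {
          struct timer_list timer;
          struct tasklet_struct tx_tasklet;
  };

  /* timer callbacks run in atomic (softirq) context and must not sleep */
  static void foo_tx_timeout(struct timer_list *t)
  {
          struct foo_priv *np = from_timer(np, t, timer);

          /* spin until the tasklet has finished instead of sleeping */
          tasklet_disable_in_atomic(&np->tx_tasklet);
          /* ... reset the transmitter while the tasklet is parked ... */
          tasklet_enable(&np->tx_tasklet);
  }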
diff --git a/features/rt/notifier-Make-atomic_notifiers-use-raw_spinlock.patch b/features/rt/notifier-Make-atomic_notifiers-use-raw_spinlock.patch
new file mode 100644
index 00000000..9defbe15
--- /dev/null
+++ b/features/rt/notifier-Make-atomic_notifiers-use-raw_spinlock.patch
@@ -0,0 +1,131 @@
+From 074172587a9dfb0bae37a5663abb7e439376abfc Mon Sep 17 00:00:00 2001
+From: Valentin Schneider <valentin.schneider@arm.com>
+Date: Sun, 22 Nov 2020 20:19:04 +0000
+Subject: [PATCH 005/191] notifier: Make atomic_notifiers use raw_spinlock
+
+Booting a recent PREEMPT_RT kernel (v5.10-rc3-rt7-rebase) on my arm64 Juno
+leads to the idle task blocking on an RT sleeping spinlock down some
+notifier path:
+
+ [ 1.809101] BUG: scheduling while atomic: swapper/5/0/0x00000002
+ [ 1.809116] Modules linked in:
+ [ 1.809123] Preemption disabled at:
+ [ 1.809125] secondary_start_kernel (arch/arm64/kernel/smp.c:227)
+ [ 1.809146] CPU: 5 PID: 0 Comm: swapper/5 Tainted: G W 5.10.0-rc3-rt7 #168
+ [ 1.809153] Hardware name: ARM Juno development board (r0) (DT)
+ [ 1.809158] Call trace:
+ [ 1.809160] dump_backtrace (arch/arm64/kernel/stacktrace.c:100 (discriminator 1))
+ [ 1.809170] show_stack (arch/arm64/kernel/stacktrace.c:198)
+ [ 1.809178] dump_stack (lib/dump_stack.c:122)
+ [ 1.809188] __schedule_bug (kernel/sched/core.c:4886)
+ [ 1.809197] __schedule (./arch/arm64/include/asm/preempt.h:18 kernel/sched/core.c:4913 kernel/sched/core.c:5040)
+ [ 1.809204] preempt_schedule_lock (kernel/sched/core.c:5365 (discriminator 1))
+ [ 1.809210] rt_spin_lock_slowlock_locked (kernel/locking/rtmutex.c:1072)
+ [ 1.809217] rt_spin_lock_slowlock (kernel/locking/rtmutex.c:1110)
+ [ 1.809224] rt_spin_lock (./include/linux/rcupdate.h:647 kernel/locking/rtmutex.c:1139)
+ [ 1.809231] atomic_notifier_call_chain_robust (kernel/notifier.c:71 kernel/notifier.c:118 kernel/notifier.c:186)
+ [ 1.809240] cpu_pm_enter (kernel/cpu_pm.c:39 kernel/cpu_pm.c:93)
+ [ 1.809249] psci_enter_idle_state (drivers/cpuidle/cpuidle-psci.c:52 drivers/cpuidle/cpuidle-psci.c:129)
+ [ 1.809258] cpuidle_enter_state (drivers/cpuidle/cpuidle.c:238)
+ [ 1.809267] cpuidle_enter (drivers/cpuidle/cpuidle.c:353)
+ [ 1.809275] do_idle (kernel/sched/idle.c:132 kernel/sched/idle.c:213 kernel/sched/idle.c:273)
+ [ 1.809282] cpu_startup_entry (kernel/sched/idle.c:368 (discriminator 1))
+ [ 1.809288] secondary_start_kernel (arch/arm64/kernel/smp.c:273)
+
+Two points worth noting:
+
+1) That this is conceptually the same issue as pointed out in:
+ 313c8c16ee62 ("PM / CPU: replace raw_notifier with atomic_notifier")
+2) Only the _robust() variant of atomic_notifier callchains suffers from
+   this
+
+AFAICT only the cpu_pm_notifier_chain really needs to be changed, but
+singling it out would mean introducing a new (truly) non-blocking API. At
+the same time, callers that are fine with any blocking within the call
+chain should use blocking notifiers, so patching up all atomic_notifiers
+doesn't seem *too* crazy to me.
+
+Fixes: 70d932985757 ("notifier: Fix broken error handling pattern")
+Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
+Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
+Link: https://lkml.kernel.org/r/20201122201904.30940-1-valentin.schneider@arm.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/notifier.h | 6 +++---
+ kernel/notifier.c | 12 ++++++------
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/include/linux/notifier.h b/include/linux/notifier.h
+index 2fb373a5c1ed..723bc2df6388 100644
+--- a/include/linux/notifier.h
++++ b/include/linux/notifier.h
+@@ -58,7 +58,7 @@ struct notifier_block {
+ };
+
+ struct atomic_notifier_head {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct notifier_block __rcu *head;
+ };
+
+@@ -78,7 +78,7 @@ struct srcu_notifier_head {
+ };
+
+ #define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
+- spin_lock_init(&(name)->lock); \
++ raw_spin_lock_init(&(name)->lock); \
+ (name)->head = NULL; \
+ } while (0)
+ #define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \
+@@ -95,7 +95,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
+ cleanup_srcu_struct(&(name)->srcu);
+
+ #define ATOMIC_NOTIFIER_INIT(name) { \
+- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
+ .head = NULL }
+ #define BLOCKING_NOTIFIER_INIT(name) { \
+ .rwsem = __RWSEM_INITIALIZER((name).rwsem), \
+diff --git a/kernel/notifier.c b/kernel/notifier.c
+index 1b019cbca594..c20782f07643 100644
+--- a/kernel/notifier.c
++++ b/kernel/notifier.c
+@@ -142,9 +142,9 @@ int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
+ unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&nh->lock, flags);
++ raw_spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_register(&nh->head, n);
+- spin_unlock_irqrestore(&nh->lock, flags);
++ raw_spin_unlock_irqrestore(&nh->lock, flags);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
+@@ -164,9 +164,9 @@ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
+ unsigned long flags;
+ int ret;
+
+- spin_lock_irqsave(&nh->lock, flags);
++ raw_spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_chain_unregister(&nh->head, n);
+- spin_unlock_irqrestore(&nh->lock, flags);
++ raw_spin_unlock_irqrestore(&nh->lock, flags);
+ synchronize_rcu();
+ return ret;
+ }
+@@ -182,9 +182,9 @@ int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh,
+ * Musn't use RCU; because then the notifier list can
+ * change between the up and down traversal.
+ */
+- spin_lock_irqsave(&nh->lock, flags);
++ raw_spin_lock_irqsave(&nh->lock, flags);
+ ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
+- spin_unlock_irqrestore(&nh->lock, flags);
++ raw_spin_unlock_irqrestore(&nh->lock, flags);
+
+ return ret;
+ }
+--
+2.19.1
+
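Note that the atomic-notifier API itself is unchanged by this patch; only the head's internal lock becomes a raw_spinlock_t, so the chain can be registered for and invoked from truly atomic context (such as cpu_pm_enter()) on PREEMPT_RT as well. A minimal sketch of a chain user, with a hypothetical foo_chain:

  #include <linux/notifier.h>

  static ATOMIC_NOTIFIER_HEAD(foo_chain);

  /* callbacks on an atomic chain must not sleep */
  static int foo_callback(struct notifier_block *nb, unsigned long action,
                          void *data)
  {
          return NOTIFY_OK;
  }

  static struct notifier_block foo_nb = {
          .notifier_call = foo_callback,
  };

  static int foo_setup(void)
  {
          return atomic_notifier_chain_register(&foo_chain, &foo_nb);
  }

  static void foo_event(void)
  {
          /* safe from atomic context, including on PREEMPT_RT */
          atomic_notifier_call_chain(&foo_chain, 0, NULL);
  }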
diff --git a/features/rt/panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch b/features/rt/panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch
new file mode 100644
index 00000000..b8f71482
--- /dev/null
+++ b/features/rt/panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch
@@ -0,0 +1,33 @@
+From 487bb91d3835707e8324dfddf3a2a524072e62ef Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 14 Jul 2015 14:26:34 +0200
+Subject: [PATCH 147/191] panic: skip get_random_bytes for RT_FULL in
+ init_oops_id
+
+Disable on -RT. If this is invoked from irq-context we will have problems
+acquiring the sleeping lock.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/panic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 678529917d5d..14017d1b77b0 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -544,9 +544,11 @@ static u64 oops_id;
+
+ static int init_oops_id(void)
+ {
++#ifndef CONFIG_PREEMPT_RT
+ if (!oops_id)
+ get_random_bytes(&oops_id, sizeof(oops_id));
+ else
++#endif
+ oops_id++;
+
+ return 0;
+--
+2.19.1
+
diff --git a/features/rt/pid.h-include-atomic.h.patch b/features/rt/pid.h-include-atomic.h.patch
new file mode 100644
index 00000000..abe97a5c
--- /dev/null
+++ b/features/rt/pid.h-include-atomic.h.patch
@@ -0,0 +1,42 @@
+From e1d5c19b4e1fffa27d74e71fc8e15d416b2e15ea Mon Sep 17 00:00:00 2001
+From: Grygorii Strashko <Grygorii.Strashko@linaro.org>
+Date: Tue, 21 Jul 2015 19:43:56 +0300
+Subject: [PATCH 127/191] pid.h: include atomic.h
+
+This patch fixes the following build error:
+ CC kernel/pid_namespace.o
+In file included from kernel/pid_namespace.c:11:0:
+include/linux/pid.h: In function 'get_pid':
+include/linux/pid.h:78:3: error: implicit declaration of function 'atomic_inc' [-Werror=implicit-function-declaration]
+ atomic_inc(&pid->count);
+ ^
+which happens when
+ CONFIG_PROVE_LOCKING=n
+ CONFIG_DEBUG_SPINLOCK=n
+ CONFIG_DEBUG_MUTEXES=n
+ CONFIG_DEBUG_LOCK_ALLOC=n
+ CONFIG_PID_NS=y
+
+Vanilla gets this via spinlock.h.
+
+Signed-off-by: Grygorii Strashko <Grygorii.Strashko@linaro.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/pid.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/linux/pid.h b/include/linux/pid.h
+index fa10acb8d6a4..2f86f84e9fc1 100644
+--- a/include/linux/pid.h
++++ b/include/linux/pid.h
+@@ -3,6 +3,7 @@
+ #define _LINUX_PID_H
+
+ #include <linux/rculist.h>
++#include <linux/atomic.h>
+ #include <linux/wait.h>
+ #include <linux/refcount.h>
+
+--
+2.19.1
+
diff --git a/features/rt/powerpc-Add-support-for-lazy-preemption.patch b/features/rt/powerpc-Add-support-for-lazy-preemption.patch
new file mode 100644
index 00000000..4cc73824
--- /dev/null
+++ b/features/rt/powerpc-Add-support-for-lazy-preemption.patch
@@ -0,0 +1,215 @@
+From 2525cb41cb38be3c1639aed5deb00823f5a472f4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 1 Nov 2012 10:14:11 +0100
+Subject: [PATCH 167/191] powerpc: Add support for lazy preemption
+
+Implement the powerpc pieces for lazy preempt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/powerpc/Kconfig | 1 +
+ arch/powerpc/include/asm/thread_info.h | 7 +++++++
+ arch/powerpc/kernel/asm-offsets.c | 1 +
+ arch/powerpc/kernel/entry_32.S | 11 +++++++++--
+ arch/powerpc/kernel/exceptions-64e.S | 16 ++++++++++++----
+ arch/powerpc/kernel/interrupt.c | 10 +++++++---
+ 6 files changed, 37 insertions(+), 9 deletions(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 386ae12d8523..bbee9b2f2bc7 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -231,6 +231,7 @@ config PPC
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select MMU_GATHER_RCU_TABLE_FREE
+ select MMU_GATHER_PAGE_SIZE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
+index 386d576673a1..730ce15944ce 100644
+--- a/arch/powerpc/include/asm/thread_info.h
++++ b/arch/powerpc/include/asm/thread_info.h
+@@ -48,6 +48,8 @@
+ struct thread_info {
+ int preempt_count; /* 0 => preemptable,
+ <0 => BUG */
++ int preempt_lazy_count; /* 0 => preemptable,
++ <0 => BUG */
+ unsigned long local_flags; /* private flags for thread */
+ #ifdef CONFIG_LIVEPATCH
+ unsigned long *livepatch_sp;
+@@ -94,6 +96,7 @@ void arch_setup_new_exec(void);
+ #define TIF_PATCH_PENDING 6 /* pending live patching update */
+ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
+ #define TIF_SINGLESTEP 8 /* singlestepping active */
++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */
+ #define TIF_SECCOMP 10 /* secure computing */
+ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
+ #define TIF_NOERROR 12 /* Force successful syscall return */
+@@ -109,6 +112,7 @@ void arch_setup_new_exec(void);
+ #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+ #define TIF_32BIT 20 /* 32 bit binary */
+
++
+ /* as above, but as bit values */
+ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+ #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+@@ -120,6 +124,7 @@ void arch_setup_new_exec(void);
+ #define _TIF_PATCH_PENDING (1<<TIF_PATCH_PENDING)
+ #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+ #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
++#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY)
+ #define _TIF_SECCOMP (1<<TIF_SECCOMP)
+ #define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
+ #define _TIF_NOERROR (1<<TIF_NOERROR)
+@@ -133,10 +138,12 @@ void arch_setup_new_exec(void);
+ _TIF_SYSCALL_EMU)
+
+ #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
++ _TIF_NEED_RESCHED_LAZY | \
+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
+ _TIF_NOTIFY_SIGNAL)
+ #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
+ /* Bits in local_flags */
+ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index f3a662201a9f..1202c9c2e5b5 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -191,6 +191,7 @@ int main(void)
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
+ OFFSET(TI_PREEMPT, thread_info, preempt_count);
++ OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
+
+ #ifdef CONFIG_PPC64
+ OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 78c430b7f9d9..b778938b4a5b 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -674,7 +674,14 @@ resume_kernel:
+ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
+ bne restore_kuap
+ andi. r8,r8,_TIF_NEED_RESCHED
++ bne+ 1f
++ lwz r0,TI_PREEMPT_LAZY(r2)
++ cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
++ bne restore_kuap
++ lwz r0,TI_FLAGS(r2)
++ andi. r0,r0,_TIF_NEED_RESCHED_LAZY
+ beq+ restore_kuap
++1:
+ lwz r3,_MSR(r1)
+ andi. r0,r3,MSR_EE /* interrupts off? */
+ beq restore_kuap /* don't schedule if so */
+@@ -989,7 +996,7 @@ global_dbcr0:
+ #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
+
+ do_work: /* r10 contains MSR_KERNEL here */
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ beq do_user_signal
+
+ do_resched: /* r10 contains MSR_KERNEL here */
+@@ -1008,7 +1015,7 @@ recheck:
+ LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
+ mtmsr r10 /* disable interrupts */
+ lwz r9,TI_FLAGS(r2)
+- andi. r0,r9,_TIF_NEED_RESCHED
++ andi. r0,r9,_TIF_NEED_RESCHED_MASK
+ bne- do_resched
+ andi. r0,r9,_TIF_USER_WORK_MASK
+ beq restore_user
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index e8eb9992a270..6eb9599a3262 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -1074,7 +1074,7 @@ _GLOBAL(ret_from_except_lite)
+ li r10, -1
+ mtspr SPRN_DBSR,r10
+ b restore
+-1: andi. r0,r4,_TIF_NEED_RESCHED
++1: andi. r0,r4,_TIF_NEED_RESCHED_MASK
+ beq 2f
+ bl restore_interrupts
+ SCHEDULE_USER
+@@ -1126,12 +1126,20 @@ resume_kernel:
+ bne- 0b
+ 1:
+
+-#ifdef CONFIG_PREEMPT
++#ifdef CONFIG_PREEMPTION
+ /* Check if we need to preempt */
++ lwz r8,TI_PREEMPT(r9)
++ cmpwi 0,r8,0 /* if non-zero, just restore regs and return */
++ bne restore
+ andi. r0,r4,_TIF_NEED_RESCHED
++ bne+ check_count
++
++ andi. r0,r4,_TIF_NEED_RESCHED_LAZY
+ beq+ restore
++ lwz r8,TI_PREEMPT_LAZY(r9)
++
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+- lwz r8,TI_PREEMPT(r9)
++check_count:
+ cmpwi cr0,r8,0
+ bne restore
+ ld r0,SOFTE(r1)
+@@ -1152,7 +1160,7 @@ resume_kernel:
+ * interrupted after loading SRR0/1.
+ */
+ wrteei 0
+-#endif /* CONFIG_PREEMPT */
++#endif /* CONFIG_PREEMPTION */
+
+ restore:
+ /*
+diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
+index c475a229a42a..d6b69de1284c 100644
+--- a/arch/powerpc/kernel/interrupt.c
++++ b/arch/powerpc/kernel/interrupt.c
+@@ -286,7 +286,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
+ ti_flags = READ_ONCE(current_thread_info()->flags);
+ while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ local_irq_enable();
+- if (ti_flags & _TIF_NEED_RESCHED) {
++ if (ti_flags & _TIF_NEED_RESCHED_MASK) {
+ schedule();
+ } else {
+ /*
+@@ -381,7 +381,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
+ ti_flags = READ_ONCE(current_thread_info()->flags);
+ while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ local_irq_enable(); /* returning to user: may enable */
+- if (ti_flags & _TIF_NEED_RESCHED) {
++ if (ti_flags & _TIF_NEED_RESCHED_MASK) {
+ schedule();
+ } else {
+ if (ti_flags & _TIF_SIGPENDING)
+@@ -473,11 +473,15 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
+ /* Returning to a kernel context with local irqs enabled. */
+ WARN_ON_ONCE(!(regs->msr & MSR_EE));
+ again:
+- if (IS_ENABLED(CONFIG_PREEMPT)) {
++ if (IS_ENABLED(CONFIG_PREEMPTION)) {
+ /* Return to preemptible kernel context */
+ if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED)) {
+ if (preempt_count() == 0)
+ preempt_schedule_irq();
++ } else if (unlikely(current_thread_info()->flags & _TIF_NEED_RESCHED_LAZY)) {
++ if ((preempt_count() == 0) &&
++ (current_thread_info()->preempt_lazy_count == 0))
++ preempt_schedule_irq();
+ }
+ }
+
+--
+2.19.1
+
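The interrupt-exit logic added above can be hard to follow in assembly and diff form. Condensed into plain C, the return-to-kernel decision amounts to the following; this is only a restatement of the interrupt.c hunk, with preempt_lazy_count being the per-thread counter introduced by this patch:

  if (IS_ENABLED(CONFIG_PREEMPTION)) {
          unsigned long ti_flags = current_thread_info()->flags;

          if (unlikely(ti_flags & _TIF_NEED_RESCHED)) {
                  /* full preemption request: only preempt_count() gates it */
                  if (preempt_count() == 0)
                          preempt_schedule_irq();
          } else if (unlikely(ti_flags & _TIF_NEED_RESCHED_LAZY)) {
                  /* lazy request: additionally honour the lazy counter */
                  if (preempt_count() == 0 &&
                      current_thread_info()->preempt_lazy_count == 0)
                          preempt_schedule_irq();
          }
  }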
diff --git a/features/rt/powerpc-Avoid-recursive-header-includes.patch b/features/rt/powerpc-Avoid-recursive-header-includes.patch
new file mode 100644
index 00000000..cfecd330
--- /dev/null
+++ b/features/rt/powerpc-Avoid-recursive-header-includes.patch
@@ -0,0 +1,47 @@
+From b8c2c7090ec3f3373205ff4b223ac447159c4c96 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 8 Jan 2021 19:48:21 +0100
+Subject: [PATCH 184/191] powerpc: Avoid recursive header includes
+
+- The include of bug.h leads to an include of printk.h which gets back
+  to spinlock.h and then complains about a missing xchg().
+ Remove bug.h and add bits.h which is needed for BITS_PER_BYTE.
+
+- Avoid the "please don't include this file directly" error from
+ rwlock-rt. Allow an include from/with rtmutex.h.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/include/asm/cmpxchg.h | 2 +-
+ arch/powerpc/include/asm/simple_spinlock_types.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
+index cf091c4c22e5..7371f7e23c35 100644
+--- a/arch/powerpc/include/asm/cmpxchg.h
++++ b/arch/powerpc/include/asm/cmpxchg.h
+@@ -5,7 +5,7 @@
+ #ifdef __KERNEL__
+ #include <linux/compiler.h>
+ #include <asm/synch.h>
+-#include <linux/bug.h>
++#include <linux/bits.h>
+
+ #ifdef __BIG_ENDIAN
+ #define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
+diff --git a/arch/powerpc/include/asm/simple_spinlock_types.h b/arch/powerpc/include/asm/simple_spinlock_types.h
+index 0f3cdd8faa95..d45561e9e6ba 100644
+--- a/arch/powerpc/include/asm/simple_spinlock_types.h
++++ b/arch/powerpc/include/asm/simple_spinlock_types.h
+@@ -2,7 +2,7 @@
+ #ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
+ #define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
++#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__LINUX_RT_MUTEX_H)
+ # error "please don't include this file directly"
+ #endif
+
+--
+2.19.1
+
diff --git a/features/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/features/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
new file mode 100644
index 00000000..bc6061ec
--- /dev/null
+++ b/features/rt/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
@@ -0,0 +1,44 @@
+From 4496674f6fc7117a30d0c2ea5c994c9675808e69 Mon Sep 17 00:00:00 2001
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Date: Fri, 24 Apr 2015 15:53:13 +0000
+Subject: [PATCH 182/191] powerpc/kvm: Disable in-kernel MPIC emulation for
+ PREEMPT_RT
+
+While converting the openpic emulation code to use a raw_spinlock_t enables
+guests to run on RT, there's still a performance issue. For interrupts sent in
+directed delivery mode with a multiple CPU mask, the emulated openpic will loop
+through all of the VCPUs, and for each VCPU it calls IRQ_check, which will loop
+through all the pending interrupts for that VCPU. This is done while holding the
+raw_lock, meaning that for all this time interrupts and preemption are
+disabled on the host Linux. A malicious user app can max out both these numbers and
+cause a DoS.
+
+This temporary fix is sent for two reasons. First is so that users who want to
+use the in-kernel MPIC emulation are aware of the potential latencies, thus
+making sure that the hardware MPIC and their usage scenario does not involve
+interrupts sent in directed delivery mode, and the number of possible pending
+interrupts is kept small. Secondly, this should incentivize the development of a
+proper openpic emulation that would be better suited for RT.
+
+Acked-by: Scott Wood <scottwood@freescale.com>
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/kvm/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
+index e45644657d49..b826174ce983 100644
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -179,6 +179,7 @@ config KVM_E500MC
+ config KVM_MPIC
+ bool "KVM in-kernel MPIC emulation"
+ depends on KVM && E500
++ depends on !PREEMPT_RT
+ select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_IRQFD
+ select HAVE_KVM_IRQ_ROUTING
+--
+2.19.1
+
diff --git a/features/rt/powerpc-mm-Move-the-linear_mapping_mutex-to-the-ifde.patch b/features/rt/powerpc-mm-Move-the-linear_mapping_mutex-to-the-ifde.patch
new file mode 100644
index 00000000..9f28057d
--- /dev/null
+++ b/features/rt/powerpc-mm-Move-the-linear_mapping_mutex-to-the-ifde.patch
@@ -0,0 +1,43 @@
+From 36035164cb4bbf6cc044ff5d6f481c528ec34fb3 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 19 Feb 2021 17:51:07 +0100
+Subject: [PATCH 006/191] powerpc/mm: Move the linear_mapping_mutex to the
+ ifdef where it is used
+
+The mutex linear_mapping_mutex is defined at the top of the file while its
+only two users are within the CONFIG_MEMORY_HOTPLUG block.
+A compile without CONFIG_MEMORY_HOTPLUG set fails on PREEMPT_RT because
+its mutex implementation is smart enough to realize that it is unused.
+
+Move the definition of linear_mapping_mutex into the ifdef block where it is
+used.
+
+Fixes: 1f73ad3e8d755 ("powerpc/mm: print warning in arch_remove_linear_mapping()")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/mm/mem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 4e8ce6d85232..7a59a5c9aa5d 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -54,7 +54,6 @@
+
+ #include <mm/mmu_decl.h>
+
+-static DEFINE_MUTEX(linear_mapping_mutex);
+ unsigned long long memory_limit;
+ bool init_mem_is_free;
+
+@@ -72,6 +71,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ EXPORT_SYMBOL(phys_mem_access_prot);
+
+ #ifdef CONFIG_MEMORY_HOTPLUG
++static DEFINE_MUTEX(linear_mapping_mutex);
+
+ #ifdef CONFIG_NUMA
+ int memory_add_physaddr_to_nid(u64 start)
+--
+2.19.1
+
diff --git a/features/rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch b/features/rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
new file mode 100644
index 00000000..d3850690
--- /dev/null
+++ b/features/rt/powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
@@ -0,0 +1,116 @@
+From 4478a8edce91a9ecc083e0a1c892bc61e03debb5 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 26 Mar 2019 18:31:54 +0100
+Subject: [PATCH 181/191] powerpc/pseries/iommu: Use a locallock instead
+ local_irq_save()
+
+The per-CPU variable tce_page is currently protected by disabling
+interrupts, and the code may even allocate memory while that protection
+is held.
+
+Use a local_lock for tce_page instead of local_irq_save()/local_irq_disable().
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/platforms/pseries/iommu.c | 31 +++++++++++++++++---------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 9fc5217f0c8e..4fdb9370b913 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -24,6 +24,7 @@
+ #include <linux/of.h>
+ #include <linux/iommu.h>
+ #include <linux/rculist.h>
++#include <linux/local_lock.h>
+ #include <asm/io.h>
+ #include <asm/prom.h>
+ #include <asm/rtas.h>
+@@ -190,7 +191,13 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
+ return ret;
+ }
+
+-static DEFINE_PER_CPU(__be64 *, tce_page);
++struct tce_page {
++ __be64 * page;
++ local_lock_t lock;
++};
++static DEFINE_PER_CPU(struct tce_page, tce_page) = {
++ .lock = INIT_LOCAL_LOCK(lock),
++};
+
+ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ long npages, unsigned long uaddr,
+@@ -212,9 +219,10 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ direction, attrs);
+ }
+
+- local_irq_save(flags); /* to protect tcep and the page behind it */
++ /* to protect tcep and the page behind it */
++ local_lock_irqsave(&tce_page.lock, flags);
+
+- tcep = __this_cpu_read(tce_page);
++ tcep = __this_cpu_read(tce_page.page);
+
+ /* This is safe to do since interrupts are off when we're called
+ * from iommu_alloc{,_sg}()
+@@ -223,12 +231,12 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
+ /* If allocation fails, fall back to the loop implementation */
+ if (!tcep) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&tce_page.lock, flags);
+ return tce_build_pSeriesLP(tbl->it_index, tcenum,
+ tbl->it_page_shift,
+ npages, uaddr, direction, attrs);
+ }
+- __this_cpu_write(tce_page, tcep);
++ __this_cpu_write(tce_page.page, tcep);
+ }
+
+ rpn = __pa(uaddr) >> TCE_SHIFT;
+@@ -258,7 +266,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+ tcenum += limit;
+ } while (npages > 0 && !rc);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&tce_page.lock, flags);
+
+ if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
+ ret = (int)rc;
+@@ -429,16 +437,17 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
+ DMA_BIDIRECTIONAL, 0);
+ }
+
+- local_irq_disable(); /* to protect tcep and the page behind it */
+- tcep = __this_cpu_read(tce_page);
++ /* to protect tcep and the page behind it */
++ local_lock_irq(&tce_page.lock);
++ tcep = __this_cpu_read(tce_page.page);
+
+ if (!tcep) {
+ tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
+ if (!tcep) {
+- local_irq_enable();
++ local_unlock_irq(&tce_page.lock);
+ return -ENOMEM;
+ }
+- __this_cpu_write(tce_page, tcep);
++ __this_cpu_write(tce_page.page, tcep);
+ }
+
+ proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
+@@ -481,7 +490,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
+
+ /* error cleanup: caller will clear whole range */
+
+- local_irq_enable();
++ local_unlock_irq(&tce_page.lock);
+ return rc;
+ }
+
+--
+2.19.1
+
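The pattern used here — bundling the per-CPU data with a local_lock_t so that the protection has a name, an owner and a lockdep map — is generic. A minimal sketch with invented foo_* names (the real code guards tce_page as shown in the hunks above):

  #include <linux/local_lock.h>
  #include <linux/percpu.h>
  #include <linux/gfp.h>
  #include <linux/errno.h>

  struct foo_pcpu {
          void *buf;
          local_lock_t lock;
  };

  static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu) = {
          .lock = INIT_LOCAL_LOCK(lock),
  };

  static int foo_use_buf(void)
  {
          unsigned long flags;
          void *buf;

          /* irqsave on !RT, a per-CPU sleeping lock on PREEMPT_RT */
          local_lock_irqsave(&foo_pcpu.lock, flags);
          buf = __this_cpu_read(foo_pcpu.buf);
          if (!buf) {
                  buf = (void *)__get_free_page(GFP_ATOMIC);
                  if (!buf) {
                          local_unlock_irqrestore(&foo_pcpu.lock, flags);
                          return -ENOMEM;
                  }
                  __this_cpu_write(foo_pcpu.buf, buf);
          }
          /* ... fill and use buf ... */
          local_unlock_irqrestore(&foo_pcpu.lock, flags);
          return 0;
  }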
diff --git a/features/rt/powerpc-stackprotector-work-around-stack-guard-init-.patch b/features/rt/powerpc-stackprotector-work-around-stack-guard-init-.patch
new file mode 100644
index 00000000..4d3d3734
--- /dev/null
+++ b/features/rt/powerpc-stackprotector-work-around-stack-guard-init-.patch
@@ -0,0 +1,35 @@
+From 77000da2f2cf79d495a6d07abfbe32ebf4cd279f Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 26 Mar 2019 18:31:29 +0100
+Subject: [PATCH 183/191] powerpc/stackprotector: work around stack-guard init
+ from atomic
+
+This is invoked from the secondary CPU in atomic context. On x86 the TSC
+is used instead. On Power the canary is XORed against mftb(), so let's use
+the stack address as the initial value.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/include/asm/stackprotector.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
+index 1c8460e23583..b1653c160bab 100644
+--- a/arch/powerpc/include/asm/stackprotector.h
++++ b/arch/powerpc/include/asm/stackprotector.h
+@@ -24,7 +24,11 @@ static __always_inline void boot_init_stack_canary(void)
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
++#ifdef CONFIG_PREEMPT_RT
++ canary = (unsigned long)&canary;
++#else
+ canary = get_random_canary();
++#endif
+ canary ^= mftb();
+ canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
+--
+2.19.1
+
diff --git a/features/rt/powerpc-traps-Use-PREEMPT_RT.patch b/features/rt/powerpc-traps-Use-PREEMPT_RT.patch
new file mode 100644
index 00000000..63c650aa
--- /dev/null
+++ b/features/rt/powerpc-traps-Use-PREEMPT_RT.patch
@@ -0,0 +1,38 @@
+From 3411ea60697a880d9c13ccedd06aa313a384b569 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 26 Jul 2019 11:30:49 +0200
+Subject: [PATCH 180/191] powerpc: traps: Use PREEMPT_RT
+
+Add PREEMPT_RT to the backtrace if enabled.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/kernel/traps.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index 5828c83eaca6..6d0f70abed87 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -260,12 +260,17 @@ static char *get_mmu_str(void)
+
+ static int __die(const char *str, struct pt_regs *regs, long err)
+ {
++ const char *pr = "";
++
+ printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
+
++ if (IS_ENABLED(CONFIG_PREEMPTION))
++ pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
++
+ printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
+ IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
+ PAGE_SIZE / 1024, get_mmu_str(),
+- IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
++ pr,
+ IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
+ IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
+ debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
+--
+2.19.1
+
diff --git a/features/rt/preempt-Provide-preempt_-_-no-rt-variants.patch b/features/rt/preempt-Provide-preempt_-_-no-rt-variants.patch
new file mode 100644
index 00000000..c186b8ae
--- /dev/null
+++ b/features/rt/preempt-Provide-preempt_-_-no-rt-variants.patch
@@ -0,0 +1,52 @@
+From 6151dd51e19fcd371b8b3690f38f5065c8ce383b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 24 Jul 2009 12:38:56 +0200
+Subject: [PATCH 084/191] preempt: Provide preempt_*_(no)rt variants
+
+RT needs a few preempt_disable/enable points which are not necessary
+otherwise. Implement variants to avoid #ifdeffery.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/preempt.h | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 4d244e295e85..5ceac863e729 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -188,7 +188,11 @@ do { \
+ preempt_count_dec(); \
+ } while (0)
+
+-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++#ifdef CONFIG_PREEMPT_RT
++# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++#else
++# define preempt_enable_no_resched() preempt_enable()
++#endif
+
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+
+@@ -282,6 +286,18 @@ do { \
+ set_preempt_need_resched(); \
+ } while (0)
+
++#ifdef CONFIG_PREEMPT_RT
++# define preempt_disable_rt() preempt_disable()
++# define preempt_enable_rt() preempt_enable()
++# define preempt_disable_nort() barrier()
++# define preempt_enable_nort() barrier()
++#else
++# define preempt_disable_rt() barrier()
++# define preempt_enable_rt() barrier()
++# define preempt_disable_nort() preempt_disable()
++# define preempt_enable_nort() preempt_enable()
++#endif
++
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+
+ struct preempt_notifier;
+--
+2.19.1
+
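How callers are expected to pick between the two pairs, in a short sketch (the foo_* functions are placeholders):

  #include <linux/preempt.h>

  static void foo_rt_only_section(void)
  {
          /* real preempt_disable() on PREEMPT_RT, a plain barrier() otherwise */
          preempt_disable_rt();
          /* ... state that only becomes racy once spinlocks can sleep ... */
          preempt_enable_rt();
  }

  static void foo_nort_only_section(void)
  {
          /* real preempt_disable() on !PREEMPT_RT, a plain barrier() on RT */
          preempt_disable_nort();
          /* ... short critical section that RT wants to keep preemptible ... */
          preempt_enable_nort();
  }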
diff --git a/features/rt/printk-add-console-handover.patch b/features/rt/printk-add-console-handover.patch
new file mode 100644
index 00000000..bb55e3b1
--- /dev/null
+++ b/features/rt/printk-add-console-handover.patch
@@ -0,0 +1,75 @@
+From a87105bd119478687e8f73298fd74f6594f922ad Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:09 +0106
+Subject: [PATCH 032/191] printk: add console handover
+
+If earlyprintk is used, a boot console will print directly to the
+console immediately. The boot console will unregister itself as soon
+as a non-boot console registers. However, the non-boot console does
+not begin printing until its kthread has started. Since this happens
+much later, there is a long pause in the console output. If the
+ringbuffer is small, messages could even be dropped during the
+pause.
+
+Add a new CON_HANDOVER console flag to be used internally by printk
+in order to track which non-boot console took over from a boot
+console. If handover consoles have implemented write_atomic(), they
+are allowed to print directly to the console until their kthread can
+take over.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 1 +
+ kernel/printk/printk.c | 8 +++++++-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/console.h b/include/linux/console.h
+index b370e37a6d49..69bfff368294 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -137,6 +137,7 @@ static inline int con_debug_leave(void)
+ #define CON_ANYTIME (16) /* Safe to call when cpu is offline */
+ #define CON_BRL (32) /* Used for a braille device */
+ #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
++#define CON_HANDOVER (128) /* Device was previously a boot console. */
+
+ struct console {
+ char name[16];
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index d1b25ac5c73b..ee34245bf08a 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1726,6 +1726,8 @@ static bool console_can_sync(struct console *con)
+ return false;
+ if (con->write_atomic && kernel_sync_mode())
+ return true;
++ if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread)
++ return true;
+ if (con->write && (con->flags & CON_BOOT) && !con->thread)
+ return true;
+ return false;
+@@ -1737,6 +1739,8 @@ static bool call_sync_console_driver(struct console *con, const char *text, size
+ return false;
+ if (con->write_atomic && kernel_sync_mode())
+ con->write_atomic(con, text, text_len);
++ else if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread)
++ con->write_atomic(con, text, text_len);
+ else if (con->write && (con->flags & CON_BOOT) && !con->thread)
+ con->write(con, text, text_len);
+ else
+@@ -2829,8 +2833,10 @@ void register_console(struct console *newcon)
+ * the real console are the same physical device, it's annoying to
+ * see the beginning boot messages twice
+ */
+- if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
++ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
+ newcon->flags &= ~CON_PRINTBUFFER;
++ newcon->flags |= CON_HANDOVER;
++ }
+
+ /*
+ * Put this console in the list - keep the
+--
+2.19.1
+
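CON_HANDOVER is set by printk itself during register_console(); a driver only has to provide write_atomic() to be allowed to print directly during the handover window. A rough sketch of such a console is below. The write_atomic() prototype is assumed to mirror write(), as introduced earlier in this series by console-add-write_atomic-interface.patch, and all foo_* names are invented.

  #include <linux/console.h>

  static void foo_console_write(struct console *con, const char *s,
                                unsigned int count)
  {
          /* normal path: may take locks, called from the printing kthread */
  }

  /* assumed to have the same prototype as write() in this series */
  static void foo_console_write_atomic(struct console *con, const char *s,
                                       unsigned int count)
  {
          /* lockless/polling path: must be safe from any context */
  }

  static struct console foo_console = {
          .name           = "foouart",
          .write          = foo_console_write,
          .write_atomic   = foo_console_write_atomic,
          .flags          = CON_PRINTBUFFER,
          .index          = -1,
  };

  /* register_console(&foo_console) as usual; printk flags it CON_HANDOVER
   * itself if it takes over from a boot console. */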
diff --git a/features/rt/printk-add-pr_flush.patch b/features/rt/printk-add-pr_flush.patch
new file mode 100644
index 00000000..727a046d
--- /dev/null
+++ b/features/rt/printk-add-pr_flush.patch
@@ -0,0 +1,210 @@
+From 36974665afc2e7e5cfc1e1d802fbb1d9eb17a510 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:10 +0106
+Subject: [PATCH 033/191] printk: add pr_flush()
+
+Provide a function to allow waiting for console printers to catch
+up to the latest logged message.
+
+Use pr_flush() to give console printers a chance to finish in
+critical situations if no atomic console is available. For now
+pr_flush() is only used in the most common error paths:
+panic(), print_oops_end_marker(), report_bug(), kmsg_dump().
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/printk.h | 2 ++
+ kernel/panic.c | 28 +++++++++------
+ kernel/printk/printk.c | 79 ++++++++++++++++++++++++++++++++++++++++++
+ lib/bug.c | 1 +
+ 4 files changed, 99 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 153212445b68..7e4352467d83 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -481,6 +481,8 @@ extern int kptr_restrict;
+ no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+ #endif
+
++bool pr_flush(int timeout_ms, bool reset_on_progress);
++
+ /*
+ * ratelimited messages with local ratelimit_state,
+ * no local ratelimit_state used in the !PRINTK case
+diff --git a/kernel/panic.c b/kernel/panic.c
+index c722faaae44b..678529917d5d 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -177,12 +177,28 @@ static void panic_print_sys_info(void)
+ void panic(const char *fmt, ...)
+ {
+ static char buf[1024];
++ va_list args2;
+ va_list args;
+ long i, i_next = 0, len;
+ int state = 0;
+ int old_cpu, this_cpu;
+ bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
+
++ console_verbose();
++ pr_emerg("Kernel panic - not syncing:\n");
++ va_start(args2, fmt);
++ va_copy(args, args2);
++ vprintk(fmt, args2);
++ va_end(args2);
++#ifdef CONFIG_DEBUG_BUGVERBOSE
++ /*
++ * Avoid nested stack-dumping if a panic occurs during oops processing
++ */
++ if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
++ dump_stack();
++#endif
++ pr_flush(1000, true);
++
+ /*
+ * Disable local interrupts. This will prevent panic_smp_self_stop
+ * from deadlocking the first cpu that invokes the panic, since
+@@ -213,24 +229,13 @@ void panic(const char *fmt, ...)
+ if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
+ panic_smp_self_stop();
+
+- console_verbose();
+ bust_spinlocks(1);
+- va_start(args, fmt);
+ len = vscnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ if (len && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+- pr_emerg("Kernel panic - not syncing: %s\n", buf);
+-#ifdef CONFIG_DEBUG_BUGVERBOSE
+- /*
+- * Avoid nested stack-dumping if a panic occurs during oops processing
+- */
+- if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
+- dump_stack();
+-#endif
+-
+ /*
+ * If kgdb is enabled, give it a chance to run before we stop all
+ * the other CPUs or else we won't be able to debug processes left
+@@ -552,6 +557,7 @@ static void print_oops_end_marker(void)
+ {
+ init_oops_id();
+ pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
++ pr_flush(1000, true);
+ }
+
+ /*
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index ee34245bf08a..a5fc854977bb 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3228,6 +3228,12 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ sync_mode = true;
+ pr_info("enabled sync mode\n");
+ }
++
++ /*
++ * Give the printing threads time to flush, allowing up to
++ * 1s of no printing forward progress before giving up.
++ */
++ pr_flush(1000, true);
+ }
+
+ rcu_read_lock();
+@@ -3507,3 +3513,76 @@ void console_atomic_unlock(unsigned int flags)
+ prb_unlock(&printk_cpulock, flags);
+ }
+ EXPORT_SYMBOL(console_atomic_unlock);
++
++static void pr_msleep(bool may_sleep, int ms)
++{
++ if (may_sleep) {
++ msleep(ms);
++ } else {
++ while (ms--)
++ udelay(1000);
++ }
++}
++
++/**
++ * pr_flush() - Wait for printing threads to catch up.
++ *
++ * @timeout_ms: The maximum time (in ms) to wait.
++ * @reset_on_progress: Reset the timeout if forward progress is seen.
++ *
++ * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
++ * represents infinite waiting.
++ *
++ * If @reset_on_progress is true, the timeout will be reset whenever any
++ * printer has been seen to make some forward progress.
++ *
++ * Context: Any context.
++ * Return: true if all enabled printers are caught up.
++ */
++bool pr_flush(int timeout_ms, bool reset_on_progress)
++{
++ int remaining = timeout_ms;
++ struct console *con;
++ u64 last_diff = 0;
++ bool may_sleep;
++ u64 printk_seq;
++ u64 diff;
++ u64 seq;
++
++ may_sleep = (preemptible() && !in_softirq());
++
++ seq = prb_next_seq(prb);
++
++ for (;;) {
++ diff = 0;
++
++ for_each_console(con) {
++ if (!(con->flags & CON_ENABLED))
++ continue;
++ printk_seq = atomic64_read(&con->printk_seq);
++ if (printk_seq < seq)
++ diff += seq - printk_seq;
++ }
++
++ if (diff != last_diff && reset_on_progress)
++ remaining = timeout_ms;
++
++ if (!diff || remaining == 0)
++ break;
++
++ if (remaining < 0) {
++ pr_msleep(may_sleep, 100);
++ } else if (remaining < 100) {
++ pr_msleep(may_sleep, remaining);
++ remaining = 0;
++ } else {
++ pr_msleep(may_sleep, 100);
++ remaining -= 100;
++ }
++
++ last_diff = diff;
++ }
++
++ return (diff == 0);
++}
++EXPORT_SYMBOL(pr_flush);
+diff --git a/lib/bug.c b/lib/bug.c
+index 8f9d537bfb2a..8696908372d2 100644
+--- a/lib/bug.c
++++ b/lib/bug.c
+@@ -202,6 +202,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
+ else
+ pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
+ (void *)bugaddr);
++ pr_flush(1000, true);
+
+ return BUG_TRAP_TYPE_BUG;
+ }
+--
+2.19.1
+
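A caller-side sketch mirroring the call sites added above (panic(), print_oops_end_marker(), report_bug(), kmsg_dump()); foo_report_fatal_error() is a made-up example, the pr_flush() arguments are the ones used in the patch:

  #include <linux/printk.h>

  static void foo_report_fatal_error(void)
  {
          pr_emerg("foo: something went badly wrong\n");

          /*
           * Allow up to 1s of no printing forward progress before giving
           * up; the timeout restarts whenever any console makes progress
           * (reset_on_progress == true).
           */
          if (!pr_flush(1000, true)) {
                  /* at least one enabled console did not catch up */
          }
  }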
diff --git a/features/rt/printk-add-syslog_lock.patch b/features/rt/printk-add-syslog_lock.patch
new file mode 100644
index 00000000..741d96e2
--- /dev/null
+++ b/features/rt/printk-add-syslog_lock.patch
@@ -0,0 +1,158 @@
+From 2b8baa6554441c2caba94ff88245cf653796b8cf Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 10 Dec 2020 16:58:02 +0106
+Subject: [PATCH 014/191] printk: add syslog_lock
+
+The global variables @syslog_seq, @syslog_partial, @syslog_time
+and write access to @clear_seq are protected by @logbuf_lock.
+Once @logbuf_lock is removed, these variables will need their
+own synchronization method. Introduce @syslog_lock for this
+purpose.
+
+@syslog_lock is a raw_spin_lock for now. This simplifies the
+transition to removing @logbuf_lock. Once @logbuf_lock and the
+safe buffers are removed, @syslog_lock can change to spin_lock.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 41 +++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 37 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 0031bb2156d1..713d09843d23 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -390,8 +390,12 @@ DEFINE_RAW_SPINLOCK(logbuf_lock);
+ printk_safe_exit_irqrestore(flags); \
+ } while (0)
+
++/* syslog_lock protects syslog_* variables and write access to clear_seq. */
++static DEFINE_RAW_SPINLOCK(syslog_lock);
++
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
++/* All 3 protected by @syslog_lock. */
+ /* the next printk record to read by syslog(READ) or /proc/kmsg */
+ static u64 syslog_seq;
+ static size_t syslog_partial;
+@@ -410,7 +414,7 @@ struct latched_seq {
+ /*
+ * The next printk record to read after the last 'clear' command. There are
+ * two copies (updated with seqcount_latch) so that reads can locklessly
+- * access a valid value. Writers are synchronized by @logbuf_lock.
++ * access a valid value. Writers are synchronized by @syslog_lock.
+ */
+ static struct latched_seq clear_seq = {
+ .latch = SEQCNT_LATCH_ZERO(clear_seq.latch),
+@@ -470,7 +474,7 @@ bool printk_percpu_data_ready(void)
+ return __printk_percpu_data_ready;
+ }
+
+-/* Must be called under logbuf_lock. */
++/* Must be called under syslog_lock. */
+ static void latched_seq_write(struct latched_seq *ls, u64 val)
+ {
+ raw_write_seqcount_latch(&ls->latch);
+@@ -1529,7 +1533,9 @@ static int syslog_print(char __user *buf, int size)
+ size_t skip;
+
+ logbuf_lock_irq();
++ raw_spin_lock(&syslog_lock);
+ if (!prb_read_valid(prb, syslog_seq, &r)) {
++ raw_spin_unlock(&syslog_lock);
+ logbuf_unlock_irq();
+ break;
+ }
+@@ -1559,6 +1565,7 @@ static int syslog_print(char __user *buf, int size)
+ syslog_partial += n;
+ } else
+ n = 0;
++ raw_spin_unlock(&syslog_lock);
+ logbuf_unlock_irq();
+
+ if (!n)
+@@ -1625,8 +1632,11 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ break;
+ }
+
+- if (clear)
++ if (clear) {
++ raw_spin_lock(&syslog_lock);
+ latched_seq_write(&clear_seq, seq);
++ raw_spin_unlock(&syslog_lock);
++ }
+ logbuf_unlock_irq();
+
+ kfree(text);
+@@ -1636,10 +1646,24 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ static void syslog_clear(void)
+ {
+ logbuf_lock_irq();
++ raw_spin_lock(&syslog_lock);
+ latched_seq_write(&clear_seq, prb_next_seq(prb));
++ raw_spin_unlock(&syslog_lock);
+ logbuf_unlock_irq();
+ }
+
++/* Return a consistent copy of @syslog_seq. */
++static u64 read_syslog_seq_irq(void)
++{
++ u64 seq;
++
++ raw_spin_lock_irq(&syslog_lock);
++ seq = syslog_seq;
++ raw_spin_unlock_irq(&syslog_lock);
++
++ return seq;
++}
++
+ int do_syslog(int type, char __user *buf, int len, int source)
+ {
+ struct printk_info info;
+@@ -1663,8 +1687,9 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ return 0;
+ if (!access_ok(buf, len))
+ return -EFAULT;
++
+ error = wait_event_interruptible(log_wait,
+- prb_read_valid(prb, syslog_seq, NULL));
++ prb_read_valid(prb, read_syslog_seq_irq(), NULL));
+ if (error)
+ return error;
+ error = syslog_print(buf, len);
+@@ -1713,8 +1738,10 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ /* Number of chars in the log buffer */
+ case SYSLOG_ACTION_SIZE_UNREAD:
+ logbuf_lock_irq();
++ raw_spin_lock(&syslog_lock);
+ if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
+ /* No unread messages. */
++ raw_spin_unlock(&syslog_lock);
+ logbuf_unlock_irq();
+ return 0;
+ }
+@@ -1743,6 +1770,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ }
+ error -= syslog_partial;
+ }
++ raw_spin_unlock(&syslog_lock);
+ logbuf_unlock_irq();
+ break;
+ /* Size of the log buffer */
+@@ -2992,7 +3020,12 @@ void register_console(struct console *newcon)
+ */
+ exclusive_console = newcon;
+ exclusive_console_stop_seq = console_seq;
++
++ /* Get a consistent copy of @syslog_seq. */
++ raw_spin_lock(&syslog_lock);
+ console_seq = syslog_seq;
++ raw_spin_unlock(&syslog_lock);
++
+ logbuf_unlock_irqrestore(flags);
+ }
+ console_unlock();
+--
+2.19.1
+
diff --git a/features/rt/printk-change-console_seq-to-atomic64_t.patch b/features/rt/printk-change-console_seq-to-atomic64_t.patch
new file mode 100644
index 00000000..36f793a1
--- /dev/null
+++ b/features/rt/printk-change-console_seq-to-atomic64_t.patch
@@ -0,0 +1,131 @@
+From b90a5d78be04a9b45524b66bd55a22e56f247633 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:05 +0106
+Subject: [PATCH 028/191] printk: change @console_seq to atomic64_t
+
+In preparation for atomic printing, change @console_seq to atomic
+so that it can be accessed without requiring @console_sem.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 34 +++++++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 2b13deb971b9..28bd7a7807bc 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -366,12 +366,13 @@ static u64 syslog_seq;
+ static size_t syslog_partial;
+ static bool syslog_time;
+
+-/* All 3 protected by @console_sem. */
+-/* the next printk record to write to the console */
+-static u64 console_seq;
++/* Both protected by @console_sem. */
+ static u64 exclusive_console_stop_seq;
+ static unsigned long console_dropped;
+
++/* the next printk record to write to the console */
++static atomic64_t console_seq = ATOMIC64_INIT(0);
++
+ struct latched_seq {
+ seqcount_latch_t latch;
+ u64 val[2];
+@@ -2270,7 +2271,7 @@ EXPORT_SYMBOL(printk);
+ #define prb_first_valid_seq(rb) 0
+
+ static u64 syslog_seq;
+-static u64 console_seq;
++static atomic64_t console_seq = ATOMIC64_INIT(0);
+ static u64 exclusive_console_stop_seq;
+ static unsigned long console_dropped;
+
+@@ -2585,6 +2586,7 @@ void console_unlock(void)
+ bool do_cond_resched, retry;
+ struct printk_info info;
+ struct printk_record r;
++ u64 seq;
+
+ if (console_suspended) {
+ up_console_sem();
+@@ -2627,12 +2629,14 @@ void console_unlock(void)
+ size_t len;
+
+ skip:
+- if (!prb_read_valid(prb, console_seq, &r))
++ seq = atomic64_read(&console_seq);
++ if (!prb_read_valid(prb, seq, &r))
+ break;
+
+- if (console_seq != r.info->seq) {
+- console_dropped += r.info->seq - console_seq;
+- console_seq = r.info->seq;
++ if (seq != r.info->seq) {
++ console_dropped += r.info->seq - seq;
++ atomic64_set(&console_seq, r.info->seq);
++ seq = r.info->seq;
+ }
+
+ if (suppress_message_printing(r.info->level)) {
+@@ -2641,13 +2645,13 @@ void console_unlock(void)
+ * directly to the console when we received it, and
+ * record that has level above the console loglevel.
+ */
+- console_seq++;
++ atomic64_set(&console_seq, seq + 1);
+ goto skip;
+ }
+
+ /* Output to all consoles once old messages replayed. */
+ if (unlikely(exclusive_console &&
+- console_seq >= exclusive_console_stop_seq)) {
++ seq >= exclusive_console_stop_seq)) {
+ exclusive_console = NULL;
+ }
+
+@@ -2668,7 +2672,7 @@ void console_unlock(void)
+ len = record_print_text(&r,
+ console_msg_format & MSG_FORMAT_SYSLOG,
+ printk_time);
+- console_seq++;
++ atomic64_set(&console_seq, seq + 1);
+
+ /*
+ * While actively printing out messages, if another printk()
+@@ -2699,7 +2703,7 @@ void console_unlock(void)
+ * there's a new owner and the console_unlock() from them will do the
+ * flush, no worries.
+ */
+- retry = prb_read_valid(prb, console_seq, NULL);
++ retry = prb_read_valid(prb, atomic64_read(&console_seq), NULL);
+ if (retry && console_trylock())
+ goto again;
+ }
+@@ -2762,7 +2766,7 @@ void console_flush_on_panic(enum con_flush_mode mode)
+ console_may_schedule = 0;
+
+ if (mode == CONSOLE_REPLAY_ALL)
+- console_seq = prb_first_valid_seq(prb);
++ atomic64_set(&console_seq, prb_first_valid_seq(prb));
+ console_unlock();
+ }
+
+@@ -2999,11 +3003,11 @@ void register_console(struct console *newcon)
+ * ignores console_lock.
+ */
+ exclusive_console = newcon;
+- exclusive_console_stop_seq = console_seq;
++ exclusive_console_stop_seq = atomic64_read(&console_seq);
+
+ /* Get a consistent copy of @syslog_seq. */
+ spin_lock_irqsave(&syslog_lock, flags);
+- console_seq = syslog_seq;
++ atomic64_set(&console_seq, syslog_seq);
+ spin_unlock_irqrestore(&syslog_lock, flags);
+ }
+ console_unlock();
+--
+2.19.1
+
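The point of atomic64_t over a plain u64 is that later patches read the console position without holding console_sem (for example from pr_flush()); atomic64_read()/atomic64_set() guarantee an untorn 64-bit value even on 32-bit architectures. The access pattern in isolation, with foo_console_seq standing in for @console_seq:

  #include <linux/atomic.h>
  #include <linux/types.h>

  static atomic64_t foo_console_seq = ATOMIC64_INIT(0);

  /* printing side: still serialized by console_sem */
  static void foo_consume_one_record(void)
  {
          u64 seq = atomic64_read(&foo_console_seq);

          /* ... print the record at seq ... */
          atomic64_set(&foo_console_seq, seq + 1);
  }

  /* observer side: no console_sem needed for a consistent snapshot */
  static u64 foo_console_position(void)
  {
          return atomic64_read(&foo_console_seq);
  }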
diff --git a/features/rt/printk-combine-boot_delay_msec-into-printk_delay.patch b/features/rt/printk-combine-boot_delay_msec-into-printk_delay.patch
new file mode 100644
index 00000000..727ce601
--- /dev/null
+++ b/features/rt/printk-combine-boot_delay_msec-into-printk_delay.patch
@@ -0,0 +1,43 @@
+From e26bd5bb048a90ae9dbb91191947709c0bfd7050 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:04 +0106
+Subject: [PATCH 027/191] printk: combine boot_delay_msec() into printk_delay()
+
+boot_delay_msec() is always called immediately before printk_delay()
+so just combine the two.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 84fae4f08634..2b13deb971b9 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1727,8 +1727,10 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
+
+ int printk_delay_msec __read_mostly;
+
+-static inline void printk_delay(void)
++static inline void printk_delay(int level)
+ {
++ boot_delay_msec(level);
++
+ if (unlikely(printk_delay_msec)) {
+ int m = printk_delay_msec;
+
+@@ -2186,8 +2188,7 @@ asmlinkage int vprintk_emit(int facility, int level,
+ in_sched = true;
+ }
+
+- boot_delay_msec(level);
+- printk_delay();
++ printk_delay(level);
+
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+
+--
+2.19.1
+
diff --git a/features/rt/printk-console-remove-unnecessary-safe-buffer-usage.patch b/features/rt/printk-console-remove-unnecessary-safe-buffer-usage.patch
new file mode 100644
index 00000000..e6861260
--- /dev/null
+++ b/features/rt/printk-console-remove-unnecessary-safe-buffer-usage.patch
@@ -0,0 +1,47 @@
+From 158a782478bdf789c7a1797b742fdfb446e27dde Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 17 Feb 2021 18:28:05 +0100
+Subject: [PATCH 020/191] printk: console: remove unnecessary safe buffer usage
+
+Upon registering a console, safe buffers are activated when setting
+up the sequence number to replay the log. However, these are already
+protected by @console_sem and @syslog_lock. Remove the unnecessary
+safe buffer usage.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+---
+ kernel/printk/printk.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 15aed1a7bd05..523621889a72 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2967,9 +2967,7 @@ void register_console(struct console *newcon)
+ /*
+ * console_unlock(); will print out the buffered messages
+ * for us.
+- */
+- printk_safe_enter_irqsave(flags);
+- /*
++ *
+ * We're about to replay the log buffer. Only do this to the
+ * just-registered console to avoid excessive message spam to
+ * the already-registered consoles.
+@@ -2982,11 +2980,9 @@ void register_console(struct console *newcon)
+ exclusive_console_stop_seq = console_seq;
+
+ /* Get a consistent copy of @syslog_seq. */
+- raw_spin_lock(&syslog_lock);
++ raw_spin_lock_irqsave(&syslog_lock, flags);
+ console_seq = syslog_seq;
+- raw_spin_unlock(&syslog_lock);
+-
+- printk_safe_exit_irqrestore(flags);
++ raw_spin_unlock_irqrestore(&syslog_lock, flags);
+ }
+ console_unlock();
+ console_sysfs_notify();
+--
+2.19.1
+
diff --git a/features/rt/printk-consolidate-kmsg_dump_get_buffer-syslog_print.patch b/features/rt/printk-consolidate-kmsg_dump_get_buffer-syslog_print.patch
new file mode 100644
index 00000000..97ae80c8
--- /dev/null
+++ b/features/rt/printk-consolidate-kmsg_dump_get_buffer-syslog_print.patch
@@ -0,0 +1,146 @@
+From b29f148df0ca450ed3a53ed20ca0bdb921c54c9a Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 13 Jan 2021 11:29:53 +0106
+Subject: [PATCH 010/191] printk: consolidate
+ kmsg_dump_get_buffer/syslog_print_all code
+
+The logic for finding records to fit into a buffer is the same for
+kmsg_dump_get_buffer() and syslog_print_all(). Introduce a helper
+function find_first_fitting_seq() to handle this logic.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+---
+ kernel/printk/printk.c | 87 ++++++++++++++++++++++++------------------
+ 1 file changed, 50 insertions(+), 37 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 3f17ff13fd51..ff16a29aa620 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1421,6 +1421,50 @@ static size_t get_record_print_text_size(struct printk_info *info,
+ return ((prefix_len * line_count) + info->text_len + 1);
+ }
+
++/*
++ * Beginning with @start_seq, find the first record where it and all following
++ * records up to (but not including) @max_seq fit into @size.
++ *
++ * @max_seq is simply an upper bound and does not need to exist. If the caller
++ * does not require an upper bound, -1 can be used for @max_seq.
++ */
++static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
++ bool syslog, bool time)
++{
++ struct printk_info info;
++ unsigned int line_count;
++ size_t len = 0;
++ u64 seq;
++
++ /* Determine the size of the records up to @max_seq. */
++ prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
++ if (info.seq >= max_seq)
++ break;
++ len += get_record_print_text_size(&info, line_count, syslog, time);
++ }
++
++ /*
++ * Adjust the upper bound for the next loop to avoid subtracting
++ * lengths that were never added.
++ */
++ if (seq < max_seq)
++ max_seq = seq;
++
++ /*
++ * Move first record forward until length fits into the buffer. Ignore
++ * newest messages that were not counted in the above cycle. Messages
++ * might appear and get lost in the meantime. This is a best effort
++ * that prevents an infinite loop that could occur with a retry.
++ */
++ prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
++ if (len <= size || info.seq >= max_seq)
++ break;
++ len -= get_record_print_text_size(&info, line_count, syslog, time);
++ }
++
++ return seq;
++}
++
+ static int syslog_print(char __user *buf, int size)
+ {
+ struct printk_info info;
+@@ -1492,9 +1536,7 @@ static int syslog_print(char __user *buf, int size)
+ static int syslog_print_all(char __user *buf, int size, bool clear)
+ {
+ struct printk_info info;
+- unsigned int line_count;
+ struct printk_record r;
+- u64 max_seq;
+ char *text;
+ int len = 0;
+ u64 seq;
+@@ -1510,21 +1552,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ * Find first record that fits, including all following records,
+ * into the user-provided buffer for this dump.
+ */
+- prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
+- len += get_record_print_text_size(&info, line_count, true, time);
+-
+- /*
+- * Set an upper bound for the next loop to avoid subtracting lengths
+- * that were never added.
+- */
+- max_seq = seq;
+-
+- /* move first record forward until length fits into the buffer */
+- prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
+- if (len <= size || info.seq >= max_seq)
+- break;
+- len -= get_record_print_text_size(&info, line_count, true, time);
+- }
++ seq = find_first_fitting_seq(clear_seq, -1, size, true, time);
+
+ prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
+
+@@ -3427,7 +3455,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ char *buf, size_t size, size_t *len_out)
+ {
+ struct printk_info info;
+- unsigned int line_count;
+ struct printk_record r;
+ unsigned long flags;
+ u64 seq;
+@@ -3455,26 +3482,12 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+
+ /*
+ * Find first record that fits, including all following records,
+- * into the user-provided buffer for this dump.
++ * into the user-provided buffer for this dump. Pass in size-1
++ * because this function (by way of record_print_text()) will
++ * not write more than size-1 bytes of text into @buf.
+ */
+-
+- prb_for_each_info(dumper->cur_seq, prb, seq, &info, &line_count) {
+- if (info.seq >= dumper->next_seq)
+- break;
+- len += get_record_print_text_size(&info, line_count, syslog, time);
+- }
+-
+- /*
+- * Move first record forward until length fits into the buffer. Ignore
+- * newest messages that were not counted in the above cycle. Messages
+- * might appear and get lost in the meantime. This is the best effort
+- * that prevents an infinite loop.
+- */
+- prb_for_each_info(dumper->cur_seq, prb, seq, &info, &line_count) {
+- if (len < size || info.seq >= dumper->next_seq)
+- break;
+- len -= get_record_print_text_size(&info, line_count, syslog, time);
+- }
++ seq = find_first_fitting_seq(dumper->cur_seq, dumper->next_seq,
++ size - 1, syslog, time);
+
+ /*
+ * Next kmsg_dump_get_buffer() invocation will dump block of
+--
+2.19.1
+
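The helper above uses a two-pass approach: first sum the formatted size of every candidate record, then drop records from the front until the remainder fits. Below is a small, compilable sketch of the same idea over a plain array of record sizes; the sizes, buffer size and function names are invented for illustration, and the kernel version additionally bounds the second pass by @max_seq because records can be appended between the passes.

#include <stdio.h>
#include <stddef.h>

/*
 * Toy model of find_first_fitting_seq(): given per-record formatted sizes,
 * return the index of the first record such that it and all records after
 * it fit into bufsz. Mirrors the two prb_for_each_info() passes above.
 */
static size_t first_fitting_index(const size_t *sizes, size_t nrecs,
				  size_t bufsz)
{
	size_t len = 0;
	size_t i;

	/* Pass 1: total formatted size of all candidate records. */
	for (i = 0; i < nrecs; i++)
		len += sizes[i];

	/* Pass 2: drop records from the front until the rest fits. */
	for (i = 0; i < nrecs && len > bufsz; i++)
		len -= sizes[i];

	return i;
}

int main(void)
{
	const size_t sizes[] = { 300, 500, 200, 400 };
	size_t first = first_fitting_index(sizes, 4, 700);

	/* Prints 2: records 2 and 3 (200 + 400 bytes) fit into 700 bytes. */
	printf("first fitting record: %zu\n", first);
	return 0;
}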
diff --git a/features/rt/printk-convert-syslog_lock-to-spin_lock.patch b/features/rt/printk-convert-syslog_lock-to-spin_lock.patch
new file mode 100644
index 00000000..611e45b5
--- /dev/null
+++ b/features/rt/printk-convert-syslog_lock-to-spin_lock.patch
@@ -0,0 +1,118 @@
+From 3fe6df2a782687f96ec4e6eb1dcaa4b9dcc018ef Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 18 Feb 2021 17:37:41 +0100
+Subject: [PATCH 023/191] printk: convert @syslog_lock to spin_lock
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 274d4ef05a90..3e0b9bf28e12 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -356,7 +356,7 @@ enum log_flags {
+ };
+
+ /* syslog_lock protects syslog_* variables and write access to clear_seq. */
+-static DEFINE_RAW_SPINLOCK(syslog_lock);
++static DEFINE_SPINLOCK(syslog_lock);
+
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+@@ -1479,9 +1479,9 @@ static int syslog_print(char __user *buf, int size)
+ size_t n;
+ size_t skip;
+
+- raw_spin_lock_irq(&syslog_lock);
++ spin_lock_irq(&syslog_lock);
+ if (!prb_read_valid(prb, syslog_seq, &r)) {
+- raw_spin_unlock_irq(&syslog_lock);
++ spin_unlock_irq(&syslog_lock);
+ break;
+ }
+ if (r.info->seq != syslog_seq) {
+@@ -1510,7 +1510,7 @@ static int syslog_print(char __user *buf, int size)
+ syslog_partial += n;
+ } else
+ n = 0;
+- raw_spin_unlock_irq(&syslog_lock);
++ spin_unlock_irq(&syslog_lock);
+
+ if (!n)
+ break;
+@@ -1574,9 +1574,9 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ }
+
+ if (clear) {
+- raw_spin_lock_irq(&syslog_lock);
++ spin_lock_irq(&syslog_lock);
+ latched_seq_write(&clear_seq, seq);
+- raw_spin_unlock_irq(&syslog_lock);
++ spin_unlock_irq(&syslog_lock);
+ }
+
+ kfree(text);
+@@ -1585,9 +1585,9 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+
+ static void syslog_clear(void)
+ {
+- raw_spin_lock_irq(&syslog_lock);
++ spin_lock_irq(&syslog_lock);
+ latched_seq_write(&clear_seq, prb_next_seq(prb));
+- raw_spin_unlock_irq(&syslog_lock);
++ spin_unlock_irq(&syslog_lock);
+ }
+
+ /* Return a consistent copy of @syslog_seq. */
+@@ -1595,9 +1595,9 @@ static u64 read_syslog_seq_irq(void)
+ {
+ u64 seq;
+
+- raw_spin_lock_irq(&syslog_lock);
++ spin_lock_irq(&syslog_lock);
+ seq = syslog_seq;
+- raw_spin_unlock_irq(&syslog_lock);
++ spin_unlock_irq(&syslog_lock);
+
+ return seq;
+ }
+@@ -1675,10 +1675,10 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ break;
+ /* Number of chars in the log buffer */
+ case SYSLOG_ACTION_SIZE_UNREAD:
+- raw_spin_lock_irq(&syslog_lock);
++ spin_lock_irq(&syslog_lock);
+ if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
+ /* No unread messages. */
+- raw_spin_unlock_irq(&syslog_lock);
++ spin_unlock_irq(&syslog_lock);
+ return 0;
+ }
+ if (info.seq != syslog_seq) {
+@@ -1706,7 +1706,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ }
+ error -= syslog_partial;
+ }
+- raw_spin_unlock_irq(&syslog_lock);
++ spin_unlock_irq(&syslog_lock);
+ break;
+ /* Size of the log buffer */
+ case SYSLOG_ACTION_SIZE_BUFFER:
+@@ -3001,9 +3001,9 @@ void register_console(struct console *newcon)
+ exclusive_console_stop_seq = console_seq;
+
+ /* Get a consistent copy of @syslog_seq. */
+- raw_spin_lock_irqsave(&syslog_lock, flags);
++ spin_lock_irqsave(&syslog_lock, flags);
+ console_seq = syslog_seq;
+- raw_spin_unlock_irqrestore(&syslog_lock, flags);
++ spin_unlock_irqrestore(&syslog_lock, flags);
+ }
+ console_unlock();
+ console_sysfs_notify();
+--
+2.19.1
+
diff --git a/features/rt/printk-introduce-CONSOLE_LOG_MAX-for-improved-multi-.patch b/features/rt/printk-introduce-CONSOLE_LOG_MAX-for-improved-multi-.patch
new file mode 100644
index 00000000..70b8eafb
--- /dev/null
+++ b/features/rt/printk-introduce-CONSOLE_LOG_MAX-for-improved-multi-.patch
@@ -0,0 +1,94 @@
+From f0d983447bd8059038b24361b5698ffec757f812 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 10 Dec 2020 12:48:01 +0106
+Subject: [PATCH 011/191] printk: introduce CONSOLE_LOG_MAX for improved
+ multi-line support
+
+Instead of using "LOG_LINE_MAX + PREFIX_MAX" for temporary buffer
+sizes, introduce CONSOLE_LOG_MAX. This represents the maximum size
+that is allowed to be printed to the console for a single record.
+
+Rather than setting CONSOLE_LOG_MAX to "LOG_LINE_MAX + PREFIX_MAX"
+(1024), increase it to 4096. With a larger buffer size, multi-line
+records that are nearly LOG_LINE_MAX in length will have a better
+chance of being fully printed. (When formatting a record for the
+console, each line of a multi-line record is prepended with a copy
+of the prefix.)
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+---
+ kernel/printk/printk.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index ff16a29aa620..8a903faaec4e 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -410,8 +410,13 @@ static u64 clear_seq;
+ #else
+ #define PREFIX_MAX 32
+ #endif
++
++/* the maximum size allowed to be reserved for a record */
+ #define LOG_LINE_MAX (1024 - PREFIX_MAX)
+
++/* the maximum size of a formatted record (i.e. with prefix added per line) */
++#define CONSOLE_LOG_MAX 4096
++
+ #define LOG_LEVEL(v) ((v) & 0x07)
+ #define LOG_FACILITY(v) ((v) >> 3 & 0xff)
+
+@@ -1472,11 +1477,11 @@ static int syslog_print(char __user *buf, int size)
+ char *text;
+ int len = 0;
+
+- text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
++ text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
+ if (!text)
+ return -ENOMEM;
+
+- prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
++ prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
+
+ while (size > 0) {
+ size_t n;
+@@ -1542,7 +1547,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ u64 seq;
+ bool time;
+
+- text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
++ text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
+ if (!text)
+ return -ENOMEM;
+
+@@ -1554,7 +1559,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ */
+ seq = find_first_fitting_seq(clear_seq, -1, size, true, time);
+
+- prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
++ prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
+
+ len = 0;
+ prb_for_each_record(seq, prb, seq, &r) {
+@@ -2187,8 +2192,7 @@ EXPORT_SYMBOL(printk);
+
+ #else /* CONFIG_PRINTK */
+
+-#define LOG_LINE_MAX 0
+-#define PREFIX_MAX 0
++#define CONSOLE_LOG_MAX 0
+ #define printk_time false
+
+ #define prb_read_valid(rb, seq, r) false
+@@ -2506,7 +2510,7 @@ static inline int can_use_console(void)
+ void console_unlock(void)
+ {
+ static char ext_text[CONSOLE_EXT_LOG_MAX];
+- static char text[LOG_LINE_MAX + PREFIX_MAX];
++ static char text[CONSOLE_LOG_MAX];
+ unsigned long flags;
+ bool do_cond_resched, retry;
+ struct printk_info info;
+--
+2.19.1
+
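To see why 1024 bytes can truncate a formatted multi-line record, apply the sizing formula from get_record_print_text_size() above, (prefix_len * line_count) + text_len + 1, to a record near the reservation limit. The stand-alone sketch below is only an illustration; the line count is invented and PREFIX_MAX uses the 32-byte value visible in the hunk.

#include <stdio.h>

/* Values visible in the hunk above (the non-PRINTK_CALLER case). */
#define PREFIX_MAX	32
#define LOG_LINE_MAX	(1024 - PREFIX_MAX)

int main(void)
{
	/* An invented multi-line record close to the reservation limit. */
	unsigned int line_count = 16;
	unsigned int text_len = LOG_LINE_MAX;		/* 992 bytes of text */

	/* get_record_print_text_size(): one prefix per line, plus the text. */
	unsigned int formatted = PREFIX_MAX * line_count + text_len + 1;

	printf("formatted size: %u bytes\n", formatted);	/* 1505 */
	printf("fits in LOG_LINE_MAX + PREFIX_MAX (1024)? %s\n",
	       formatted <= 1024 ? "yes" : "no");		/* no  */
	printf("fits in CONSOLE_LOG_MAX (4096)?           %s\n",
	       formatted <= 4096 ? "yes" : "no");		/* yes */
	return 0;
}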
diff --git a/features/rt/printk-introduce-a-kmsg_dump-iterator.patch b/features/rt/printk-introduce-a-kmsg_dump-iterator.patch
new file mode 100644
index 00000000..cab5a8ea
--- /dev/null
+++ b/features/rt/printk-introduce-a-kmsg_dump-iterator.patch
@@ -0,0 +1,560 @@
+From b20f5eb741807f3e89ae444f82f7a4f3d76affb1 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 18 Dec 2020 11:40:08 +0000
+Subject: [PATCH 015/191] printk: introduce a kmsg_dump iterator
+
+Rather than storing the iterator information in the registered
+kmsg_dumper structure, create a separate iterator structure. The
+kmsg_dumper_iter structure can reside on the stack of the caller,
+thus allowing lockless use of the kmsg_dump functions.
+
+This is in preparation for removal of @logbuf_lock.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/kernel/nvram_64.c | 12 ++--
+ arch/powerpc/platforms/powernv/opal-kmsg.c | 3 +-
+ arch/powerpc/xmon/xmon.c | 6 +-
+ arch/um/kernel/kmsg_dump.c | 5 +-
+ drivers/hv/vmbus_drv.c | 5 +-
+ drivers/mtd/mtdoops.c | 5 +-
+ fs/pstore/platform.c | 5 +-
+ include/linux/kmsg_dump.h | 43 +++++++-------
+ kernel/debug/kdb/kdb_main.c | 10 ++--
+ kernel/printk/printk.c | 65 +++++++++++-----------
+ 10 files changed, 84 insertions(+), 75 deletions(-)
+
+diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
+index 532f22637783..1ef55f4b389a 100644
+--- a/arch/powerpc/kernel/nvram_64.c
++++ b/arch/powerpc/kernel/nvram_64.c
+@@ -73,7 +73,8 @@ static const char *nvram_os_partitions[] = {
+ };
+
+ static void oops_to_nvram(struct kmsg_dumper *dumper,
+- enum kmsg_dump_reason reason);
++ enum kmsg_dump_reason reason,
++ struct kmsg_dumper_iter *iter);
+
+ static struct kmsg_dumper nvram_kmsg_dumper = {
+ .dump = oops_to_nvram
+@@ -643,7 +644,8 @@ void __init nvram_init_oops_partition(int rtas_partition_exists)
+ * partition. If that's too much, go back and capture uncompressed text.
+ */
+ static void oops_to_nvram(struct kmsg_dumper *dumper,
+- enum kmsg_dump_reason reason)
++ enum kmsg_dump_reason reason,
++ struct kmsg_dumper_iter *iter)
+ {
+ struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+ static unsigned int oops_count = 0;
+@@ -681,13 +683,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
+ return;
+
+ if (big_oops_buf) {
+- kmsg_dump_get_buffer(dumper, false,
++ kmsg_dump_get_buffer(iter, false,
+ big_oops_buf, big_oops_buf_sz, &text_len);
+ rc = zip_oops(text_len);
+ }
+ if (rc != 0) {
+- kmsg_dump_rewind(dumper);
+- kmsg_dump_get_buffer(dumper, false,
++ kmsg_dump_rewind(iter);
++ kmsg_dump_get_buffer(iter, false,
+ oops_data, oops_data_sz, &text_len);
+ err_type = ERR_TYPE_KERNEL_PANIC;
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
+index 6c3bc4b4da98..ec862846bc82 100644
+--- a/arch/powerpc/platforms/powernv/opal-kmsg.c
++++ b/arch/powerpc/platforms/powernv/opal-kmsg.c
+@@ -20,7 +20,8 @@
+ * message, it just ensures that OPAL completely flushes the console buffer.
+ */
+ static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper,
+- enum kmsg_dump_reason reason)
++ enum kmsg_dump_reason reason,
++ struct kmsg_dumper_iter *iter)
+ {
+ /*
+ * Outside of a panic context the pollers will continue to run,
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 3fe37495f63d..900882f90431 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -3001,7 +3001,7 @@ print_address(unsigned long addr)
+ static void
+ dump_log_buf(void)
+ {
+- struct kmsg_dumper dumper = { .active = 1 };
++ struct kmsg_dumper_iter iter = { .active = 1 };
+ unsigned char buf[128];
+ size_t len;
+
+@@ -3013,9 +3013,9 @@ dump_log_buf(void)
+ catch_memory_errors = 1;
+ sync();
+
+- kmsg_dump_rewind_nolock(&dumper);
++ kmsg_dump_rewind_nolock(&iter);
+ xmon_start_pagination();
+- while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) {
++ while (kmsg_dump_get_line_nolock(&iter, false, buf, sizeof(buf), &len)) {
+ buf[len] = '\0';
+ printf("%s", buf);
+ }
+diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
+index 6516ef1f8274..78befecb79d2 100644
+--- a/arch/um/kernel/kmsg_dump.c
++++ b/arch/um/kernel/kmsg_dump.c
+@@ -7,7 +7,8 @@
+ #include <os.h>
+
+ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
+- enum kmsg_dump_reason reason)
++ enum kmsg_dump_reason reason,
++ struct kmsg_dumper_iter *iter)
+ {
+ static char line[1024];
+ struct console *con;
+@@ -30,7 +31,7 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
+ return;
+
+ printf("kmsg_dump:\n");
+- while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
++ while (kmsg_dump_get_line(iter, true, line, sizeof(line), &len)) {
+ line[len] = '\0';
+ printf("%s", line);
+ }
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 10dce9f91216..806950bc3c1d 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -1389,7 +1389,8 @@ static void vmbus_isr(void)
+ * buffer and call into Hyper-V to transfer the data.
+ */
+ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
+- enum kmsg_dump_reason reason)
++ enum kmsg_dump_reason reason,
++ struct kmsg_dumper_iter *iter)
+ {
+ size_t bytes_written;
+ phys_addr_t panic_pa;
+@@ -1404,7 +1405,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
+ * Write dump contents to the page. No need to synchronize; panic should
+ * be single-threaded.
+ */
+- kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
++ kmsg_dump_get_buffer(iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
+ &bytes_written);
+ if (bytes_written)
+ hyperv_report_panic_msg(panic_pa, bytes_written);
+diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
+index 774970bfcf85..6bc2c728adb7 100644
+--- a/drivers/mtd/mtdoops.c
++++ b/drivers/mtd/mtdoops.c
+@@ -267,7 +267,8 @@ static void find_next_position(struct mtdoops_context *cxt)
+ }
+
+ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+- enum kmsg_dump_reason reason)
++ enum kmsg_dump_reason reason,
++ struct kmsg_dumper_iter *iter)
+ {
+ struct mtdoops_context *cxt = container_of(dumper,
+ struct mtdoops_context, dump);
+@@ -276,7 +277,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
+ if (reason == KMSG_DUMP_OOPS && !dump_oops)
+ return;
+
+- kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
++ kmsg_dump_get_buffer(iter, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+ record_size - MTDOOPS_HEADER_SIZE, NULL);
+
+ if (reason != KMSG_DUMP_OOPS) {
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index d963ae7902f9..b90fedcc1716 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -383,7 +383,8 @@ void pstore_record_init(struct pstore_record *record,
+ * end of the buffer.
+ */
+ static void pstore_dump(struct kmsg_dumper *dumper,
+- enum kmsg_dump_reason reason)
++ enum kmsg_dump_reason reason,
++ struct kmsg_dumper_iter *iter)
+ {
+ unsigned long total = 0;
+ const char *why;
+@@ -435,7 +436,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
+ dst_size -= header_size;
+
+ /* Write dump contents. */
+- if (!kmsg_dump_get_buffer(dumper, true, dst + header_size,
++ if (!kmsg_dump_get_buffer(iter, true, dst + header_size,
+ dst_size, &dump_size))
+ break;
+
+diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
+index 4095a34db0fa..2fdb10ab1799 100644
+--- a/include/linux/kmsg_dump.h
++++ b/include/linux/kmsg_dump.h
+@@ -29,6 +29,18 @@ enum kmsg_dump_reason {
+ KMSG_DUMP_MAX
+ };
+
++/**
++ * struct kmsg_dumper_iter - iterator for kernel crash message dumper
++ * @active: Flag that specifies if this is currently dumping
++ * @cur_seq: Points to the oldest message to dump (private)
++ * @next_seq: Points after the newest message to dump (private)
++ */
++struct kmsg_dumper_iter {
++ bool active;
++ u64 cur_seq;
++ u64 next_seq;
++};
++
+ /**
+ * struct kmsg_dumper - kernel crash message dumper structure
+ * @list: Entry in the dumper list (private)
+@@ -36,37 +48,30 @@ enum kmsg_dump_reason {
+ * through the record iterator
+ * @max_reason: filter for highest reason number that should be dumped
+ * @registered: Flag that specifies if this is already registered
+- * @active: Flag that specifies if this is currently dumping
+- * @cur_seq: Points to the oldest message to dump (private)
+- * @next_seq: Points after the newest message to dump (private)
+ */
+ struct kmsg_dumper {
+ struct list_head list;
+- void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
++ void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
++ struct kmsg_dumper_iter *iter);
+ enum kmsg_dump_reason max_reason;
+- bool active;
+ bool registered;
+-
+- /* private state of the kmsg iterator */
+- u64 cur_seq;
+- u64 next_seq;
+ };
+
+ #ifdef CONFIG_PRINTK
+ void kmsg_dump(enum kmsg_dump_reason reason);
+
+-bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
++bool kmsg_dump_get_line_nolock(struct kmsg_dumper_iter *iter, bool syslog,
+ char *line, size_t size, size_t *len);
+
+-bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
++bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ char *line, size_t size, size_t *len);
+
+-bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
++bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ char *buf, size_t size, size_t *len_out);
+
+-void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
++void kmsg_dump_rewind_nolock(struct kmsg_dumper_iter *iter);
+
+-void kmsg_dump_rewind(struct kmsg_dumper *dumper);
++void kmsg_dump_rewind(struct kmsg_dumper_iter *dumper_iter);
+
+ int kmsg_dump_register(struct kmsg_dumper *dumper);
+
+@@ -78,30 +83,30 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason)
+ {
+ }
+
+-static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
++static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper_iter *iter,
+ bool syslog, const char *line,
+ size_t size, size_t *len)
+ {
+ return false;
+ }
+
+-static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
++static inline bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ const char *line, size_t size, size_t *len)
+ {
+ return false;
+ }
+
+-static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
++static inline bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ char *buf, size_t size, size_t *len)
+ {
+ return false;
+ }
+
+-static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
++static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper_iter *iter)
+ {
+ }
+
+-static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
++static inline void kmsg_dump_rewind(struct kmsg_dumper_iter *iter)
+ {
+ }
+
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 930ac1b25ec7..7ae9da245e4b 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -2101,7 +2101,7 @@ static int kdb_dmesg(int argc, const char **argv)
+ int adjust = 0;
+ int n = 0;
+ int skip = 0;
+- struct kmsg_dumper dumper = { .active = 1 };
++ struct kmsg_dumper_iter iter = { .active = 1 };
+ size_t len;
+ char buf[201];
+
+@@ -2126,8 +2126,8 @@ static int kdb_dmesg(int argc, const char **argv)
+ kdb_set(2, setargs);
+ }
+
+- kmsg_dump_rewind_nolock(&dumper);
+- while (kmsg_dump_get_line_nolock(&dumper, 1, NULL, 0, NULL))
++ kmsg_dump_rewind_nolock(&iter);
++ while (kmsg_dump_get_line_nolock(&iter, 1, NULL, 0, NULL))
+ n++;
+
+ if (lines < 0) {
+@@ -2159,8 +2159,8 @@ static int kdb_dmesg(int argc, const char **argv)
+ if (skip >= n || skip < 0)
+ return 0;
+
+- kmsg_dump_rewind_nolock(&dumper);
+- while (kmsg_dump_get_line_nolock(&dumper, 1, buf, sizeof(buf), &len)) {
++ kmsg_dump_rewind_nolock(&iter);
++ while (kmsg_dump_get_line_nolock(&iter, 1, buf, sizeof(buf), &len)) {
+ if (skip) {
+ skip--;
+ continue;
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 713d09843d23..7d426c5dcec6 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3389,6 +3389,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
+ */
+ void kmsg_dump(enum kmsg_dump_reason reason)
+ {
++ struct kmsg_dumper_iter iter;
+ struct kmsg_dumper *dumper;
+ unsigned long flags;
+
+@@ -3408,25 +3409,21 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ continue;
+
+ /* initialize iterator with data about the stored records */
+- dumper->active = true;
+-
++ iter.active = true;
+ logbuf_lock_irqsave(flags);
+- dumper->cur_seq = latched_seq_read_nolock(&clear_seq);
+- dumper->next_seq = prb_next_seq(prb);
++ iter.cur_seq = latched_seq_read_nolock(&clear_seq);
++ iter.next_seq = prb_next_seq(prb);
+ logbuf_unlock_irqrestore(flags);
+
+ /* invoke dumper which will iterate over records */
+- dumper->dump(dumper, reason);
+-
+- /* reset iterator */
+- dumper->active = false;
++ dumper->dump(dumper, reason, &iter);
+ }
+ rcu_read_unlock();
+ }
+
+ /**
+ * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
+- * @dumper: registered kmsg dumper
++ * @iter: kmsg dumper iterator
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+@@ -3443,7 +3440,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ *
+ * The function is similar to kmsg_dump_get_line(), but grabs no locks.
+ */
+-bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
++bool kmsg_dump_get_line_nolock(struct kmsg_dumper_iter *iter, bool syslog,
+ char *line, size_t size, size_t *len)
+ {
+ struct printk_info info;
+@@ -3454,16 +3451,16 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+
+ prb_rec_init_rd(&r, &info, line, size);
+
+- if (!dumper->active)
++ if (!iter->active)
+ goto out;
+
+ /* Read text or count text lines? */
+ if (line) {
+- if (!prb_read_valid(prb, dumper->cur_seq, &r))
++ if (!prb_read_valid(prb, iter->cur_seq, &r))
+ goto out;
+ l = record_print_text(&r, syslog, printk_time);
+ } else {
+- if (!prb_read_valid_info(prb, dumper->cur_seq,
++ if (!prb_read_valid_info(prb, iter->cur_seq,
+ &info, &line_count)) {
+ goto out;
+ }
+@@ -3472,7 +3469,7 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+
+ }
+
+- dumper->cur_seq = r.info->seq + 1;
++ iter->cur_seq = r.info->seq + 1;
+ ret = true;
+ out:
+ if (len)
+@@ -3482,7 +3479,7 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+
+ /**
+ * kmsg_dump_get_line - retrieve one kmsg log line
+- * @dumper: registered kmsg dumper
++ * @iter: kmsg dumper iterator
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+@@ -3497,14 +3494,14 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+-bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
++bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ char *line, size_t size, size_t *len)
+ {
+ unsigned long flags;
+ bool ret;
+
+ logbuf_lock_irqsave(flags);
+- ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
++ ret = kmsg_dump_get_line_nolock(iter, syslog, line, size, len);
+ logbuf_unlock_irqrestore(flags);
+
+ return ret;
+@@ -3513,7 +3510,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+
+ /**
+ * kmsg_dump_get_buffer - copy kmsg log lines
+- * @dumper: registered kmsg dumper
++ * @iter: kmsg dumper iterator
+ * @syslog: include the "<4>" prefixes
+ * @buf: buffer to copy the line to
+ * @size: maximum size of the buffer
+@@ -3530,7 +3527,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+-bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
++bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ char *buf, size_t size, size_t *len_out)
+ {
+ struct printk_info info;
+@@ -3542,19 +3539,19 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ bool ret = false;
+ bool time = printk_time;
+
+- if (!dumper->active || !buf || !size)
++ if (!iter->active || !buf || !size)
+ goto out;
+
+ logbuf_lock_irqsave(flags);
+- if (prb_read_valid_info(prb, dumper->cur_seq, &info, NULL)) {
+- if (info.seq != dumper->cur_seq) {
++ if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
++ if (info.seq != iter->cur_seq) {
+ /* messages are gone, move to first available one */
+- dumper->cur_seq = info.seq;
++ iter->cur_seq = info.seq;
+ }
+ }
+
+ /* last entry */
+- if (dumper->cur_seq >= dumper->next_seq) {
++ if (iter->cur_seq >= iter->next_seq) {
+ logbuf_unlock_irqrestore(flags);
+ goto out;
+ }
+@@ -3565,7 +3562,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ * because this function (by way of record_print_text()) will
+ * not write more than size-1 bytes of text into @buf.
+ */
+- seq = find_first_fitting_seq(dumper->cur_seq, dumper->next_seq,
++ seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
+ size - 1, syslog, time);
+
+ /*
+@@ -3578,7 +3575,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+
+ len = 0;
+ prb_for_each_record(seq, prb, seq, &r) {
+- if (r.info->seq >= dumper->next_seq)
++ if (r.info->seq >= iter->next_seq)
+ break;
+
+ len += record_print_text(&r, syslog, time);
+@@ -3587,7 +3584,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ prb_rec_init_rd(&r, &info, buf + len, size - len);
+ }
+
+- dumper->next_seq = next_seq;
++ iter->next_seq = next_seq;
+ ret = true;
+ logbuf_unlock_irqrestore(flags);
+ out:
+@@ -3599,7 +3596,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+
+ /**
+ * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
+- * @dumper: registered kmsg dumper
++ * @iter: kmsg dumper iterator
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+@@ -3607,26 +3604,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+ *
+ * The function is similar to kmsg_dump_rewind(), but grabs no locks.
+ */
+-void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
++void kmsg_dump_rewind_nolock(struct kmsg_dumper_iter *iter)
+ {
+- dumper->cur_seq = latched_seq_read_nolock(&clear_seq);
+- dumper->next_seq = prb_next_seq(prb);
++ iter->cur_seq = latched_seq_read_nolock(&clear_seq);
++ iter->next_seq = prb_next_seq(prb);
+ }
+
+ /**
+ * kmsg_dump_rewind - reset the iterator
+- * @dumper: registered kmsg dumper
++ * @iter: kmsg dumper iterator
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+ */
+-void kmsg_dump_rewind(struct kmsg_dumper *dumper)
++void kmsg_dump_rewind(struct kmsg_dumper_iter *iter)
+ {
+ unsigned long flags;
+
+ logbuf_lock_irqsave(flags);
+- kmsg_dump_rewind_nolock(dumper);
++ kmsg_dump_rewind_nolock(iter);
+ logbuf_unlock_irqrestore(flags);
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+--
+2.19.1
+
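For reference, a hypothetical dumper written against the iterator API as defined by this patch might look as follows. Only the kmsg_dump_* calls and the two structures come from the patch; the callback, buffer and registration are made up, and this is kernel code, not a stand-alone program.

#include <linux/kmsg_dump.h>

static char example_buf[4096];

/* Hypothetical callback; the iterator lives on the caller's stack. */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 struct kmsg_dumper_iter *iter)
{
	size_t len;

	/* Grab everything that fits into example_buf in one go. */
	if (kmsg_dump_get_buffer(iter, false, example_buf,
				 sizeof(example_buf), &len)) {
		/* hand example_buf[0..len) to the crash transport here */
	}

	/* The same iterator can be rewound and walked line by line. */
	kmsg_dump_rewind(iter);
	while (kmsg_dump_get_line(iter, true, example_buf,
				  sizeof(example_buf), &len))
		;	/* consume one formatted record per iteration */
}

static struct kmsg_dumper example_dumper = {
	.dump		= example_dump,
	.max_reason	= KMSG_DUMP_OOPS,
};

/* Registered once at init time: kmsg_dump_register(&example_dumper); */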
diff --git a/features/rt/printk-introduce-kernel-sync-mode.patch b/features/rt/printk-introduce-kernel-sync-mode.patch
new file mode 100644
index 00000000..c307bc7e
--- /dev/null
+++ b/features/rt/printk-introduce-kernel-sync-mode.patch
@@ -0,0 +1,308 @@
+From 9707df2d67d37317d4ab73abea7213f09100ed53 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:06 +0106
+Subject: [PATCH 029/191] printk: introduce kernel sync mode
+
+When the kernel performs an OOPS, enter into "sync mode":
+
+- only atomic consoles (write_atomic() callback) will print
+- printing occurs within vprintk_store() instead of console_unlock()
+
+CONSOLE_LOG_MAX is moved to printk.h to support the per-console
+buffer used in sync mode.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 4 ++
+ include/linux/printk.h | 6 ++
+ kernel/printk/printk.c | 133 ++++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 137 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 919c8d43d986..1d699d02e99a 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -16,6 +16,7 @@
+
+ #include <linux/atomic.h>
+ #include <linux/types.h>
++#include <linux/printk.h>
+
+ struct vc_data;
+ struct console_font_op;
+@@ -150,6 +151,9 @@ struct console {
+ short flags;
+ short index;
+ int cflag;
++#ifdef CONFIG_PRINTK
++ char sync_buf[CONSOLE_LOG_MAX];
++#endif
+ void *data;
+ struct console *next;
+ };
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 2476796c1150..1ebd93581acc 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -46,6 +46,12 @@ static inline const char *printk_skip_headers(const char *buffer)
+
+ #define CONSOLE_EXT_LOG_MAX 8192
+
++/*
++ * The maximum size of a record formatted for console printing
++ * (i.e. with the prefix prepended to every line).
++ */
++#define CONSOLE_LOG_MAX 4096
++
+ /* printk's without a loglevel use this.. */
+ #define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 28bd7a7807bc..4aa402d2df55 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -44,6 +44,7 @@
+ #include <linux/irq_work.h>
+ #include <linux/ctype.h>
+ #include <linux/uio.h>
++#include <linux/clocksource.h>
+ #include <linux/sched/clock.h>
+ #include <linux/sched/debug.h>
+ #include <linux/sched/task_stack.h>
+@@ -359,6 +360,9 @@ enum log_flags {
+ static DEFINE_SPINLOCK(syslog_lock);
+
+ #ifdef CONFIG_PRINTK
++/* Set to enable sync mode. Once set, it is never cleared. */
++static bool sync_mode;
++
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+ /* All 3 protected by @syslog_lock. */
+ /* the next printk record to read by syslog(READ) or /proc/kmsg */
+@@ -398,9 +402,6 @@ static struct latched_seq clear_seq = {
+ /* the maximum size allowed to be reserved for a record */
+ #define LOG_LINE_MAX (1024 - PREFIX_MAX)
+
+-/* the maximum size of a formatted record (i.e. with prefix added per line) */
+-#define CONSOLE_LOG_MAX 4096
+-
+ #define LOG_LEVEL(v) ((v) & 0x07)
+ #define LOG_FACILITY(v) ((v) >> 3 & 0xff)
+
+@@ -1742,6 +1743,91 @@ static inline void printk_delay(int level)
+ }
+ }
+
++static bool kernel_sync_mode(void)
++{
++ return (oops_in_progress || sync_mode);
++}
++
++static bool console_can_sync(struct console *con)
++{
++ if (!(con->flags & CON_ENABLED))
++ return false;
++ if (con->write_atomic && kernel_sync_mode())
++ return true;
++ return false;
++}
++
++static bool call_sync_console_driver(struct console *con, const char *text, size_t text_len)
++{
++ if (!(con->flags & CON_ENABLED))
++ return false;
++ if (con->write_atomic && kernel_sync_mode())
++ con->write_atomic(con, text, text_len);
++ else
++ return false;
++
++ return true;
++}
++
++static bool have_atomic_console(void)
++{
++ struct console *con;
++
++ for_each_console(con) {
++ if (!(con->flags & CON_ENABLED))
++ continue;
++ if (con->write_atomic)
++ return true;
++ }
++ return false;
++}
++
++static bool print_sync(struct console *con, u64 *seq)
++{
++ struct printk_info info;
++ struct printk_record r;
++ size_t text_len;
++
++ prb_rec_init_rd(&r, &info, &con->sync_buf[0], sizeof(con->sync_buf));
++
++ if (!prb_read_valid(prb, *seq, &r))
++ return false;
++
++ text_len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
++
++ if (!call_sync_console_driver(con, &con->sync_buf[0], text_len))
++ return false;
++
++ *seq = r.info->seq;
++
++ touch_softlockup_watchdog_sync();
++ clocksource_touch_watchdog();
++ rcu_cpu_stall_reset();
++ touch_nmi_watchdog();
++
++ if (text_len)
++ printk_delay(r.info->level);
++
++ return true;
++}
++
++static void print_sync_until(struct console *con, u64 seq)
++{
++ unsigned int flags;
++ u64 printk_seq;
++
++ console_atomic_lock(&flags);
++ for (;;) {
++ printk_seq = atomic64_read(&console_seq);
++ if (printk_seq >= seq)
++ break;
++ if (!print_sync(con, &printk_seq))
++ break;
++ atomic64_set(&console_seq, printk_seq + 1);
++ }
++ console_atomic_unlock(flags);
++}
++
+ /*
+ * Special console_lock variants that help to reduce the risk of soft-lockups.
+ * They allow to pass console_lock to another printk() call using a busy wait.
+@@ -1916,6 +2002,8 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
+ if (!cpu_online(smp_processor_id()) &&
+ !(con->flags & CON_ANYTIME))
+ continue;
++ if (kernel_sync_mode())
++ continue;
+ if (con->flags & CON_EXTENDED)
+ con->write(con, ext_text, ext_len);
+ else {
+@@ -2070,6 +2158,7 @@ int vprintk_store(int facility, int level,
+ const u32 caller_id = printk_caller_id();
+ struct prb_reserved_entry e;
+ enum log_flags lflags = 0;
++ bool final_commit = false;
+ struct printk_record r;
+ unsigned long irqflags;
+ u16 trunc_msg_len = 0;
+@@ -2079,6 +2168,7 @@ int vprintk_store(int facility, int level,
+ u16 text_len;
+ int ret = 0;
+ u64 ts_nsec;
++ u64 seq;
+
+ /*
+ * Since the duration of printk() can vary depending on the message
+@@ -2117,6 +2207,7 @@ int vprintk_store(int facility, int level,
+ if (lflags & LOG_CONT) {
+ prb_rec_init_wr(&r, reserve_size);
+ if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
++ seq = r.info->seq;
+ text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
+ facility, &lflags, fmt, args);
+ r.info->text_len += text_len;
+@@ -2124,6 +2215,7 @@ int vprintk_store(int facility, int level,
+ if (lflags & LOG_NEWLINE) {
+ r.info->flags |= LOG_NEWLINE;
+ prb_final_commit(&e);
++ final_commit = true;
+ } else {
+ prb_commit(&e);
+ }
+@@ -2148,6 +2240,8 @@ int vprintk_store(int facility, int level,
+ goto out;
+ }
+
++ seq = r.info->seq;
++
+ /* fill message */
+ text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &lflags, fmt, args);
+ if (trunc_msg_len)
+@@ -2162,13 +2256,25 @@ int vprintk_store(int facility, int level,
+ memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
+
+ /* A message without a trailing newline can be continued. */
+- if (!(lflags & LOG_NEWLINE))
++ if (!(lflags & LOG_NEWLINE)) {
+ prb_commit(&e);
+- else
++ } else {
+ prb_final_commit(&e);
++ final_commit = true;
++ }
+
+ ret = text_len + trunc_msg_len;
+ out:
++ /* only the kernel may perform synchronous printing */
++ if (facility == 0 && final_commit) {
++ struct console *con;
++
++ for_each_console(con) {
++ if (console_can_sync(con))
++ print_sync_until(con, seq + 1);
++ }
++ }
++
+ printk_exit_irqrestore(irqflags);
+ return ret;
+ }
+@@ -2264,12 +2370,13 @@ EXPORT_SYMBOL(printk);
+
+ #else /* CONFIG_PRINTK */
+
+-#define CONSOLE_LOG_MAX 0
+ #define printk_time false
+
+ #define prb_read_valid(rb, seq, r) false
+ #define prb_first_valid_seq(rb) 0
+
++#define kernel_sync_mode() false
++
+ static u64 syslog_seq;
+ static atomic64_t console_seq = ATOMIC64_INIT(0);
+ static u64 exclusive_console_stop_seq;
+@@ -2562,6 +2669,8 @@ static int have_callable_console(void)
+ */
+ static inline int can_use_console(void)
+ {
++ if (kernel_sync_mode())
++ return false;
+ return cpu_online(raw_smp_processor_id()) || have_callable_console();
+ }
+
+@@ -3374,6 +3483,18 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ struct kmsg_dumper_iter iter;
+ struct kmsg_dumper *dumper;
+
++ if (!oops_in_progress) {
++ /*
++ * If atomic consoles are available, activate kernel sync mode
++ * to make sure any final messages are visible. The trailing
++ * printk message is important to flush any pending messages.
++ */
++ if (have_atomic_console()) {
++ sync_mode = true;
++ pr_info("enabled sync mode\n");
++ }
++ }
++
+ rcu_read_lock();
+ list_for_each_entry_rcu(dumper, &dump_list, list) {
+ enum kmsg_dump_reason max_reason = dumper->max_reason;
+--
+2.19.1
+
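From a console driver's point of view, taking part in sync mode only requires providing a write_atomic() callback (added by the separate write_atomic interface patch in this series). A hypothetical skeleton is sketched below; the prototype is assumed to mirror ->write(), and the polled UART helper is a placeholder rather than a real driver.

#include <linux/console.h>

static void example_uart_putc(char c);	/* polled TX, safe in any context */

static void example_write_atomic(struct console *con,
				 const char *text, unsigned int len)
{
	unsigned int i;

	/* Must not sleep, take sleeping locks or depend on interrupts. */
	for (i = 0; i < len; i++)
		example_uart_putc(text[i]);
}

static struct console example_console = {
	.name		= "exuart",
	.write_atomic	= example_write_atomic,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

/*
 * After register_console(&example_console), an OOPS switches printk into
 * sync mode and vprintk_store() calls print_sync_until(), which formats
 * each record into con->sync_buf and pushes it through write_atomic().
 */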
diff --git a/features/rt/printk-kmsg_dump-remove-_nolock-variants.patch b/features/rt/printk-kmsg_dump-remove-_nolock-variants.patch
new file mode 100644
index 00000000..f527d6dc
--- /dev/null
+++ b/features/rt/printk-kmsg_dump-remove-_nolock-variants.patch
@@ -0,0 +1,225 @@
+From 3635a853f4118c42ffc7e537ed64c9ab04de93d8 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 21 Dec 2020 10:27:58 +0106
+Subject: [PATCH 018/191] printk: kmsg_dump: remove _nolock() variants
+
+kmsg_dump_rewind() and kmsg_dump_get_line() are lockless, so there is
+no need for _nolock() variants. Remove these functions and switch all
+callers of the _nolock() variants.
+
+The functions without _nolock() were chosen because they are already
+exported to kernel modules.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+---
+ arch/powerpc/xmon/xmon.c | 4 +--
+ include/linux/kmsg_dump.h | 18 +----------
+ kernel/debug/kdb/kdb_main.c | 8 ++---
+ kernel/printk/printk.c | 60 +++++--------------------------------
+ 4 files changed, 15 insertions(+), 75 deletions(-)
+
+diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
+index 900882f90431..be2413890bda 100644
+--- a/arch/powerpc/xmon/xmon.c
++++ b/arch/powerpc/xmon/xmon.c
+@@ -3013,9 +3013,9 @@ dump_log_buf(void)
+ catch_memory_errors = 1;
+ sync();
+
+- kmsg_dump_rewind_nolock(&iter);
++ kmsg_dump_rewind(&iter);
+ xmon_start_pagination();
+- while (kmsg_dump_get_line_nolock(&iter, false, buf, sizeof(buf), &len)) {
++ while (kmsg_dump_get_line(&iter, false, buf, sizeof(buf), &len)) {
+ buf[len] = '\0';
+ printf("%s", buf);
+ }
+diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
+index 2fdb10ab1799..86673930c8ea 100644
+--- a/include/linux/kmsg_dump.h
++++ b/include/linux/kmsg_dump.h
+@@ -60,18 +60,13 @@ struct kmsg_dumper {
+ #ifdef CONFIG_PRINTK
+ void kmsg_dump(enum kmsg_dump_reason reason);
+
+-bool kmsg_dump_get_line_nolock(struct kmsg_dumper_iter *iter, bool syslog,
+- char *line, size_t size, size_t *len);
+-
+ bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ char *line, size_t size, size_t *len);
+
+ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ char *buf, size_t size, size_t *len_out);
+
+-void kmsg_dump_rewind_nolock(struct kmsg_dumper_iter *iter);
+-
+-void kmsg_dump_rewind(struct kmsg_dumper_iter *dumper_iter);
++void kmsg_dump_rewind(struct kmsg_dumper_iter *iter);
+
+ int kmsg_dump_register(struct kmsg_dumper *dumper);
+
+@@ -83,13 +78,6 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason)
+ {
+ }
+
+-static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper_iter *iter,
+- bool syslog, const char *line,
+- size_t size, size_t *len)
+-{
+- return false;
+-}
+-
+ static inline bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ const char *line, size_t size, size_t *len)
+ {
+@@ -102,10 +90,6 @@ static inline bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool sysl
+ return false;
+ }
+
+-static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper_iter *iter)
+-{
+-}
+-
+ static inline void kmsg_dump_rewind(struct kmsg_dumper_iter *iter)
+ {
+ }
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index 7ae9da245e4b..dbf1d126ac5e 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -2126,8 +2126,8 @@ static int kdb_dmesg(int argc, const char **argv)
+ kdb_set(2, setargs);
+ }
+
+- kmsg_dump_rewind_nolock(&iter);
+- while (kmsg_dump_get_line_nolock(&iter, 1, NULL, 0, NULL))
++ kmsg_dump_rewind(&iter);
++ while (kmsg_dump_get_line(&iter, 1, NULL, 0, NULL))
+ n++;
+
+ if (lines < 0) {
+@@ -2159,8 +2159,8 @@ static int kdb_dmesg(int argc, const char **argv)
+ if (skip >= n || skip < 0)
+ return 0;
+
+- kmsg_dump_rewind_nolock(&iter);
+- while (kmsg_dump_get_line_nolock(&iter, 1, buf, sizeof(buf), &len)) {
++ kmsg_dump_rewind(&iter);
++ while (kmsg_dump_get_line(&iter, 1, buf, sizeof(buf), &len)) {
+ if (skip) {
+ skip--;
+ continue;
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 32b2d763aef7..fbb9f2f30430 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3382,7 +3382,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ }
+
+ /**
+- * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
++ * kmsg_dump_get_line - retrieve one kmsg log line
+ * @iter: kmsg dumper iterator
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+@@ -3397,18 +3397,18 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+- *
+- * The function is similar to kmsg_dump_get_line(), but grabs no locks.
+ */
+-bool kmsg_dump_get_line_nolock(struct kmsg_dumper_iter *iter, bool syslog,
+- char *line, size_t size, size_t *len)
++bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
++ char *line, size_t size, size_t *len)
+ {
+ struct printk_info info;
+ unsigned int line_count;
+ struct printk_record r;
++ unsigned long flags;
+ size_t l = 0;
+ bool ret = false;
+
++ printk_safe_enter_irqsave(flags);
+ prb_rec_init_rd(&r, &info, line, size);
+
+ if (!iter->active)
+@@ -3432,40 +3432,11 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper_iter *iter, bool syslog,
+ iter->cur_seq = r.info->seq + 1;
+ ret = true;
+ out:
++ printk_safe_exit_irqrestore(flags);
+ if (len)
+ *len = l;
+ return ret;
+ }
+-
+-/**
+- * kmsg_dump_get_line - retrieve one kmsg log line
+- * @iter: kmsg dumper iterator
+- * @syslog: include the "<4>" prefixes
+- * @line: buffer to copy the line to
+- * @size: maximum size of the buffer
+- * @len: length of line placed into buffer
+- *
+- * Start at the beginning of the kmsg buffer, with the oldest kmsg
+- * record, and copy one record into the provided buffer.
+- *
+- * Consecutive calls will return the next available record moving
+- * towards the end of the buffer with the youngest messages.
+- *
+- * A return value of FALSE indicates that there are no more records to
+- * read.
+- */
+-bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+- char *line, size_t size, size_t *len)
+-{
+- unsigned long flags;
+- bool ret;
+-
+- printk_safe_enter_irqsave(flags);
+- ret = kmsg_dump_get_line_nolock(iter, syslog, line, size, len);
+- printk_safe_exit_irqrestore(flags);
+-
+- return ret;
+-}
+ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+
+ /**
+@@ -3554,22 +3525,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+
+-/**
+- * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
+- * @iter: kmsg dumper iterator
+- *
+- * Reset the dumper's iterator so that kmsg_dump_get_line() and
+- * kmsg_dump_get_buffer() can be called again and used multiple
+- * times within the same dumper.dump() callback.
+- *
+- * The function is similar to kmsg_dump_rewind(), but grabs no locks.
+- */
+-void kmsg_dump_rewind_nolock(struct kmsg_dumper_iter *iter)
+-{
+- iter->cur_seq = latched_seq_read_nolock(&clear_seq);
+- iter->next_seq = prb_next_seq(prb);
+-}
+-
+ /**
+ * kmsg_dump_rewind - reset the iterator
+ * @iter: kmsg dumper iterator
+@@ -3583,7 +3538,8 @@ void kmsg_dump_rewind(struct kmsg_dumper_iter *iter)
+ unsigned long flags;
+
+ printk_safe_enter_irqsave(flags);
+- kmsg_dump_rewind_nolock(iter);
++ iter->cur_seq = latched_seq_read_nolock(&clear_seq);
++ iter->next_seq = prb_next_seq(prb);
+ printk_safe_exit_irqrestore(flags);
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+--
+2.19.1
+
diff --git a/features/rt/printk-kmsg_dump-remove-unused-fields.patch b/features/rt/printk-kmsg_dump-remove-unused-fields.patch
new file mode 100644
index 00000000..eee152e1
--- /dev/null
+++ b/features/rt/printk-kmsg_dump-remove-unused-fields.patch
@@ -0,0 +1,42 @@
+From 978c2a2d02d76dfd23c87cf04b8d502030dc66f8 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 21 Dec 2020 11:19:39 +0106
+Subject: [PATCH 008/191] printk: kmsg_dump: remove unused fields
+
+struct kmsg_dumper still contains some fields that were used to
+iterate the old ringbuffer. They are no longer used. Remove them
+and update the struct documentation.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/kmsg_dump.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
+index 3378bcbe585e..235c50982c2d 100644
+--- a/include/linux/kmsg_dump.h
++++ b/include/linux/kmsg_dump.h
+@@ -36,6 +36,9 @@ enum kmsg_dump_reason {
+ * through the record iterator
+ * @max_reason: filter for highest reason number that should be dumped
+ * @registered: Flag that specifies if this is already registered
++ * @active: Flag that specifies if this is currently dumping
++ * @cur_seq: Points to the oldest message to dump (private)
++ * @next_seq: Points after the newest message to dump (private)
+ */
+ struct kmsg_dumper {
+ struct list_head list;
+@@ -45,8 +48,6 @@ struct kmsg_dumper {
+ bool registered;
+
+ /* private state of the kmsg iterator */
+- u32 cur_idx;
+- u32 next_idx;
+ u64 cur_seq;
+ u64 next_seq;
+ };
+--
+2.19.1
+
diff --git a/features/rt/printk-kmsg_dump-use-kmsg_dump_rewind.patch b/features/rt/printk-kmsg_dump-use-kmsg_dump_rewind.patch
new file mode 100644
index 00000000..d63957e3
--- /dev/null
+++ b/features/rt/printk-kmsg_dump-use-kmsg_dump_rewind.patch
@@ -0,0 +1,41 @@
+From bc6b56661c0c00b39b105f6c1c2166c89f60070c Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 17 Feb 2021 18:23:16 +0100
+Subject: [PATCH 019/191] printk: kmsg_dump: use kmsg_dump_rewind
+
+kmsg_dump() is open coding kmsg_dump_rewind(). Call
+kmsg_dump_rewind() instead.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index fbb9f2f30430..15aed1a7bd05 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3351,7 +3351,6 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ {
+ struct kmsg_dumper_iter iter;
+ struct kmsg_dumper *dumper;
+- unsigned long flags;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(dumper, &dump_list, list) {
+@@ -3370,10 +3369,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+
+ /* initialize iterator with data about the stored records */
+ iter.active = true;
+- printk_safe_enter_irqsave(flags);
+- iter.cur_seq = latched_seq_read_nolock(&clear_seq);
+- iter.next_seq = prb_next_seq(prb);
+- printk_safe_exit_irqrestore(flags);
++ kmsg_dump_rewind(&iter);
+
+ /* invoke dumper which will iterate over records */
+ dumper->dump(dumper, reason, &iter);
+--
+2.19.1
+
diff --git a/features/rt/printk-limit-second-loop-of-syslog_print_all.patch b/features/rt/printk-limit-second-loop-of-syslog_print_all.patch
new file mode 100644
index 00000000..1ca5451b
--- /dev/null
+++ b/features/rt/printk-limit-second-loop-of-syslog_print_all.patch
@@ -0,0 +1,55 @@
+From a8c89ca2edd1f674c9fc18b64fe9c67454d5b8c6 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 17 Feb 2021 16:15:31 +0100
+Subject: [PATCH 007/191] printk: limit second loop of syslog_print_all
+
+The second loop of syslog_print_all() subtracts lengths that were
+added in the first loop. With commit b031a684bfd0 ("printk: remove
+logbuf_lock writer-protection of ringbuffer") it is possible that
+records are (over)written during syslog_print_all(). This allows the
+possibility of the second loop subtracting lengths that were never
+added in the first loop.
+
+This situation can result in syslog_print_all() filling the buffer
+starting from a later record, even though there may have been room
+to fit the earlier record(s) as well.
+
+Fixes: b031a684bfd0 ("printk: remove logbuf_lock writer-protection of ringbuffer")
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+---
+ kernel/printk/printk.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 575a34b88936..77ae2704e979 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1494,6 +1494,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ struct printk_info info;
+ unsigned int line_count;
+ struct printk_record r;
++ u64 max_seq;
+ char *text;
+ int len = 0;
+ u64 seq;
+@@ -1512,9 +1513,15 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ prb_for_each_info(clear_seq, prb, seq, &info, &line_count)
+ len += get_record_print_text_size(&info, line_count, true, time);
+
++ /*
++ * Set an upper bound for the next loop to avoid subtracting lengths
++ * that were never added.
++ */
++ max_seq = seq;
++
+ /* move first record forward until length fits into the buffer */
+ prb_for_each_info(clear_seq, prb, seq, &info, &line_count) {
+- if (len <= size)
++ if (len <= size || info.seq >= max_seq)
+ break;
+ len -= get_record_print_text_size(&info, line_count, true, time);
+ }
+--
+2.19.1
+
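A compilable toy model of the race being fixed: pass 1 counts the records that exist at that moment, the ring then drops old records and gains new ones, and an unbounded pass 2 subtracts sizes it never added, so the dump starts later than necessary. All sequence numbers and sizes below are invented.

#include <stdio.h>
#include <stddef.h>

/* Formatted size per sequence number; seq 0..3 exist during pass 1. */
static size_t size_of[6] = { 400, 400, 400, 400, 100, 100 };

int main(void)
{
	size_t bufsz = 700, len = 0, max_seq, seq;
	size_t first_valid = 0, next_seq = 4;	/* ring state during pass 1 */

	for (seq = first_valid; seq < next_seq; seq++)	/* pass 1 */
		len += size_of[seq];			/* len = 1600 */

	max_seq = next_seq;	/* the fix: remember where pass 1 stopped */

	first_valid = 2;	/* records 0..1 are overwritten ... */
	next_seq = 6;		/* ... and new records 4..5 are appended */

	for (seq = first_valid; seq < next_seq; seq++) {	/* pass 2 */
		if (len <= bufsz || seq >= max_seq)
			break;
		len -= size_of[seq];
	}

	/*
	 * Prints 4: records 4..5 (200 bytes) are dumped. Without the
	 * "seq >= max_seq" bound, pass 2 would also subtract size_of[4],
	 * which was never added, stop at 5 and needlessly skip record 4
	 * even though it fits.
	 */
	printf("dump starts at seq %zu\n", seq);
	return 0;
}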
diff --git a/features/rt/printk-move-console-printing-to-kthreads.patch b/features/rt/printk-move-console-printing-to-kthreads.patch
new file mode 100644
index 00000000..942fcb13
--- /dev/null
+++ b/features/rt/printk-move-console-printing-to-kthreads.patch
@@ -0,0 +1,846 @@
+From acce83424770494715634141b1a06a8e17f128e1 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:07 +0106
+Subject: [PATCH 030/191] printk: move console printing to kthreads
+
+Create a kthread for each console to perform console printing. Now
+all console printing is fully asynchronous except for the boot
+console and when the kernel enters sync mode (and there are atomic
+consoles available).
+
+The console_lock() and console_unlock() functions now only do what
+their name says... locking and unlocking of the console.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 2 +
+ kernel/printk/printk.c | 625 ++++++++++++----------------------------
+ 2 files changed, 186 insertions(+), 441 deletions(-)
+
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 1d699d02e99a..b370e37a6d49 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -154,6 +154,8 @@ struct console {
+ #ifdef CONFIG_PRINTK
+ char sync_buf[CONSOLE_LOG_MAX];
+ #endif
++ atomic64_t printk_seq;
++ struct task_struct *thread;
+ void *data;
+ struct console *next;
+ };
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 4aa402d2df55..e2dd2abfb855 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -44,6 +44,7 @@
+ #include <linux/irq_work.h>
+ #include <linux/ctype.h>
+ #include <linux/uio.h>
++#include <linux/kthread.h>
+ #include <linux/clocksource.h>
+ #include <linux/sched/clock.h>
+ #include <linux/sched/debug.h>
+@@ -267,11 +268,6 @@ static void __up_console_sem(unsigned long ip)
+ */
+ static int console_locked, console_suspended;
+
+-/*
+- * If exclusive_console is non-NULL then only this console is to be printed to.
+- */
+-static struct console *exclusive_console;
+-
+ /*
+ * Array of consoles built from command line options (console=)
+ */
+@@ -356,10 +352,10 @@ enum log_flags {
+ LOG_CONT = 8, /* text is a fragment of a continuation line */
+ };
+
++#ifdef CONFIG_PRINTK
+ /* syslog_lock protects syslog_* variables and write access to clear_seq. */
+ static DEFINE_SPINLOCK(syslog_lock);
+
+-#ifdef CONFIG_PRINTK
+ /* Set to enable sync mode. Once set, it is never cleared. */
+ static bool sync_mode;
+
+@@ -370,13 +366,6 @@ static u64 syslog_seq;
+ static size_t syslog_partial;
+ static bool syslog_time;
+
+-/* Both protected by @console_sem. */
+-static u64 exclusive_console_stop_seq;
+-static unsigned long console_dropped;
+-
+-/* the next printk record to write to the console */
+-static atomic64_t console_seq = ATOMIC64_INIT(0);
+-
+ struct latched_seq {
+ seqcount_latch_t latch;
+ u64 val[2];
+@@ -1754,6 +1743,8 @@ static bool console_can_sync(struct console *con)
+ return false;
+ if (con->write_atomic && kernel_sync_mode())
+ return true;
++ if (con->write && (con->flags & CON_BOOT) && !con->thread)
++ return true;
+ return false;
+ }
+
+@@ -1763,6 +1754,8 @@ static bool call_sync_console_driver(struct console *con, const char *text, size
+ return false;
+ if (con->write_atomic && kernel_sync_mode())
+ con->write_atomic(con, text, text_len);
++ else if (con->write && (con->flags & CON_BOOT) && !con->thread)
++ con->write(con, text, text_len);
+ else
+ return false;
+
+@@ -1818,202 +1811,16 @@ static void print_sync_until(struct console *con, u64 seq)
+
+ console_atomic_lock(&flags);
+ for (;;) {
+- printk_seq = atomic64_read(&console_seq);
++ printk_seq = atomic64_read(&con->printk_seq);
+ if (printk_seq >= seq)
+ break;
+ if (!print_sync(con, &printk_seq))
+ break;
+- atomic64_set(&console_seq, printk_seq + 1);
++ atomic64_set(&con->printk_seq, printk_seq + 1);
+ }
+ console_atomic_unlock(flags);
+ }
+
+-/*
+- * Special console_lock variants that help to reduce the risk of soft-lockups.
+- * They allow to pass console_lock to another printk() call using a busy wait.
+- */
+-
+-#ifdef CONFIG_LOCKDEP
+-static struct lockdep_map console_owner_dep_map = {
+- .name = "console_owner"
+-};
+-#endif
+-
+-static DEFINE_RAW_SPINLOCK(console_owner_lock);
+-static struct task_struct *console_owner;
+-static bool console_waiter;
+-
+-/**
+- * console_lock_spinning_enable - mark beginning of code where another
+- * thread might safely busy wait
+- *
+- * This basically converts console_lock into a spinlock. This marks
+- * the section where the console_lock owner can not sleep, because
+- * there may be a waiter spinning (like a spinlock). Also it must be
+- * ready to hand over the lock at the end of the section.
+- */
+-static void console_lock_spinning_enable(void)
+-{
+- raw_spin_lock(&console_owner_lock);
+- console_owner = current;
+- raw_spin_unlock(&console_owner_lock);
+-
+- /* The waiter may spin on us after setting console_owner */
+- spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+-}
+-
+-/**
+- * console_lock_spinning_disable_and_check - mark end of code where another
+- * thread was able to busy wait and check if there is a waiter
+- *
+- * This is called at the end of the section where spinning is allowed.
+- * It has two functions. First, it is a signal that it is no longer
+- * safe to start busy waiting for the lock. Second, it checks if
+- * there is a busy waiter and passes the lock rights to her.
+- *
+- * Important: Callers lose the lock if there was a busy waiter.
+- * They must not touch items synchronized by console_lock
+- * in this case.
+- *
+- * Return: 1 if the lock rights were passed, 0 otherwise.
+- */
+-static int console_lock_spinning_disable_and_check(void)
+-{
+- int waiter;
+-
+- raw_spin_lock(&console_owner_lock);
+- waiter = READ_ONCE(console_waiter);
+- console_owner = NULL;
+- raw_spin_unlock(&console_owner_lock);
+-
+- if (!waiter) {
+- spin_release(&console_owner_dep_map, _THIS_IP_);
+- return 0;
+- }
+-
+- /* The waiter is now free to continue */
+- WRITE_ONCE(console_waiter, false);
+-
+- spin_release(&console_owner_dep_map, _THIS_IP_);
+-
+- /*
+- * Hand off console_lock to waiter. The waiter will perform
+- * the up(). After this, the waiter is the console_lock owner.
+- */
+- mutex_release(&console_lock_dep_map, _THIS_IP_);
+- return 1;
+-}
+-
+-/**
+- * console_trylock_spinning - try to get console_lock by busy waiting
+- *
+- * This allows to busy wait for the console_lock when the current
+- * owner is running in specially marked sections. It means that
+- * the current owner is running and cannot reschedule until it
+- * is ready to lose the lock.
+- *
+- * Return: 1 if we got the lock, 0 othrewise
+- */
+-static int console_trylock_spinning(void)
+-{
+- struct task_struct *owner = NULL;
+- bool waiter;
+- bool spin = false;
+- unsigned long flags;
+-
+- if (console_trylock())
+- return 1;
+-
+- printk_safe_enter_irqsave(flags);
+-
+- raw_spin_lock(&console_owner_lock);
+- owner = READ_ONCE(console_owner);
+- waiter = READ_ONCE(console_waiter);
+- if (!waiter && owner && owner != current) {
+- WRITE_ONCE(console_waiter, true);
+- spin = true;
+- }
+- raw_spin_unlock(&console_owner_lock);
+-
+- /*
+- * If there is an active printk() writing to the
+- * consoles, instead of having it write our data too,
+- * see if we can offload that load from the active
+- * printer, and do some printing ourselves.
+- * Go into a spin only if there isn't already a waiter
+- * spinning, and there is an active printer, and
+- * that active printer isn't us (recursive printk?).
+- */
+- if (!spin) {
+- printk_safe_exit_irqrestore(flags);
+- return 0;
+- }
+-
+- /* We spin waiting for the owner to release us */
+- spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
+- /* Owner will clear console_waiter on hand off */
+- while (READ_ONCE(console_waiter))
+- cpu_relax();
+- spin_release(&console_owner_dep_map, _THIS_IP_);
+-
+- printk_safe_exit_irqrestore(flags);
+- /*
+- * The owner passed the console lock to us.
+- * Since we did not spin on console lock, annotate
+- * this as a trylock. Otherwise lockdep will
+- * complain.
+- */
+- mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+-
+- return 1;
+-}
+-
+-/*
+- * Call the console drivers, asking them to write out
+- * log_buf[start] to log_buf[end - 1].
+- * The console_lock must be held.
+- */
+-static void call_console_drivers(const char *ext_text, size_t ext_len,
+- const char *text, size_t len)
+-{
+- static char dropped_text[64];
+- size_t dropped_len = 0;
+- struct console *con;
+-
+- trace_console_rcuidle(text, len);
+-
+- if (!console_drivers)
+- return;
+-
+- if (console_dropped) {
+- dropped_len = snprintf(dropped_text, sizeof(dropped_text),
+- "** %lu printk messages dropped **\n",
+- console_dropped);
+- console_dropped = 0;
+- }
+-
+- for_each_console(con) {
+- if (exclusive_console && con != exclusive_console)
+- continue;
+- if (!(con->flags & CON_ENABLED))
+- continue;
+- if (!con->write)
+- continue;
+- if (!cpu_online(smp_processor_id()) &&
+- !(con->flags & CON_ANYTIME))
+- continue;
+- if (kernel_sync_mode())
+- continue;
+- if (con->flags & CON_EXTENDED)
+- con->write(con, ext_text, ext_len);
+- else {
+- if (dropped_len)
+- con->write(con, dropped_text, dropped_len);
+- con->write(con, text, len);
+- }
+- }
+-}
+-
+ #ifdef CONFIG_PRINTK_NMI
+ #define NUM_RECURSION_CTX 2
+ #else
+@@ -2284,39 +2091,16 @@ asmlinkage int vprintk_emit(int facility, int level,
+ const char *fmt, va_list args)
+ {
+ int printed_len;
+- bool in_sched = false;
+
+ /* Suppress unimportant messages after panic happens */
+ if (unlikely(suppress_printk))
+ return 0;
+
+- if (level == LOGLEVEL_SCHED) {
++ if (level == LOGLEVEL_SCHED)
+ level = LOGLEVEL_DEFAULT;
+- in_sched = true;
+- }
+-
+- printk_delay(level);
+
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+
+- /* If called from the scheduler, we can not call up(). */
+- if (!in_sched) {
+- /*
+- * Disable preemption to avoid being preempted while holding
+- * console_sem which would prevent anyone from printing to
+- * console
+- */
+- preempt_disable();
+- /*
+- * Try to acquire and then immediately release the console
+- * semaphore. The release will print out buffers and wake up
+- * /dev/kmsg and syslog() users.
+- */
+- if (console_trylock_spinning())
+- console_unlock();
+- preempt_enable();
+- }
+-
+ wake_up_klogd();
+ return printed_len;
+ }
+@@ -2368,38 +2152,158 @@ asmlinkage __visible int printk(const char *fmt, ...)
+ }
+ EXPORT_SYMBOL(printk);
+
+-#else /* CONFIG_PRINTK */
++static int printk_kthread_func(void *data)
++{
++ struct console *con = data;
++ unsigned long dropped = 0;
++ char *dropped_text = NULL;
++ struct printk_info info;
++ struct printk_record r;
++ char *ext_text = NULL;
++ size_t dropped_len;
++ int ret = -ENOMEM;
++ char *text = NULL;
++ char *write_text;
++ u64 printk_seq;
++ size_t len;
++ int error;
++ u64 seq;
+
+-#define printk_time false
++ if (con->flags & CON_EXTENDED) {
++ ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
++ if (!ext_text)
++ goto out;
++ }
++ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
++ dropped_text = kmalloc(64, GFP_KERNEL);
++ if (!text || !dropped_text)
++ goto out;
+
+-#define prb_read_valid(rb, seq, r) false
+-#define prb_first_valid_seq(rb) 0
++ if (con->flags & CON_EXTENDED)
++ write_text = ext_text;
++ else
++ write_text = text;
+
+-#define kernel_sync_mode() false
++ seq = atomic64_read(&con->printk_seq);
+
+-static u64 syslog_seq;
+-static atomic64_t console_seq = ATOMIC64_INIT(0);
+-static u64 exclusive_console_stop_seq;
+-static unsigned long console_dropped;
++ prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
++
++ for (;;) {
++ error = wait_event_interruptible(log_wait,
++ prb_read_valid(prb, seq, &r) || kthread_should_stop());
++
++ if (kthread_should_stop())
++ break;
++
++ if (error)
++ continue;
++
++ if (seq != r.info->seq) {
++ dropped += r.info->seq - seq;
++ seq = r.info->seq;
++ }
++
++ seq++;
++
++ if (!(con->flags & CON_ENABLED))
++ continue;
++
++ if (suppress_message_printing(r.info->level))
++ continue;
++
++ if (con->flags & CON_EXTENDED) {
++ len = info_print_ext_header(ext_text,
++ CONSOLE_EXT_LOG_MAX,
++ r.info);
++ len += msg_print_ext_body(ext_text + len,
++ CONSOLE_EXT_LOG_MAX - len,
++ &r.text_buf[0], r.info->text_len,
++ &r.info->dev_info);
++ } else {
++ len = record_print_text(&r,
++ console_msg_format & MSG_FORMAT_SYSLOG,
++ printk_time);
++ }
++
++ printk_seq = atomic64_read(&con->printk_seq);
+
+-static size_t record_print_text(const struct printk_record *r,
+- bool syslog, bool time)
++ console_lock();
++ console_may_schedule = 0;
++
++ if (kernel_sync_mode() && con->write_atomic) {
++ console_unlock();
++ break;
++ }
++
++ if (!(con->flags & CON_EXTENDED) && dropped) {
++ dropped_len = snprintf(dropped_text, 64,
++ "** %lu printk messages dropped **\n",
++ dropped);
++ dropped = 0;
++
++ con->write(con, dropped_text, dropped_len);
++ printk_delay(r.info->level);
++ }
++
++ con->write(con, write_text, len);
++ if (len)
++ printk_delay(r.info->level);
++
++ atomic64_cmpxchg_relaxed(&con->printk_seq, printk_seq, seq);
++
++ console_unlock();
++ }
++out:
++ kfree(dropped_text);
++ kfree(text);
++ kfree(ext_text);
++ pr_info("%sconsole [%s%d]: printing thread stopped\n",
++ (con->flags & CON_BOOT) ? "boot" : "",
++ con->name, con->index);
++ return ret;
++}
++
++/* Must be called within console_lock(). */
++static void start_printk_kthread(struct console *con)
+ {
+- return 0;
++ con->thread = kthread_run(printk_kthread_func, con,
++ "pr/%s%d", con->name, con->index);
++ if (IS_ERR(con->thread)) {
++ pr_err("%sconsole [%s%d]: unable to start printing thread\n",
++ (con->flags & CON_BOOT) ? "boot" : "",
++ con->name, con->index);
++ return;
++ }
++ pr_info("%sconsole [%s%d]: printing thread started\n",
++ (con->flags & CON_BOOT) ? "boot" : "",
++ con->name, con->index);
+ }
+-static ssize_t info_print_ext_header(char *buf, size_t size,
+- struct printk_info *info)
++
++/* protected by console_lock */
++static bool kthreads_started;
++
++/* Must be called within console_lock(). */
++static void console_try_thread(struct console *con)
+ {
+- return 0;
++ if (kthreads_started) {
++ start_printk_kthread(con);
++ return;
++ }
++
++ /*
++ * The printing threads have not been started yet. If this console
++ * can print synchronously, print all unprinted messages.
++ */
++ if (console_can_sync(con))
++ print_sync_until(con, prb_next_seq(prb));
+ }
+-static ssize_t msg_print_ext_body(char *buf, size_t size,
+- char *text, size_t text_len,
+- struct dev_printk_info *dev_info) { return 0; }
+-static void console_lock_spinning_enable(void) { }
+-static int console_lock_spinning_disable_and_check(void) { return 0; }
+-static void call_console_drivers(const char *ext_text, size_t ext_len,
+- const char *text, size_t len) {}
+-static bool suppress_message_printing(int level) { return false; }
++
++#else /* CONFIG_PRINTK */
++
++#define prb_first_valid_seq(rb) 0
++#define prb_next_seq(rb) 0
++
++#define console_try_thread(con)
+
+ #endif /* CONFIG_PRINTK */
+
+@@ -2644,36 +2548,6 @@ int is_console_locked(void)
+ }
+ EXPORT_SYMBOL(is_console_locked);
+
+-/*
+- * Check if we have any console that is capable of printing while cpu is
+- * booting or shutting down. Requires console_sem.
+- */
+-static int have_callable_console(void)
+-{
+- struct console *con;
+-
+- for_each_console(con)
+- if ((con->flags & CON_ENABLED) &&
+- (con->flags & CON_ANYTIME))
+- return 1;
+-
+- return 0;
+-}
+-
+-/*
+- * Can we actually use the console at this time on this cpu?
+- *
+- * Console drivers may assume that per-cpu resources have been allocated. So
+- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
+- * call them until this CPU is officially up.
+- */
+-static inline int can_use_console(void)
+-{
+- if (kernel_sync_mode())
+- return false;
+- return cpu_online(raw_smp_processor_id()) || have_callable_console();
+-}
+-
+ /**
+ * console_unlock - unlock the console system
+ *
+@@ -2690,131 +2564,14 @@ static inline int can_use_console(void)
+ */
+ void console_unlock(void)
+ {
+- static char ext_text[CONSOLE_EXT_LOG_MAX];
+- static char text[CONSOLE_LOG_MAX];
+- bool do_cond_resched, retry;
+- struct printk_info info;
+- struct printk_record r;
+- u64 seq;
+-
+ if (console_suspended) {
+ up_console_sem();
+ return;
+ }
+
+- prb_rec_init_rd(&r, &info, text, sizeof(text));
+-
+- /*
+- * Console drivers are called with interrupts disabled, so
+- * @console_may_schedule should be cleared before; however, we may
+- * end up dumping a lot of lines, for example, if called from
+- * console registration path, and should invoke cond_resched()
+- * between lines if allowable. Not doing so can cause a very long
+- * scheduling stall on a slow console leading to RCU stall and
+- * softlockup warnings which exacerbate the issue with more
+- * messages practically incapacitating the system.
+- *
+- * console_trylock() is not able to detect the preemptive
+- * context reliably. Therefore the value must be stored before
+- * and cleared after the "again" goto label.
+- */
+- do_cond_resched = console_may_schedule;
+-again:
+- console_may_schedule = 0;
+-
+- /*
+- * We released the console_sem lock, so we need to recheck if
+- * cpu is online and (if not) is there at least one CON_ANYTIME
+- * console.
+- */
+- if (!can_use_console()) {
+- console_locked = 0;
+- up_console_sem();
+- return;
+- }
+-
+- for (;;) {
+- size_t ext_len = 0;
+- size_t len;
+-
+-skip:
+- seq = atomic64_read(&console_seq);
+- if (!prb_read_valid(prb, seq, &r))
+- break;
+-
+- if (seq != r.info->seq) {
+- console_dropped += r.info->seq - seq;
+- atomic64_set(&console_seq, r.info->seq);
+- seq = r.info->seq;
+- }
+-
+- if (suppress_message_printing(r.info->level)) {
+- /*
+- * Skip record we have buffered and already printed
+- * directly to the console when we received it, and
+- * record that has level above the console loglevel.
+- */
+- atomic64_set(&console_seq, seq + 1);
+- goto skip;
+- }
+-
+- /* Output to all consoles once old messages replayed. */
+- if (unlikely(exclusive_console &&
+- seq >= exclusive_console_stop_seq)) {
+- exclusive_console = NULL;
+- }
+-
+- /*
+- * Handle extended console text first because later
+- * record_print_text() will modify the record buffer in-place.
+- */
+- if (nr_ext_console_drivers) {
+- ext_len = info_print_ext_header(ext_text,
+- sizeof(ext_text),
+- r.info);
+- ext_len += msg_print_ext_body(ext_text + ext_len,
+- sizeof(ext_text) - ext_len,
+- &r.text_buf[0],
+- r.info->text_len,
+- &r.info->dev_info);
+- }
+- len = record_print_text(&r,
+- console_msg_format & MSG_FORMAT_SYSLOG,
+- printk_time);
+- atomic64_set(&console_seq, seq + 1);
+-
+- /*
+- * While actively printing out messages, if another printk()
+- * were to occur on another CPU, it may wait for this one to
+- * finish. This task can not be preempted if there is a
+- * waiter waiting to take over.
+- */
+- console_lock_spinning_enable();
+-
+- stop_critical_timings(); /* don't trace print latency */
+- call_console_drivers(ext_text, ext_len, text, len);
+- start_critical_timings();
+-
+- if (console_lock_spinning_disable_and_check())
+- return;
+-
+- if (do_cond_resched)
+- cond_resched();
+- }
+-
+ console_locked = 0;
+
+ up_console_sem();
+-
+- /*
+- * Someone could have filled up the buffer again, so re-check if there's
+- * something to flush. In case we cannot trylock the console_sem again,
+- * there's a new owner and the console_unlock() from them will do the
+- * flush, no worries.
+- */
+- retry = prb_read_valid(prb, atomic64_read(&console_seq), NULL);
+- if (retry && console_trylock())
+- goto again;
+ }
+ EXPORT_SYMBOL(console_unlock);
+
+@@ -2864,18 +2621,20 @@ void console_unblank(void)
+ */
+ void console_flush_on_panic(enum con_flush_mode mode)
+ {
+- /*
+- * If someone else is holding the console lock, trylock will fail
+- * and may_schedule may be set. Ignore and proceed to unlock so
+- * that messages are flushed out. As this can be called from any
+- * context and we don't want to get preempted while flushing,
+- * ensure may_schedule is cleared.
+- */
+- console_trylock();
++ struct console *c;
++ u64 seq;
++
++ if (!console_trylock())
++ return;
++
+ console_may_schedule = 0;
+
+- if (mode == CONSOLE_REPLAY_ALL)
+- atomic64_set(&console_seq, prb_first_valid_seq(prb));
++ if (mode == CONSOLE_REPLAY_ALL) {
++ seq = prb_first_valid_seq(prb);
++ for_each_console(c)
++ atomic64_set(&c->printk_seq, seq);
++ }
++
+ console_unlock();
+ }
+
+@@ -3010,7 +2769,6 @@ static int try_enable_new_console(struct console *newcon, bool user_specified)
+ */
+ void register_console(struct console *newcon)
+ {
+- unsigned long flags;
+ struct console *bcon = NULL;
+ int err;
+
+@@ -3034,6 +2792,8 @@ void register_console(struct console *newcon)
+ }
+ }
+
++ newcon->thread = NULL;
++
+ if (console_drivers && console_drivers->flags & CON_BOOT)
+ bcon = console_drivers;
+
+@@ -3098,27 +2858,12 @@ void register_console(struct console *newcon)
+ if (newcon->flags & CON_EXTENDED)
+ nr_ext_console_drivers++;
+
+- if (newcon->flags & CON_PRINTBUFFER) {
+- /*
+- * console_unlock(); will print out the buffered messages
+- * for us.
+- *
+- * We're about to replay the log buffer. Only do this to the
+- * just-registered console to avoid excessive message spam to
+- * the already-registered consoles.
+- *
+- * Set exclusive_console with disabled interrupts to reduce
+- * race window with eventual console_flush_on_panic() that
+- * ignores console_lock.
+- */
+- exclusive_console = newcon;
+- exclusive_console_stop_seq = atomic64_read(&console_seq);
++ if (newcon->flags & CON_PRINTBUFFER)
++ atomic64_set(&newcon->printk_seq, 0);
++ else
++ atomic64_set(&newcon->printk_seq, prb_next_seq(prb));
+
+- /* Get a consistent copy of @syslog_seq. */
+- spin_lock_irqsave(&syslog_lock, flags);
+- atomic64_set(&console_seq, syslog_seq);
+- spin_unlock_irqrestore(&syslog_lock, flags);
+- }
++ console_try_thread(newcon);
+ console_unlock();
+ console_sysfs_notify();
+
+@@ -3192,6 +2937,9 @@ int unregister_console(struct console *console)
+ console_unlock();
+ console_sysfs_notify();
+
++ if (console->thread && !IS_ERR(console->thread))
++ kthread_stop(console->thread);
++
+ if (console->exit)
+ res = console->exit(console);
+
+@@ -3274,6 +3022,15 @@ static int __init printk_late_init(void)
+ unregister_console(con);
+ }
+ }
++
++#ifdef CONFIG_PRINTK
++ console_lock();
++ for_each_console(con)
++ start_printk_kthread(con);
++ kthreads_started = true;
++ console_unlock();
++#endif
++
+ ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
+ console_cpu_notify);
+ WARN_ON(ret < 0);
+@@ -3289,7 +3046,6 @@ late_initcall(printk_late_init);
+ * Delayed printk version, for scheduler-internal messages:
+ */
+ #define PRINTK_PENDING_WAKEUP 0x01
+-#define PRINTK_PENDING_OUTPUT 0x02
+
+ static DEFINE_PER_CPU(int, printk_pending);
+
+@@ -3297,14 +3053,8 @@ static void wake_up_klogd_work_func(struct irq_work *irq_work)
+ {
+ int pending = __this_cpu_xchg(printk_pending, 0);
+
+- if (pending & PRINTK_PENDING_OUTPUT) {
+- /* If trylock fails, someone else is doing the printing */
+- if (console_trylock())
+- console_unlock();
+- }
+-
+ if (pending & PRINTK_PENDING_WAKEUP)
+- wake_up_interruptible(&log_wait);
++ wake_up_interruptible_all(&log_wait);
+ }
+
+ static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
+@@ -3325,13 +3075,6 @@ void wake_up_klogd(void)
+
+ void defer_console_output(void)
+ {
+- if (!printk_percpu_data_ready())
+- return;
+-
+- preempt_disable();
+- __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
+- irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
+- preempt_enable();
+ }
+
+ int vprintk_deferred(const char *fmt, va_list args)
+--
+2.19.1
+
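The patch above replaces the shared console_seq with a per-console printk_seq and a dedicated printing kthread per console. As a rough illustration of that model only — plain userspace C with pthreads, where every name (ring_put(), con_thread(), RING_SLOTS) is invented for the sketch and nothing stands in for the real lockless printk ringbuffer — the program below has one producer append records to a small ring while a "console" thread sleeps until records arrive, reports how many it lost to overwrites, and advances only its own sequence counter:

	/* Build with: cc -std=c11 -pthread sketch.c */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define RING_SLOTS 8

	static char ring[RING_SLOTS][64];
	static atomic_ulong head;              /* next sequence to be written */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t newmsg = PTHREAD_COND_INITIALIZER;
	static int stop;

	struct console {
		const char *name;
		atomic_ulong printk_seq;       /* next sequence this console prints */
	};

	static void ring_put(const char *text)
	{
		pthread_mutex_lock(&lock);
		unsigned long seq = atomic_load(&head);
		snprintf(ring[seq % RING_SLOTS], sizeof(ring[0]), "%s", text);
		atomic_store(&head, seq + 1);
		pthread_cond_broadcast(&newmsg);
		pthread_mutex_unlock(&lock);
	}

	static void *con_thread(void *arg)
	{
		struct console *con = arg;

		pthread_mutex_lock(&lock);
		for (;;) {
			unsigned long seq = atomic_load(&con->printk_seq);

			while (seq == atomic_load(&head) && !stop)
				pthread_cond_wait(&newmsg, &lock);
			if (stop && seq == atomic_load(&head))
				break;

			/* Anything older than head - RING_SLOTS was overwritten. */
			unsigned long oldest = atomic_load(&head) > RING_SLOTS ?
					       atomic_load(&head) - RING_SLOTS : 0;
			if (seq < oldest) {
				printf("[%s] ** %lu messages dropped **\n",
				       con->name, oldest - seq);
				seq = oldest;
			}

			printf("[%s] %lu: %s\n", con->name, seq,
			       ring[seq % RING_SLOTS]);
			atomic_store(&con->printk_seq, seq + 1);
		}
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		struct console con = { .name = "demo" };
		pthread_t t;
		char msg[32];

		pthread_create(&t, NULL, con_thread, &con);
		for (int i = 0; i < 20; i++) {
			snprintf(msg, sizeof(msg), "message %d", i);
			ring_put(msg);
		}
		pthread_mutex_lock(&lock);
		stop = 1;
		pthread_cond_broadcast(&newmsg);
		pthread_mutex_unlock(&lock);
		pthread_join(t, NULL);
		return 0;
	}

Keeping the sequence per console is what lets a slow console fall behind (and report its own drops) without holding back the producer or any other console — the property the kthread patch relies on.
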
diff --git a/features/rt/printk-refactor-kmsg_dump_get_buffer.patch b/features/rt/printk-refactor-kmsg_dump_get_buffer.patch
new file mode 100644
index 00000000..3ac51cf4
--- /dev/null
+++ b/features/rt/printk-refactor-kmsg_dump_get_buffer.patch
@@ -0,0 +1,144 @@
+From d90ed552b2dc6bf64d8c0858f712deb58825fb95 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:41:56 +0106
+Subject: [PATCH 009/191] printk: refactor kmsg_dump_get_buffer()
+
+kmsg_dump_get_buffer() requires nearly the same logic as
+syslog_print_all(), but uses different variable names and
+does not make use of the ringbuffer loop macros. Modify
+kmsg_dump_get_buffer() so that the implementation is as similar
+to syslog_print_all() as possible.
+
+A follow-up commit will move this common logic into a
+separate helper function.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/kmsg_dump.h | 2 +-
+ kernel/printk/printk.c | 60 +++++++++++++++++++++------------------
+ 2 files changed, 33 insertions(+), 29 deletions(-)
+
+diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
+index 235c50982c2d..4095a34db0fa 100644
+--- a/include/linux/kmsg_dump.h
++++ b/include/linux/kmsg_dump.h
+@@ -62,7 +62,7 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+ char *line, size_t size, size_t *len);
+
+ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+- char *buf, size_t size, size_t *len);
++ char *buf, size_t size, size_t *len_out);
+
+ void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 77ae2704e979..3f17ff13fd51 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3424,7 +3424,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+ * read.
+ */
+ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+- char *buf, size_t size, size_t *len)
++ char *buf, size_t size, size_t *len_out)
+ {
+ struct printk_info info;
+ unsigned int line_count;
+@@ -3432,12 +3432,10 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ unsigned long flags;
+ u64 seq;
+ u64 next_seq;
+- size_t l = 0;
++ size_t len = 0;
+ bool ret = false;
+ bool time = printk_time;
+
+- prb_rec_init_rd(&r, &info, buf, size);
+-
+ if (!dumper->active || !buf || !size)
+ goto out;
+
+@@ -3455,48 +3453,54 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
+ goto out;
+ }
+
+- /* calculate length of entire buffer */
+- seq = dumper->cur_seq;
+- while (prb_read_valid_info(prb, seq, &info, &line_count)) {
+- if (r.info->seq >= dumper->next_seq)
++ /*
++ * Find first record that fits, including all following records,
++ * into the user-provided buffer for this dump.
++ */
++
++ prb_for_each_info(dumper->cur_seq, prb, seq, &info, &line_count) {
++ if (info.seq >= dumper->next_seq)
+ break;
+- l += get_record_print_text_size(&info, line_count, syslog, time);
+- seq = r.info->seq + 1;
++ len += get_record_print_text_size(&info, line_count, syslog, time);
+ }
+
+- /* move first record forward until length fits into the buffer */
+- seq = dumper->cur_seq;
+- while (l >= size && prb_read_valid_info(prb, seq,
+- &info, &line_count)) {
+- if (r.info->seq >= dumper->next_seq)
++ /*
++ * Move first record forward until length fits into the buffer. Ignore
++ * newest messages that were not counted in the above cycle. Messages
++ * might appear and get lost in the meantime. This is the best effort
++ * that prevents an infinite loop.
++ */
++ prb_for_each_info(dumper->cur_seq, prb, seq, &info, &line_count) {
++ if (len < size || info.seq >= dumper->next_seq)
+ break;
+- l -= get_record_print_text_size(&info, line_count, syslog, time);
+- seq = r.info->seq + 1;
++ len -= get_record_print_text_size(&info, line_count, syslog, time);
+ }
+
+- /* last message in next interation */
++ /*
++ * Next kmsg_dump_get_buffer() invocation will dump block of
++ * older records stored right before this one.
++ */
+ next_seq = seq;
+
+- /* actually read text into the buffer now */
+- l = 0;
+- while (prb_read_valid(prb, seq, &r)) {
++ prb_rec_init_rd(&r, &info, buf, size);
++
++ len = 0;
++ prb_for_each_record(seq, prb, seq, &r) {
+ if (r.info->seq >= dumper->next_seq)
+ break;
+
+- l += record_print_text(&r, syslog, time);
+-
+- /* adjust record to store to remaining buffer space */
+- prb_rec_init_rd(&r, &info, buf + l, size - l);
++ len += record_print_text(&r, syslog, time);
+
+- seq = r.info->seq + 1;
++ /* Adjust record to store to remaining buffer space. */
++ prb_rec_init_rd(&r, &info, buf + len, size - len);
+ }
+
+ dumper->next_seq = next_seq;
+ ret = true;
+ logbuf_unlock_irqrestore(flags);
+ out:
+- if (len)
+- *len = l;
++ if (len_out)
++ *len_out = len;
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+--
+2.19.1
+
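The two prb_for_each_info() passes in the hunk above mirror the logic of syslog_print_all(): first sum the size of every pending record, then advance the starting record until what remains fits the caller's buffer. Below is a minimal standalone sketch of that length-trimming idea, using made-up record sizes and an invented first_fitting_record() helper rather than anything from the kernel:

	/* Build with: cc -std=c11 fit.c */
	#include <stddef.h>
	#include <stdio.h>

	static const size_t record_len[] = { 40, 120, 80, 200, 60 }; /* made-up sizes */
	#define NR_RECORDS (sizeof(record_len) / sizeof(record_len[0]))

	/* Return the index of the first record so that all records from there on fit. */
	static size_t first_fitting_record(size_t size)
	{
		size_t len = 0;
		size_t i;

		/* Pass 1: total length of every pending record. */
		for (i = 0; i < NR_RECORDS; i++)
			len += record_len[i];

		/* Pass 2: move the start forward until the remainder fits. */
		for (i = 0; i < NR_RECORDS && len >= size; i++)
			len -= record_len[i];

		return i;
	}

	int main(void)
	{
		printf("a 300-byte buffer starts at record %zu\n",
		       first_fitting_record(300));
		return 0;
	}

With these example sizes a 300-byte buffer starts at record 3, so the newest records are kept and the oldest are skipped — the "dump the newest block that fits" behaviour described by the comments the patch adds.
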
diff --git a/features/rt/printk-relocate-printk_delay-and-vprintk_default.patch b/features/rt/printk-relocate-printk_delay-and-vprintk_default.patch
new file mode 100644
index 00000000..3e26bd6f
--- /dev/null
+++ b/features/rt/printk-relocate-printk_delay-and-vprintk_default.patch
@@ -0,0 +1,88 @@
+From d517c77fcda22109a0cfd3bd55312c4404eb3206 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:03 +0106
+Subject: [PATCH 026/191] printk: relocate printk_delay() and vprintk_default()
+
+Move printk_delay() and vprintk_default() "as is" further up so that
+they can be used by new functions in an upcoming commit.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 40 ++++++++++++++++++++--------------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 5af6f757818f..84fae4f08634 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1725,6 +1725,20 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
+ return do_syslog(type, buf, len, SYSLOG_FROM_READER);
+ }
+
++int printk_delay_msec __read_mostly;
++
++static inline void printk_delay(void)
++{
++ if (unlikely(printk_delay_msec)) {
++ int m = printk_delay_msec;
++
++ while (m--) {
++ mdelay(1);
++ touch_nmi_watchdog();
++ }
++ }
++}
++
+ /*
+ * Special console_lock variants that help to reduce the risk of soft-lockups.
+ * They allow to pass console_lock to another printk() call using a busy wait.
+@@ -1968,20 +1982,6 @@ static void printk_exit_irqrestore(unsigned long flags)
+ local_irq_restore(flags);
+ }
+
+-int printk_delay_msec __read_mostly;
+-
+-static inline void printk_delay(void)
+-{
+- if (unlikely(printk_delay_msec)) {
+- int m = printk_delay_msec;
+-
+- while (m--) {
+- mdelay(1);
+- touch_nmi_watchdog();
+- }
+- }
+-}
+-
+ static inline u32 printk_caller_id(void)
+ {
+ return in_task() ? task_pid_nr(current) :
+@@ -2214,18 +2214,18 @@ asmlinkage int vprintk_emit(int facility, int level,
+ }
+ EXPORT_SYMBOL(vprintk_emit);
+
+-asmlinkage int vprintk(const char *fmt, va_list args)
+-{
+- return vprintk_func(fmt, args);
+-}
+-EXPORT_SYMBOL(vprintk);
+-
+ int vprintk_default(const char *fmt, va_list args)
+ {
+ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+ }
+ EXPORT_SYMBOL_GPL(vprintk_default);
+
++asmlinkage int vprintk(const char *fmt, va_list args)
++{
++ return vprintk_func(fmt, args);
++}
++EXPORT_SYMBOL(vprintk);
++
+ /**
+ * printk - print a kernel message
+ * @fmt: format string
+--
+2.19.1
+
diff --git a/features/rt/printk-remove-deferred-printing.patch b/features/rt/printk-remove-deferred-printing.patch
new file mode 100644
index 00000000..2777df6c
--- /dev/null
+++ b/features/rt/printk-remove-deferred-printing.patch
@@ -0,0 +1,432 @@
+From 67ba4172040ce8b57495aabc320404e9e7379e7a Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:08 +0106
+Subject: [PATCH 031/191] printk: remove deferred printing
+
+Since printing occurs either atomically or from the printing
+kthread, there is no need for any deferring or tracking possible
+recursion paths. Remove all printk context tracking.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/kernel/smp.c | 2 -
+ arch/powerpc/kexec/crash.c | 3 --
+ include/linux/hardirq.h | 2 -
+ include/linux/printk.h | 12 -----
+ kernel/printk/Makefile | 1 -
+ kernel/printk/internal.h | 70 -----------------------------
+ kernel/printk/printk.c | 58 +++++++++---------------
+ kernel/printk/printk_safe.c | 89 -------------------------------------
+ kernel/trace/trace.c | 2 -
+ 9 files changed, 22 insertions(+), 217 deletions(-)
+ delete mode 100644 kernel/printk/internal.h
+ delete mode 100644 kernel/printk/printk_safe.c
+
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 74679240a9d8..0dd2d733ad62 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -668,9 +668,7 @@ static void do_handle_IPI(int ipinr)
+ break;
+
+ case IPI_CPU_BACKTRACE:
+- printk_nmi_enter();
+ nmi_cpu_backtrace(get_irq_regs());
+- printk_nmi_exit();
+ break;
+
+ default:
+diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
+index c9a889880214..d488311efab1 100644
+--- a/arch/powerpc/kexec/crash.c
++++ b/arch/powerpc/kexec/crash.c
+@@ -311,9 +311,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
+ unsigned int i;
+ int (*old_handler)(struct pt_regs *regs);
+
+- /* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
+- printk_nmi_enter();
+-
+ /*
+ * This function is only called after the system
+ * has panicked or is otherwise in a critical state.
+diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
+index 7c9d6a2d7e90..0926e9ca4d85 100644
+--- a/include/linux/hardirq.h
++++ b/include/linux/hardirq.h
+@@ -115,7 +115,6 @@ extern void rcu_nmi_exit(void);
+ do { \
+ lockdep_off(); \
+ arch_nmi_enter(); \
+- printk_nmi_enter(); \
+ BUG_ON(in_nmi() == NMI_MASK); \
+ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
+ } while (0)
+@@ -134,7 +133,6 @@ extern void rcu_nmi_exit(void);
+ do { \
+ BUG_ON(!in_nmi()); \
+ __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
+- printk_nmi_exit(); \
+ arch_nmi_exit(); \
+ lockdep_on(); \
+ } while (0)
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index 1ebd93581acc..153212445b68 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -155,18 +155,6 @@ static inline __printf(1, 2) __cold
+ void early_printk(const char *s, ...) { }
+ #endif
+
+-#ifdef CONFIG_PRINTK_NMI
+-extern void printk_nmi_enter(void);
+-extern void printk_nmi_exit(void);
+-extern void printk_nmi_direct_enter(void);
+-extern void printk_nmi_direct_exit(void);
+-#else
+-static inline void printk_nmi_enter(void) { }
+-static inline void printk_nmi_exit(void) { }
+-static inline void printk_nmi_direct_enter(void) { }
+-static inline void printk_nmi_direct_exit(void) { }
+-#endif /* PRINTK_NMI */
+-
+ struct dev_printk_info;
+
+ #ifdef CONFIG_PRINTK
+diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
+index eee3dc9b60a9..59cb24e25f00 100644
+--- a/kernel/printk/Makefile
++++ b/kernel/printk/Makefile
+@@ -1,5 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ obj-y = printk.o
+-obj-$(CONFIG_PRINTK) += printk_safe.o
+ obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
+ obj-$(CONFIG_PRINTK) += printk_ringbuffer.o
+diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
+deleted file mode 100644
+index e108b2ece8c7..000000000000
+--- a/kernel/printk/internal.h
++++ /dev/null
+@@ -1,70 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-/*
+- * internal.h - printk internal definitions
+- */
+-#include <linux/percpu.h>
+-
+-#ifdef CONFIG_PRINTK
+-
+-#define PRINTK_SAFE_CONTEXT_MASK 0x007ffffff
+-#define PRINTK_NMI_DIRECT_CONTEXT_MASK 0x008000000
+-#define PRINTK_NMI_CONTEXT_MASK 0xff0000000
+-
+-#define PRINTK_NMI_CONTEXT_OFFSET 0x010000000
+-
+-__printf(4, 0)
+-int vprintk_store(int facility, int level,
+- const struct dev_printk_info *dev_info,
+- const char *fmt, va_list args);
+-
+-__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
+-__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
+-__printf(1, 0) int vprintk_func(const char *fmt, va_list args);
+-void __printk_safe_enter(void);
+-void __printk_safe_exit(void);
+-
+-bool printk_percpu_data_ready(void);
+-
+-#define printk_safe_enter_irqsave(flags) \
+- do { \
+- local_irq_save(flags); \
+- __printk_safe_enter(); \
+- } while (0)
+-
+-#define printk_safe_exit_irqrestore(flags) \
+- do { \
+- __printk_safe_exit(); \
+- local_irq_restore(flags); \
+- } while (0)
+-
+-#define printk_safe_enter_irq() \
+- do { \
+- local_irq_disable(); \
+- __printk_safe_enter(); \
+- } while (0)
+-
+-#define printk_safe_exit_irq() \
+- do { \
+- __printk_safe_exit(); \
+- local_irq_enable(); \
+- } while (0)
+-
+-void defer_console_output(void);
+-
+-#else
+-
+-__printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
+-
+-/*
+- * In !PRINTK builds we still export console_sem
+- * semaphore and some of console functions (console_unlock()/etc.), so
+- * printk-safe must preserve the existing local IRQ guarantees.
+- */
+-#define printk_safe_enter_irqsave(flags) local_irq_save(flags)
+-#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
+-
+-#define printk_safe_enter_irq() local_irq_disable()
+-#define printk_safe_exit_irq() local_irq_enable()
+-
+-static inline bool printk_percpu_data_ready(void) { return false; }
+-#endif /* CONFIG_PRINTK */
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index e2dd2abfb855..d1b25ac5c73b 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -45,6 +45,7 @@
+ #include <linux/ctype.h>
+ #include <linux/uio.h>
+ #include <linux/kthread.h>
++#include <linux/kdb.h>
+ #include <linux/clocksource.h>
+ #include <linux/sched/clock.h>
+ #include <linux/sched/debug.h>
+@@ -60,7 +61,6 @@
+ #include "printk_ringbuffer.h"
+ #include "console_cmdline.h"
+ #include "braille.h"
+-#include "internal.h"
+
+ int console_printk[4] = {
+ CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
+@@ -227,19 +227,7 @@ static int nr_ext_console_drivers;
+
+ static int __down_trylock_console_sem(unsigned long ip)
+ {
+- int lock_failed;
+- unsigned long flags;
+-
+- /*
+- * Here and in __up_console_sem() we need to be in safe mode,
+- * because spindump/WARN/etc from under console ->lock will
+- * deadlock in printk()->down_trylock_console_sem() otherwise.
+- */
+- printk_safe_enter_irqsave(flags);
+- lock_failed = down_trylock(&console_sem);
+- printk_safe_exit_irqrestore(flags);
+-
+- if (lock_failed)
++ if (down_trylock(&console_sem))
+ return 1;
+ mutex_acquire(&console_lock_dep_map, 0, 1, ip);
+ return 0;
+@@ -248,13 +236,9 @@ static int __down_trylock_console_sem(unsigned long ip)
+
+ static void __up_console_sem(unsigned long ip)
+ {
+- unsigned long flags;
+-
+ mutex_release(&console_lock_dep_map, ip);
+
+- printk_safe_enter_irqsave(flags);
+ up(&console_sem);
+- printk_safe_exit_irqrestore(flags);
+ }
+ #define up_console_sem() __up_console_sem(_RET_IP_)
+
+@@ -426,7 +410,7 @@ static struct printk_ringbuffer *prb = &printk_rb_static;
+ */
+ static bool __printk_percpu_data_ready __read_mostly;
+
+-bool printk_percpu_data_ready(void)
++static bool printk_percpu_data_ready(void)
+ {
+ return __printk_percpu_data_ready;
+ }
+@@ -1060,7 +1044,6 @@ void __init setup_log_buf(int early)
+ struct printk_record r;
+ size_t new_descs_size;
+ size_t new_infos_size;
+- unsigned long flags;
+ char *new_log_buf;
+ unsigned int free;
+ u64 seq;
+@@ -1958,9 +1941,9 @@ static u16 printk_sprint(char *text, u16 size, int facility, enum log_flags *lfl
+ }
+
+ __printf(4, 0)
+-int vprintk_store(int facility, int level,
+- const struct dev_printk_info *dev_info,
+- const char *fmt, va_list args)
++static int vprintk_store(int facility, int level,
++ const struct dev_printk_info *dev_info,
++ const char *fmt, va_list args)
+ {
+ const u32 caller_id = printk_caller_id();
+ struct prb_reserved_entry e;
+@@ -2106,11 +2089,22 @@ asmlinkage int vprintk_emit(int facility, int level,
+ }
+ EXPORT_SYMBOL(vprintk_emit);
+
+-int vprintk_default(const char *fmt, va_list args)
++__printf(1, 0)
++static int vprintk_default(const char *fmt, va_list args)
+ {
+ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+ }
+-EXPORT_SYMBOL_GPL(vprintk_default);
++
++__printf(1, 0)
++static int vprintk_func(const char *fmt, va_list args)
++{
++#ifdef CONFIG_KGDB_KDB
++ /* Allow to pass printk() to kdb but avoid a recursion. */
++ if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
++ return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
++#endif
++ return vprintk_default(fmt, args);
++}
+
+ asmlinkage int vprintk(const char *fmt, va_list args)
+ {
+@@ -3073,18 +3067,10 @@ void wake_up_klogd(void)
+ preempt_enable();
+ }
+
+-void defer_console_output(void)
+-{
+-}
+-
+-int vprintk_deferred(const char *fmt, va_list args)
++__printf(1, 0)
++static int vprintk_deferred(const char *fmt, va_list args)
+ {
+- int r;
+-
+- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
+- defer_console_output();
+-
+- return r;
++ return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+ }
+
+ int printk_deferred(const char *fmt, ...)
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+deleted file mode 100644
+index 3ee2d62e80ef..000000000000
+--- a/kernel/printk/printk_safe.c
++++ /dev/null
+@@ -1,89 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-or-later
+-/*
+- * printk_safe.c - Safe printk for printk-deadlock-prone contexts
+- */
+-
+-#include <linux/preempt.h>
+-#include <linux/spinlock.h>
+-#include <linux/debug_locks.h>
+-#include <linux/kdb.h>
+-#include <linux/smp.h>
+-#include <linux/cpumask.h>
+-#include <linux/irq_work.h>
+-#include <linux/printk.h>
+-#include <linux/kprobes.h>
+-
+-#include "internal.h"
+-
+-static DEFINE_PER_CPU(int, printk_context);
+-
+-#ifdef CONFIG_PRINTK_NMI
+-
+-void noinstr printk_nmi_enter(void)
+-{
+- this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
+-}
+-
+-void noinstr printk_nmi_exit(void)
+-{
+- this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
+-}
+-
+-/*
+- * Marks a code that might produce many messages in NMI context
+- * and the risk of losing them is more critical than eventual
+- * reordering.
+- */
+-void printk_nmi_direct_enter(void)
+-{
+- if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
+- this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
+-}
+-
+-void printk_nmi_direct_exit(void)
+-{
+- this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
+-}
+-
+-#endif /* CONFIG_PRINTK_NMI */
+-
+-/* Can be preempted by NMI. */
+-void __printk_safe_enter(void)
+-{
+- this_cpu_inc(printk_context);
+-}
+-
+-/* Can be preempted by NMI. */
+-void __printk_safe_exit(void)
+-{
+- this_cpu_dec(printk_context);
+-}
+-
+-__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+-{
+-#ifdef CONFIG_KGDB_KDB
+- /* Allow to pass printk() to kdb but avoid a recursion. */
+- if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
+- return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
+-#endif
+-
+- /*
+- * Use the main logbuf even in NMI. But avoid calling console
+- * drivers that might have their own locks.
+- */
+- if (this_cpu_read(printk_context) &
+- (PRINTK_NMI_DIRECT_CONTEXT_MASK |
+- PRINTK_NMI_CONTEXT_MASK |
+- PRINTK_SAFE_CONTEXT_MASK)) {
+- int len;
+-
+- printk_safe_enter_irqsave(flags);
+- len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+- printk_safe_exit_irqrestore(flags);
+- defer_console_output();
+- return len;
+- }
+-
+- /* No obstacles. */
+- return vprintk_default(fmt, args);
+-}
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index eccb4e1187cc..a6baa9b174e4 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -9404,7 +9404,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+ tracing_off();
+
+ local_irq_save(flags);
+- printk_nmi_direct_enter();
+
+ /* Simulate the iterator */
+ trace_init_global_iter(&iter);
+@@ -9486,7 +9485,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
+ atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
+ }
+ atomic_dec(&dump_running);
+- printk_nmi_direct_exit();
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(ftrace_dump);
+--
+2.19.1
+
diff --git a/features/rt/printk-remove-logbuf_lock.patch b/features/rt/printk-remove-logbuf_lock.patch
new file mode 100644
index 00000000..9c211ab8
--- /dev/null
+++ b/features/rt/printk-remove-logbuf_lock.patch
@@ -0,0 +1,485 @@
+From e2710afdb8e86bd9c77ab1c3ca433719dc087b64 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 26 Jan 2021 17:43:19 +0106
+Subject: [PATCH 017/191] printk: remove logbuf_lock
+
+Since the ringbuffer is lockless, there is no need for it to be
+protected by @logbuf_lock. Remove @logbuf_lock.
+
+This means that printk_nmi_direct and printk_safe_flush_on_panic()
+no longer need to acquire any lock to run.
+
+@console_seq, @exclusive_console_stop_seq, @console_dropped are
+protected by @console_lock.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/internal.h | 4 +-
+ kernel/printk/printk.c | 118 ++++++++++++------------------------
+ kernel/printk/printk_safe.c | 29 +++------
+ 3 files changed, 48 insertions(+), 103 deletions(-)
+
+diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
+index 3a8fd491758c..e7acc2888c8e 100644
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -12,8 +12,6 @@
+
+ #define PRINTK_NMI_CONTEXT_OFFSET 0x010000000
+
+-extern raw_spinlock_t logbuf_lock;
+-
+ __printf(4, 0)
+ int vprintk_store(int facility, int level,
+ const struct dev_printk_info *dev_info,
+@@ -59,7 +57,7 @@ void defer_console_output(void);
+ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
+
+ /*
+- * In !PRINTK builds we still export logbuf_lock spin_lock, console_sem
++ * In !PRINTK builds we still export console_sem
+ * semaphore and some of console functions (console_unlock()/etc.), so
+ * printk-safe must preserve the existing local IRQ guarantees.
+ */
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 7d426c5dcec6..32b2d763aef7 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -355,41 +355,6 @@ enum log_flags {
+ LOG_CONT = 8, /* text is a fragment of a continuation line */
+ };
+
+-/*
+- * The logbuf_lock protects kmsg buffer, indices, counters. This can be taken
+- * within the scheduler's rq lock. It must be released before calling
+- * console_unlock() or anything else that might wake up a process.
+- */
+-DEFINE_RAW_SPINLOCK(logbuf_lock);
+-
+-/*
+- * Helper macros to lock/unlock logbuf_lock and switch between
+- * printk-safe/unsafe modes.
+- */
+-#define logbuf_lock_irq() \
+- do { \
+- printk_safe_enter_irq(); \
+- raw_spin_lock(&logbuf_lock); \
+- } while (0)
+-
+-#define logbuf_unlock_irq() \
+- do { \
+- raw_spin_unlock(&logbuf_lock); \
+- printk_safe_exit_irq(); \
+- } while (0)
+-
+-#define logbuf_lock_irqsave(flags) \
+- do { \
+- printk_safe_enter_irqsave(flags); \
+- raw_spin_lock(&logbuf_lock); \
+- } while (0)
+-
+-#define logbuf_unlock_irqrestore(flags) \
+- do { \
+- raw_spin_unlock(&logbuf_lock); \
+- printk_safe_exit_irqrestore(flags); \
+- } while (0)
+-
+ /* syslog_lock protects syslog_* variables and write access to clear_seq. */
+ static DEFINE_RAW_SPINLOCK(syslog_lock);
+
+@@ -401,6 +366,7 @@ static u64 syslog_seq;
+ static size_t syslog_partial;
+ static bool syslog_time;
+
++/* All 3 protected by @console_sem. */
+ /* the next printk record to write to the console */
+ static u64 console_seq;
+ static u64 exclusive_console_stop_seq;
+@@ -766,27 +732,27 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ if (ret)
+ return ret;
+
+- logbuf_lock_irq();
++ printk_safe_enter_irq();
+ if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+ goto out;
+ }
+
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+ ret = wait_event_interruptible(log_wait,
+ prb_read_valid(prb, atomic64_read(&user->seq), r));
+ if (ret)
+ goto out;
+- logbuf_lock_irq();
++ printk_safe_enter_irq();
+ }
+
+ if (r->info->seq != atomic64_read(&user->seq)) {
+ /* our last seen message is gone, return error and reset */
+ atomic64_set(&user->seq, r->info->seq);
+ ret = -EPIPE;
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+ goto out;
+ }
+
+@@ -796,7 +762,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ &r->info->dev_info);
+
+ atomic64_set(&user->seq, r->info->seq + 1);
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+
+ if (len > count) {
+ ret = -EINVAL;
+@@ -831,7 +797,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ if (offset)
+ return -ESPIPE;
+
+- logbuf_lock_irq();
++ printk_safe_enter_irq();
+ switch (whence) {
+ case SEEK_SET:
+ /* the first record */
+@@ -852,7 +818,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ default:
+ ret = -EINVAL;
+ }
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+ return ret;
+ }
+
+@@ -867,15 +833,15 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+
+ poll_wait(file, &log_wait, wait);
+
+- logbuf_lock_irq();
+- if (prb_read_valid_info(prb, user->seq, &info, NULL)) {
++ printk_safe_enter_irq();
++ if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
+ /* return error when data has vanished underneath us */
+ if (info.seq != atomic64_read(&user->seq))
+ ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
+ else
+ ret = EPOLLIN|EPOLLRDNORM;
+ }
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+
+ return ret;
+ }
+@@ -908,9 +874,9 @@ static int devkmsg_open(struct inode *inode, struct file *file)
+ prb_rec_init_rd(&user->record, &user->info,
+ &user->text_buf[0], sizeof(user->text_buf));
+
+- logbuf_lock_irq();
++ printk_safe_enter_irq();
+ atomic64_set(&user->seq, prb_first_valid_seq(prb));
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+
+ file->private_data = user;
+ return 0;
+@@ -1532,11 +1498,11 @@ static int syslog_print(char __user *buf, int size)
+ size_t n;
+ size_t skip;
+
+- logbuf_lock_irq();
++ printk_safe_enter_irq();
+ raw_spin_lock(&syslog_lock);
+ if (!prb_read_valid(prb, syslog_seq, &r)) {
+ raw_spin_unlock(&syslog_lock);
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+ break;
+ }
+ if (r.info->seq != syslog_seq) {
+@@ -1566,7 +1532,7 @@ static int syslog_print(char __user *buf, int size)
+ } else
+ n = 0;
+ raw_spin_unlock(&syslog_lock);
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+
+ if (!n)
+ break;
+@@ -1600,7 +1566,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ return -ENOMEM;
+
+ time = printk_time;
+- logbuf_lock_irq();
++ printk_safe_enter_irq();
+ /*
+ * Find first record that fits, including all following records,
+ * into the user-provided buffer for this dump.
+@@ -1621,12 +1587,12 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ break;
+ }
+
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+ if (copy_to_user(buf + len, text, textlen))
+ len = -EFAULT;
+ else
+ len += textlen;
+- logbuf_lock_irq();
++ printk_safe_enter_irq();
+
+ if (len < 0)
+ break;
+@@ -1637,7 +1603,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ latched_seq_write(&clear_seq, seq);
+ raw_spin_unlock(&syslog_lock);
+ }
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+
+ kfree(text);
+ return len;
+@@ -1645,11 +1611,11 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+
+ static void syslog_clear(void)
+ {
+- logbuf_lock_irq();
++ printk_safe_enter_irq();
+ raw_spin_lock(&syslog_lock);
+ latched_seq_write(&clear_seq, prb_next_seq(prb));
+ raw_spin_unlock(&syslog_lock);
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+ }
+
+ /* Return a consistent copy of @syslog_seq. */
+@@ -1737,12 +1703,12 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ break;
+ /* Number of chars in the log buffer */
+ case SYSLOG_ACTION_SIZE_UNREAD:
+- logbuf_lock_irq();
++ printk_safe_enter_irq();
+ raw_spin_lock(&syslog_lock);
+ if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
+ /* No unread messages. */
+ raw_spin_unlock(&syslog_lock);
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+ return 0;
+ }
+ if (info.seq != syslog_seq) {
+@@ -1771,7 +1737,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ error -= syslog_partial;
+ }
+ raw_spin_unlock(&syslog_lock);
+- logbuf_unlock_irq();
++ printk_safe_exit_irq();
+ break;
+ /* Size of the log buffer */
+ case SYSLOG_ACTION_SIZE_BUFFER:
+@@ -2627,7 +2593,6 @@ void console_unlock(void)
+ size_t len;
+
+ printk_safe_enter_irqsave(flags);
+- raw_spin_lock(&logbuf_lock);
+ skip:
+ if (!prb_read_valid(prb, console_seq, &r))
+ break;
+@@ -2671,7 +2636,6 @@ void console_unlock(void)
+ console_msg_format & MSG_FORMAT_SYSLOG,
+ printk_time);
+ console_seq++;
+- raw_spin_unlock(&logbuf_lock);
+
+ /*
+ * While actively printing out messages, if another printk()
+@@ -2698,8 +2662,6 @@ void console_unlock(void)
+
+ console_locked = 0;
+
+- raw_spin_unlock(&logbuf_lock);
+-
+ up_console_sem();
+
+ /*
+@@ -2708,9 +2670,7 @@ void console_unlock(void)
+ * there's a new owner and the console_unlock() from them will do the
+ * flush, no worries.
+ */
+- raw_spin_lock(&logbuf_lock);
+ retry = prb_read_valid(prb, console_seq, NULL);
+- raw_spin_unlock(&logbuf_lock);
+ printk_safe_exit_irqrestore(flags);
+
+ if (retry && console_trylock())
+@@ -2777,9 +2737,9 @@ void console_flush_on_panic(enum con_flush_mode mode)
+ if (mode == CONSOLE_REPLAY_ALL) {
+ unsigned long flags;
+
+- logbuf_lock_irqsave(flags);
++ printk_safe_enter_irqsave(flags);
+ console_seq = prb_first_valid_seq(prb);
+- logbuf_unlock_irqrestore(flags);
++ printk_safe_exit_irqrestore(flags);
+ }
+ console_unlock();
+ }
+@@ -3008,7 +2968,7 @@ void register_console(struct console *newcon)
+ * console_unlock(); will print out the buffered messages
+ * for us.
+ */
+- logbuf_lock_irqsave(flags);
++ printk_safe_enter_irqsave(flags);
+ /*
+ * We're about to replay the log buffer. Only do this to the
+ * just-registered console to avoid excessive message spam to
+@@ -3026,7 +2986,7 @@ void register_console(struct console *newcon)
+ console_seq = syslog_seq;
+ raw_spin_unlock(&syslog_lock);
+
+- logbuf_unlock_irqrestore(flags);
++ printk_safe_exit_irqrestore(flags);
+ }
+ console_unlock();
+ console_sysfs_notify();
+@@ -3410,10 +3370,10 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+
+ /* initialize iterator with data about the stored records */
+ iter.active = true;
+- logbuf_lock_irqsave(flags);
++ printk_safe_enter_irqsave(flags);
+ iter.cur_seq = latched_seq_read_nolock(&clear_seq);
+ iter.next_seq = prb_next_seq(prb);
+- logbuf_unlock_irqrestore(flags);
++ printk_safe_exit_irqrestore(flags);
+
+ /* invoke dumper which will iterate over records */
+ dumper->dump(dumper, reason, &iter);
+@@ -3500,9 +3460,9 @@ bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ unsigned long flags;
+ bool ret;
+
+- logbuf_lock_irqsave(flags);
++ printk_safe_enter_irqsave(flags);
+ ret = kmsg_dump_get_line_nolock(iter, syslog, line, size, len);
+- logbuf_unlock_irqrestore(flags);
++ printk_safe_exit_irqrestore(flags);
+
+ return ret;
+ }
+@@ -3542,7 +3502,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ if (!iter->active || !buf || !size)
+ goto out;
+
+- logbuf_lock_irqsave(flags);
++ printk_safe_enter_irqsave(flags);
+ if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
+ if (info.seq != iter->cur_seq) {
+ /* messages are gone, move to first available one */
+@@ -3552,7 +3512,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+
+ /* last entry */
+ if (iter->cur_seq >= iter->next_seq) {
+- logbuf_unlock_irqrestore(flags);
++ printk_safe_exit_irqrestore(flags);
+ goto out;
+ }
+
+@@ -3586,7 +3546,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+
+ iter->next_seq = next_seq;
+ ret = true;
+- logbuf_unlock_irqrestore(flags);
++ printk_safe_exit_irqrestore(flags);
+ out:
+ if (len_out)
+ *len_out = len;
+@@ -3622,9 +3582,9 @@ void kmsg_dump_rewind(struct kmsg_dumper_iter *iter)
+ {
+ unsigned long flags;
+
+- logbuf_lock_irqsave(flags);
++ printk_safe_enter_irqsave(flags);
+ kmsg_dump_rewind_nolock(iter);
+- logbuf_unlock_irqrestore(flags);
++ printk_safe_exit_irqrestore(flags);
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index 2e9e3ed7d63e..7df8a88d4115 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -16,7 +16,7 @@
+ #include "internal.h"
+
+ /*
+- * printk() could not take logbuf_lock in NMI context. Instead,
++ * In NMI and safe mode, printk() avoids taking locks. Instead,
+ * it uses an alternative implementation that temporary stores
+ * the strings into a per-CPU buffer. The content of the buffer
+ * is later flushed into the main ring buffer via IRQ work.
+@@ -266,18 +266,6 @@ void printk_safe_flush(void)
+ */
+ void printk_safe_flush_on_panic(void)
+ {
+- /*
+- * Make sure that we could access the main ring buffer.
+- * Do not risk a double release when more CPUs are up.
+- */
+- if (raw_spin_is_locked(&logbuf_lock)) {
+- if (num_online_cpus() > 1)
+- return;
+-
+- debug_locks_off();
+- raw_spin_lock_init(&logbuf_lock);
+- }
+-
+ if (raw_spin_is_locked(&safe_read_lock)) {
+ if (num_online_cpus() > 1)
+ return;
+@@ -319,9 +307,7 @@ void noinstr printk_nmi_exit(void)
+ * reordering.
+ *
+ * It has effect only when called in NMI context. Then printk()
+- * will try to store the messages into the main logbuf directly
+- * and use the per-CPU buffers only as a fallback when the lock
+- * is not available.
++ * will store the messages into the main logbuf directly.
+ */
+ void printk_nmi_direct_enter(void)
+ {
+@@ -376,20 +362,21 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+ #endif
+
+ /*
+- * Try to use the main logbuf even in NMI. But avoid calling console
++ * Use the main logbuf even in NMI. But avoid calling console
+ * drivers that might have their own locks.
+ */
+- if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
+- raw_spin_trylock(&logbuf_lock)) {
++ if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK)) {
++ unsigned long flags;
+ int len;
+
++ printk_safe_enter_irqsave(flags);
+ len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
+- raw_spin_unlock(&logbuf_lock);
++ printk_safe_exit_irqrestore(flags);
+ defer_console_output();
+ return len;
+ }
+
+- /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
++ /* Use extra buffer in NMI. */
+ if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
+ return vprintk_nmi(fmt, args);
+
+--
+2.19.1
+
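The commit message above hinges on the ringbuffer being lockless: once a record can be reserved and committed with atomics alone, writers in any context need no shared lock, and only the printing side still has to be serialized. The fragment below is a deliberately simplified userspace stand-in for that idea (it is not the kernel's printk_ringbuffer; store_record(), SLOTS and the producer threads are all invented for the sketch): two threads store records concurrently without taking a lock, and a single "printer" drains them afterwards:

	/* Build with: cc -std=c11 -pthread store.c */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define SLOTS 64                       /* more than this demo ever stores */

	static struct {
		atomic_int committed;
		char text[48];
	} slot[SLOTS];
	static atomic_uint reserve_idx;        /* next free slot */

	/* Callable from any context in this model: no locks, only atomics. */
	static void store_record(const char *text)
	{
		unsigned int i = atomic_fetch_add(&reserve_idx, 1);

		if (i >= SLOTS)
			return;                /* demo ring exhausted: drop */
		snprintf(slot[i].text, sizeof(slot[i].text), "%s", text);
		atomic_store(&slot[i].committed, 1);
	}

	static void *producer(void *arg)
	{
		char buf[32];

		for (int n = 0; n < 10; n++) {
			snprintf(buf, sizeof(buf), "%s msg %d", (const char *)arg, n);
			store_record(buf);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;
		unsigned int tail = 0;

		pthread_create(&a, NULL, producer, "ctx-A");
		pthread_create(&b, NULL, producer, "ctx-B");
		pthread_join(a, NULL);
		pthread_join(b, NULL);

		/* Only this single "printer" ever talks to the console. */
		while (tail < atomic_load(&reserve_idx) && tail < SLOTS) {
			if (atomic_load(&slot[tail].committed))
				printf("%u: %s\n", tail, slot[tail].text);
			tail++;
		}
		return 0;
	}

The drain loop only runs after both producers have been joined, so every reserved slot is already committed; the real ringbuffer additionally handles readers racing with in-flight writers, which is exactly the machinery this sketch leaves out.
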
diff --git a/features/rt/printk-remove-safe-buffers.patch b/features/rt/printk-remove-safe-buffers.patch
new file mode 100644
index 00000000..66ca3843
--- /dev/null
+++ b/features/rt/printk-remove-safe-buffers.patch
@@ -0,0 +1,875 @@
+From 5d9a7df0372d8da92a5260fd9a4cd756a0b31fb0 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:00 +0106
+Subject: [PATCH 022/191] printk: remove safe buffers
+
+With @logbuf_lock removed, the high level printk functions for
+storing messages are lockless. Messages can be stored from any
+context, so there is no need for the NMI and safe buffers anymore.
+
+Remove the NMI and safe buffers. In NMI or safe contexts, store
+the message immediately but still use irq_work to defer the console
+printing.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/kernel/traps.c | 1 -
+ arch/powerpc/kernel/watchdog.c | 5 -
+ include/linux/printk.h | 10 -
+ kernel/kexec_core.c | 1 -
+ kernel/panic.c | 3 -
+ kernel/printk/internal.h | 2 -
+ kernel/printk/printk.c | 85 ++-------
+ kernel/printk/printk_safe.c | 328 +--------------------------------
+ lib/nmi_backtrace.c | 6 -
+ 9 files changed, 17 insertions(+), 424 deletions(-)
+
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index a44a30b0688c..5828c83eaca6 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -171,7 +171,6 @@ extern void panic_flush_kmsg_start(void)
+
+ extern void panic_flush_kmsg_end(void)
+ {
+- printk_safe_flush_on_panic();
+ kmsg_dump(KMSG_DUMP_PANIC);
+ bust_spinlocks(0);
+ debug_locks_off();
+diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
+index c9a8f4781a10..dc17d8903d4f 100644
+--- a/arch/powerpc/kernel/watchdog.c
++++ b/arch/powerpc/kernel/watchdog.c
+@@ -183,11 +183,6 @@ static void watchdog_smp_panic(int cpu, u64 tb)
+
+ wd_smp_unlock(&flags);
+
+- printk_safe_flush();
+- /*
+- * printk_safe_flush() seems to require another print
+- * before anything actually goes out to console.
+- */
+ if (sysctl_hardlockup_all_cpu_backtrace)
+ trigger_allbutself_cpu_backtrace();
+
+diff --git a/include/linux/printk.h b/include/linux/printk.h
+index fe7eb2351610..2476796c1150 100644
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -207,8 +207,6 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
+ void dump_stack_print_info(const char *log_lvl);
+ void show_regs_print_info(const char *log_lvl);
+ extern asmlinkage void dump_stack(void) __cold;
+-extern void printk_safe_flush(void);
+-extern void printk_safe_flush_on_panic(void);
+ #else
+ static inline __printf(1, 0)
+ int vprintk(const char *s, va_list args)
+@@ -272,14 +270,6 @@ static inline void show_regs_print_info(const char *log_lvl)
+ static inline void dump_stack(void)
+ {
+ }
+-
+-static inline void printk_safe_flush(void)
+-{
+-}
+-
+-static inline void printk_safe_flush_on_panic(void)
+-{
+-}
+ #endif
+
+ extern int kptr_restrict;
+diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
+index a0b6780740c8..480d5f77ef4f 100644
+--- a/kernel/kexec_core.c
++++ b/kernel/kexec_core.c
+@@ -977,7 +977,6 @@ void crash_kexec(struct pt_regs *regs)
+ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
+ if (old_cpu == PANIC_CPU_INVALID) {
+ /* This is the 1st CPU which comes here, so go ahead. */
+- printk_safe_flush_on_panic();
+ __crash_kexec(regs);
+
+ /*
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 21d59a508272..c722faaae44b 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -247,7 +247,6 @@ void panic(const char *fmt, ...)
+ * Bypass the panic_cpu check and call __crash_kexec directly.
+ */
+ if (!_crash_kexec_post_notifiers) {
+- printk_safe_flush_on_panic();
+ __crash_kexec(NULL);
+
+ /*
+@@ -271,8 +270,6 @@ void panic(const char *fmt, ...)
+ */
+ atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+
+- /* Call flush even twice. It tries harder with a single online CPU */
+- printk_safe_flush_on_panic();
+ kmsg_dump(KMSG_DUMP_PANIC);
+
+ /*
+diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
+index e7acc2888c8e..e108b2ece8c7 100644
+--- a/kernel/printk/internal.h
++++ b/kernel/printk/internal.h
+@@ -23,7 +23,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
+ void __printk_safe_enter(void);
+ void __printk_safe_exit(void);
+
+-void printk_safe_init(void);
+ bool printk_percpu_data_ready(void);
+
+ #define printk_safe_enter_irqsave(flags) \
+@@ -67,6 +66,5 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
+ #define printk_safe_enter_irq() local_irq_disable()
+ #define printk_safe_exit_irq() local_irq_enable()
+
+-static inline void printk_safe_init(void) { }
+ static inline bool printk_percpu_data_ready(void) { return false; }
+ #endif /* CONFIG_PRINTK */
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 008a0ede8b05..274d4ef05a90 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -732,27 +732,22 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ if (ret)
+ return ret;
+
+- printk_safe_enter_irq();
+ if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+- printk_safe_exit_irq();
+ goto out;
+ }
+
+- printk_safe_exit_irq();
+ ret = wait_event_interruptible(log_wait,
+ prb_read_valid(prb, atomic64_read(&user->seq), r));
+ if (ret)
+ goto out;
+- printk_safe_enter_irq();
+ }
+
+ if (r->info->seq != atomic64_read(&user->seq)) {
+ /* our last seen message is gone, return error and reset */
+ atomic64_set(&user->seq, r->info->seq);
+ ret = -EPIPE;
+- printk_safe_exit_irq();
+ goto out;
+ }
+
+@@ -762,7 +757,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ &r->info->dev_info);
+
+ atomic64_set(&user->seq, r->info->seq + 1);
+- printk_safe_exit_irq();
+
+ if (len > count) {
+ ret = -EINVAL;
+@@ -797,7 +791,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ if (offset)
+ return -ESPIPE;
+
+- printk_safe_enter_irq();
+ switch (whence) {
+ case SEEK_SET:
+ /* the first record */
+@@ -818,7 +811,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ default:
+ ret = -EINVAL;
+ }
+- printk_safe_exit_irq();
+ return ret;
+ }
+
+@@ -833,7 +825,6 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+
+ poll_wait(file, &log_wait, wait);
+
+- printk_safe_enter_irq();
+ if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
+ /* return error when data has vanished underneath us */
+ if (info.seq != atomic64_read(&user->seq))
+@@ -841,7 +832,6 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+ else
+ ret = EPOLLIN|EPOLLRDNORM;
+ }
+- printk_safe_exit_irq();
+
+ return ret;
+ }
+@@ -874,9 +864,7 @@ static int devkmsg_open(struct inode *inode, struct file *file)
+ prb_rec_init_rd(&user->record, &user->info,
+ &user->text_buf[0], sizeof(user->text_buf));
+
+- printk_safe_enter_irq();
+ atomic64_set(&user->seq, prb_first_valid_seq(prb));
+- printk_safe_exit_irq();
+
+ file->private_data = user;
+ return 0;
+@@ -1042,9 +1030,6 @@ static inline void log_buf_add_cpu(void) {}
+
+ static void __init set_percpu_data_ready(void)
+ {
+- printk_safe_init();
+- /* Make sure we set this flag only after printk_safe() init is done */
+- barrier();
+ __printk_percpu_data_ready = true;
+ }
+
+@@ -1142,8 +1127,6 @@ void __init setup_log_buf(int early)
+ new_descs, ilog2(new_descs_count),
+ new_infos);
+
+- printk_safe_enter_irqsave(flags);
+-
+ log_buf_len = new_log_buf_len;
+ log_buf = new_log_buf;
+ new_log_buf_len = 0;
+@@ -1159,8 +1142,6 @@ void __init setup_log_buf(int early)
+ */
+ prb = &printk_rb_dynamic;
+
+- printk_safe_exit_irqrestore(flags);
+-
+ if (seq != prb_next_seq(&printk_rb_static)) {
+ pr_err("dropped %llu messages\n",
+ prb_next_seq(&printk_rb_static) - seq);
+@@ -1498,11 +1479,9 @@ static int syslog_print(char __user *buf, int size)
+ size_t n;
+ size_t skip;
+
+- printk_safe_enter_irq();
+- raw_spin_lock(&syslog_lock);
++ raw_spin_lock_irq(&syslog_lock);
+ if (!prb_read_valid(prb, syslog_seq, &r)) {
+- raw_spin_unlock(&syslog_lock);
+- printk_safe_exit_irq();
++ raw_spin_unlock_irq(&syslog_lock);
+ break;
+ }
+ if (r.info->seq != syslog_seq) {
+@@ -1531,8 +1510,7 @@ static int syslog_print(char __user *buf, int size)
+ syslog_partial += n;
+ } else
+ n = 0;
+- raw_spin_unlock(&syslog_lock);
+- printk_safe_exit_irq();
++ raw_spin_unlock_irq(&syslog_lock);
+
+ if (!n)
+ break;
+@@ -1566,7 +1544,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ return -ENOMEM;
+
+ time = printk_time;
+- printk_safe_enter_irq();
+ /*
+ * Find first record that fits, including all following records,
+ * into the user-provided buffer for this dump.
+@@ -1587,23 +1564,20 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ break;
+ }
+
+- printk_safe_exit_irq();
+ if (copy_to_user(buf + len, text, textlen))
+ len = -EFAULT;
+ else
+ len += textlen;
+- printk_safe_enter_irq();
+
+ if (len < 0)
+ break;
+ }
+
+ if (clear) {
+- raw_spin_lock(&syslog_lock);
++ raw_spin_lock_irq(&syslog_lock);
+ latched_seq_write(&clear_seq, seq);
+- raw_spin_unlock(&syslog_lock);
++ raw_spin_unlock_irq(&syslog_lock);
+ }
+- printk_safe_exit_irq();
+
+ kfree(text);
+ return len;
+@@ -1611,11 +1585,9 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+
+ static void syslog_clear(void)
+ {
+- printk_safe_enter_irq();
+- raw_spin_lock(&syslog_lock);
++ raw_spin_lock_irq(&syslog_lock);
+ latched_seq_write(&clear_seq, prb_next_seq(prb));
+- raw_spin_unlock(&syslog_lock);
+- printk_safe_exit_irq();
++ raw_spin_unlock_irq(&syslog_lock);
+ }
+
+ /* Return a consistent copy of @syslog_seq. */
+@@ -1703,12 +1675,10 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ break;
+ /* Number of chars in the log buffer */
+ case SYSLOG_ACTION_SIZE_UNREAD:
+- printk_safe_enter_irq();
+- raw_spin_lock(&syslog_lock);
++ raw_spin_lock_irq(&syslog_lock);
+ if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
+ /* No unread messages. */
+- raw_spin_unlock(&syslog_lock);
+- printk_safe_exit_irq();
++ raw_spin_unlock_irq(&syslog_lock);
+ return 0;
+ }
+ if (info.seq != syslog_seq) {
+@@ -1736,8 +1706,7 @@ int do_syslog(int type, char __user *buf, int len, int source)
+ }
+ error -= syslog_partial;
+ }
+- raw_spin_unlock(&syslog_lock);
+- printk_safe_exit_irq();
++ raw_spin_unlock_irq(&syslog_lock);
+ break;
+ /* Size of the log buffer */
+ case SYSLOG_ACTION_SIZE_BUFFER:
+@@ -2207,7 +2176,6 @@ asmlinkage int vprintk_emit(int facility, int level,
+ {
+ int printed_len;
+ bool in_sched = false;
+- unsigned long flags;
+
+ /* Suppress unimportant messages after panic happens */
+ if (unlikely(suppress_printk))
+@@ -2221,9 +2189,7 @@ asmlinkage int vprintk_emit(int facility, int level,
+ boot_delay_msec(level);
+ printk_delay();
+
+- printk_safe_enter_irqsave(flags);
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+- printk_safe_exit_irqrestore(flags);
+
+ /* If called from the scheduler, we can not call up(). */
+ if (!in_sched) {
+@@ -2615,7 +2581,6 @@ void console_unlock(void)
+ {
+ static char ext_text[CONSOLE_EXT_LOG_MAX];
+ static char text[CONSOLE_LOG_MAX];
+- unsigned long flags;
+ bool do_cond_resched, retry;
+ struct printk_info info;
+ struct printk_record r;
+@@ -2660,7 +2625,6 @@ void console_unlock(void)
+ size_t ext_len = 0;
+ size_t len;
+
+- printk_safe_enter_irqsave(flags);
+ skip:
+ if (!prb_read_valid(prb, console_seq, &r))
+ break;
+@@ -2717,12 +2681,8 @@ void console_unlock(void)
+ call_console_drivers(ext_text, ext_len, text, len);
+ start_critical_timings();
+
+- if (console_lock_spinning_disable_and_check()) {
+- printk_safe_exit_irqrestore(flags);
++ if (console_lock_spinning_disable_and_check())
+ return;
+- }
+-
+- printk_safe_exit_irqrestore(flags);
+
+ if (do_cond_resched)
+ cond_resched();
+@@ -2739,8 +2699,6 @@ void console_unlock(void)
+ * flush, no worries.
+ */
+ retry = prb_read_valid(prb, console_seq, NULL);
+- printk_safe_exit_irqrestore(flags);
+-
+ if (retry && console_trylock())
+ goto again;
+ }
+@@ -2802,13 +2760,8 @@ void console_flush_on_panic(enum con_flush_mode mode)
+ console_trylock();
+ console_may_schedule = 0;
+
+- if (mode == CONSOLE_REPLAY_ALL) {
+- unsigned long flags;
+-
+- printk_safe_enter_irqsave(flags);
++ if (mode == CONSOLE_REPLAY_ALL)
+ console_seq = prb_first_valid_seq(prb);
+- printk_safe_exit_irqrestore(flags);
+- }
+ console_unlock();
+ }
+
+@@ -3464,11 +3417,9 @@ bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ struct printk_info info;
+ unsigned int line_count;
+ struct printk_record r;
+- unsigned long flags;
+ size_t l = 0;
+ bool ret = false;
+
+- printk_safe_enter_irqsave(flags);
+ prb_rec_init_rd(&r, &info, line, size);
+
+ if (!iter->active)
+@@ -3492,7 +3443,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper_iter *iter, bool syslog,
+ iter->cur_seq = r.info->seq + 1;
+ ret = true;
+ out:
+- printk_safe_exit_irqrestore(flags);
+ if (len)
+ *len = l;
+ return ret;
+@@ -3523,7 +3473,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ {
+ struct printk_info info;
+ struct printk_record r;
+- unsigned long flags;
+ u64 seq;
+ u64 next_seq;
+ size_t len = 0;
+@@ -3533,7 +3482,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ if (!iter->active || !buf || !size)
+ goto out;
+
+- printk_safe_enter_irqsave(flags);
+ if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
+ if (info.seq != iter->cur_seq) {
+ /* messages are gone, move to first available one */
+@@ -3542,10 +3490,8 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+ }
+
+ /* last entry */
+- if (iter->cur_seq >= iter->next_seq) {
+- printk_safe_exit_irqrestore(flags);
++ if (iter->cur_seq >= iter->next_seq)
+ goto out;
+- }
+
+ /*
+ * Find first record that fits, including all following records,
+@@ -3577,7 +3523,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper_iter *iter, bool syslog,
+
+ iter->next_seq = next_seq;
+ ret = true;
+- printk_safe_exit_irqrestore(flags);
+ out:
+ if (len_out)
+ *len_out = len;
+@@ -3595,12 +3540,8 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+ */
+ void kmsg_dump_rewind(struct kmsg_dumper_iter *iter)
+ {
+- unsigned long flags;
+-
+- printk_safe_enter_irqsave(flags);
+ iter->cur_seq = latched_seq_read_nolock(&clear_seq);
+ iter->next_seq = prb_next_seq(prb);
+- printk_safe_exit_irqrestore(flags);
+ }
+ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index 7df8a88d4115..3ee2d62e80ef 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -15,281 +15,9 @@
+
+ #include "internal.h"
+
+-/*
+- * In NMI and safe mode, printk() avoids taking locks. Instead,
+- * it uses an alternative implementation that temporary stores
+- * the strings into a per-CPU buffer. The content of the buffer
+- * is later flushed into the main ring buffer via IRQ work.
+- *
+- * The alternative implementation is chosen transparently
+- * by examining current printk() context mask stored in @printk_context
+- * per-CPU variable.
+- *
+- * The implementation allows to flush the strings also from another CPU.
+- * There are situations when we want to make sure that all buffers
+- * were handled or when IRQs are blocked.
+- */
+-
+-#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
+- sizeof(atomic_t) - \
+- sizeof(atomic_t) - \
+- sizeof(struct irq_work))
+-
+-struct printk_safe_seq_buf {
+- atomic_t len; /* length of written data */
+- atomic_t message_lost;
+- struct irq_work work; /* IRQ work that flushes the buffer */
+- unsigned char buffer[SAFE_LOG_BUF_LEN];
+-};
+-
+-static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
+ static DEFINE_PER_CPU(int, printk_context);
+
+-static DEFINE_RAW_SPINLOCK(safe_read_lock);
+-
+ #ifdef CONFIG_PRINTK_NMI
+-static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
+-#endif
+-
+-/* Get flushed in a more safe context. */
+-static void queue_flush_work(struct printk_safe_seq_buf *s)
+-{
+- if (printk_percpu_data_ready())
+- irq_work_queue(&s->work);
+-}
+-
+-/*
+- * Add a message to per-CPU context-dependent buffer. NMI and printk-safe
+- * have dedicated buffers, because otherwise printk-safe preempted by
+- * NMI-printk would have overwritten the NMI messages.
+- *
+- * The messages are flushed from irq work (or from panic()), possibly,
+- * from other CPU, concurrently with printk_safe_log_store(). Should this
+- * happen, printk_safe_log_store() will notice the buffer->len mismatch
+- * and repeat the write.
+- */
+-static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
+- const char *fmt, va_list args)
+-{
+- int add;
+- size_t len;
+- va_list ap;
+-
+-again:
+- len = atomic_read(&s->len);
+-
+- /* The trailing '\0' is not counted into len. */
+- if (len >= sizeof(s->buffer) - 1) {
+- atomic_inc(&s->message_lost);
+- queue_flush_work(s);
+- return 0;
+- }
+-
+- /*
+- * Make sure that all old data have been read before the buffer
+- * was reset. This is not needed when we just append data.
+- */
+- if (!len)
+- smp_rmb();
+-
+- va_copy(ap, args);
+- add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
+- va_end(ap);
+- if (!add)
+- return 0;
+-
+- /*
+- * Do it once again if the buffer has been flushed in the meantime.
+- * Note that atomic_cmpxchg() is an implicit memory barrier that
+- * makes sure that the data were written before updating s->len.
+- */
+- if (atomic_cmpxchg(&s->len, len, len + add) != len)
+- goto again;
+-
+- queue_flush_work(s);
+- return add;
+-}
+-
+-static inline void printk_safe_flush_line(const char *text, int len)
+-{
+- /*
+- * Avoid any console drivers calls from here, because we may be
+- * in NMI or printk_safe context (when in panic). The messages
+- * must go only into the ring buffer at this stage. Consoles will
+- * get explicitly called later when a crashdump is not generated.
+- */
+- printk_deferred("%.*s", len, text);
+-}
+-
+-/* printk part of the temporary buffer line by line */
+-static int printk_safe_flush_buffer(const char *start, size_t len)
+-{
+- const char *c, *end;
+- bool header;
+-
+- c = start;
+- end = start + len;
+- header = true;
+-
+- /* Print line by line. */
+- while (c < end) {
+- if (*c == '\n') {
+- printk_safe_flush_line(start, c - start + 1);
+- start = ++c;
+- header = true;
+- continue;
+- }
+-
+- /* Handle continuous lines or missing new line. */
+- if ((c + 1 < end) && printk_get_level(c)) {
+- if (header) {
+- c = printk_skip_level(c);
+- continue;
+- }
+-
+- printk_safe_flush_line(start, c - start);
+- start = c++;
+- header = true;
+- continue;
+- }
+-
+- header = false;
+- c++;
+- }
+-
+- /* Check if there was a partial line. Ignore pure header. */
+- if (start < end && !header) {
+- static const char newline[] = KERN_CONT "\n";
+-
+- printk_safe_flush_line(start, end - start);
+- printk_safe_flush_line(newline, strlen(newline));
+- }
+-
+- return len;
+-}
+-
+-static void report_message_lost(struct printk_safe_seq_buf *s)
+-{
+- int lost = atomic_xchg(&s->message_lost, 0);
+-
+- if (lost)
+- printk_deferred("Lost %d message(s)!\n", lost);
+-}
+-
+-/*
+- * Flush data from the associated per-CPU buffer. The function
+- * can be called either via IRQ work or independently.
+- */
+-static void __printk_safe_flush(struct irq_work *work)
+-{
+- struct printk_safe_seq_buf *s =
+- container_of(work, struct printk_safe_seq_buf, work);
+- unsigned long flags;
+- size_t len;
+- int i;
+-
+- /*
+- * The lock has two functions. First, one reader has to flush all
+- * available message to make the lockless synchronization with
+- * writers easier. Second, we do not want to mix messages from
+- * different CPUs. This is especially important when printing
+- * a backtrace.
+- */
+- raw_spin_lock_irqsave(&safe_read_lock, flags);
+-
+- i = 0;
+-more:
+- len = atomic_read(&s->len);
+-
+- /*
+- * This is just a paranoid check that nobody has manipulated
+- * the buffer an unexpected way. If we printed something then
+- * @len must only increase. Also it should never overflow the
+- * buffer size.
+- */
+- if ((i && i >= len) || len > sizeof(s->buffer)) {
+- const char *msg = "printk_safe_flush: internal error\n";
+-
+- printk_safe_flush_line(msg, strlen(msg));
+- len = 0;
+- }
+-
+- if (!len)
+- goto out; /* Someone else has already flushed the buffer. */
+-
+- /* Make sure that data has been written up to the @len */
+- smp_rmb();
+- i += printk_safe_flush_buffer(s->buffer + i, len - i);
+-
+- /*
+- * Check that nothing has got added in the meantime and truncate
+- * the buffer. Note that atomic_cmpxchg() is an implicit memory
+- * barrier that makes sure that the data were copied before
+- * updating s->len.
+- */
+- if (atomic_cmpxchg(&s->len, len, 0) != len)
+- goto more;
+-
+-out:
+- report_message_lost(s);
+- raw_spin_unlock_irqrestore(&safe_read_lock, flags);
+-}
+-
+-/**
+- * printk_safe_flush - flush all per-cpu nmi buffers.
+- *
+- * The buffers are flushed automatically via IRQ work. This function
+- * is useful only when someone wants to be sure that all buffers have
+- * been flushed at some point.
+- */
+-void printk_safe_flush(void)
+-{
+- int cpu;
+-
+- for_each_possible_cpu(cpu) {
+-#ifdef CONFIG_PRINTK_NMI
+- __printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
+-#endif
+- __printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
+- }
+-}
+-
+-/**
+- * printk_safe_flush_on_panic - flush all per-cpu nmi buffers when the system
+- * goes down.
+- *
+- * Similar to printk_safe_flush() but it can be called even in NMI context when
+- * the system goes down. It does the best effort to get NMI messages into
+- * the main ring buffer.
+- *
+- * Note that it could try harder when there is only one CPU online.
+- */
+-void printk_safe_flush_on_panic(void)
+-{
+- if (raw_spin_is_locked(&safe_read_lock)) {
+- if (num_online_cpus() > 1)
+- return;
+-
+- debug_locks_off();
+- raw_spin_lock_init(&safe_read_lock);
+- }
+-
+- printk_safe_flush();
+-}
+-
+-#ifdef CONFIG_PRINTK_NMI
+-/*
+- * Safe printk() for NMI context. It uses a per-CPU buffer to
+- * store the message. NMIs are not nested, so there is always only
+- * one writer running. But the buffer might get flushed from another
+- * CPU, so we need to be careful.
+- */
+-static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
+-{
+- struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
+-
+- return printk_safe_log_store(s, fmt, args);
+-}
+
+ void noinstr printk_nmi_enter(void)
+ {
+@@ -305,9 +33,6 @@ void noinstr printk_nmi_exit(void)
+ * Marks a code that might produce many messages in NMI context
+ * and the risk of losing them is more critical than eventual
+ * reordering.
+- *
+- * It has effect only when called in NMI context. Then printk()
+- * will store the messages into the main logbuf directly.
+ */
+ void printk_nmi_direct_enter(void)
+ {
+@@ -320,27 +45,8 @@ void printk_nmi_direct_exit(void)
+ this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
+ }
+
+-#else
+-
+-static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
+-{
+- return 0;
+-}
+-
+ #endif /* CONFIG_PRINTK_NMI */
+
+-/*
+- * Lock-less printk(), to avoid deadlocks should the printk() recurse
+- * into itself. It uses a per-CPU buffer to store the message, just like
+- * NMI.
+- */
+-static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
+-{
+- struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);
+-
+- return printk_safe_log_store(s, fmt, args);
+-}
+-
+ /* Can be preempted by NMI. */
+ void __printk_safe_enter(void)
+ {
+@@ -365,8 +71,10 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+ * Use the main logbuf even in NMI. But avoid calling console
+ * drivers that might have their own locks.
+ */
+- if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK)) {
+- unsigned long flags;
++ if (this_cpu_read(printk_context) &
++ (PRINTK_NMI_DIRECT_CONTEXT_MASK |
++ PRINTK_NMI_CONTEXT_MASK |
++ PRINTK_SAFE_CONTEXT_MASK)) {
+ int len;
+
+ printk_safe_enter_irqsave(flags);
+@@ -376,34 +84,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
+ return len;
+ }
+
+- /* Use extra buffer in NMI. */
+- if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
+- return vprintk_nmi(fmt, args);
+-
+- /* Use extra buffer to prevent a recursion deadlock in safe mode. */
+- if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
+- return vprintk_safe(fmt, args);
+-
+ /* No obstacles. */
+ return vprintk_default(fmt, args);
+ }
+-
+-void __init printk_safe_init(void)
+-{
+- int cpu;
+-
+- for_each_possible_cpu(cpu) {
+- struct printk_safe_seq_buf *s;
+-
+- s = &per_cpu(safe_print_seq, cpu);
+- init_irq_work(&s->work, __printk_safe_flush);
+-
+-#ifdef CONFIG_PRINTK_NMI
+- s = &per_cpu(nmi_print_seq, cpu);
+- init_irq_work(&s->work, __printk_safe_flush);
+-#endif
+- }
+-
+- /* Flush pending messages that did not have scheduled IRQ works. */
+- printk_safe_flush();
+-}
+diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
+index 8abe1870dba4..b09a490f5f70 100644
+--- a/lib/nmi_backtrace.c
++++ b/lib/nmi_backtrace.c
+@@ -75,12 +75,6 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
+ touch_softlockup_watchdog();
+ }
+
+- /*
+- * Force flush any remote buffers that might be stuck in IRQ context
+- * and therefore could not run their irq_work.
+- */
+- printk_safe_flush();
+-
+ clear_bit_unlock(0, &backtrace_flag);
+ put_cpu();
+ }
+--
+2.19.1
+
diff --git a/features/rt/printk-track-limit-recursion.patch b/features/rt/printk-track-limit-recursion.patch
new file mode 100644
index 00000000..0eba8c25
--- /dev/null
+++ b/features/rt/printk-track-limit-recursion.patch
@@ -0,0 +1,142 @@
+From 23b2babe079f76add633021e717676da7f238ff8 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 11 Dec 2020 00:55:25 +0106
+Subject: [PATCH 021/191] printk: track/limit recursion
+
+Limit printk() recursion to 1 level. This is enough to print a
+stacktrace for the printk call, should a WARN or BUG occur.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 74 ++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 71 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 523621889a72..008a0ede8b05 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1940,6 +1940,65 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
+ }
+ }
+
++#ifdef CONFIG_PRINTK_NMI
++#define NUM_RECURSION_CTX 2
++#else
++#define NUM_RECURSION_CTX 1
++#endif
++
++struct printk_recursion {
++ char count[NUM_RECURSION_CTX];
++};
++
++static DEFINE_PER_CPU(struct printk_recursion, percpu_printk_recursion);
++static char printk_recursion_count[NUM_RECURSION_CTX];
++
++static char *printk_recursion_counter(void)
++{
++ struct printk_recursion *rec;
++ char *count;
++
++ if (!printk_percpu_data_ready()) {
++ count = &printk_recursion_count[0];
++ } else {
++ rec = this_cpu_ptr(&percpu_printk_recursion);
++
++ count = &rec->count[0];
++ }
++
++#ifdef CONFIG_PRINTK_NMI
++ if (in_nmi())
++ count++;
++#endif
++
++ return count;
++}
++
++static bool printk_enter_irqsave(unsigned long *flags)
++{
++ char *count;
++
++ local_irq_save(*flags);
++ count = printk_recursion_counter();
++ /* Only 1 level of recursion allowed. */
++ if (*count > 1) {
++ local_irq_restore(*flags);
++ return false;
++ }
++ (*count)++;
++
++ return true;
++}
++
++static void printk_exit_irqrestore(unsigned long flags)
++{
++ char *count;
++
++ count = printk_recursion_counter();
++ (*count)--;
++ local_irq_restore(flags);
++}
++
+ int printk_delay_msec __read_mostly;
+
+ static inline void printk_delay(void)
+@@ -2040,11 +2099,13 @@ int vprintk_store(int facility, int level,
+ struct prb_reserved_entry e;
+ enum log_flags lflags = 0;
+ struct printk_record r;
++ unsigned long irqflags;
+ u16 trunc_msg_len = 0;
+ char prefix_buf[8];
+ u16 reserve_size;
+ va_list args2;
+ u16 text_len;
++ int ret = 0;
+ u64 ts_nsec;
+
+ /*
+@@ -2055,6 +2116,9 @@ int vprintk_store(int facility, int level,
+ */
+ ts_nsec = local_clock();
+
++ if (!printk_enter_irqsave(&irqflags))
++ return 0;
++
+ /*
+ * The sprintf needs to come first since the syslog prefix might be
+ * passed in as a parameter. An extra byte must be reserved so that
+@@ -2092,7 +2156,8 @@ int vprintk_store(int facility, int level,
+ prb_commit(&e);
+ }
+
+- return text_len;
++ ret = text_len;
++ goto out;
+ }
+ }
+
+@@ -2108,7 +2173,7 @@ int vprintk_store(int facility, int level,
+
+ prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
+ if (!prb_reserve(&e, prb, &r))
+- return 0;
++ goto out;
+ }
+
+ /* fill message */
+@@ -2130,7 +2195,10 @@ int vprintk_store(int facility, int level,
+ else
+ prb_final_commit(&e);
+
+- return (text_len + trunc_msg_len);
++ ret = text_len + trunc_msg_len;
++out:
++ printk_exit_irqrestore(irqflags);
++ return ret;
+ }
+
+ asmlinkage int vprintk_emit(int facility, int level,
+--
+2.19.1
+
diff --git a/features/rt/printk-use-atomic64_t-for-devkmsg_user.seq.patch b/features/rt/printk-use-atomic64_t-for-devkmsg_user.seq.patch
new file mode 100644
index 00000000..2c798ea0
--- /dev/null
+++ b/features/rt/printk-use-atomic64_t-for-devkmsg_user.seq.patch
@@ -0,0 +1,111 @@
+From ce5e82552c7e13c881911a386426cf474a77ae70 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 10 Dec 2020 15:33:40 +0106
+Subject: [PATCH 013/191] printk: use atomic64_t for devkmsg_user.seq
+
+@user->seq is indirectly protected by @logbuf_lock. Once @logbuf_lock
+is removed, @user->seq will be no longer safe from an atomicity point
+of view.
+
+In preparation for the removal of @logbuf_lock, change it to
+atomic64_t to provide this safety.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+---
+ kernel/printk/printk.c | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index b764222b325b..0031bb2156d1 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -662,7 +662,7 @@ static ssize_t msg_print_ext_body(char *buf, size_t size,
+
+ /* /dev/kmsg - userspace message inject/listen interface */
+ struct devkmsg_user {
+- u64 seq;
++ atomic64_t seq;
+ struct ratelimit_state rs;
+ struct mutex lock;
+ char buf[CONSOLE_EXT_LOG_MAX];
+@@ -763,7 +763,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ return ret;
+
+ logbuf_lock_irq();
+- if (!prb_read_valid(prb, user->seq, r)) {
++ if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ logbuf_unlock_irq();
+@@ -772,15 +772,15 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+
+ logbuf_unlock_irq();
+ ret = wait_event_interruptible(log_wait,
+- prb_read_valid(prb, user->seq, r));
++ prb_read_valid(prb, atomic64_read(&user->seq), r));
+ if (ret)
+ goto out;
+ logbuf_lock_irq();
+ }
+
+- if (r->info->seq != user->seq) {
++ if (r->info->seq != atomic64_read(&user->seq)) {
+ /* our last seen message is gone, return error and reset */
+- user->seq = r->info->seq;
++ atomic64_set(&user->seq, r->info->seq);
+ ret = -EPIPE;
+ logbuf_unlock_irq();
+ goto out;
+@@ -791,7 +791,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
+ &r->text_buf[0], r->info->text_len,
+ &r->info->dev_info);
+
+- user->seq = r->info->seq + 1;
++ atomic64_set(&user->seq, r->info->seq + 1);
+ logbuf_unlock_irq();
+
+ if (len > count) {
+@@ -831,7 +831,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ switch (whence) {
+ case SEEK_SET:
+ /* the first record */
+- user->seq = prb_first_valid_seq(prb);
++ atomic64_set(&user->seq, prb_first_valid_seq(prb));
+ break;
+ case SEEK_DATA:
+ /*
+@@ -839,11 +839,11 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ * like issued by 'dmesg -c'. Reading /dev/kmsg itself
+ * changes no global state, and does not clear anything.
+ */
+- user->seq = latched_seq_read_nolock(&clear_seq);
++ atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
+ break;
+ case SEEK_END:
+ /* after the last record */
+- user->seq = prb_next_seq(prb);
++ atomic64_set(&user->seq, prb_next_seq(prb));
+ break;
+ default:
+ ret = -EINVAL;
+@@ -866,7 +866,7 @@ static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
+ logbuf_lock_irq();
+ if (prb_read_valid_info(prb, user->seq, &info, NULL)) {
+ /* return error when data has vanished underneath us */
+- if (info.seq != user->seq)
++ if (info.seq != atomic64_read(&user->seq))
+ ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
+ else
+ ret = EPOLLIN|EPOLLRDNORM;
+@@ -905,7 +905,7 @@ static int devkmsg_open(struct inode *inode, struct file *file)
+ &user->text_buf[0], sizeof(user->text_buf));
+
+ logbuf_lock_irq();
+- user->seq = prb_first_valid_seq(prb);
++ atomic64_set(&user->seq, prb_first_valid_seq(prb));
+ logbuf_unlock_irq();
+
+ file->private_data = user;
+--
+2.19.1
+
diff --git a/features/rt/printk-use-seqcount_latch-for-clear_seq.patch b/features/rt/printk-use-seqcount_latch-for-clear_seq.patch
new file mode 100644
index 00000000..35a9bd69
--- /dev/null
+++ b/features/rt/printk-use-seqcount_latch-for-clear_seq.patch
@@ -0,0 +1,146 @@
+From e6a23cb5f39c0c3e2cbaef3af7b0ab41fd2104ae Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:41:58 +0106
+Subject: [PATCH 012/191] printk: use seqcount_latch for clear_seq
+
+kmsg_dump_rewind_nolock() locklessly reads @clear_seq. However,
+this is not done atomically. Since @clear_seq is 64-bit, this
+cannot be an atomic operation for all platforms. Therefore, use
+a seqcount_latch to allow readers to always read a consistent
+value.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 58 ++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 50 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 8a903faaec4e..b764222b325b 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -402,8 +402,21 @@ static u64 console_seq;
+ static u64 exclusive_console_stop_seq;
+ static unsigned long console_dropped;
+
+-/* the next printk record to read after the last 'clear' command */
+-static u64 clear_seq;
++struct latched_seq {
++ seqcount_latch_t latch;
++ u64 val[2];
++};
++
++/*
++ * The next printk record to read after the last 'clear' command. There are
++ * two copies (updated with seqcount_latch) so that reads can locklessly
++ * access a valid value. Writers are synchronized by @logbuf_lock.
++ */
++static struct latched_seq clear_seq = {
++ .latch = SEQCNT_LATCH_ZERO(clear_seq.latch),
++ .val[0] = 0,
++ .val[1] = 0,
++};
+
+ #ifdef CONFIG_PRINTK_CALLER
+ #define PREFIX_MAX 48
+@@ -457,6 +470,31 @@ bool printk_percpu_data_ready(void)
+ return __printk_percpu_data_ready;
+ }
+
++/* Must be called under logbuf_lock. */
++static void latched_seq_write(struct latched_seq *ls, u64 val)
++{
++ raw_write_seqcount_latch(&ls->latch);
++ ls->val[0] = val;
++ raw_write_seqcount_latch(&ls->latch);
++ ls->val[1] = val;
++}
++
++/* Can be called from any context. */
++static u64 latched_seq_read_nolock(struct latched_seq *ls)
++{
++ unsigned int seq;
++ unsigned int idx;
++ u64 val;
++
++ do {
++ seq = raw_read_seqcount_latch(&ls->latch);
++ idx = seq & 0x1;
++ val = ls->val[idx];
++ } while (read_seqcount_latch_retry(&ls->latch, seq));
++
++ return val;
++}
++
+ /* Return log buffer address */
+ char *log_buf_addr_get(void)
+ {
+@@ -801,7 +839,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
+ * like issued by 'dmesg -c'. Reading /dev/kmsg itself
+ * changes no global state, and does not clear anything.
+ */
+- user->seq = clear_seq;
++ user->seq = latched_seq_read_nolock(&clear_seq);
+ break;
+ case SEEK_END:
+ /* after the last record */
+@@ -960,6 +998,9 @@ void log_buf_vmcoreinfo_setup(void)
+
+ VMCOREINFO_SIZE(atomic_long_t);
+ VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
++
++ VMCOREINFO_STRUCT_SIZE(latched_seq);
++ VMCOREINFO_OFFSET(latched_seq, val);
+ }
+ #endif
+
+@@ -1557,7 +1598,8 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ * Find first record that fits, including all following records,
+ * into the user-provided buffer for this dump.
+ */
+- seq = find_first_fitting_seq(clear_seq, -1, size, true, time);
++ seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
++ size, true, time);
+
+ prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
+
+@@ -1584,7 +1626,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ }
+
+ if (clear)
+- clear_seq = seq;
++ latched_seq_write(&clear_seq, seq);
+ logbuf_unlock_irq();
+
+ kfree(text);
+@@ -1594,7 +1636,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ static void syslog_clear(void)
+ {
+ logbuf_lock_irq();
+- clear_seq = prb_next_seq(prb);
++ latched_seq_write(&clear_seq, prb_next_seq(prb));
+ logbuf_unlock_irq();
+ }
+
+@@ -3336,7 +3378,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
+ dumper->active = true;
+
+ logbuf_lock_irqsave(flags);
+- dumper->cur_seq = clear_seq;
++ dumper->cur_seq = latched_seq_read_nolock(&clear_seq);
+ dumper->next_seq = prb_next_seq(prb);
+ logbuf_unlock_irqrestore(flags);
+
+@@ -3534,7 +3576,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+ */
+ void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+ {
+- dumper->cur_seq = clear_seq;
++ dumper->cur_seq = latched_seq_read_nolock(&clear_seq);
+ dumper->next_seq = prb_next_seq(prb);
+ }
+
+--
+2.19.1
+
diff --git a/features/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/features/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
new file mode 100644
index 00000000..fb15eefc
--- /dev/null
+++ b/features/rt/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -0,0 +1,165 @@
+From e49cfce7661aafc70145d7647cace3b08a5cff26 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 29 Aug 2013 18:21:04 +0200
+Subject: [PATCH 128/191] ptrace: fix ptrace vs tasklist_lock race
+
+As explained by Alexander Fyodorov <halcy@yandex.ru>:
+
+|read_lock(&tasklist_lock) in ptrace_stop() is converted to mutex on RT kernel,
+|and it can remove __TASK_TRACED from task->state (by moving it to
+|task->saved_state). If parent does wait() on child followed by a sys_ptrace
+|call, the following race can happen:
+|
+|- child sets __TASK_TRACED in ptrace_stop()
+|- parent does wait() which eventually calls wait_task_stopped() and returns
+| child's pid
+|- child blocks on read_lock(&tasklist_lock) in ptrace_stop() and moves
+| __TASK_TRACED flag to saved_state
+|- parent calls sys_ptrace, which calls ptrace_check_attach() and wait_task_inactive()
+
+The patch is based on his initial patch where an additional check is
+added in case the __TASK_TRACED moved to ->saved_state. The pi_lock is
+taken in case the caller is interrupted between looking into ->state and
+->saved_state.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/sched.h | 49 +++++++++++++++++++++++++++++++++++++++----
+ kernel/ptrace.c | 9 +++++++-
+ kernel/sched/core.c | 17 +++++++++++++--
+ 3 files changed, 68 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 1d85cfa28fe6..dab770a030bd 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -112,12 +112,8 @@ struct task_group;
+ __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
+ TASK_PARKED)
+
+-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+-
+ #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+
+-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+-
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+
+ /*
+@@ -1884,6 +1880,51 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+ }
+
++static inline bool __task_is_stopped_or_traced(struct task_struct *task)
++{
++ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
++ return true;
++#ifdef CONFIG_PREEMPT_RT
++ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
++ return true;
++#endif
++ return false;
++}
++
++static inline bool task_is_stopped_or_traced(struct task_struct *task)
++{
++ bool traced_stopped;
++
++#ifdef CONFIG_PREEMPT_RT
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
++ traced_stopped = __task_is_stopped_or_traced(task);
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++#else
++ traced_stopped = __task_is_stopped_or_traced(task);
++#endif
++ return traced_stopped;
++}
++
++static inline bool task_is_traced(struct task_struct *task)
++{
++ bool traced = false;
++
++ if (task->state & __TASK_TRACED)
++ return true;
++#ifdef CONFIG_PREEMPT_RT
++ /* in case the task is sleeping on tasklist_lock */
++ raw_spin_lock_irq(&task->pi_lock);
++ if (task->state & __TASK_TRACED)
++ traced = true;
++ else if (task->saved_state & __TASK_TRACED)
++ traced = true;
++ raw_spin_unlock_irq(&task->pi_lock);
++#endif
++ return traced;
++}
++
+ /*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 821cf1723814..3bb0ec669997 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -180,7 +180,14 @@ static bool ptrace_freeze_traced(struct task_struct *task)
+
+ spin_lock_irq(&task->sighand->siglock);
+ if (task_is_traced(task) && !__fatal_signal_pending(task)) {
+- task->state = __TASK_TRACED;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
++ if (task->state & __TASK_TRACED)
++ task->state = __TASK_TRACED;
++ else
++ task->saved_state = __TASK_TRACED;
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ ret = true;
+ }
+ spin_unlock_irq(&task->sighand->siglock);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c05455d4f034..5dc7a8edf093 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2596,6 +2596,18 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+
++static bool check_task_state(struct task_struct *p, long match_state)
++{
++ bool match = false;
++
++ raw_spin_lock_irq(&p->pi_lock);
++ if (p->state == match_state || p->saved_state == match_state)
++ match = true;
++ raw_spin_unlock_irq(&p->pi_lock);
++
++ return match;
++}
++
+ /*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+@@ -2640,7 +2652,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ * is actually now running somewhere else!
+ */
+ while (task_running(rq, p)) {
+- if (match_state && unlikely(p->state != match_state))
++ if (match_state && !check_task_state(p, match_state))
+ return 0;
+ cpu_relax();
+ }
+@@ -2655,7 +2667,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
+ running = task_running(rq, p);
+ queued = task_on_rq_queued(p);
+ ncsw = 0;
+- if (!match_state || p->state == match_state)
++ if (!match_state || p->state == match_state ||
++ p->saved_state == match_state)
+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+ task_rq_unlock(rq, p, &rf);
+
+--
+2.19.1
+
diff --git a/features/rt/ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch b/features/rt/ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch
new file mode 100644
index 00000000..03d44b47
--- /dev/null
+++ b/features/rt/ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch
@@ -0,0 +1,64 @@
+From 543c77838755c4c7554a11241e89298a8aae0679 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Tue, 3 Nov 2020 12:39:01 +0100
+Subject: [PATCH 129/191] ptrace: fix ptrace_unfreeze_traced() race with
+ rt-lock
+
+The patch "ptrace: fix ptrace vs tasklist_lock race" changed
+ptrace_freeze_traced() to take task->saved_state into account, but
+ptrace_unfreeze_traced() has the same problem and needs a similar fix:
+it should check/update both ->state and ->saved_state.
+
+Reported-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
+Fixes: "ptrace: fix ptrace vs tasklist_lock race"
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ kernel/ptrace.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 3bb0ec669997..95025b90810f 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -197,8 +197,8 @@ static bool ptrace_freeze_traced(struct task_struct *task)
+
+ static void ptrace_unfreeze_traced(struct task_struct *task)
+ {
+- if (task->state != __TASK_TRACED)
+- return;
++ unsigned long flags;
++ bool frozen = true;
+
+ WARN_ON(!task->ptrace || task->parent != current);
+
+@@ -207,12 +207,19 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
+ * Recheck state under the lock to close this race.
+ */
+ spin_lock_irq(&task->sighand->siglock);
+- if (task->state == __TASK_TRACED) {
+- if (__fatal_signal_pending(task))
+- wake_up_state(task, __TASK_TRACED);
+- else
+- task->state = TASK_TRACED;
+- }
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
++ if (task->state == __TASK_TRACED)
++ task->state = TASK_TRACED;
++ else if (task->saved_state == __TASK_TRACED)
++ task->saved_state = TASK_TRACED;
++ else
++ frozen = false;
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++
++ if (frozen && __fatal_signal_pending(task))
++ wake_up_state(task, __TASK_TRACED);
++
+ spin_unlock_irq(&task->sighand->siglock);
+ }
+
+--
+2.19.1
+
diff --git a/features/rt/random-Make-it-work-on-rt.patch b/features/rt/random-Make-it-work-on-rt.patch
new file mode 100644
index 00000000..1362346f
--- /dev/null
+++ b/features/rt/random-Make-it-work-on-rt.patch
@@ -0,0 +1,185 @@
+From 39bd47e442b4654f824987ebaff05ce1dfaa1af0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 21 Aug 2012 20:38:50 +0200
+Subject: [PATCH 149/191] random: Make it work on rt
+
+Delegate the random insertion to the forced threaded interrupt
+handler. Store the return IP of the hard interrupt handler in the irq
+descriptor and feed it into the random generator as a source of
+entropy.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kernel/cpu/mshyperv.c | 3 ++-
+ drivers/char/random.c | 11 +++++------
+ drivers/hv/hyperv_vmbus.h | 1 +
+ drivers/hv/vmbus_drv.c | 5 ++++-
+ include/linux/irqdesc.h | 1 +
+ include/linux/random.h | 2 +-
+ kernel/irq/handle.c | 8 +++++++-
+ kernel/irq/manage.c | 6 ++++++
+ 8 files changed, 27 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index e88bc296afca..73a2494a0b2e 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -85,11 +85,12 @@ EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
+ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
+ {
+ struct pt_regs *old_regs = set_irq_regs(regs);
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+
+ inc_irq_stat(hyperv_stimer0_count);
+ if (hv_stimer0_handler)
+ hv_stimer0_handler();
+- add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0);
++ add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0, ip);
+ ack_APIC_irq();
+
+ set_irq_regs(old_regs);
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 0fe9e200e4c8..da0a1ef9615c 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1252,26 +1252,25 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+ return *ptr;
+ }
+
+-void add_interrupt_randomness(int irq, int irq_flags)
++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
+ {
+ struct entropy_store *r;
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+- struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ cycles_t cycles = random_get_entropy();
+ __u32 c_high, j_high;
+- __u64 ip;
+
+ if (cycles == 0)
+- cycles = get_reg(fast_pool, regs);
++ cycles = get_reg(fast_pool, NULL);
+ c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
+ j_high = (sizeof(now) > 4) ? now >> 32 : 0;
+ fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
+ fast_pool->pool[1] ^= now ^ c_high;
+- ip = regs ? instruction_pointer(regs) : _RET_IP_;
++ if (!ip)
++ ip = _RET_IP_;
+ fast_pool->pool[2] ^= ip;
+ fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
+- get_reg(fast_pool, regs);
++ get_reg(fast_pool, NULL);
+
+ fast_mix(fast_pool);
+ add_interrupt_bench(cycles);
+diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
+index 9416e09ebd58..4a5767a15544 100644
+--- a/drivers/hv/hyperv_vmbus.h
++++ b/drivers/hv/hyperv_vmbus.h
+@@ -18,6 +18,7 @@
+ #include <linux/atomic.h>
+ #include <linux/hyperv.h>
+ #include <linux/interrupt.h>
++#include <linux/irq.h>
+
+ #include "hv_trace.h"
+
+diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
+index 806950bc3c1d..e0afa5031cc4 100644
+--- a/drivers/hv/vmbus_drv.c
++++ b/drivers/hv/vmbus_drv.c
+@@ -22,6 +22,7 @@
+ #include <linux/clockchips.h>
+ #include <linux/cpu.h>
+ #include <linux/sched/task_stack.h>
++#include <linux/irq.h>
+
+ #include <linux/delay.h>
+ #include <linux/notifier.h>
+@@ -1337,6 +1338,8 @@ static void vmbus_isr(void)
+ void *page_addr = hv_cpu->synic_event_page;
+ struct hv_message *msg;
+ union hv_synic_event_flags *event;
++ struct pt_regs *regs = get_irq_regs();
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+ bool handled = false;
+
+ if (unlikely(page_addr == NULL))
+@@ -1381,7 +1384,7 @@ static void vmbus_isr(void)
+ tasklet_schedule(&hv_cpu->msg_dpc);
+ }
+
+- add_interrupt_randomness(hv_get_vector(), 0);
++ add_interrupt_randomness(hv_get_vector(), 0, ip);
+ }
+
+ /*
+diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
+index 891b323266df..45d8bda8fd53 100644
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -68,6 +68,7 @@ struct irq_desc {
+ unsigned int irqs_unhandled;
+ atomic_t threads_handled;
+ int threads_handled_last;
++ u64 random_ip;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+ const struct cpumask *percpu_affinity;
+diff --git a/include/linux/random.h b/include/linux/random.h
+index f45b8be3e3c4..0e41d0527809 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -35,7 +35,7 @@ static inline void add_latent_entropy(void) {}
+
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value) __latent_entropy;
+-extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy;
+
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern int wait_for_random_bytes(void);
+diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
+index 762a928e18f9..7929fcdb7817 100644
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -192,10 +192,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+ {
+ irqreturn_t retval;
+ unsigned int flags = 0;
++ struct pt_regs *regs = get_irq_regs();
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+
+ retval = __handle_irq_event_percpu(desc, &flags);
+
+- add_interrupt_randomness(desc->irq_data.irq, flags);
++#ifdef CONFIG_PREEMPT_RT
++ desc->random_ip = ip;
++#else
++ add_interrupt_randomness(desc->irq_data.irq, flags, ip);
++#endif
+
+ if (!noirqdebug)
+ note_interrupt(desc, retval);
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index ac2c57429750..f4d18858d079 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1247,6 +1247,12 @@ static int irq_thread(void *data)
+ if (action_ret == IRQ_WAKE_THREAD)
+ irq_wake_secondary(desc, action);
+
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ migrate_disable();
++ add_interrupt_randomness(action->irq, 0,
++ desc->random_ip ^ (unsigned long) action);
++ migrate_enable();
++ }
+ wake_threads_waitq(desc);
+ }
+
+--
+2.19.1
+
diff --git a/features/rt/rcu-Delay-RCU-selftests.patch b/features/rt/rcu-Delay-RCU-selftests.patch
new file mode 100644
index 00000000..e6df57ca
--- /dev/null
+++ b/features/rt/rcu-Delay-RCU-selftests.patch
@@ -0,0 +1,75 @@
+From 520c1da6a6bdb335171681b90eacfe32d394f358 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 10 Mar 2021 15:09:02 +0100
+Subject: [PATCH 130/191] rcu: Delay RCU-selftests
+
+Delay RCU-selftests until ksoftirqd is up and running.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ init/main.c | 7 +++++++
+ kernel/rcu/tasks.h | 9 ++-------
+ 2 files changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/init/main.c b/init/main.c
+index 608a8b27fabe..5bb42b4bbf59 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -1511,6 +1511,12 @@ void __init console_on_rootfs(void)
+ fput(file);
+ }
+
++#ifdef CONFIG_PROVE_RCU
++void rcu_tasks_initiate_self_tests(void);
++#else
++static inline void rcu_tasks_initiate_self_tests(void) {}
++#endif
++
+ static noinline void __init kernel_init_freeable(void)
+ {
+ #ifndef CONFIG_BLK_DEV_INITRD
+@@ -1539,6 +1545,7 @@ static noinline void __init kernel_init_freeable(void)
+
+ rcu_init_tasks_generic();
+ do_pre_smp_initcalls();
++ rcu_tasks_initiate_self_tests();
+ lockup_detector_init();
+
+ smp_init();
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index af7c19439f4e..3384c0bc6b21 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1259,7 +1259,7 @@ static void test_rcu_tasks_callback(struct rcu_head *rhp)
+ rttd->notrun = true;
+ }
+
+-static void rcu_tasks_initiate_self_tests(void)
++void rcu_tasks_initiate_self_tests(void)
+ {
+ pr_info("Running RCU-tasks wait API self tests\n");
+ #ifdef CONFIG_TASKS_RCU
+@@ -1296,9 +1296,7 @@ static int rcu_tasks_verify_self_tests(void)
+ return ret;
+ }
+ late_initcall(rcu_tasks_verify_self_tests);
+-#else /* #ifdef CONFIG_PROVE_RCU */
+-static void rcu_tasks_initiate_self_tests(void) { }
+-#endif /* #else #ifdef CONFIG_PROVE_RCU */
++#endif /* #ifdef CONFIG_PROVE_RCU */
+
+ void __init rcu_init_tasks_generic(void)
+ {
+@@ -1313,9 +1311,6 @@ void __init rcu_init_tasks_generic(void)
+ #ifdef CONFIG_TASKS_TRACE_RCU
+ rcu_spawn_tasks_trace_kthread();
+ #endif
+-
+- // Run the self-tests.
+- rcu_tasks_initiate_self_tests();
+ }
+
+ #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+--
+2.19.1
+
diff --git a/features/rt/rcu-Prevent-false-positive-softirq-warning-on-RT.patch b/features/rt/rcu-Prevent-false-positive-softirq-warning-on-RT.patch
new file mode 100644
index 00000000..5efbe3e1
--- /dev/null
+++ b/features/rt/rcu-Prevent-false-positive-softirq-warning-on-RT.patch
@@ -0,0 +1,34 @@
+From 6d20c55d64b7184c04dacaf5fb0b46d3ae3f9554 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:55:58 +0100
+Subject: [PATCH 060/191] rcu: Prevent false positive softirq warning on RT
+
+Soft interrupt disabled sections can legitimately be preempted or scheduled
+out when blocking on a lock on RT enabled kernels so the RCU preempt check
+warning has to be disabled for RT kernels.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rcupdate.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index bd04f722714f..6d855ef091ba 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -334,7 +334,8 @@ static inline void rcu_preempt_sleep_check(void) { }
+ #define rcu_sleep_check() \
+ do { \
+ rcu_preempt_sleep_check(); \
+- RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
++ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+ "Illegal context switch in RCU-bh read-side critical section"); \
+ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
+ "Illegal context switch in RCU-sched read-side critical section"); \
+--
+2.19.1
+
diff --git a/features/rt/rcutorture-Avoid-problematic-critical-section-nestin.patch b/features/rt/rcutorture-Avoid-problematic-critical-section-nestin.patch
new file mode 100644
index 00000000..59d4b8a1
--- /dev/null
+++ b/features/rt/rcutorture-Avoid-problematic-critical-section-nestin.patch
@@ -0,0 +1,195 @@
+From 46cd674891358a7e6bf74242122b648fb85f6bcc Mon Sep 17 00:00:00 2001
+From: Scott Wood <swood@redhat.com>
+Date: Wed, 11 Sep 2019 17:57:29 +0100
+Subject: [PATCH 132/191] rcutorture: Avoid problematic critical section
+ nesting on RT
+
+rcutorture was generating some nesting scenarios that are not
+reasonable. Constrain the state selection to avoid them.
+
+Example #1:
+
+1. preempt_disable()
+2. local_bh_disable()
+3. preempt_enable()
+4. local_bh_enable()
+
+On PREEMPT_RT, BH disabling takes a local lock only when called in
+non-atomic context. Thus, atomic context must be retained until after BH
+is re-enabled. Likewise, if BH is initially disabled in non-atomic
+context, it cannot be re-enabled in atomic context.
+
+Example #2:
+
+1. rcu_read_lock()
+2. local_irq_disable()
+3. rcu_read_unlock()
+4. local_irq_enable()
+
+If the thread is preempted between steps 1 and 2,
+rcu_read_unlock_special.b.blocked will be set, but it won't be
+acted on in step 3 because IRQs are disabled. Thus, reporting of the
+quiescent state will be delayed beyond the local_irq_enable().
+
+For now, these scenarios will continue to be tested on non-PREEMPT_RT
+kernels, until debug checks are added to ensure that they are not
+happening elsewhere.
+
+Signed-off-by: Scott Wood <swood@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/rcu/rcutorture.c | 97 +++++++++++++++++++++++++++++++++++------
+ 1 file changed, 83 insertions(+), 14 deletions(-)
+
+diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
+index 99657ffa6688..d41a13e09185 100644
+--- a/kernel/rcu/rcutorture.c
++++ b/kernel/rcu/rcutorture.c
+@@ -61,10 +61,13 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@
+ #define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */
+ #define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */
+ #define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */
+-#define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */
++#define RCUTORTURE_RDR_ATOM_BH 0x40 /* ... disabling bh while atomic */
++#define RCUTORTURE_RDR_ATOM_RBH 0x80 /* ... RBH while atomic */
++#define RCUTORTURE_RDR_NBITS 8 /* Number of bits defined above. */
+ #define RCUTORTURE_MAX_EXTEND \
+ (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
+- RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
++ RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED | \
++ RCUTORTURE_RDR_ATOM_BH | RCUTORTURE_RDR_ATOM_RBH)
+ #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
+ /* Must be power of two minus one. */
+ #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
+@@ -1418,31 +1421,53 @@ static void rcutorture_one_extend(int *readstate, int newstate,
+ WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
+ rtrsp->rt_readstate = newstate;
+
+- /* First, put new protection in place to avoid critical-section gap. */
++ /*
++ * First, put new protection in place to avoid critical-section gap.
++ * Disable preemption around the ATOM disables to ensure that
++ * in_atomic() is true.
++ */
+ if (statesnew & RCUTORTURE_RDR_BH)
+ local_bh_disable();
++ if (statesnew & RCUTORTURE_RDR_RBH)
++ rcu_read_lock_bh();
+ if (statesnew & RCUTORTURE_RDR_IRQ)
+ local_irq_disable();
+ if (statesnew & RCUTORTURE_RDR_PREEMPT)
+ preempt_disable();
+- if (statesnew & RCUTORTURE_RDR_RBH)
+- rcu_read_lock_bh();
+ if (statesnew & RCUTORTURE_RDR_SCHED)
+ rcu_read_lock_sched();
++ preempt_disable();
++ if (statesnew & RCUTORTURE_RDR_ATOM_BH)
++ local_bh_disable();
++ if (statesnew & RCUTORTURE_RDR_ATOM_RBH)
++ rcu_read_lock_bh();
++ preempt_enable();
+ if (statesnew & RCUTORTURE_RDR_RCU)
+ idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
+
+- /* Next, remove old protection, irq first due to bh conflict. */
++ /*
++ * Next, remove old protection, in decreasing order of strength
++ * to avoid unlock paths that aren't safe in the stronger
++ * context. Disable preemption around the ATOM enables in
++ * case the context was only atomic due to IRQ disabling.
++ */
++ preempt_disable();
+ if (statesold & RCUTORTURE_RDR_IRQ)
+ local_irq_enable();
+- if (statesold & RCUTORTURE_RDR_BH)
++ if (statesold & RCUTORTURE_RDR_ATOM_BH)
+ local_bh_enable();
++ if (statesold & RCUTORTURE_RDR_ATOM_RBH)
++ rcu_read_unlock_bh();
++ preempt_enable();
+ if (statesold & RCUTORTURE_RDR_PREEMPT)
+ preempt_enable();
+- if (statesold & RCUTORTURE_RDR_RBH)
+- rcu_read_unlock_bh();
+ if (statesold & RCUTORTURE_RDR_SCHED)
+ rcu_read_unlock_sched();
++ if (statesold & RCUTORTURE_RDR_BH)
++ local_bh_enable();
++ if (statesold & RCUTORTURE_RDR_RBH)
++ rcu_read_unlock_bh();
++
+ if (statesold & RCUTORTURE_RDR_RCU) {
+ bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
+
+@@ -1485,6 +1510,12 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
+ int mask = rcutorture_extend_mask_max();
+ unsigned long randmask1 = torture_random(trsp) >> 8;
+ unsigned long randmask2 = randmask1 >> 3;
++ unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
++ unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
++ unsigned long nonatomic_bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
++ unsigned long atomic_bhs = RCUTORTURE_RDR_ATOM_BH |
++ RCUTORTURE_RDR_ATOM_RBH;
++ unsigned long tmp;
+
+ WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
+ /* Mostly only one bit (need preemption!), sometimes lots of bits. */
+@@ -1492,11 +1523,49 @@ rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
+ mask = mask & randmask2;
+ else
+ mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
+- /* Can't enable bh w/irq disabled. */
+- if ((mask & RCUTORTURE_RDR_IRQ) &&
+- ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
+- (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
+- mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
++
++ /*
++ * Can't enable bh w/irq disabled.
++ */
++ tmp = atomic_bhs | nonatomic_bhs;
++ if (mask & RCUTORTURE_RDR_IRQ)
++ mask |= oldmask & tmp;
++
++ /*
++ * Ideally these sequences would be detected in debug builds
++ * (regardless of RT), but until then don't stop testing
++ * them on non-RT.
++ */
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ /*
++ * Can't release the outermost rcu lock in an irq disabled
++ * section without preemption also being disabled, if irqs
++ * had ever been enabled during this RCU critical section
++ * (could leak a special flag and delay reporting the qs).
++ */
++ if ((oldmask & RCUTORTURE_RDR_RCU) &&
++ (mask & RCUTORTURE_RDR_IRQ) &&
++ !(mask & preempts))
++ mask |= RCUTORTURE_RDR_RCU;
++
++ /* Can't modify atomic bh in non-atomic context */
++ if ((oldmask & atomic_bhs) && (mask & atomic_bhs) &&
++ !(mask & preempts_irq)) {
++ mask |= oldmask & preempts_irq;
++ if (mask & RCUTORTURE_RDR_IRQ)
++ mask |= oldmask & tmp;
++ }
++ if ((mask & atomic_bhs) && !(mask & preempts_irq))
++ mask |= RCUTORTURE_RDR_PREEMPT;
++
++ /* Can't modify non-atomic bh in atomic context */
++ tmp = nonatomic_bhs;
++ if (oldmask & preempts_irq)
++ mask &= ~tmp;
++ if ((oldmask | mask) & preempts_irq)
++ mask |= oldmask & tmp;
++ }
++
+ return mask ?: RCUTORTURE_RDR_RCU;
+ }
+
+--
+2.19.1
+
diff --git a/features/rt/rt-Add-local-irq-locks.patch b/features/rt/rt-Add-local-irq-locks.patch
new file mode 100644
index 00000000..0ffdcac6
--- /dev/null
+++ b/features/rt/rt-Add-local-irq-locks.patch
@@ -0,0 +1,210 @@
+From 751ab616fb58a462f6d5a1ac6fc31662d58e33e4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 20 Jun 2011 09:03:47 +0200
+Subject: [PATCH 099/191] rt: Add local irq locks
+
+Introduce locallock. For !RT this maps to preempt_disable()/
+local_irq_disable() so there is not much that changes. For RT this will
+map to a spinlock. This makes preemption possible and the locked "resource"
+gets the lockdep annotation it wouldn't have otherwise. The locks are
+recursive for owner == current. Also, all locks use migrate_disable(),
+which ensures that the task is not migrated to another CPU while the lock
+is held, even if the owner is preempted.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/local_lock_internal.h | 129 +++++++++++++++++++++++++---
+ 1 file changed, 115 insertions(+), 14 deletions(-)
+
+diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
+index ded90b097e6e..7162dcd0a847 100644
+--- a/include/linux/local_lock_internal.h
++++ b/include/linux/local_lock_internal.h
+@@ -7,36 +7,94 @@
+ #include <linux/lockdep.h>
+
+ typedef struct {
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
++#ifdef CONFIG_PREEMPT_RT
++ spinlock_t lock;
++ struct task_struct *owner;
++ int nestcnt;
++
++#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+ #endif
+ } local_lock_t;
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LL_DEP_MAP_INIT(lockname) \
++#ifdef CONFIG_PREEMPT_RT
++
++#define INIT_LOCAL_LOCK(lockname) { \
++ __SPIN_LOCK_UNLOCKED((lockname).lock), \
++ .owner = NULL, \
++ .nestcnt = 0, \
++ }
++#else
++
++# ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define LL_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+- .lock_type = LD_LOCK_PERCPU, \
++ .lock_type = LD_LOCK_PERCPU, \
+ }
+-#else
+-# define LL_DEP_MAP_INIT(lockname)
+-#endif
++# else
++# define LL_DEP_MAP_INIT(lockname)
++# endif
+
+ #define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) }
+
+-#define __local_lock_init(lock) \
++#endif
++
++#ifdef CONFIG_PREEMPT_RT
++
++static inline void ___local_lock_init(local_lock_t *l)
++{
++ l->owner = NULL;
++ l->nestcnt = 0;
++}
++
++#define __local_lock_init(l) \
++do { \
++ spin_lock_init(&(l)->lock); \
++ ___local_lock_init(l); \
++} while (0)
++
++#else
++
++#define __local_lock_init(l) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+- debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+- lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
++ debug_check_no_locks_freed((void *)l, sizeof(*l)); \
++ lockdep_init_map_type(&(l)->dep_map, #l, &__key, 0, \
+ LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_PERCPU); \
+ } while (0)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT
++
++static inline void local_lock_acquire(local_lock_t *l)
++{
++ if (l->owner != current) {
++ spin_lock(&l->lock);
++ DEBUG_LOCKS_WARN_ON(l->owner);
++ DEBUG_LOCKS_WARN_ON(l->nestcnt);
++ l->owner = current;
++ }
++ l->nestcnt++;
++}
++
++static inline void local_lock_release(local_lock_t *l)
++{
++ DEBUG_LOCKS_WARN_ON(l->nestcnt == 0);
++ DEBUG_LOCKS_WARN_ON(l->owner != current);
++ if (--l->nestcnt)
++ return;
++
++ l->owner = NULL;
++ spin_unlock(&l->lock);
++}
++
++#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ static inline void local_lock_acquire(local_lock_t *l)
+ {
+ lock_map_acquire(&l->dep_map);
+@@ -56,21 +114,50 @@ static inline void local_lock_acquire(local_lock_t *l) { }
+ static inline void local_lock_release(local_lock_t *l) { }
+ #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+
++#ifdef CONFIG_PREEMPT_RT
++
+ #define __local_lock(lock) \
+ do { \
+- preempt_disable(); \
++ migrate_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
++#define __local_unlock(lock) \
++ do { \
++ local_lock_release(this_cpu_ptr(lock)); \
++ migrate_enable(); \
++ } while (0)
++
+ #define __local_lock_irq(lock) \
+ do { \
+- local_irq_disable(); \
++ migrate_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+ #define __local_lock_irqsave(lock, flags) \
+ do { \
+- local_irq_save(flags); \
++ migrate_disable(); \
++ flags = 0; \
++ local_lock_acquire(this_cpu_ptr(lock)); \
++ } while (0)
++
++#define __local_unlock_irq(lock) \
++ do { \
++ local_lock_release(this_cpu_ptr(lock)); \
++ migrate_enable(); \
++ } while (0)
++
++#define __local_unlock_irqrestore(lock, flags) \
++ do { \
++ local_lock_release(this_cpu_ptr(lock)); \
++ migrate_enable(); \
++ } while (0)
++
++#else
++
++#define __local_lock(lock) \
++ do { \
++ preempt_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+@@ -80,6 +167,18 @@ static inline void local_lock_release(local_lock_t *l) { }
+ preempt_enable(); \
+ } while (0)
+
++#define __local_lock_irq(lock) \
++ do { \
++ local_irq_disable(); \
++ local_lock_acquire(this_cpu_ptr(lock)); \
++ } while (0)
++
++#define __local_lock_irqsave(lock, flags) \
++ do { \
++ local_irq_save(flags); \
++ local_lock_acquire(this_cpu_ptr(lock)); \
++ } while (0)
++
+ #define __local_unlock_irq(lock) \
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+@@ -91,3 +190,5 @@ static inline void local_lock_release(local_lock_t *l) { }
+ local_lock_release(this_cpu_ptr(lock)); \
+ local_irq_restore(flags); \
+ } while (0)
++
++#endif
+--
+2.19.1
+
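A hedged sketch of typical local_lock usage that this patch affects (the
struct, per-CPU variable and function names are illustrative assumptions):
on !RT __local_lock() disables preemption, while with the RT variant above
it only disables migration and takes the per-CPU spinlock, so the critical
section stays preemptible.

struct example_pcpu {
	local_lock_t	lock;
	unsigned long	count;
};

static DEFINE_PER_CPU(struct example_pcpu, example_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void example_count_event(void)
{
	/* !RT: preempt_disable(); RT: migrate_disable() + per-CPU spinlock */
	local_lock(&example_pcpu.lock);
	__this_cpu_inc(example_pcpu.count);
	local_unlock(&example_pcpu.lock);
}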
diff --git a/features/rt/rt-Introduce-cpu_chill.patch b/features/rt/rt-Introduce-cpu_chill.patch
new file mode 100644
index 00000000..2a837661
--- /dev/null
+++ b/features/rt/rt-Introduce-cpu_chill.patch
@@ -0,0 +1,121 @@
+From fccac47c4972dfaef50b0e080c60879a9d97bd25 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 7 Mar 2012 20:51:03 +0100
+Subject: [PATCH 138/191] rt: Introduce cpu_chill()
+
+Retry loops on RT might loop forever when the modifying side was
+preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
+defaults to cpu_relax() for non-RT. On RT it puts the looping task to
+sleep for 1ms so the preempted task can make progress.
+
+Steven Rostedt changed it to use a hrtimer instead of msleep():
+|
+|Ulrich Obergfell pointed out that cpu_chill() calls msleep() which is woken
+|up by the ksoftirqd running the TIMER softirq. But as the cpu_chill() is
+|called from softirq context, it may block the ksoftirqd() from running, in
+|which case, it may never wake up the msleep() causing the deadlock.
+
++ bigeasy later changed it to use schedule_hrtimeout():
+|If a task calls cpu_chill() and gets woken up by a regular or spurious
+|wakeup and has a signal pending, then it exits the sleep loop in
+|do_nanosleep() and sets up the restart block. If restart->nanosleep.type is
+|not TI_NONE then this results in accessing a stale user pointer from a
+|previously interrupted syscall and a copy to user based on the stale
+|pointer or a BUG() when 'type' is not supported in nanosleep_copyout().
+
++ bigeasy: add PF_NOFREEZE:
+| [....] Waiting for /dev to be fully populated...
+| =====================================
+| [ BUG: udevd/229 still has locks held! ]
+| 3.12.11-rt17 #23 Not tainted
+| -------------------------------------
+| 1 lock held by udevd/229:
+| #0: (&type->i_mutex_dir_key#2){+.+.+.}, at: lookup_slow+0x28/0x98
+|
+| stack backtrace:
+| CPU: 0 PID: 229 Comm: udevd Not tainted 3.12.11-rt17 #23
+| (unwind_backtrace+0x0/0xf8) from (show_stack+0x10/0x14)
+| (show_stack+0x10/0x14) from (dump_stack+0x74/0xbc)
+| (dump_stack+0x74/0xbc) from (do_nanosleep+0x120/0x160)
+| (do_nanosleep+0x120/0x160) from (hrtimer_nanosleep+0x90/0x110)
+| (hrtimer_nanosleep+0x90/0x110) from (cpu_chill+0x30/0x38)
+| (cpu_chill+0x30/0x38) from (dentry_kill+0x158/0x1ec)
+| (dentry_kill+0x158/0x1ec) from (dput+0x74/0x15c)
+| (dput+0x74/0x15c) from (lookup_real+0x4c/0x50)
+| (lookup_real+0x4c/0x50) from (__lookup_hash+0x34/0x44)
+| (__lookup_hash+0x34/0x44) from (lookup_slow+0x38/0x98)
+| (lookup_slow+0x38/0x98) from (path_lookupat+0x208/0x7fc)
+| (path_lookupat+0x208/0x7fc) from (filename_lookup+0x20/0x60)
+| (filename_lookup+0x20/0x60) from (user_path_at_empty+0x50/0x7c)
+| (user_path_at_empty+0x50/0x7c) from (user_path_at+0x14/0x1c)
+| (user_path_at+0x14/0x1c) from (vfs_fstatat+0x48/0x94)
+| (vfs_fstatat+0x48/0x94) from (SyS_stat64+0x14/0x30)
+| (SyS_stat64+0x14/0x30) from (ret_fast_syscall+0x0/0x48)
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 6 ++++++
+ kernel/time/hrtimer.c | 30 ++++++++++++++++++++++++++++++
+ 2 files changed, 36 insertions(+)
+
+diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
+index bb5e7b0a4274..e425a26a5ed8 100644
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -540,4 +540,10 @@ int hrtimers_dead_cpu(unsigned int cpu);
+ #define hrtimers_dead_cpu NULL
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT
++extern void cpu_chill(void);
++#else
++# define cpu_chill() cpu_relax()
++#endif
++
+ #endif
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 788b9d137de4..3fa18a01f5b2 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -2006,6 +2006,36 @@ SYSCALL_DEFINE2(nanosleep_time32, struct old_timespec32 __user *, rqtp,
+ }
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT
++/*
++ * Sleep for 1 ms in the hope that whoever holds what we want will let it go.
++ */
++void cpu_chill(void)
++{
++ unsigned int freeze_flag = current->flags & PF_NOFREEZE;
++ struct task_struct *self = current;
++ ktime_t chill_time;
++
++ raw_spin_lock_irq(&self->pi_lock);
++ self->saved_state = self->state;
++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
++ raw_spin_unlock_irq(&self->pi_lock);
++
++ chill_time = ktime_set(0, NSEC_PER_MSEC);
++
++ current->flags |= PF_NOFREEZE;
++ schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD);
++ if (!freeze_flag)
++ current->flags &= ~PF_NOFREEZE;
++
++ raw_spin_lock_irq(&self->pi_lock);
++ __set_current_state_no_track(self->saved_state);
++ self->saved_state = TASK_RUNNING;
++ raw_spin_unlock_irq(&self->pi_lock);
++}
++EXPORT_SYMBOL(cpu_chill);
++#endif
++
+ /*
+ * Functions related to boot-time initialization:
+ */
+--
+2.19.1
+
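A hedged sketch of the intended conversion (the lock and function names are
made up; the real users are the dcache and fs/namespace trylock loops later
in this series): the retry loop spins with cpu_relax() on !RT but sleeps
briefly via cpu_chill() on RT so a preempted lock holder can run.

static DEFINE_SPINLOCK(example_lock);

static void example_wait_for_lock(void)
{
	/* cpu_chill() is cpu_relax() on !RT, a ~1ms sleep on PREEMPT_RT */
	while (!spin_trylock(&example_lock))
		cpu_chill();
	/* ... do the work that required the lock ... */
	spin_unlock(&example_lock);
}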
diff --git a/features/rt/rt.scc b/features/rt/rt.scc
index 4cc0cb9a..f00f2eb5 100644
--- a/features/rt/rt.scc
+++ b/features/rt/rt.scc
@@ -1,3 +1,194 @@
# SPDX-License-Identifier: MIT
#
+patch highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
+patch timers-Move-clearing-of-base-timer_running-under-bas.patch
+patch kthread-Move-prio-affinite-change-into-the-newly-cre.patch
+patch genirq-Move-prio-assignment-into-the-newly-created-t.patch
+patch notifier-Make-atomic_notifiers-use-raw_spinlock.patch
+patch powerpc-mm-Move-the-linear_mapping_mutex-to-the-ifde.patch
+patch printk-limit-second-loop-of-syslog_print_all.patch
+patch printk-kmsg_dump-remove-unused-fields.patch
+patch printk-refactor-kmsg_dump_get_buffer.patch
+patch printk-consolidate-kmsg_dump_get_buffer-syslog_print.patch
+patch printk-introduce-CONSOLE_LOG_MAX-for-improved-multi-.patch
+patch printk-use-seqcount_latch-for-clear_seq.patch
+patch printk-use-atomic64_t-for-devkmsg_user.seq.patch
+patch printk-add-syslog_lock.patch
+patch printk-introduce-a-kmsg_dump-iterator.patch
+patch um-synchronize-kmsg_dumper.patch
+patch printk-remove-logbuf_lock.patch
+patch printk-kmsg_dump-remove-_nolock-variants.patch
+patch printk-kmsg_dump-use-kmsg_dump_rewind.patch
+patch printk-console-remove-unnecessary-safe-buffer-usage.patch
+patch printk-track-limit-recursion.patch
+patch printk-remove-safe-buffers.patch
+patch printk-convert-syslog_lock-to-spin_lock.patch
+patch console-add-write_atomic-interface.patch
+patch serial-8250-implement-write_atomic.patch
+patch printk-relocate-printk_delay-and-vprintk_default.patch
+patch printk-combine-boot_delay_msec-into-printk_delay.patch
+patch printk-change-console_seq-to-atomic64_t.patch
+patch printk-introduce-kernel-sync-mode.patch
+patch printk-move-console-printing-to-kthreads.patch
+patch printk-remove-deferred-printing.patch
+patch printk-add-console-handover.patch
+patch printk-add-pr_flush.patch
+patch kcov-Remove-kcov-include-from-sched.h-and-move-it-to.patch
+patch cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
+patch mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
+patch shmem-Use-raw_spinlock_t-for-stat_lock.patch
+patch net-Move-lockdep-where-it-belongs.patch
+patch tcp-Remove-superfluous-BH-disable-around-listening_h.patch
+patch smp-Wake-ksoftirqd-on-PREEMPT_RT-instead-do_softirq.patch
+patch tasklets-Replace-barrier-with-cpu_relax-in-tasklet_u.patch
+patch tasklets-Use-static-inlines-for-stub-implementations.patch
+patch tasklets-Provide-tasklet_disable_in_atomic.patch
+patch tasklets-Use-spin-wait-in-tasklet_disable-temporaril.patch
+patch tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
+patch tasklets-Replace-spin-wait-in-tasklet_kill.patch
+patch tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch
+patch net-jme-Replace-link-change-tasklet-with-work.patch
+patch net-sundance-Use-tasklet_disable_in_atomic.patch
+patch ath9k-Use-tasklet_disable_in_atomic.patch
+patch atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch
+patch PCI-hv-Use-tasklet_disable_in_atomic.patch
+patch firewire-ohci-Use-tasklet_disable_in_atomic-where-re.patch
+patch tasklets-Switch-tasklet_disable-to-the-sleep-wait-va.patch
+patch softirq-Add-RT-specific-softirq-accounting.patch
+patch irqtime-Make-accounting-correct-on-RT.patch
+patch softirq-Move-various-protections-into-inline-helpers.patch
+patch softirq-Make-softirq-control-and-processing-RT-aware.patch
+patch tick-sched-Prevent-false-positive-softirq-pending-wa.patch
+patch rcu-Prevent-false-positive-softirq-warning-on-RT.patch
+patch locking-rtmutex-Remove-cruft.patch
+patch locking-rtmutex-Remove-output-from-deadlock-detector.patch
+patch locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch
+patch locking-rtmutex-Remove-rt_mutex_timed_lock.patch
+patch locking-rtmutex-Handle-the-various-new-futex-race-co.patch
+patch futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
+patch locking-rtmutex-Make-lock_killable-work.patch
+patch locking-spinlock-Split-the-lock-types-header.patch
+patch locking-rtmutex-Avoid-include-hell.patch
+patch lockdep-Reduce-header-files-in-debug_locks.h.patch
+patch locking-split-out-the-rbtree-definition.patch
+patch locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
+patch locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
+patch sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
+patch locking-rtmutex-add-sleeping-lock-implementation.patch
+patch locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
+patch locking-rtmutex-add-mutex-implementation-based-on-rt.patch
+patch locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
+patch locking-rtmutex-add-rwlock-implementation-based-on-r.patch
+patch locking-rtmutex-wire-up-RT-s-locking.patch
+patch locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
+patch locking-rtmutex-Use-custom-scheduling-function-for-s.patch
+patch signal-Revert-ptrace-preempt-magic.patch
+patch preempt-Provide-preempt_-_-no-rt-variants.patch
+patch mm-vmstat-Protect-per-cpu-variables-with-preempt-dis.patch
+patch mm-memcontrol-Disable-preemption-in-__mod_memcg_lruv.patch
+patch xfrm-Use-sequence-counter-with-associated-spinlock.patch
+patch u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
+patch fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+patch fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
+patch net-Qdisc-use-a-seqlock-instead-seqcount.patch
+patch net-Properly-annotate-the-try-lock-for-the-seqlock.patch
+patch kconfig-Disable-config-options-which-are-not-RT-comp.patch
+patch mm-Allow-only-SLUB-on-RT.patch
+patch sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
+patch net-core-disable-NET_RX_BUSY_POLL-on-RT.patch
+patch efi-Disable-runtime-services-on-RT.patch
+patch efi-Allow-efi-runtime.patch
+patch rt-Add-local-irq-locks.patch
+patch signal-x86-Delay-calling-signals-in-atomic.patch
+patch kernel-sched-add-put-get-_cpu_light.patch
+patch trace-Add-migrate-disabled-counter-to-tracing-output.patch
+patch locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
+patch mm-sl-au-b-Change-list_lock-to-raw_spinlock_t.patch
+patch mm-slub-Make-object_map_lock-a-raw_spinlock_t.patch
+patch mm-slub-Enable-irqs-for-__GFP_WAIT.patch
+patch mm-slub-Move-discard_slab-invocations-out-of-IRQ-off.patch
+patch mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch
+patch mm-slub-Don-t-resize-the-location-tracking-cache-on-.patch
+patch mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
+patch mm-page_alloc-Use-a-local_lock-instead-of-explicit-l.patch
+patch mm-slub-Don-t-enable-partial-CPU-caches-on-PREEMPT_R.patch
+patch mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
+patch mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+patch mm-memcontrol-Replace-local_irq_disable-with-local-l.patch
+patch mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch
+patch x86-kvm-Require-const-tsc-for-RT.patch
+patch wait.h-include-atomic.h.patch
+patch sched-Limit-the-number-of-task-migrations-per-batch.patch
+patch sched-Move-mmdrop-to-RCU-on-RT.patch
+patch kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+patch sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch
+patch sched-Disable-TTWU_QUEUE-on-RT.patch
+patch softirq-Check-preemption-after-reenabling-interrupts.patch
+patch softirq-Disable-softirq-stacks-for-RT.patch
+patch net-core-use-local_bh_disable-in-netif_rx_ni.patch
+patch pid.h-include-atomic.h.patch
+patch ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+patch ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch
+patch rcu-Delay-RCU-selftests.patch
+patch locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
+patch rcutorture-Avoid-problematic-critical-section-nestin.patch
+patch mm-vmalloc-Another-preempt-disable-region-which-suck.patch
+patch block-mq-do-not-invoke-preempt_disable.patch
+patch md-raid5-Make-raid5_percpu-handling-RT-aware.patch
+patch scsi-fcoe-Make-RT-aware.patch
+patch sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+patch rt-Introduce-cpu_chill.patch
+patch fs-namespace-Use-cpu_chill-in-trylock-loops.patch
+patch debugobjects-Make-RT-aware.patch
+patch net-Use-skbufhead-with-raw-lock.patch
+patch net-Dequeue-in-dev_cpu_dead-without-the-lock.patch
+patch net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+patch irqwork-push-most-work-into-softirq-context.patch
+patch crypto-limit-more-FPU-enabled-sections.patch
+patch crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
+patch panic-skip-get_random_bytes-for-RT_FULL-in-init_oops.patch
+patch x86-stackprotector-Avoid-random-pool-on-rt.patch
+patch random-Make-it-work-on-rt.patch
+patch net-Remove-preemption-disabling-in-netif_rx.patch
+patch lockdep-Make-it-RT-aware.patch
+patch lockdep-selftest-Only-do-hardirq-context-test-for-ra.patch
+patch lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
+patch lockdep-disable-self-test.patch
+patch drm-radeon-i915-Use-preempt_disable-enable_rt-where-.patch
+patch drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
+patch drm-i915-disable-tracing-on-RT.patch
+patch drm-i915-skip-DRM_I915_LOW_LEVEL_TRACEPOINTS-with-NO.patch
+patch drm-i915-gt-Only-disable-interrupts-for-the-timeline.patch
+patch cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
+patch x86-Allow-to-enable-RT.patch
+patch mm-scatterlist-Do-not-disable-irqs-on-RT.patch
+patch sched-Add-support-for-lazy-preemption.patch
+patch x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
+patch x86-Support-for-lazy-preemption.patch
+patch arm-Add-support-for-lazy-preemption.patch
+patch powerpc-Add-support-for-lazy-preemption.patch
+patch arch-arm64-Add-lazy-preempt-support.patch
+patch jump-label-disable-if-stop_machine-is-used.patch
+patch leds-trigger-disable-CPU-trigger-on-RT.patch
+patch tty-serial-omap-Make-the-locking-RT-aware.patch
+patch tty-serial-pl011-Make-the-locking-work-on-RT.patch
+patch ARM-enable-irq-in-translation-section-permission-fau.patch
+patch genirq-update-irq_set_irqchip_state-documentation.patch
+patch KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+patch arm64-fpsimd-Delay-freeing-memory-in-fpsimd_flush_th.patch
+patch x86-Enable-RT-also-on-32bit.patch
+patch ARM-Allow-to-enable-RT.patch
+patch ARM64-Allow-to-enable-RT.patch
+patch powerpc-traps-Use-PREEMPT_RT.patch
+patch powerpc-pseries-iommu-Use-a-locallock-instead-local_.patch
+patch powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
+patch powerpc-stackprotector-work-around-stack-guard-init-.patch
+patch powerpc-Avoid-recursive-header-includes.patch
+patch POWERPC-Allow-to-enable-RT.patch
+patch drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+patch tpm_tis-fix-stall-after-iowrite-s.patch
+patch signals-Allow-RT-tasks-to-cache-one-sigqueue-struct.patch
+patch genirq-Disable-irqpoll-on-rt.patch
+patch sysfs-Add-sys-kernel-realtime-entry.patch
+patch Add-localversion-for-RT-release.patch
diff --git a/features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch b/features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
new file mode 100644
index 00000000..8de24806
--- /dev/null
+++ b/features/rt/sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
@@ -0,0 +1,115 @@
+From 34b399662682e7d121a12d7f11c06f809c60d085 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 25 Jun 2011 09:21:04 +0200
+Subject: [PATCH 074/191] sched: Add saved_state for tasks blocked on sleeping
+ locks
+
+Spinlocks are state-preserving in !RT. RT changes the state when a
+task gets blocked on a lock. So we need to remember the state before
+the lock contention. If a regular wakeup (not an rtmutex-related
+wakeup) happens, the saved_state is updated to running. When the lock
+sleep is done, the saved state is restored.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 3 +++
+ kernel/sched/core.c | 34 ++++++++++++++++++++++++++++++++--
+ kernel/sched/sched.h | 1 +
+ 3 files changed, 36 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index c8998312d7bc..3f05b8c29132 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -655,6 +655,8 @@ struct task_struct {
+ #endif
+ /* -1 unrunnable, 0 runnable, >0 stopped: */
+ volatile long state;
++ /* saved state for "spinlock sleepers" */
++ volatile long saved_state;
+
+ /*
+ * This begins the randomizable portion of task_struct. Only
+@@ -1780,6 +1782,7 @@ extern struct task_struct *find_get_task_by_vpid(pid_t nr);
+
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct *tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+
+ #ifdef CONFIG_SMP
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 98191218d891..4efe6fd72719 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3314,7 +3314,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ int cpu, success = 0;
+
+ preempt_disable();
+- if (p == current) {
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && p == current) {
+ /*
+ * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
+ * == smp_processor_id()'. Together this means we can special
+@@ -3344,8 +3344,26 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ */
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ smp_mb__after_spinlock();
+- if (!(p->state & state))
++ if (!(p->state & state)) {
++ /*
++ * The task might be running due to a spinlock sleeper
++ * wakeup. Check the saved state and set it to running
++ * if the wakeup condition is true.
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER)) {
++ if (p->saved_state & state) {
++ p->saved_state = TASK_RUNNING;
++ success = 1;
++ }
++ }
+ goto unlock;
++ }
++ /*
++ * If this is a regular wakeup, then we can unconditionally
++ * clear the saved state of a "lock sleeper".
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER))
++ p->saved_state = TASK_RUNNING;
+
+ trace_sched_waking(p);
+
+@@ -3534,6 +3552,18 @@ int wake_up_process(struct task_struct *p)
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
++/**
++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
++ * @p: The process to be woken up.
++ *
++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
++ * the nature of the wakeup.
++ */
++int wake_up_lock_sleeper(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER);
++}
++
+ int wake_up_state(struct task_struct *p, unsigned int state)
+ {
+ return try_to_wake_up(p, state, 0);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 10a1522b1e30..b65a4e244a77 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1751,6 +1751,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
+ #define WF_SYNC 0x10 /* Waker goes to sleep after wakeup */
+ #define WF_MIGRATED 0x20 /* Internal use, task got migrated */
+ #define WF_ON_CPU 0x40 /* Wakee is on_cpu */
++#define WF_LOCK_SLEEPER 0x80 /* Wakeup spinlock "sleeper" */
+
+ #ifdef CONFIG_SMP
+ static_assert(WF_EXEC == SD_BALANCE_EXEC);
+--
+2.19.1
+
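A simplified sketch of how the new field and helper are meant to be used by
the rtmutex-based "sleeping spinlock" slowpath added later in this series
(the function names are hypothetical, and the real code additionally holds
pi_lock and handles wakeup races): the blocking side saves the caller's
state before sleeping, and the release side wakes the waiter with
wake_up_lock_sleeper() so a concurrent regular wakeup is recorded in
saved_state instead of being lost.

/* Blocking side: preserve the caller's state across the lock sleep. */
static void example_sleep_on_sleeping_lock(void)
{
	current->saved_state = current->state;
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
	/* after acquiring the lock, restore whatever state the caller had */
	set_current_state(current->saved_state);
	current->saved_state = TASK_RUNNING;
}

/* Release side: wake the waiter without clobbering its saved_state. */
static void example_wake_lock_waiter(struct task_struct *waiter)
{
	wake_up_lock_sleeper(waiter);
}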
diff --git a/features/rt/sched-Add-support-for-lazy-preemption.patch b/features/rt/sched-Add-support-for-lazy-preemption.patch
new file mode 100644
index 00000000..e3b28534
--- /dev/null
+++ b/features/rt/sched-Add-support-for-lazy-preemption.patch
@@ -0,0 +1,690 @@
+From 4ea3d1a1ba58c951bbfc4f5e3592823f3fcd2d4a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 26 Oct 2012 18:50:54 +0100
+Subject: [PATCH 163/191] sched: Add support for lazy preemption
+
+It has become an obsession to mitigate the determinism vs. throughput
+loss of RT. Looking at the mainline semantics of preemption points
+gives a hint why RT sucks throughput-wise for ordinary SCHED_OTHER
+tasks. One major issue is the wakeup of tasks which right away
+preempt the waking task while the waking task holds a lock on which
+the woken task will block right after having preempted the waker. In
+mainline this is prevented due to the implicit preemption disable of
+spin/rw_lock-held regions. On RT this is not possible due to the fully
+preemptible nature of sleeping spinlocks.
+
+Though for a SCHED_OTHER task preempting another SCHED_OTHER task this
+is really not a correctness issue. RT folks are concerned about
+SCHED_FIFO/RR task preemption and not about the purely
+fairness-driven SCHED_OTHER preemption latencies.
+
+So I introduced a lazy preemption mechanism which only applies to
+SCHED_OTHER tasks preempting another SCHED_OTHER task. Aside from the
+existing preempt_count, each task now sports a preempt_lazy_count
+which is manipulated on lock acquisition and release. This is slightly
+incorrect as for laziness reasons I coupled this to
+migrate_disable/enable so some other mechanisms get the same treatment
+(e.g. get_cpu_light).
+
+Now on the scheduler side instead of setting NEED_RESCHED this sets
+NEED_RESCHED_LAZY in case of a SCHED_OTHER/SCHED_OTHER preemption and
+therefore allows the waking task to exit the lock-held region before
+the woken task preempts. That also works better for cross-CPU wakeups
+as the other side can stay in the adaptive spinning loop.
+
+For RT class preemption there is no change. This simply sets
+NEED_RESCHED and forgoes the lazy preemption counter.
+
+ Initial tests do not expose any observable latency increase, but
+history shows that I've been proven wrong before :)
+
+The lazy preemption mode is on by default, but with
+CONFIG_SCHED_DEBUG enabled it can be disabled via:
+
+ # echo NO_PREEMPT_LAZY >/sys/kernel/debug/sched_features
+
+and re-enabled via
+
+ # echo PREEMPT_LAZY >/sys/kernel/debug/sched_features
+
+The test results so far are very machine- and workload-dependent, but
+there is a clear trend that it enhances the non-RT workload
+performance.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/preempt.h | 54 ++++++++++++++++++++++--
+ include/linux/sched.h | 38 +++++++++++++++++
+ include/linux/thread_info.h | 12 +++++-
+ include/linux/trace_events.h | 5 ++-
+ kernel/Kconfig.preempt | 6 +++
+ kernel/sched/core.c | 82 +++++++++++++++++++++++++++++++++++-
+ kernel/sched/fair.c | 16 +++----
+ kernel/sched/features.h | 3 ++
+ kernel/sched/sched.h | 9 ++++
+ kernel/trace/trace.c | 50 +++++++++++++---------
+ kernel/trace/trace_events.c | 1 +
+ kernel/trace/trace_output.c | 14 +++++-
+ 12 files changed, 254 insertions(+), 36 deletions(-)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index fb140e00f74d..af39859f02ee 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -174,6 +174,20 @@ extern void preempt_count_sub(int val);
+ #define preempt_count_inc() preempt_count_add(1)
+ #define preempt_count_dec() preempt_count_sub(1)
+
++#ifdef CONFIG_PREEMPT_LAZY
++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
++#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
++#else
++#define add_preempt_lazy_count(val) do { } while (0)
++#define sub_preempt_lazy_count(val) do { } while (0)
++#define inc_preempt_lazy_count() do { } while (0)
++#define dec_preempt_lazy_count() do { } while (0)
++#define preempt_lazy_count() (0)
++#endif
++
+ #ifdef CONFIG_PREEMPT_COUNT
+
+ #define preempt_disable() \
+@@ -182,6 +196,12 @@ do { \
+ barrier(); \
+ } while (0)
+
++#define preempt_lazy_disable() \
++do { \
++ inc_preempt_lazy_count(); \
++ barrier(); \
++} while (0)
++
+ #define sched_preempt_enable_no_resched() \
+ do { \
+ barrier(); \
+@@ -219,6 +239,18 @@ do { \
+ __preempt_schedule(); \
+ } while (0)
+
++/*
++ * open code preempt_check_resched() because it is not exported to modules and
++ * used by local_unlock() or bpf_enable_instrumentation().
++ */
++#define preempt_lazy_enable() \
++do { \
++ dec_preempt_lazy_count(); \
++ barrier(); \
++ if (should_resched(0)) \
++ __preempt_schedule(); \
++} while (0)
++
+ #else /* !CONFIG_PREEMPTION */
+ #define preempt_enable() \
+ do { \
+@@ -226,6 +258,12 @@ do { \
+ preempt_count_dec(); \
+ } while (0)
+
++#define preempt_lazy_enable() \
++do { \
++ dec_preempt_lazy_count(); \
++ barrier(); \
++} while (0)
++
+ #define preempt_enable_notrace() \
+ do { \
+ barrier(); \
+@@ -267,6 +305,9 @@ do { \
+ #define preempt_check_resched_rt() barrier()
+ #define preemptible() 0
+
++#define preempt_lazy_disable() barrier()
++#define preempt_lazy_enable() barrier()
++
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+ #ifdef MODULE
+@@ -285,7 +326,7 @@ do { \
+ } while (0)
+ #define preempt_fold_need_resched() \
+ do { \
+- if (tif_need_resched()) \
++ if (tif_need_resched_now()) \
+ set_preempt_need_resched(); \
+ } while (0)
+
+@@ -413,8 +454,15 @@ extern void migrate_enable(void);
+
+ #else
+
+-static inline void migrate_disable(void) { }
+-static inline void migrate_enable(void) { }
++static inline void migrate_disable(void)
++{
++ preempt_lazy_disable();
++}
++
++static inline void migrate_enable(void)
++{
++ preempt_lazy_enable();
++}
+
+ #endif /* CONFIG_SMP */
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index dab770a030bd..8e566b8049aa 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1880,6 +1880,44 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
++}
++
++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
++}
++
++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
++{
++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
++}
++
++static inline int need_resched_lazy(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++}
++
++static inline int need_resched_now(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED);
++}
++
++#else
++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
++static inline int need_resched_lazy(void) { return 0; }
++
++static inline int need_resched_now(void)
++{
++ return test_thread_flag(TIF_NEED_RESCHED);
++}
++
++#endif
++
++
+ static inline bool __task_is_stopped_or_traced(struct task_struct *task)
+ {
+ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
+diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
+index 9b2158c69275..8d1cac4052f2 100644
+--- a/include/linux/thread_info.h
++++ b/include/linux/thread_info.h
+@@ -149,7 +149,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
+ clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
+ #endif /* !CONFIG_GENERIC_ENTRY */
+
+-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
++#ifdef CONFIG_PREEMPT_LAZY
++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \
++ test_thread_flag(TIF_NEED_RESCHED_LAZY))
++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED))
++#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)
++
++#else
++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
++#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED)
++#define tif_need_resched_lazy() 0
++#endif
+
+ #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+ static inline int arch_within_stack_frames(const void * const stack,
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 1048965a8750..9e4bc12bb826 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -70,6 +70,7 @@ struct trace_entry {
+ unsigned char preempt_count;
+ int pid;
+ unsigned char migrate_disable;
++ unsigned char preempt_lazy_count;
+ };
+
+ #define TRACE_EVENT_TYPE_MAX \
+@@ -159,9 +160,10 @@ static inline void tracing_generic_entry_update(struct trace_entry *entry,
+ {
+ entry->preempt_count = trace_ctx & 0xff;
+ entry->migrate_disable = (trace_ctx >> 8) & 0xff;
++ entry->preempt_lazy_count = (trace_ctx >> 16) & 0xff;
+ entry->pid = current->pid;
+ entry->type = type;
+- entry->flags = trace_ctx >> 16;
++ entry->flags = trace_ctx >> 24;
+ }
+
+ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
+@@ -174,6 +176,7 @@ enum trace_flag_type {
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+ TRACE_FLAG_NMI = 0x40,
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
+ };
+
+ #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index c8e516f6db20..90837a6cb2e8 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -1,5 +1,11 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+
++config HAVE_PREEMPT_LAZY
++ bool
++
++config PREEMPT_LAZY
++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT
++
+ choice
+ prompt "Preemption Model"
+ default PREEMPT_NONE
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 5dc7a8edf093..46f2d26a7108 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -647,6 +647,48 @@ void resched_curr(struct rq *rq)
+ trace_sched_wake_idle_without_ipi(cpu);
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++
++static int tsk_is_polling(struct task_struct *p)
++{
++#ifdef TIF_POLLING_NRFLAG
++ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
++#else
++ return 0;
++#endif
++}
++
++void resched_curr_lazy(struct rq *rq)
++{
++ struct task_struct *curr = rq->curr;
++ int cpu;
++
++ if (!sched_feat(PREEMPT_LAZY)) {
++ resched_curr(rq);
++ return;
++ }
++
++ lockdep_assert_held(&rq->lock);
++
++ if (test_tsk_need_resched(curr))
++ return;
++
++ if (test_tsk_need_resched_lazy(curr))
++ return;
++
++ set_tsk_need_resched_lazy(curr);
++
++ cpu = cpu_of(rq);
++ if (cpu == smp_processor_id())
++ return;
++
++ /* NEED_RESCHED_LAZY must be visible before we test polling */
++ smp_mb();
++ if (!tsk_is_polling(curr))
++ smp_send_reschedule(cpu);
++}
++#endif
++
+ void resched_cpu(int cpu)
+ {
+ struct rq *rq = cpu_rq(cpu);
+@@ -1778,6 +1820,7 @@ void migrate_disable(void)
+ preempt_disable();
+ this_rq()->nr_pinned++;
+ p->migration_disabled = 1;
++ preempt_lazy_disable();
+ preempt_enable();
+ }
+ EXPORT_SYMBOL_GPL(migrate_disable);
+@@ -1806,6 +1849,7 @@ void migrate_enable(void)
+ barrier();
+ p->migration_disabled = 0;
+ this_rq()->nr_pinned--;
++ preempt_lazy_enable();
+ preempt_enable();
+ }
+ EXPORT_SYMBOL_GPL(migrate_enable);
+@@ -3851,6 +3895,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+ p->on_cpu = 0;
+ #endif
+ init_task_preempt_count(p);
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++ task_thread_info(p)->preempt_lazy_count = 0;
++#endif
+ #ifdef CONFIG_SMP
+ plist_node_init(&p->pushable_tasks, MAX_PRIO);
+ RB_CLEAR_NODE(&p->pushable_dl_tasks);
+@@ -5104,6 +5151,7 @@ static void __sched notrace __schedule(bool preempt, bool spinning_lock)
+
+ next = pick_next_task(rq, prev, &rf);
+ clear_tsk_need_resched(prev);
++ clear_tsk_need_resched_lazy(prev);
+ clear_preempt_need_resched();
+
+ if (likely(prev != next)) {
+@@ -5303,6 +5351,30 @@ static void __sched notrace preempt_schedule_common(void)
+ } while (need_resched());
+ }
+
++#ifdef CONFIG_PREEMPT_LAZY
++/*
++ * If TIF_NEED_RESCHED is set then we allow being scheduled away since it is
++ * set by an RT task. Otherwise we try to avoid being scheduled out as long as
++ * the preempt_lazy_count counter is > 0.
++ */
++static __always_inline int preemptible_lazy(void)
++{
++ if (test_thread_flag(TIF_NEED_RESCHED))
++ return 1;
++ if (current_thread_info()->preempt_lazy_count)
++ return 0;
++ return 1;
++}
++
++#else
++
++static inline int preemptible_lazy(void)
++{
++ return 1;
++}
++
++#endif
++
+ #ifdef CONFIG_PREEMPTION
+ /*
+ * This is the entry point to schedule() from in-kernel preemption
+@@ -5316,7 +5388,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
+ */
+ if (likely(!preemptible()))
+ return;
+-
++ if (!preemptible_lazy())
++ return;
+ preempt_schedule_common();
+ }
+ NOKPROBE_SYMBOL(preempt_schedule);
+@@ -5362,6 +5435,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+ if (likely(!preemptible()))
+ return;
+
++ if (!preemptible_lazy())
++ return;
++
+ do {
+ /*
+ * Because the function tracer can trace preempt_count_sub()
+@@ -7554,7 +7630,9 @@ void init_idle(struct task_struct *idle, int cpu)
+
+ /* Set the preempt count _outside_ the spinlocks! */
+ init_idle_preempt_count(idle, cpu);
+-
++#ifdef CONFIG_HAVE_PREEMPT_LAZY
++ task_thread_info(idle)->preempt_lazy_count = 0;
++#endif
+ /*
+ * The idle tasks have their own, simple scheduling class:
+ */
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 794c2cb945f8..f354c6d370d7 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4365,7 +4365,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ ideal_runtime = sched_slice(cfs_rq, curr);
+ delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+ if (delta_exec > ideal_runtime) {
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ /*
+ * The current task ran long enough, ensure it doesn't get
+ * re-elected due to buddy favours.
+@@ -4389,7 +4389,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+ return;
+
+ if (delta > ideal_runtime)
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ }
+
+ static void
+@@ -4532,7 +4532,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+ * validating it and just reschedule.
+ */
+ if (queued) {
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ return;
+ }
+ /*
+@@ -4669,7 +4669,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
+ * hierarchy can be throttled
+ */
+ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
+- resched_curr(rq_of(cfs_rq));
++ resched_curr_lazy(rq_of(cfs_rq));
+ }
+
+ static __always_inline
+@@ -5413,7 +5413,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
+
+ if (delta < 0) {
+ if (task_current(rq, p))
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ return;
+ }
+ hrtick_start(rq, delta);
+@@ -6988,7 +6988,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+ return;
+
+ preempt:
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ /*
+ * Only set the backward buddy when the current task is still
+ * on the rq. This can happen when a wakeup gets interleaved
+@@ -10775,7 +10775,7 @@ static void task_fork_fair(struct task_struct *p)
+ * 'current' within the tree based on its new key value.
+ */
+ swap(curr->vruntime, se->vruntime);
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ }
+
+ se->vruntime -= cfs_rq->min_vruntime;
+@@ -10802,7 +10802,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
+ */
+ if (task_current(rq, p)) {
+ if (p->prio > oldprio)
+- resched_curr(rq);
++ resched_curr_lazy(rq);
+ } else
+ check_preempt_curr(rq, p, 0);
+ }
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index b47967d00570..c490f2c902ae 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -48,6 +48,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
+
+ #ifdef CONFIG_PREEMPT_RT
+ SCHED_FEAT(TTWU_QUEUE, false)
++# ifdef CONFIG_PREEMPT_LAZY
++SCHED_FEAT(PREEMPT_LAZY, true)
++# endif
+ #else
+
+ /*
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index b65a4e244a77..178fe64154e7 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -2015,6 +2015,15 @@ extern void reweight_task(struct task_struct *p, int prio);
+ extern void resched_curr(struct rq *rq);
+ extern void resched_cpu(int cpu);
+
++#ifdef CONFIG_PREEMPT_LAZY
++extern void resched_curr_lazy(struct rq *rq);
++#else
++static inline void resched_curr_lazy(struct rq *rq)
++{
++ resched_curr(rq);
++}
++#endif
++
+ extern struct rt_bandwidth def_rt_bandwidth;
+ extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 55bcdaedf7a8..9836ab9d22bb 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2614,8 +2614,16 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
+ trace_flags |= TRACE_FLAG_NEED_RESCHED;
+ if (test_preempt_need_resched())
+ trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+- return (trace_flags << 16) | (pc & 0xff) |
+- (migration_disable_value() & 0xff) << 8;
++
++#ifdef CONFIG_PREEMPT_LAZY
++ if (need_resched_lazy())
++ trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
++#endif
++
++ return (pc & 0xff) |
++ (migration_disable_value() & 0xff) << 8 |
++ (preempt_lazy_count() & 0xff) << 16 |
++ (trace_flags << 24);
+ }
+
+ struct ring_buffer_event *
+@@ -3875,15 +3883,17 @@ unsigned long trace_total_entries(struct trace_array *tr)
+
+ static void print_lat_help_header(struct seq_file *m)
+ {
+- seq_puts(m, "# _------=> CPU# \n"
+- "# / _-----=> irqs-off \n"
+- "# | / _----=> need-resched \n"
+- "# || / _---=> hardirq/softirq \n"
+- "# ||| / _--=> preempt-depth \n"
+- "# |||| / _-=> migrate-disable \n"
+- "# ||||| / delay \n"
+- "# cmd pid |||||| time | caller \n"
+- "# \\ / |||||| \\ | / \n");
++ seq_puts(m, "# _--------=> CPU# \n"
++ "# / _-------=> irqs-off \n"
++ "# | / _------=> need-resched \n"
++ "# || / _-----=> need-resched-lazy\n"
++ "# ||| / _----=> hardirq/softirq \n"
++ "# |||| / _---=> preempt-depth \n"
++ "# ||||| / _--=> preempt-lazy-depth\n"
++ "# |||||| / _-=> migrate-disable \n"
++ "# ||||||| / delay \n"
++ "# cmd pid |||||||| time | caller \n"
++ "# \\ / |||||||| \\ | / \n");
+ }
+
+ static void print_event_info(struct array_buffer *buf, struct seq_file *m)
+@@ -3917,14 +3927,16 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
+
+ print_event_info(buf, m);
+
+- seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
+- seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
+- seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
+- seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
+- seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
+- seq_printf(m, "# %.*s|||| / delay\n", prec, space);
+- seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
+- seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
++ seq_printf(m, "# %.*s _-------=> irqs-off\n", prec, space);
++ seq_printf(m, "# %.*s / _------=> need-resched\n", prec, space);
++ seq_printf(m, "# %.*s| / _-----=> need-resched-lazy\n", prec, space);
++ seq_printf(m, "# %.*s|| / _----=> hardirq/softirq\n", prec, space);
++ seq_printf(m, "# %.*s||| / _---=> preempt-depth\n", prec, space);
++ seq_printf(m, "# %.*s|||| / _--=> preempt-lazy-depth\n", prec, space);
++ seq_printf(m, "# %.*s||||| / _-=> migrate-disable\n", prec, space);
++ seq_printf(m, "# %.*s|||||| / delay\n", prec, space);
++ seq_printf(m, "# TASK-PID %.*s CPU# ||||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
++ seq_printf(m, "# | | %.*s | ||||||| | |\n", prec, " | ");
+ }
+
+ void
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index ba0b9edd652d..39fa2a7a8b03 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -184,6 +184,7 @@ static int trace_define_common_fields(void)
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
+ __common_field(unsigned char, migrate_disable);
++ __common_field(unsigned char, preempt_lazy_count);
+
+ return ret;
+ }
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 5e32edb8a90a..af85aec1e887 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -451,6 +451,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
+ {
+ char hardsoft_irq;
+ char need_resched;
++ char need_resched_lazy;
+ char irqs_off;
+ int hardirq;
+ int softirq;
+@@ -481,6 +482,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
+ break;
+ }
+
++ need_resched_lazy =
++ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.';
++
+ hardsoft_irq =
+ (nmi && hardirq) ? 'Z' :
+ nmi ? 'z' :
+@@ -489,14 +493,20 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
+ softirq ? 's' :
+ '.' ;
+
+- trace_seq_printf(s, "%c%c%c",
+- irqs_off, need_resched, hardsoft_irq);
++ trace_seq_printf(s, "%c%c%c%c",
++ irqs_off, need_resched, need_resched_lazy,
++ hardsoft_irq);
+
+ if (entry->preempt_count)
+ trace_seq_printf(s, "%x", entry->preempt_count);
+ else
+ trace_seq_putc(s, '.');
+
++ if (entry->preempt_lazy_count)
++ trace_seq_printf(s, "%x", entry->preempt_lazy_count);
++ else
++ trace_seq_putc(s, '.');
++
+ if (entry->migrate_disable)
+ trace_seq_printf(s, "%x", entry->migrate_disable);
+ else
+--
+2.19.1
+
diff --git a/features/rt/sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch b/features/rt/sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
new file mode 100644
index 00000000..f99140d4
--- /dev/null
+++ b/features/rt/sched-Disable-CONFIG_RT_GROUP_SCHED-on-RT.patch
@@ -0,0 +1,34 @@
+From bcd494d279a6e818f31d1073dd429d3c0301940d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Jul 2011 17:03:52 +0200
+Subject: [PATCH 095/191] sched: Disable CONFIG_RT_GROUP_SCHED on RT
+
+Carsten reported problems when running:
+
+ taskset 01 chrt -f 1 sleep 1
+
+from within rc.local on an F15 machine. The task stays running and
+never gets on the run queue because some of the run queues have
+rt_throttled=1, which does not go away. Works fine from an ssh login
+shell. Disabling CONFIG_RT_GROUP_SCHED solves that as well.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ init/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/init/Kconfig b/init/Kconfig
+index d51c16a3f355..37686a22a769 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -973,6 +973,7 @@ config CFS_BANDWIDTH
+ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on CGROUP_SCHED
++ depends on !PREEMPT_RT
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
+--
+2.19.1
+
diff --git a/features/rt/sched-Disable-TTWU_QUEUE-on-RT.patch b/features/rt/sched-Disable-TTWU_QUEUE-on-RT.patch
new file mode 100644
index 00000000..d8a5054d
--- /dev/null
+++ b/features/rt/sched-Disable-TTWU_QUEUE-on-RT.patch
@@ -0,0 +1,37 @@
+From 1b8c8ade10b0b08dd7f7c2068683a78571967037 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 13 Sep 2011 16:42:35 +0200
+Subject: [PATCH 123/191] sched: Disable TTWU_QUEUE on RT
+
+The queued remote wakeup mechanism can introduce rather large
+latencies if the number of migrated tasks is high. Disable it for RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/sched/features.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/kernel/sched/features.h b/kernel/sched/features.h
+index 1bc2b158fc51..b47967d00570 100644
+--- a/kernel/sched/features.h
++++ b/kernel/sched/features.h
+@@ -46,11 +46,16 @@ SCHED_FEAT(DOUBLE_TICK, false)
+ */
+ SCHED_FEAT(NONTASK_CAPACITY, true)
+
++#ifdef CONFIG_PREEMPT_RT
++SCHED_FEAT(TTWU_QUEUE, false)
++#else
++
+ /*
+ * Queue remote wakeups on the target CPU and process them
+ * using the scheduler IPI. Reduces rq->lock contention/bounces.
+ */
+ SCHED_FEAT(TTWU_QUEUE, true)
++#endif
+
+ /*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+--
+2.19.1
+
diff --git a/features/rt/sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch b/features/rt/sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch
new file mode 100644
index 00000000..3e3b66a4
--- /dev/null
+++ b/features/rt/sched-Do-not-account-rcu_preempt_depth-on-RT-in-migh.patch
@@ -0,0 +1,56 @@
+From b3451220b4d08d1f80b2e4c10a2cd5d8bc9724e5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 7 Jun 2011 09:19:06 +0200
+Subject: [PATCH 122/191] sched: Do not account rcu_preempt_depth on RT in
+ might_sleep()
+
+RT changes the rcu_preempt_depth semantics, so we cannot check for it
+in might_sleep().
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rcupdate.h | 7 +++++++
+ kernel/sched/core.c | 2 +-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 6d855ef091ba..8b18be4c38ce 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -54,6 +54,11 @@ void __rcu_read_unlock(void);
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+ #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
++#ifndef CONFIG_PREEMPT_RT
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++#else
++static inline int sched_rcu_preempt_depth(void) { return 0; }
++#endif
+
+ #else /* #ifdef CONFIG_PREEMPT_RCU */
+
+@@ -79,6 +84,8 @@ static inline int rcu_preempt_depth(void)
+ return 0;
+ }
+
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+ /* Internal to kernel */
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 2069022bdce5..c05455d4f034 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -8323,7 +8323,7 @@ void __init sched_init(void)
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ static inline int preempt_count_equals(int preempt_offset)
+ {
+- int nested = preempt_count() + rcu_preempt_depth();
++ int nested = preempt_count() + sched_rcu_preempt_depth();
+
+ return (nested == preempt_offset);
+ }
+--
+2.19.1
+
diff --git a/features/rt/sched-Limit-the-number-of-task-migrations-per-batch.patch b/features/rt/sched-Limit-the-number-of-task-migrations-per-batch.patch
new file mode 100644
index 00000000..2acfbe0a
--- /dev/null
+++ b/features/rt/sched-Limit-the-number-of-task-migrations-per-batch.patch
@@ -0,0 +1,32 @@
+From 8a4c33943f124f335c73fac2e3000c83073dd389 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 6 Jun 2011 12:12:51 +0200
+Subject: [PATCH 119/191] sched: Limit the number of task migrations per batch
+
+Put an upper limit on the number of tasks which are migrated per batch
+to avoid large latencies.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/sched/core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 59fb8b18799f..aefac1f2e324 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -64,7 +64,11 @@ const_debug unsigned int sysctl_sched_features =
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
++#ifdef CONFIG_PREEMPT_RT
++const_debug unsigned int sysctl_sched_nr_migrate = 8;
++#else
+ const_debug unsigned int sysctl_sched_nr_migrate = 32;
++#endif
+
+ /*
+ * period over which we measure -rt task CPU usage in us.
+--
+2.19.1
+
diff --git a/features/rt/sched-Move-mmdrop-to-RCU-on-RT.patch b/features/rt/sched-Move-mmdrop-to-RCU-on-RT.patch
new file mode 100644
index 00000000..f6267f74
--- /dev/null
+++ b/features/rt/sched-Move-mmdrop-to-RCU-on-RT.patch
@@ -0,0 +1,114 @@
+From b36ded84e3c469fb806a75722808dc89a93a76ef Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 6 Jun 2011 12:20:33 +0200
+Subject: [PATCH 120/191] sched: Move mmdrop to RCU on RT
+
+Takes sleeping locks and calls into the memory allocator, so nothing
+we want to do in task switch and other atomic contexts.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/mm_types.h | 4 ++++
+ include/linux/sched/mm.h | 11 +++++++++++
+ kernel/fork.c | 13 +++++++++++++
+ kernel/sched/core.c | 7 ++++++-
+ 4 files changed, 34 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index e94df45b5483..bb3ebe46a9db 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -12,6 +12,7 @@
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+ #include <linux/uprobes.h>
++#include <linux/rcupdate.h>
+ #include <linux/page-flags-layout.h>
+ #include <linux/workqueue.h>
+ #include <linux/seqlock.h>
+@@ -556,6 +557,9 @@ struct mm_struct {
+ bool tlb_flush_batched;
+ #endif
+ struct uprobes_state uprobes_state;
++#ifdef CONFIG_PREEMPT_RT
++ struct rcu_head delayed_drop;
++#endif
+ #ifdef CONFIG_HUGETLB_PAGE
+ atomic_long_t hugetlb_usage;
+ #endif
+diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
+index 90b2a0bce11c..22af69051d1c 100644
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -49,6 +49,17 @@ static inline void mmdrop(struct mm_struct *mm)
+ __mmdrop(mm);
+ }
+
++#ifdef CONFIG_PREEMPT_RT
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
++{
++ if (atomic_dec_and_test(&mm->mm_count))
++ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
++}
++#else
++# define mmdrop_delayed(mm) mmdrop(mm)
++#endif
++
+ /**
+ * mmget() - Pin the address space associated with a &struct mm_struct.
+ * @mm: The address space to pin.
+diff --git a/kernel/fork.c b/kernel/fork.c
+index c0cfae6e545c..da1b307cbf73 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -689,6 +689,19 @@ void __mmdrop(struct mm_struct *mm)
+ }
+ EXPORT_SYMBOL_GPL(__mmdrop);
+
++#ifdef CONFIG_PREEMPT_RT
++/*
++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
++ * want another facility to make this work.
++ */
++void __mmdrop_delayed(struct rcu_head *rhp)
++{
++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
++
++ __mmdrop(mm);
++}
++#endif
++
+ static void mmdrop_async_fn(struct work_struct *work)
+ {
+ struct mm_struct *mm;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index aefac1f2e324..9ec24e4188f4 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4270,9 +4270,13 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+ * provided by mmdrop(),
+ * - a sync_core for SYNC_CORE.
+ */
++ /*
++ * We use mmdrop_delayed() here so we don't have to do the
++ * full __mmdrop() when we are the last user.
++ */
+ if (mm) {
+ membarrier_mm_sync_core_before_usermode(mm);
+- mmdrop(mm);
++ mmdrop_delayed(mm);
+ }
+ if (unlikely(prev_state == TASK_DEAD)) {
+ if (prev->sched_class->task_dead)
+@@ -7651,6 +7655,7 @@ void sched_setnuma(struct task_struct *p, int nid)
+ #endif /* CONFIG_NUMA_BALANCING */
+
+ #ifdef CONFIG_HOTPLUG_CPU
++
+ /*
+ * Ensure that the idle task is using init_mm right before its CPU goes
+ * offline.
+--
+2.19.1
+
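Beyond the kernel specifics, the move is that the last reference dropper must not run a heavyweight destructor in a context that cannot sleep, so it hands the object to a context that can. Below is a small userspace sketch of the same shape using pthreads and C11 atomics; every name in it is invented for illustration and it is not kernel code, a worker thread simply stands in for the RCU callback.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <unistd.h>

struct obj {
	atomic_int	refcount;
	struct obj	*next;		/* reap-queue linkage, like delayed_drop above */
};

static struct obj *reap_list;		/* protected by reap_lock */
static pthread_mutex_t reap_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reap_cond = PTHREAD_COND_INITIALIZER;

/* Heavyweight teardown: may sleep, so it only ever runs in the reaper thread. */
static void obj_destroy(struct obj *o)
{
	free(o);
}

/* Analogue of mmdrop_delayed(): the last put defers the destructor. */
static void obj_put_delayed(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) != 1)
		return;				/* not the last reference */

	pthread_mutex_lock(&reap_lock);
	o->next = reap_list;
	reap_list = o;
	pthread_cond_signal(&reap_cond);
	pthread_mutex_unlock(&reap_lock);
}

/* Analogue of the RCU callback: runs in a context that is allowed to sleep. */
static void *reaper(void *unused)
{
	(void)unused;
	for (;;) {
		struct obj *o;

		pthread_mutex_lock(&reap_lock);
		while (!reap_list)
			pthread_cond_wait(&reap_cond, &reap_lock);
		o = reap_list;
		reap_list = o->next;
		pthread_mutex_unlock(&reap_lock);

		obj_destroy(o);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->refcount, 2);
	pthread_create(&t, NULL, reaper, NULL);

	obj_put_delayed(o);		/* drops to 1: nothing to do         */
	obj_put_delayed(o);		/* drops to 0: handed to the reaper  */
	sleep(1);			/* crude: give the reaper a chance   */
	return 0;
}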
diff --git a/features/rt/scsi-fcoe-Make-RT-aware.patch b/features/rt/scsi-fcoe-Make-RT-aware.patch
new file mode 100644
index 00000000..e9dd145e
--- /dev/null
+++ b/features/rt/scsi-fcoe-Make-RT-aware.patch
@@ -0,0 +1,115 @@
+From 6b5015f29677cba625aa0e8204b629ef95d78fa3 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 12 Nov 2011 14:00:48 +0100
+Subject: [PATCH 136/191] scsi/fcoe: Make RT aware.
+
+Do not disable preemption while taking sleeping locks. All users look safe
+for migrate_disable() only.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/scsi/fcoe/fcoe.c | 16 ++++++++--------
+ drivers/scsi/fcoe/fcoe_ctlr.c | 4 ++--
+ drivers/scsi/libfc/fc_exch.c | 4 ++--
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
+index 03bf49adaafe..52e0a2486b43 100644
+--- a/drivers/scsi/fcoe/fcoe.c
++++ b/drivers/scsi/fcoe/fcoe.c
+@@ -1452,11 +1452,11 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
+ {
+ struct fcoe_percpu_s *fps;
+- int rc;
++ int rc, cpu = get_cpu_light();
+
+- fps = &get_cpu_var(fcoe_percpu);
++ fps = &per_cpu(fcoe_percpu, cpu);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
+- put_cpu_var(fcoe_percpu);
++ put_cpu_light();
+
+ return rc;
+ }
+@@ -1641,11 +1641,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
+ return 0;
+ }
+
+- stats = per_cpu_ptr(lport->stats, get_cpu());
++ stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+- put_cpu();
++ put_cpu_light();
+ return -EINVAL;
+ }
+
+@@ -1686,7 +1686,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
+ */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+
+- stats = per_cpu_ptr(lport->stats, get_cpu());
++ stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ if (stats->ErrorFrames < 5)
+ printk(KERN_WARNING "fcoe: FCoE version "
+@@ -1718,13 +1718,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
+ goto drop;
+
+ if (!fcoe_filter_frames(lport, fp)) {
+- put_cpu();
++ put_cpu_light();
+ fc_exch_recv(lport, fp);
+ return;
+ }
+ drop:
+ stats->ErrorFrames++;
+- put_cpu();
++ put_cpu_light();
+ kfree_skb(skb);
+ }
+
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index 5ea426effa60..0d6b9acc7cf8 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -828,7 +828,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
+
+ INIT_LIST_HEAD(&del_list);
+
+- stats = per_cpu_ptr(fip->lp->stats, get_cpu());
++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
+
+ list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+ deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
+@@ -864,7 +864,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
+ sel_time = fcf->time;
+ }
+ }
+- put_cpu();
++ put_cpu_light();
+
+ list_for_each_entry_safe(fcf, next, &del_list, list) {
+ /* Removes fcf from current list */
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 841000445b9a..26d661ddc950 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -825,10 +825,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+ }
+ memset(ep, 0, sizeof(*ep));
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ pool = per_cpu_ptr(mp->pool, cpu);
+ spin_lock_bh(&pool->lock);
+- put_cpu();
++ put_cpu_light();
+
+ /* peek cache of free slot */
+ if (pool->left != FC_XID_UNKNOWN) {
+--
+2.19.1
+
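The get_cpu_light()/put_cpu_light() helpers used above are introduced by a separate patch in this series. On PREEMPT_RT their definition is essentially the following; this is quoted from memory of the RT tree, so treat it as an approximation rather than the authoritative hunk:

/* Approximate shape of the companion get/put_cpu_light() helpers on RT. */
#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
#define put_cpu_light()		migrate_enable()

Unlike get_cpu(), code between these calls may be preempted and may block on sleeping locks; it merely cannot migrate, so per-CPU pointers obtained via per_cpu_ptr() remain stable, which is all the statistics and pool accesses in this driver require.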
diff --git a/features/rt/serial-8250-implement-write_atomic.patch b/features/rt/serial-8250-implement-write_atomic.patch
new file mode 100644
index 00000000..37294b37
--- /dev/null
+++ b/features/rt/serial-8250-implement-write_atomic.patch
@@ -0,0 +1,499 @@
+From 4c5b59250c842f7a609e5581ce10f7d7967b8ec0 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:02 +0106
+Subject: [PATCH 025/191] serial: 8250: implement write_atomic
+
+Implement a non-sleeping NMI-safe write_atomic() console function in
+order to support emergency console printing.
+
+Since interrupts need to be disabled during transmit, all usage of
+the IER register is wrapped with access functions that use the
+console_atomic_lock() function to synchronize register access while
+tracking the state of the interrupts. This is necessary because
+write_atomic() can be called from an NMI context that has preempted
+write_atomic().
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/serial/8250/8250.h | 47 ++++++++++++-
+ drivers/tty/serial/8250/8250_core.c | 17 +++--
+ drivers/tty/serial/8250/8250_fsl.c | 9 +++
+ drivers/tty/serial/8250/8250_ingenic.c | 7 ++
+ drivers/tty/serial/8250/8250_mtk.c | 29 +++++++-
+ drivers/tty/serial/8250/8250_port.c | 92 ++++++++++++++++----------
+ include/linux/serial_8250.h | 5 ++
+ 7 files changed, 162 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
+index 52bb21205bb6..5cbcaafbb4aa 100644
+--- a/drivers/tty/serial/8250/8250.h
++++ b/drivers/tty/serial/8250/8250.h
+@@ -130,12 +130,55 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
+ up->dl_write(up, value);
+ }
+
++static inline void serial8250_set_IER(struct uart_8250_port *up,
++ unsigned char ier)
++{
++ struct uart_port *port = &up->port;
++ unsigned int flags;
++ bool is_console;
++
++ is_console = uart_console(port);
++
++ if (is_console)
++ console_atomic_lock(&flags);
++
++ serial_out(up, UART_IER, ier);
++
++ if (is_console)
++ console_atomic_unlock(flags);
++}
++
++static inline unsigned char serial8250_clear_IER(struct uart_8250_port *up)
++{
++ struct uart_port *port = &up->port;
++ unsigned int clearval = 0;
++ unsigned int prior;
++ unsigned int flags;
++ bool is_console;
++
++ is_console = uart_console(port);
++
++ if (up->capabilities & UART_CAP_UUE)
++ clearval = UART_IER_UUE;
++
++ if (is_console)
++ console_atomic_lock(&flags);
++
++ prior = serial_port_in(port, UART_IER);
++ serial_port_out(port, UART_IER, clearval);
++
++ if (is_console)
++ console_atomic_unlock(flags);
++
++ return prior;
++}
++
+ static inline bool serial8250_set_THRI(struct uart_8250_port *up)
+ {
+ if (up->ier & UART_IER_THRI)
+ return false;
+ up->ier |= UART_IER_THRI;
+- serial_out(up, UART_IER, up->ier);
++ serial8250_set_IER(up, up->ier);
+ return true;
+ }
+
+@@ -144,7 +187,7 @@ static inline bool serial8250_clear_THRI(struct uart_8250_port *up)
+ if (!(up->ier & UART_IER_THRI))
+ return false;
+ up->ier &= ~UART_IER_THRI;
+- serial_out(up, UART_IER, up->ier);
++ serial8250_set_IER(up, up->ier);
+ return true;
+ }
+
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index cae61d1ebec5..47dd23056271 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -274,10 +274,8 @@ static void serial8250_backup_timeout(struct timer_list *t)
+ * Must disable interrupts or else we risk racing with the interrupt
+ * based handler.
+ */
+- if (up->port.irq) {
+- ier = serial_in(up, UART_IER);
+- serial_out(up, UART_IER, 0);
+- }
++ if (up->port.irq)
++ ier = serial8250_clear_IER(up);
+
+ iir = serial_in(up, UART_IIR);
+
+@@ -300,7 +298,7 @@ static void serial8250_backup_timeout(struct timer_list *t)
+ serial8250_tx_chars(up);
+
+ if (up->port.irq)
+- serial_out(up, UART_IER, ier);
++ serial8250_set_IER(up, ier);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+@@ -578,6 +576,14 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
+
+ #ifdef CONFIG_SERIAL_8250_CONSOLE
+
++static void univ8250_console_write_atomic(struct console *co, const char *s,
++ unsigned int count)
++{
++ struct uart_8250_port *up = &serial8250_ports[co->index];
++
++ serial8250_console_write_atomic(up, s, count);
++}
++
+ static void univ8250_console_write(struct console *co, const char *s,
+ unsigned int count)
+ {
+@@ -671,6 +677,7 @@ static int univ8250_console_match(struct console *co, char *name, int idx,
+
+ static struct console univ8250_console = {
+ .name = "ttyS",
++ .write_atomic = univ8250_console_write_atomic,
+ .write = univ8250_console_write,
+ .device = uart_console_device,
+ .setup = univ8250_console_setup,
+diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
+index fbcc90c31ca1..b33cb454ce03 100644
+--- a/drivers/tty/serial/8250/8250_fsl.c
++++ b/drivers/tty/serial/8250/8250_fsl.c
+@@ -60,9 +60,18 @@ int fsl8250_handle_irq(struct uart_port *port)
+
+ /* Stop processing interrupts on input overrun */
+ if ((orig_lsr & UART_LSR_OE) && (up->overrun_backoff_time_ms > 0)) {
++ unsigned int ca_flags;
+ unsigned long delay;
++ bool is_console;
+
++ is_console = uart_console(port);
++
++ if (is_console)
++ console_atomic_lock(&ca_flags);
+ up->ier = port->serial_in(port, UART_IER);
++ if (is_console)
++ console_atomic_unlock(ca_flags);
++
+ if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
+ port->ops->stop_rx(port);
+ } else {
+diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
+index 988bf6bcce42..bcd26d672539 100644
+--- a/drivers/tty/serial/8250/8250_ingenic.c
++++ b/drivers/tty/serial/8250/8250_ingenic.c
+@@ -146,6 +146,8 @@ OF_EARLYCON_DECLARE(x1000_uart, "ingenic,x1000-uart",
+
+ static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
+ {
++ unsigned int flags;
++ bool is_console;
+ int ier;
+
+ switch (offset) {
+@@ -167,7 +169,12 @@ static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
+ * If we have enabled modem status IRQs we should enable
+ * modem mode.
+ */
++ is_console = uart_console(p);
++ if (is_console)
++ console_atomic_lock(&flags);
+ ier = p->serial_in(p, UART_IER);
++ if (is_console)
++ console_atomic_unlock(flags);
+
+ if (ier & UART_IER_MSI)
+ value |= UART_MCR_MDCE | UART_MCR_FCM;
+diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
+index f7d3023f860f..8133713dcf5e 100644
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -213,12 +213,37 @@ static void mtk8250_shutdown(struct uart_port *port)
+
+ static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
+ {
+- serial_out(up, UART_IER, serial_in(up, UART_IER) & (~mask));
++ struct uart_port *port = &up->port;
++ unsigned int flags;
++ unsigned int ier;
++ bool is_console;
++
++ is_console = uart_console(port);
++
++ if (is_console)
++ console_atomic_lock(&flags);
++
++ ier = serial_in(up, UART_IER);
++ serial_out(up, UART_IER, ier & (~mask));
++
++ if (is_console)
++ console_atomic_unlock(flags);
+ }
+
+ static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask)
+ {
+- serial_out(up, UART_IER, serial_in(up, UART_IER) | mask);
++ struct uart_port *port = &up->port;
++ unsigned int flags;
++ unsigned int ier;
++
++ if (uart_console(port))
++ console_atomic_lock(&flags);
++
++ ier = serial_in(up, UART_IER);
++ serial_out(up, UART_IER, ier | mask);
++
++ if (uart_console(port))
++ console_atomic_unlock(flags);
+ }
+
+ static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index b0af13074cd3..b05f8c34b291 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -757,7 +757,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+ serial_out(p, UART_EFR, UART_EFR_ECB);
+ serial_out(p, UART_LCR, 0);
+ }
+- serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
++ serial8250_set_IER(p, sleep ? UART_IERX_SLEEP : 0);
+ if (p->capabilities & UART_CAP_EFR) {
+ serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
+ serial_out(p, UART_EFR, efr);
+@@ -1429,7 +1429,7 @@ static void serial8250_stop_rx(struct uart_port *port)
+
+ up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
+ up->port.read_status_mask &= ~UART_LSR_DR;
+- serial_port_out(port, UART_IER, up->ier);
++ serial8250_set_IER(up, up->ier);
+
+ serial8250_rpm_put(up);
+ }
+@@ -1459,7 +1459,7 @@ void serial8250_em485_stop_tx(struct uart_8250_port *p)
+ serial8250_clear_and_reinit_fifos(p);
+
+ p->ier |= UART_IER_RLSI | UART_IER_RDI;
+- serial_port_out(&p->port, UART_IER, p->ier);
++ serial8250_set_IER(p, p->ier);
+ }
+ }
+ EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
+@@ -1687,7 +1687,7 @@ static void serial8250_disable_ms(struct uart_port *port)
+ mctrl_gpio_disable_ms(up->gpios);
+
+ up->ier &= ~UART_IER_MSI;
+- serial_port_out(port, UART_IER, up->ier);
++ serial8250_set_IER(up, up->ier);
+ }
+
+ static void serial8250_enable_ms(struct uart_port *port)
+@@ -1703,7 +1703,7 @@ static void serial8250_enable_ms(struct uart_port *port)
+ up->ier |= UART_IER_MSI;
+
+ serial8250_rpm_get(up);
+- serial_port_out(port, UART_IER, up->ier);
++ serial8250_set_IER(up, up->ier);
+ serial8250_rpm_put(up);
+ }
+
+@@ -2118,14 +2118,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ serial8250_rpm_get(up);
+- /*
+- * First save the IER then disable the interrupts
+- */
+- ier = serial_port_in(port, UART_IER);
+- if (up->capabilities & UART_CAP_UUE)
+- serial_port_out(port, UART_IER, UART_IER_UUE);
+- else
+- serial_port_out(port, UART_IER, 0);
++ ier = serial8250_clear_IER(up);
+
+ wait_for_xmitr(up, BOTH_EMPTY);
+ /*
+@@ -2138,7 +2131,7 @@ static void serial8250_put_poll_char(struct uart_port *port,
+ * and restore the IER
+ */
+ wait_for_xmitr(up, BOTH_EMPTY);
+- serial_port_out(port, UART_IER, ier);
++ serial8250_set_IER(up, ier);
+ serial8250_rpm_put(up);
+ }
+
+@@ -2441,7 +2434,7 @@ void serial8250_do_shutdown(struct uart_port *port)
+ */
+ spin_lock_irqsave(&port->lock, flags);
+ up->ier = 0;
+- serial_port_out(port, UART_IER, 0);
++ serial8250_set_IER(up, 0);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ synchronize_irq(port->irq);
+@@ -2771,7 +2764,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+ if (up->capabilities & UART_CAP_RTOIE)
+ up->ier |= UART_IER_RTOIE;
+
+- serial_port_out(port, UART_IER, up->ier);
++ serial8250_set_IER(up, up->ier);
+
+ if (up->capabilities & UART_CAP_EFR) {
+ unsigned char efr = 0;
+@@ -3237,7 +3230,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_defaults);
+
+ #ifdef CONFIG_SERIAL_8250_CONSOLE
+
+-static void serial8250_console_putchar(struct uart_port *port, int ch)
++static void serial8250_console_putchar_locked(struct uart_port *port, int ch)
+ {
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+@@ -3245,6 +3238,18 @@ static void serial8250_console_putchar(struct uart_port *port, int ch)
+ serial_port_out(port, UART_TX, ch);
+ }
+
++static void serial8250_console_putchar(struct uart_port *port, int ch)
++{
++ struct uart_8250_port *up = up_to_u8250p(port);
++ unsigned int flags;
++
++ wait_for_xmitr(up, UART_LSR_THRE);
++
++ console_atomic_lock(&flags);
++ serial8250_console_putchar_locked(port, ch);
++ console_atomic_unlock(flags);
++}
++
+ /*
+ * Restore serial console when h/w power-off detected
+ */
+@@ -3266,6 +3271,32 @@ static void serial8250_console_restore(struct uart_8250_port *up)
+ serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
+ }
+
++void serial8250_console_write_atomic(struct uart_8250_port *up,
++ const char *s, unsigned int count)
++{
++ struct uart_port *port = &up->port;
++ unsigned int flags;
++ unsigned int ier;
++
++ console_atomic_lock(&flags);
++
++ touch_nmi_watchdog();
++
++ ier = serial8250_clear_IER(up);
++
++ if (atomic_fetch_inc(&up->console_printing)) {
++ uart_console_write(port, "\n", 1,
++ serial8250_console_putchar_locked);
++ }
++ uart_console_write(port, s, count, serial8250_console_putchar_locked);
++ atomic_dec(&up->console_printing);
++
++ wait_for_xmitr(up, BOTH_EMPTY);
++ serial8250_set_IER(up, ier);
++
++ console_atomic_unlock(flags);
++}
++
+ /*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+@@ -3282,24 +3313,12 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ struct uart_port *port = &up->port;
+ unsigned long flags;
+ unsigned int ier;
+- int locked = 1;
+
+ touch_nmi_watchdog();
+
+- if (oops_in_progress)
+- locked = spin_trylock_irqsave(&port->lock, flags);
+- else
+- spin_lock_irqsave(&port->lock, flags);
+-
+- /*
+- * First save the IER then disable the interrupts
+- */
+- ier = serial_port_in(port, UART_IER);
++ spin_lock_irqsave(&port->lock, flags);
+
+- if (up->capabilities & UART_CAP_UUE)
+- serial_port_out(port, UART_IER, UART_IER_UUE);
+- else
+- serial_port_out(port, UART_IER, 0);
++ ier = serial8250_clear_IER(up);
+
+ /* check scratch reg to see if port powered off during system sleep */
+ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
+@@ -3313,7 +3332,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ mdelay(port->rs485.delay_rts_before_send);
+ }
+
++ atomic_inc(&up->console_printing);
+ uart_console_write(port, s, count, serial8250_console_putchar);
++ atomic_dec(&up->console_printing);
+
+ /*
+ * Finally, wait for transmitter to become empty
+@@ -3326,8 +3347,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ if (em485->tx_stopped)
+ up->rs485_stop_tx(up);
+ }
+-
+- serial_port_out(port, UART_IER, ier);
++ serial8250_set_IER(up, ier);
+
+ /*
+ * The receive handling will happen properly because the
+@@ -3339,8 +3359,7 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ if (up->msr_saved_flags)
+ serial8250_modem_status(up);
+
+- if (locked)
+- spin_unlock_irqrestore(&port->lock, flags);
++ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ static unsigned int probe_baud(struct uart_port *port)
+@@ -3360,6 +3379,7 @@ static unsigned int probe_baud(struct uart_port *port)
+
+ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
+ {
++ struct uart_8250_port *up = up_to_u8250p(port);
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+@@ -3369,6 +3389,8 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
+ if (!port->iobase && !port->membase)
+ return -ENODEV;
+
++ atomic_set(&up->console_printing, 0);
++
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ else if (probe)
+diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
+index 9e655055112d..ffef674deda7 100644
+--- a/include/linux/serial_8250.h
++++ b/include/linux/serial_8250.h
+@@ -7,6 +7,7 @@
+ #ifndef _LINUX_SERIAL_8250_H
+ #define _LINUX_SERIAL_8250_H
+
++#include <linux/atomic.h>
+ #include <linux/serial_core.h>
+ #include <linux/serial_reg.h>
+ #include <linux/platform_device.h>
+@@ -125,6 +126,8 @@ struct uart_8250_port {
+ #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+ unsigned char msr_saved_flags;
+
++ atomic_t console_printing;
++
+ struct uart_8250_dma *dma;
+ const struct uart_8250_ops *ops;
+
+@@ -180,6 +183,8 @@ void serial8250_init_port(struct uart_8250_port *up);
+ void serial8250_set_defaults(struct uart_8250_port *up);
+ void serial8250_console_write(struct uart_8250_port *up, const char *s,
+ unsigned int count);
++void serial8250_console_write_atomic(struct uart_8250_port *up, const char *s,
++ unsigned int count);
+ int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
+ int serial8250_console_exit(struct uart_port *port);
+
+--
+2.19.1
+
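From the consumer side, a console driver adopting the new hook follows a simple shape: take console_atomic_lock(), poll characters out with interrupts off, release the lock. The sketch below is hypothetical, every my_* identifier is invented, and it relies on the console_atomic_lock()/console_atomic_unlock() helpers added by the write_atomic interface patch earlier in this series; it is meant only to show the wiring, not to be a real driver.

/* Hypothetical console sketch, not part of the patch above. */
static void my_uart_putchar_polled(unsigned char c);		/* assumed helper */
static void my_console_write(struct console *co, const char *s,
			     unsigned int count);		/* normal path   */

static void my_console_write_atomic(struct console *co, const char *s,
				    unsigned int count)
{
	unsigned int flags;

	/* NMI-safe: spins with interrupts off and may nest from NMI context. */
	console_atomic_lock(&flags);
	while (count--)
		my_uart_putchar_polled(*s++);
	console_atomic_unlock(flags);
}

static struct console my_console = {
	.name		= "ttyMY",
	.write		= my_console_write,
	.write_atomic	= my_console_write_atomic,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};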
diff --git a/features/rt/shmem-Use-raw_spinlock_t-for-stat_lock.patch b/features/rt/shmem-Use-raw_spinlock_t-for-stat_lock.patch
new file mode 100644
index 00000000..509bc7e7
--- /dev/null
+++ b/features/rt/shmem-Use-raw_spinlock_t-for-stat_lock.patch
@@ -0,0 +1,146 @@
+From 214fdbf6532b39b266f6a6f836fb4ff5e8eb8458 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 14 Aug 2020 18:53:34 +0200
+Subject: [PATCH 037/191] shmem: Use raw_spinlock_t for ->stat_lock
+
+Each CPU has SHMEM_INO_BATCH inodes available in `->ino_batch' which is
+per-CPU. Access here is serialized by disabling preemption. If the pool is
+empty, it gets reloaded from `->next_ino'. Access here is serialized by
+->stat_lock which is a spinlock_t and can not be acquired with disabled
+preemption.
+One way around it would be to make the per-CPU ino_batch struct containing the
+inode number a local_lock_t.
+Another solution is to promote ->stat_lock to a raw_spinlock_t. The critical
+sections are short. The mpol_put() should be moved outside of the critical
+section to avoid invoking the destructor with disabled preemption.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/shmem_fs.h | 2 +-
+ mm/shmem.c | 31 +++++++++++++++++--------------
+ 2 files changed, 18 insertions(+), 15 deletions(-)
+
+diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
+index d82b6f396588..12b2e41d8f47 100644
+--- a/include/linux/shmem_fs.h
++++ b/include/linux/shmem_fs.h
+@@ -31,7 +31,7 @@ struct shmem_sb_info {
+ struct percpu_counter used_blocks; /* How many are allocated */
+ unsigned long max_inodes; /* How many inodes are allowed */
+ unsigned long free_inodes; /* How many are left for allocation */
+- spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
++ raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */
+ umode_t mode; /* Mount mode for root directory */
+ unsigned char huge; /* Whether to try for hugepages */
+ kuid_t uid; /* Mount uid for root directory */
+diff --git a/mm/shmem.c b/mm/shmem.c
+index b2db4ed0fbc7..b85dafef6529 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -278,10 +278,10 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
+ ino_t ino;
+
+ if (!(sb->s_flags & SB_KERNMOUNT)) {
+- spin_lock(&sbinfo->stat_lock);
++ raw_spin_lock(&sbinfo->stat_lock);
+ if (sbinfo->max_inodes) {
+ if (!sbinfo->free_inodes) {
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ return -ENOSPC;
+ }
+ sbinfo->free_inodes--;
+@@ -304,7 +304,7 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
+ }
+ *inop = ino;
+ }
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ } else if (inop) {
+ /*
+ * __shmem_file_setup, one of our callers, is lock-free: it
+@@ -319,13 +319,14 @@ static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
+ * to worry about things like glibc compatibility.
+ */
+ ino_t *next_ino;
++
+ next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
+ ino = *next_ino;
+ if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
+- spin_lock(&sbinfo->stat_lock);
++ raw_spin_lock(&sbinfo->stat_lock);
+ ino = sbinfo->next_ino;
+ sbinfo->next_ino += SHMEM_INO_BATCH;
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ if (unlikely(is_zero_ino(ino)))
+ ino++;
+ }
+@@ -341,9 +342,9 @@ static void shmem_free_inode(struct super_block *sb)
+ {
+ struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+ if (sbinfo->max_inodes) {
+- spin_lock(&sbinfo->stat_lock);
++ raw_spin_lock(&sbinfo->stat_lock);
+ sbinfo->free_inodes++;
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ }
+ }
+
+@@ -1453,10 +1454,10 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+ {
+ struct mempolicy *mpol = NULL;
+ if (sbinfo->mpol) {
+- spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
++ raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
+ mpol = sbinfo->mpol;
+ mpol_get(mpol);
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ }
+ return mpol;
+ }
+@@ -3533,9 +3534,10 @@ static int shmem_reconfigure(struct fs_context *fc)
+ struct shmem_options *ctx = fc->fs_private;
+ struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb);
+ unsigned long inodes;
++ struct mempolicy *mpol = NULL;
+ const char *err;
+
+- spin_lock(&sbinfo->stat_lock);
++ raw_spin_lock(&sbinfo->stat_lock);
+ inodes = sbinfo->max_inodes - sbinfo->free_inodes;
+ if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) {
+ if (!sbinfo->max_blocks) {
+@@ -3580,14 +3582,15 @@ static int shmem_reconfigure(struct fs_context *fc)
+ * Preserve previous mempolicy unless mpol remount option was specified.
+ */
+ if (ctx->mpol) {
+- mpol_put(sbinfo->mpol);
++ mpol = sbinfo->mpol;
+ sbinfo->mpol = ctx->mpol; /* transfers initial ref */
+ ctx->mpol = NULL;
+ }
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
++ mpol_put(mpol);
+ return 0;
+ out:
+- spin_unlock(&sbinfo->stat_lock);
++ raw_spin_unlock(&sbinfo->stat_lock);
+ return invalfc(fc, "%s", err);
+ }
+
+@@ -3704,7 +3707,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
+ sbinfo->mpol = ctx->mpol;
+ ctx->mpol = NULL;
+
+- spin_lock_init(&sbinfo->stat_lock);
++ raw_spin_lock_init(&sbinfo->stat_lock);
+ if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
+ goto failed;
+ spin_lock_init(&sbinfo->shrinklist_lock);
+--
+2.19.1
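The mpol handling above is an instance of a general rule: do not invoke a destructor that may be expensive or sleep while holding a lock that has to stay atomic; swap the pointer under the lock and put the old object after unlocking. A minimal userspace illustration of that move (pthreads; all names are invented):

#include <pthread.h>
#include <stdlib.h>

struct policy { int weight; };

static pthread_mutex_t conf_lock = PTHREAD_MUTEX_INITIALIZER;
static struct policy *current_policy;	/* protected by conf_lock */

/* Stand-in for mpol_put(): potentially expensive, must not run under the lock. */
static void policy_put(struct policy *p)
{
	free(p);
}

/* Like shmem_reconfigure() after the patch: only the pointer swap is locked. */
static void set_policy(struct policy *new_policy)
{
	struct policy *old;

	pthread_mutex_lock(&conf_lock);
	old = current_policy;
	current_policy = new_policy;
	pthread_mutex_unlock(&conf_lock);

	policy_put(old);		/* destructor runs with the lock dropped */
}

int main(void)
{
	struct policy *p = malloc(sizeof(*p));

	p->weight = 1;
	set_policy(p);
	set_policy(NULL);
	return 0;
}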
+
diff --git a/features/rt/signal-Revert-ptrace-preempt-magic.patch b/features/rt/signal-Revert-ptrace-preempt-magic.patch
new file mode 100644
index 00000000..2408f040
--- /dev/null
+++ b/features/rt/signal-Revert-ptrace-preempt-magic.patch
@@ -0,0 +1,38 @@
+From 06589e25a1763adbe6ac70a0cb1ab769704163ec Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 21 Sep 2011 19:57:12 +0200
+Subject: [PATCH 083/191] signal: Revert ptrace preempt magic
+
+Upstream commit '53da1d9456fe7f8 fix ptrace slowness' is nothing more
+than a bandaid around the ptrace design trainwreck. It's not a
+correctness issue, it's merely a cosmetic bandaid.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/signal.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index ba4d1ef39a9e..98c48e1ea82e 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2203,16 +2203,8 @@ static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t
+ if (gstop_done && ptrace_reparented(current))
+ do_notify_parent_cldstop(current, false, why);
+
+- /*
+- * Don't want to allow preemption here, because
+- * sys_ptrace() needs this task to be inactive.
+- *
+- * XXX: implement read_unlock_no_resched().
+- */
+- preempt_disable();
+ read_unlock(&tasklist_lock);
+ cgroup_enter_frozen();
+- preempt_enable_no_resched();
+ freezable_schedule();
+ cgroup_leave_frozen(true);
+ } else {
+--
+2.19.1
+
diff --git a/features/rt/signal-x86-Delay-calling-signals-in-atomic.patch b/features/rt/signal-x86-Delay-calling-signals-in-atomic.patch
new file mode 100644
index 00000000..6e4be111
--- /dev/null
+++ b/features/rt/signal-x86-Delay-calling-signals-in-atomic.patch
@@ -0,0 +1,139 @@
+From 3cbbc9627110b6a4140d688a54c57711d3ba8560 Mon Sep 17 00:00:00 2001
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Tue, 14 Jul 2015 14:26:34 +0200
+Subject: [PATCH 100/191] signal/x86: Delay calling signals in atomic
+
+On x86_64 we must disable preemption before we enable interrupts
+for stack faults, int3 and debugging, because the current task is using
+a per CPU debug stack defined by the IST. If we schedule out, another task
+can come in and use the same stack and cause the stack to be corrupted
+and crash the kernel on return.
+
+When CONFIG_PREEMPT_RT is enabled, spin_locks become mutexes, and
+one of these is the spin lock used in signal handling.
+
+Some of the debug code (int3) causes do_trap() to send a signal.
+This function calls a spin lock that has been converted to a mutex
+and has the possibility to sleep. If this happens, the above issues with
+the corrupted stack are possible.
+
+Instead of calling the signal right away, for PREEMPT_RT and x86_64,
+the signal information is stored on the stacks task_struct and
+TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume
+code will send the signal when preemption is enabled.
+
+[ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT to
+ ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. ]
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[bigeasy: also needed on 32bit as per Yang Shi <yang.shi@linaro.org>]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/include/asm/signal.h | 13 +++++++++++++
+ include/linux/sched.h | 4 ++++
+ kernel/entry/common.c | 8 ++++++++
+ kernel/signal.c | 28 ++++++++++++++++++++++++++++
+ 4 files changed, 53 insertions(+)
+
+diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
+index 6fd8410a3910..f3bf2f515edb 100644
+--- a/arch/x86/include/asm/signal.h
++++ b/arch/x86/include/asm/signal.h
+@@ -28,6 +28,19 @@ typedef struct {
+ #define SA_IA32_ABI 0x02000000u
+ #define SA_X32_ABI 0x01000000u
+
++/*
++ * Because some traps use the IST stack, we must keep preemption
++ * disabled while calling do_trap(), but do_trap() may call
++ * force_sig_info() which will grab the signal spin_locks for the
++ * task, which in PREEMPT_RT are mutexes. By defining
++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set
++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
++ * trap.
++ */
++#if defined(CONFIG_PREEMPT_RT)
++#define ARCH_RT_DELAYS_SIGNAL_SEND
++#endif
++
+ #ifndef CONFIG_COMPAT
+ typedef sigset_t compat_sigset_t;
+ #endif
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index fb5350358bc8..1d85cfa28fe6 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -997,6 +997,10 @@ struct task_struct {
+ /* Restored if set_restore_sigmask() was used: */
+ sigset_t saved_sigmask;
+ struct sigpending pending;
++#ifdef CONFIG_PREEMPT_RT
++ /* TODO: move me into ->restart_block ? */
++ struct kernel_siginfo forced_info;
++#endif
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+ unsigned int sas_ss_flags;
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index 8442e5c9cfa2..2a7bcc744033 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -161,6 +161,14 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+ if (ti_work & _TIF_NEED_RESCHED)
+ schedule();
+
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (unlikely(current->forced_info.si_signo)) {
++ struct task_struct *t = current;
++ force_sig_info(&t->forced_info);
++ t->forced_info.si_signo = 0;
++ }
++#endif
++
+ if (ti_work & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 98c48e1ea82e..a5d16084f54c 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -1314,6 +1314,34 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
+ struct k_sigaction *action;
+ int sig = info->si_signo;
+
++ /*
++ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
++ * since it can not enable preemption, and the signal code's spin_locks
++ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
++ * send the signal on exit of the trap.
++ */
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (in_atomic()) {
++ struct task_struct *t = current;
++
++ if (WARN_ON_ONCE(t->forced_info.si_signo))
++ return 0;
++
++ if (is_si_special(info)) {
++ WARN_ON_ONCE(info != SEND_SIG_PRIV);
++ t->forced_info.si_signo = info->si_signo;
++ t->forced_info.si_errno = 0;
++ t->forced_info.si_code = SI_KERNEL;
++ t->forced_info.si_pid = 0;
++ t->forced_info.si_uid = 0;
++ } else {
++ t->forced_info = *info;
++ }
++
++ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
++ return 0;
++ }
++#endif
+ spin_lock_irqsave(&t->sighand->siglock, flags);
+ action = &t->sighand->action[sig-1];
+ ignored = action->sa.sa_handler == SIG_IGN;
+--
+2.19.1
+
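The shape of the fix, record the event where it happens and act on it later from a context that is allowed to do the heavy lifting, is the same discipline userspace signal handlers follow: set a flag in the handler, do the real work in the main loop. A tiny self-contained illustration (plain POSIX C; names invented):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Analogue of t->forced_info + TIF_NOTIFY_RESUME: just remember the event. */
static volatile sig_atomic_t pending_sig;

static void handler(int sig)
{
	pending_sig = sig;	/* async-signal-safe: no locks, no allocation */
}

int main(void)
{
	struct sigaction sa;

	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = 0;
	sigaction(SIGINT, &sa, NULL);

	for (;;) {
		pause();		/* wait; the handler only records the signal */

		if (pending_sig) {	/* analogue of exit_to_user_mode_loop() */
			printf("handling signal %d outside the handler\n",
			       (int)pending_sig);
			pending_sig = 0;
		}
	}
}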
diff --git a/features/rt/signals-Allow-RT-tasks-to-cache-one-sigqueue-struct.patch b/features/rt/signals-Allow-RT-tasks-to-cache-one-sigqueue-struct.patch
new file mode 100644
index 00000000..ae43826e
--- /dev/null
+++ b/features/rt/signals-Allow-RT-tasks-to-cache-one-sigqueue-struct.patch
@@ -0,0 +1,216 @@
+From 7f989d7d619da9e6ad512bc325d36b0055fca72c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:56 -0500
+Subject: [PATCH 188/191] signals: Allow RT tasks to cache one sigqueue struct
+
+Allow realtime tasks to cache one sigqueue in task struct. This avoids an
+allocation which can cause latencies or fail.
+Ideally the sigqueue is cached after the first successful delivery and will be
+available for the next signal delivery. This works under the assumption that
+the RT task never has an unprocessed signal while one is about to be queued.
+The caching is not used for SIGQUEUE_PREALLOC because this kind of sigqueue is
+handled differently (and not used for regular signal delivery).
+
+[bigeasy: With a fix from Matt Fleming <matt@codeblueprint.co.uk>]
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/sched.h | 1 +
+ include/linux/signal.h | 1 +
+ kernel/exit.c | 2 +-
+ kernel/fork.c | 1 +
+ kernel/signal.c | 67 +++++++++++++++++++++++++++++++++++++++---
+ 5 files changed, 67 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 8e566b8049aa..635e08a5aa71 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -988,6 +988,7 @@ struct task_struct {
+ /* Signal handlers: */
+ struct signal_struct *signal;
+ struct sighand_struct __rcu *sighand;
++ struct sigqueue *sigqueue_cache;
+ sigset_t blocked;
+ sigset_t real_blocked;
+ /* Restored if set_restore_sigmask() was used: */
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index 205526c4003a..d47a86790edc 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -265,6 +265,7 @@ static inline void init_sigpending(struct sigpending *sig)
+ }
+
+ extern void flush_sigqueue(struct sigpending *queue);
++extern void flush_task_sigqueue(struct task_struct *tsk);
+
+ /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
+ static inline int valid_signal(unsigned long sig)
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 04029e35e69a..346f7b76ceca 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -152,7 +152,7 @@ static void __exit_signal(struct task_struct *tsk)
+ * Do this under ->siglock, we can race with another thread
+ * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+ */
+- flush_sigqueue(&tsk->pending);
++ flush_task_sigqueue(tsk);
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 5fdb0a1bbad8..146a019df72a 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2027,6 +2027,7 @@ static __latent_entropy struct task_struct *copy_process(
+ spin_lock_init(&p->alloc_lock);
+
+ init_sigpending(&p->pending);
++ p->sigqueue_cache = NULL;
+
+ p->utime = p->stime = p->gtime = 0;
+ #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+diff --git a/kernel/signal.c b/kernel/signal.c
+index a5d16084f54c..5baac8c5d2e9 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -20,6 +20,7 @@
+ #include <linux/sched/task.h>
+ #include <linux/sched/task_stack.h>
+ #include <linux/sched/cputime.h>
++#include <linux/sched/rt.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
+ #include <linux/proc_fs.h>
+@@ -404,13 +405,30 @@ void task_join_group_stop(struct task_struct *task)
+ task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
+ }
+
++static struct sigqueue *sigqueue_from_cache(struct task_struct *t)
++{
++ struct sigqueue *q = t->sigqueue_cache;
++
++ if (q && cmpxchg(&t->sigqueue_cache, q, NULL) == q)
++ return q;
++ return NULL;
++}
++
++static bool sigqueue_add_cache(struct task_struct *t, struct sigqueue *q)
++{
++ if (!t->sigqueue_cache && cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
++ return true;
++ return false;
++}
++
+ /*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ * appropriate lock must be held to stop the target task from exiting
+ */
+ static struct sigqueue *
+-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit, bool fromslab)
+ {
+ struct sigqueue *q = NULL;
+ struct user_struct *user;
+@@ -432,7 +450,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ rcu_read_unlock();
+
+ if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
+- q = kmem_cache_alloc(sigqueue_cachep, flags);
++ if (!fromslab)
++ q = sigqueue_from_cache(t);
++ if (!q)
++ q = kmem_cache_alloc(sigqueue_cachep, flags);
+ } else {
+ print_dropped_signal(sig);
+ }
+@@ -449,6 +470,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
+ return q;
+ }
+
++static struct sigqueue *
++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit)
++{
++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, false);
++}
++
+ static void __sigqueue_free(struct sigqueue *q)
+ {
+ if (q->flags & SIGQUEUE_PREALLOC)
+@@ -458,6 +486,20 @@ static void __sigqueue_free(struct sigqueue *q)
+ kmem_cache_free(sigqueue_cachep, q);
+ }
+
++static void __sigqueue_cache_or_free(struct sigqueue *q)
++{
++ struct user_struct *up;
++
++ if (q->flags & SIGQUEUE_PREALLOC)
++ return;
++
++ up = q->user;
++ if (atomic_dec_and_test(&up->sigpending))
++ free_uid(up);
++ if (!task_is_realtime(current) || !sigqueue_add_cache(current, q))
++ kmem_cache_free(sigqueue_cachep, q);
++}
++
+ void flush_sigqueue(struct sigpending *queue)
+ {
+ struct sigqueue *q;
+@@ -470,6 +512,21 @@ void flush_sigqueue(struct sigpending *queue)
+ }
+ }
+
++/*
++ * Called from __exit_signal. Flush tsk->pending and
++ * tsk->sigqueue_cache
++ */
++void flush_task_sigqueue(struct task_struct *tsk)
++{
++ struct sigqueue *q;
++
++ flush_sigqueue(&tsk->pending);
++
++ q = sigqueue_from_cache(tsk);
++ if (q)
++ kmem_cache_free(sigqueue_cachep, q);
++}
++
+ /*
+ * Flush all pending signals for this kthread.
+ */
+@@ -594,7 +651,7 @@ static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *i
+ (info->si_code == SI_TIMER) &&
+ (info->si_sys_private);
+
+- __sigqueue_free(first);
++ __sigqueue_cache_or_free(first);
+ } else {
+ /*
+ * Ok, it wasn't in the queue. This must be
+@@ -631,6 +688,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
+ bool resched_timer = false;
+ int signr;
+
++ WARN_ON_ONCE(tsk != current);
++
+ /* We only dequeue private signals from ourselves, we don't let
+ * signalfd steal them
+ */
+@@ -1835,7 +1894,7 @@ EXPORT_SYMBOL(kill_pid);
+ */
+ struct sigqueue *sigqueue_alloc(void)
+ {
+- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, true);
+
+ if (q)
+ q->flags |= SIGQUEUE_PREALLOC;
+--
+2.19.1
+
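The cache is a single per-task slot that is claimed and refilled with cmpxchg(), so it needs no lock and never blocks. The same single-slot idea in portable C11 atomics, as a userspace sketch with invented names (one global slot here instead of one per task):

#include <stdatomic.h>
#include <stdlib.h>

struct sigq { int signo; };

/* Analogue of task_struct::sigqueue_cache: at most one parked object. */
static _Atomic(struct sigq *) cache;

static struct sigq *sigq_alloc(void)
{
	struct sigq *q = atomic_load(&cache);

	/* Claim the cached entry if it is still there (cf. sigqueue_from_cache()). */
	if (q && atomic_compare_exchange_strong(&cache, &q, NULL))
		return q;
	return malloc(sizeof(*q));
}

static void sigq_free(struct sigq *q)
{
	struct sigq *expected = NULL;

	/* Park the object in the empty slot, otherwise really free it. */
	if (!atomic_compare_exchange_strong(&cache, &expected, q))
		free(q);
}

int main(void)
{
	struct sigq *q = sigq_alloc();	/* first call falls back to malloc()  */

	sigq_free(q);			/* parks the object in the cache      */
	free(sigq_alloc());		/* second alloc reuses the cached one */
	return 0;
}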
diff --git a/features/rt/smp-Wake-ksoftirqd-on-PREEMPT_RT-instead-do_softirq.patch b/features/rt/smp-Wake-ksoftirqd-on-PREEMPT_RT-instead-do_softirq.patch
new file mode 100644
index 00000000..3c937ac4
--- /dev/null
+++ b/features/rt/smp-Wake-ksoftirqd-on-PREEMPT_RT-instead-do_softirq.patch
@@ -0,0 +1,47 @@
+From ba932c3063ef622c49da8054b96051fc0ba50a38 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 15 Feb 2021 18:44:12 +0100
+Subject: [PATCH 040/191] smp: Wake ksoftirqd on PREEMPT_RT instead
+ do_softirq().
+
+The softirq implementation on PREEMPT_RT does not provide do_softirq().
+The other user of do_softirq() is replaced with a local_bh_disable()
++ enable() around the possible raise-softirq invocation. This can not be
+done here because migration_cpu_stop() is invoked with disabled
+preemption.
+
+Wake the softirq thread on PREEMPT_RT if there are any pending softirqs.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/smp.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/smp.c b/kernel/smp.c
+index aeb0adfa0606..01e9d01d1866 100644
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -450,8 +450,18 @@ void flush_smp_call_function_from_idle(void)
+
+ local_irq_save(flags);
+ flush_smp_call_function_queue(true);
+- if (local_softirq_pending())
+- do_softirq();
++
++ if (local_softirq_pending()) {
++
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ do_softirq();
++ } else {
++ struct task_struct *ksoftirqd = this_cpu_ksoftirqd();
++
++ if (ksoftirqd && ksoftirqd->state != TASK_RUNNING)
++ wake_up_process(ksoftirqd);
++ }
++ }
+
+ local_irq_restore(flags);
+ }
+--
+2.19.1
+
diff --git a/features/rt/softirq-Add-RT-specific-softirq-accounting.patch b/features/rt/softirq-Add-RT-specific-softirq-accounting.patch
new file mode 100644
index 00000000..2f9c1130
--- /dev/null
+++ b/features/rt/softirq-Add-RT-specific-softirq-accounting.patch
@@ -0,0 +1,74 @@
+From e9763da838fb3d678d582b5bbbe56c960a899fb1 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:55:53 +0100
+Subject: [PATCH 055/191] softirq: Add RT specific softirq accounting
+
+RT requires the softirq processing and local bottom-half disabled regions to
+be preemptible. Using the normal preempt count based serialization is
+therefore not possible because this implicitly disables preemption.
+
+RT kernels use a per CPU local lock to serialize bottom halves. As
+local_bh_disable() can nest, the lock can only be acquired on the outermost
+invocation of local_bh_disable() and released when the nest count becomes
+zero. Tasks which hold the local lock can be preempted, so it is required to
+keep track of the nest count per task.
+
+Add a RT only counter to task struct and adjust the relevant macros in
+preempt.h.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hardirq.h | 1 +
+ include/linux/preempt.h | 6 +++++-
+ include/linux/sched.h | 3 +++
+ 3 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
+index 0926e9ca4d85..76878b357ffa 100644
+--- a/include/linux/hardirq.h
++++ b/include/linux/hardirq.h
+@@ -6,6 +6,7 @@
+ #include <linux/preempt.h>
+ #include <linux/lockdep.h>
+ #include <linux/ftrace_irq.h>
++#include <linux/sched.h>
+ #include <linux/vtime.h>
+ #include <asm/hardirq.h>
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 69cc8b64aa3a..9881eac0698f 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -79,7 +79,11 @@
+
+ #define nmi_count() (preempt_count() & NMI_MASK)
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++#ifdef CONFIG_PREEMPT_RT
++# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
++#else
++# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++#endif
+ #define irq_count() (nmi_count() | hardirq_count() | softirq_count())
+
+ /*
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index cf245bc237e7..c8998312d7bc 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1043,6 +1043,9 @@ struct task_struct {
+ int softirq_context;
+ int irq_config;
+ #endif
++#ifdef CONFIG_PREEMPT_RT
++ int softirq_disable_cnt;
++#endif
+
+ #ifdef CONFIG_LOCKDEP
+ # define MAX_LOCK_DEPTH 48UL
+--
+2.19.1
+
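The bookkeeping described above, take the serializing lock only at the outermost local_bh_disable() and remember the depth in the task because the holder may be preempted, can be mimicked with a per-thread counter. The sketch below is a userspace analogy with invented names and makes no claim to match the later softirq patches in this series:

#include <pthread.h>

static pthread_mutex_t bh_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of task_struct::softirq_disable_cnt: one nest count per thread. */
static __thread int bh_disable_cnt;

static void local_bh_disable_sketch(void)
{
	if (bh_disable_cnt++ == 0)
		pthread_mutex_lock(&bh_lock);	/* only the outermost level locks  */
}

static void local_bh_enable_sketch(void)
{
	if (--bh_disable_cnt == 0)
		pthread_mutex_unlock(&bh_lock);	/* nest count dropped back to zero */
}

/* Analogue of the RT softirq_count(): a per-task question, not a per-CPU one. */
static int in_bh_disabled_region(void)
{
	return bh_disable_cnt != 0;
}

Because the count lives in the task rather than in the per-CPU preempt count, the question "am I in a BH-disabled region?" keeps a meaningful answer even when the task is preempted while holding the lock.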
diff --git a/features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch b/features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch
new file mode 100644
index 00000000..21b60936
--- /dev/null
+++ b/features/rt/softirq-Check-preemption-after-reenabling-interrupts.patch
@@ -0,0 +1,150 @@
+From 601c56cd3ae5a8d6c944ec0b378e10877f102bdc Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 13 Nov 2011 17:17:09 +0100
+Subject: [PATCH 124/191] softirq: Check preemption after reenabling interrupts
+
+raise_softirq_irqoff() disables interrupts and wakes the softirq
+daemon, but after reenabling interrupts there is no preemption check,
+so the execution of the softirq thread might be delayed arbitrarily.
+
+In principle we could add that check to local_irq_enable/restore, but
+that's overkill as the raise_softirq_irqoff() sections are the only
+ones which show this behaviour.
+
+Reported-by: Carsten Emde <cbe@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/preempt.h | 3 +++
+ lib/irq_poll.c | 5 +++++
+ net/core/dev.c | 7 +++++++
+ 3 files changed, 15 insertions(+)
+
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 5ceac863e729..fb140e00f74d 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -190,8 +190,10 @@ do { \
+
+ #ifdef CONFIG_PREEMPT_RT
+ # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++# define preempt_check_resched_rt() preempt_check_resched()
+ #else
+ # define preempt_enable_no_resched() preempt_enable()
++# define preempt_check_resched_rt() barrier();
+ #endif
+
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+@@ -262,6 +264,7 @@ do { \
+ #define preempt_disable_notrace() barrier()
+ #define preempt_enable_no_resched_notrace() barrier()
+ #define preempt_enable_notrace() barrier()
++#define preempt_check_resched_rt() barrier()
+ #define preemptible() 0
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+diff --git a/lib/irq_poll.c b/lib/irq_poll.c
+index 2f17b488d58e..7557bf7ecf1f 100644
+--- a/lib/irq_poll.c
++++ b/lib/irq_poll.c
+@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop)
+ list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
+ raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_sched);
+
+@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *iop)
+ local_irq_save(flags);
+ __irq_poll_complete(iop);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(irq_poll_complete);
+
+@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
+ }
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Even though interrupts have been re-enabled, this
+ * access is safe because interrupts can only add new
+@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ /**
+@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned int cpu)
+ this_cpu_ptr(&blk_cpu_iopoll));
+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ return 0;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 6c5967e80132..86a599a41062 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3060,6 +3060,7 @@ static void __netif_reschedule(struct Qdisc *q)
+ sd->output_queue_tailp = &q->next_sched;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ void __netif_schedule(struct Qdisc *q)
+@@ -3122,6 +3123,7 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+ __this_cpu_write(softnet_data.completion_queue, skb);
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__dev_kfree_skb_irq);
+
+@@ -4617,6 +4619,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ rps_unlock(sd);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+
+ atomic_long_inc(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+@@ -6306,12 +6309,14 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
+ sd->rps_ipi_list = NULL;
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Send pending IPI's to kick RPS processing on remote cpus. */
+ net_rps_send_ipi(remsd);
+ } else
+ #endif
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
+@@ -6389,6 +6394,7 @@ void __napi_schedule(struct napi_struct *n)
+ local_irq_save(flags);
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__napi_schedule);
+
+@@ -11144,6 +11150,7 @@ static int dev_cpu_dead(unsigned int oldcpu)
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ #ifdef CONFIG_RPS
+ remsd = oldsd->rps_ipi_list;
+--
+2.19.1
+
diff --git a/features/rt/softirq-Disable-softirq-stacks-for-RT.patch b/features/rt/softirq-Disable-softirq-stacks-for-RT.patch
new file mode 100644
index 00000000..17d140e9
--- /dev/null
+++ b/features/rt/softirq-Disable-softirq-stacks-for-RT.patch
@@ -0,0 +1,174 @@
+From 9b48938bb80459112366ce3bd8d17212644d58bc Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Jul 2011 13:59:17 +0200
+Subject: [PATCH 125/191] softirq: Disable softirq stacks for RT
+
+Disable extra stacks for softirqs. We want to preempt softirqs and
+having them on a special IRQ stack does not make this easier.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/powerpc/kernel/irq.c | 2 ++
+ arch/powerpc/kernel/misc_32.S | 2 ++
+ arch/powerpc/kernel/misc_64.S | 2 ++
+ arch/sh/kernel/irq.c | 2 ++
+ arch/sparc/kernel/irq_64.c | 2 ++
+ arch/x86/include/asm/irq_stack.h | 3 +++
+ arch/x86/kernel/irq_32.c | 2 ++
+ include/asm-generic/softirq_stack.h | 2 +-
+ 8 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
+index d71fd10a1dd4..4135f7c44dec 100644
+--- a/arch/powerpc/kernel/irq.c
++++ b/arch/powerpc/kernel/irq.c
+@@ -751,10 +751,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly;
+ void *softirq_ctx[NR_CPUS] __read_mostly;
+ void *hardirq_ctx[NR_CPUS] __read_mostly;
+
++#ifndef CONFIG_PREEMPT_RT
+ void do_softirq_own_stack(void)
+ {
+ call_do_softirq(softirq_ctx[smp_processor_id()]);
+ }
++#endif
+
+ irq_hw_number_t virq_to_hw(unsigned int virq)
+ {
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
+index 717e658b90fd..08ee95ad6593 100644
+--- a/arch/powerpc/kernel/misc_32.S
++++ b/arch/powerpc/kernel/misc_32.S
+@@ -31,6 +31,7 @@
+ * We store the saved ksp_limit in the unused part
+ * of the STACK_FRAME_OVERHEAD
+ */
++#ifndef CONFIG_PREEMPT_RT
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ stw r0,4(r1)
+@@ -46,6 +47,7 @@ _GLOBAL(call_do_softirq)
+ stw r10,THREAD+KSP_LIMIT(r2)
+ mtlr r0
+ blr
++#endif
+
+ /*
+ * void call_do_irq(struct pt_regs *regs, void *sp);
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 070465825c21..a6b33f7b3264 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -27,6 +27,7 @@
+
+ .text
+
++#ifndef CONFIG_PREEMPT_RT
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ std r0,16(r1)
+@@ -37,6 +38,7 @@ _GLOBAL(call_do_softirq)
+ ld r0,16(r1)
+ mtlr r0
+ blr
++#endif
+
+ _GLOBAL(call_do_irq)
+ mflr r0
+diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
+index ef0f0827cf57..2d3eca8fee01 100644
+--- a/arch/sh/kernel/irq.c
++++ b/arch/sh/kernel/irq.c
+@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
+ hardirq_ctx[cpu] = NULL;
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ void do_softirq_own_stack(void)
+ {
+ struct thread_info *curctx;
+@@ -176,6 +177,7 @@ void do_softirq_own_stack(void)
+ "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
+ );
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
+index c8848bb681a1..41fa1be980a3 100644
+--- a/arch/sparc/kernel/irq_64.c
++++ b/arch/sparc/kernel/irq_64.c
+@@ -855,6 +855,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
+ set_irq_regs(old_regs);
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ void do_softirq_own_stack(void)
+ {
+ void *orig_sp, *sp = softirq_stack[smp_processor_id()];
+@@ -869,6 +870,7 @@ void do_softirq_own_stack(void)
+ __asm__ __volatile__("mov %0, %%sp"
+ : : "r" (orig_sp));
+ }
++#endif
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
+index 9b2a0ff76c73..fe12ba2a5c65 100644
+--- a/arch/x86/include/asm/irq_stack.h
++++ b/arch/x86/include/asm/irq_stack.h
+@@ -188,6 +188,7 @@
+ #define ASM_CALL_SOFTIRQ \
+ "call %P[__func] \n"
+
++#ifndef CONFIG_PREEMPT_RT
+ /*
+ * Macro to invoke __do_softirq on the irq stack. This is only called from
+ * task context when bottom halfs are about to be reenabled and soft
+@@ -201,6 +202,8 @@
+ __this_cpu_write(hardirq_stack_inuse, false); \
+ }
+
++#endif
++
+ #else /* CONFIG_X86_64 */
+ /* System vector handlers always run on the stack they interrupted. */
+ #define run_sysvec_on_irqstack_cond(func, regs) \
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index 044902d5a3c4..e5dd6da78713 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -132,6 +132,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
+ return 0;
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ void do_softirq_own_stack(void)
+ {
+ struct irq_stack *irqstk;
+@@ -148,6 +149,7 @@ void do_softirq_own_stack(void)
+
+ call_on_stack(__do_softirq, isp);
+ }
++#endif
+
+ void __handle_irq(struct irq_desc *desc, struct pt_regs *regs)
+ {
+diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h
+index eceeecf6a5bd..d3e2d81656e0 100644
+--- a/include/asm-generic/softirq_stack.h
++++ b/include/asm-generic/softirq_stack.h
+@@ -2,7 +2,7 @@
+ #ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
+ #define __ASM_GENERIC_SOFTIRQ_STACK_H
+
+-#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
++#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
+ void do_softirq_own_stack(void);
+ #else
+ static inline void do_softirq_own_stack(void)
+--
+2.19.1
+
diff --git a/features/rt/softirq-Make-softirq-control-and-processing-RT-aware.patch b/features/rt/softirq-Make-softirq-control-and-processing-RT-aware.patch
new file mode 100644
index 00000000..d5463dab
--- /dev/null
+++ b/features/rt/softirq-Make-softirq-control-and-processing-RT-aware.patch
@@ -0,0 +1,266 @@
+From 02c3efe5d766cd7ed6f81c784981b76c91238a21 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:55:56 +0100
+Subject: [PATCH 058/191] softirq: Make softirq control and processing RT aware
+
+Provide a local lock based serialization for soft interrupts on RT which
+allows the local_bh_disabled() sections and servicing soft interrupts to be
+preemptible.
+
+Provide the necessary inline helpers which allow reuse of the bulk of the
+softirq processing code.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/bottom_half.h | 2 +-
+ kernel/softirq.c | 188 ++++++++++++++++++++++++++++++++++--
+ 2 files changed, 182 insertions(+), 8 deletions(-)
+
+diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
+index a19519f4241d..e4dd613a070e 100644
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -4,7 +4,7 @@
+
+ #include <linux/preempt.h>
+
+-#ifdef CONFIG_TRACE_IRQFLAGS
++#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
+ extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+ #else
+ static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 373c6bb84f39..54fd7cc97a4a 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -13,6 +13,7 @@
+ #include <linux/kernel_stat.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
++#include <linux/local_lock.h>
+ #include <linux/mm.h>
+ #include <linux/notifier.h>
+ #include <linux/percpu.h>
+@@ -103,20 +104,189 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
+ #endif
+
+ /*
+- * preempt_count and SOFTIRQ_OFFSET usage:
+- * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+- * softirq processing.
+- * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
++ * SOFTIRQ_OFFSET usage:
++ *
++ * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
++ * to a per CPU counter and to task::softirqs_disabled_cnt.
++ *
++ * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
++ * processing.
++ *
++ * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ * on local_bh_disable or local_bh_enable.
++ *
+ * This lets us distinguish between whether we are currently processing
+ * softirq and whether we just have bh disabled.
+ */
++#ifdef CONFIG_PREEMPT_RT
+
+-#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+- * This is for softirq.c-internal use, where hardirqs are disabled
++ * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
++ * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
++ * softirq disabled section to be preempted.
++ *
++ * The per task counter is used for softirq_count(), in_softirq() and
++ * in_serving_softirqs() because these counts are only valid when the task
++ * holding softirq_ctrl::lock is running.
++ *
++ * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
++ * the task which is in a softirq disabled section is preempted or blocks.
++ */
++struct softirq_ctrl {
++ local_lock_t lock;
++ int cnt;
++};
++
++static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
++ .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
++};
++
++void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
++{
++ unsigned long flags;
++ int newcnt;
++
++ WARN_ON_ONCE(in_hardirq());
++
++ /* First entry of a task into a BH disabled section? */
++ if (!current->softirq_disable_cnt) {
++ if (preemptible()) {
++ local_lock(&softirq_ctrl.lock);
++ /* Required to meet the RCU bottomhalf requirements. */
++ rcu_read_lock();
++ } else {
++ DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
++ }
++ }
++
++ /*
++ * Track the per CPU softirq disabled state. On RT this is per CPU
++ * state to allow preemption of bottom half disabled sections.
++ */
++ newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
++ /*
++ * Reflect the result in the task state to prevent recursion on the
++ * local lock and to make softirq_count() & al work.
++ */
++ current->softirq_disable_cnt = newcnt;
++
++ if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
++ raw_local_irq_save(flags);
++ lockdep_softirqs_off(ip);
++ raw_local_irq_restore(flags);
++ }
++}
++EXPORT_SYMBOL(__local_bh_disable_ip);
++
++static void __local_bh_enable(unsigned int cnt, bool unlock)
++{
++ unsigned long flags;
++ int newcnt;
++
++ DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
++ this_cpu_read(softirq_ctrl.cnt));
++
++ if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
++ raw_local_irq_save(flags);
++ lockdep_softirqs_on(_RET_IP_);
++ raw_local_irq_restore(flags);
++ }
++
++ newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
++ current->softirq_disable_cnt = newcnt;
++
++ if (!newcnt && unlock) {
++ rcu_read_unlock();
++ local_unlock(&softirq_ctrl.lock);
++ }
++}
++
++void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
++{
++ bool preempt_on = preemptible();
++ unsigned long flags;
++ u32 pending;
++ int curcnt;
++
++ WARN_ON_ONCE(in_irq());
++ lockdep_assert_irqs_enabled();
++
++ local_irq_save(flags);
++ curcnt = __this_cpu_read(softirq_ctrl.cnt);
++
++ /*
++ * If this is not reenabling soft interrupts, no point in trying to
++ * run pending ones.
++ */
++ if (curcnt != cnt)
++ goto out;
++
++ pending = local_softirq_pending();
++ if (!pending || ksoftirqd_running(pending))
++ goto out;
++
++ /*
++ * If this was called from non preemptible context, wake up the
++ * softirq daemon.
++ */
++ if (!preempt_on) {
++ wakeup_softirqd();
++ goto out;
++ }
++
++ /*
++ * Adjust softirq count to SOFTIRQ_OFFSET which makes
++ * in_serving_softirq() become true.
++ */
++ cnt = SOFTIRQ_OFFSET;
++ __local_bh_enable(cnt, false);
++ __do_softirq();
++
++out:
++ __local_bh_enable(cnt, preempt_on);
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(__local_bh_enable_ip);
++
++/*
++ * Invoked from ksoftirqd_run() outside of the interrupt disabled section
++ * to acquire the per CPU local lock for reentrancy protection.
++ */
++static inline void ksoftirqd_run_begin(void)
++{
++ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++ local_irq_disable();
++}
++
++/* Counterpart to ksoftirqd_run_begin() */
++static inline void ksoftirqd_run_end(void)
++{
++ __local_bh_enable(SOFTIRQ_OFFSET, true);
++ WARN_ON_ONCE(in_interrupt());
++ local_irq_enable();
++}
++
++static inline void softirq_handle_begin(void) { }
++static inline void softirq_handle_end(void) { }
++
++static inline bool should_wake_ksoftirqd(void)
++{
++ return !this_cpu_read(softirq_ctrl.cnt);
++}
++
++static inline void invoke_softirq(void)
++{
++ if (should_wake_ksoftirqd())
++ wakeup_softirqd();
++}
++
++#else /* CONFIG_PREEMPT_RT */
++
++/*
++ * This one is for softirq.c-internal use, where hardirqs are disabled
+ * legitimately:
+ */
++#ifdef CONFIG_TRACE_IRQFLAGS
+ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+ {
+ unsigned long flags;
+@@ -277,6 +447,8 @@ asmlinkage __visible void do_softirq(void)
+ local_irq_restore(flags);
+ }
+
++#endif /* !CONFIG_PREEMPT_RT */
++
+ /*
+ * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
+ * but break the loop if need_resched() is set or after 2 ms.
+@@ -381,8 +553,10 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+ pending >>= softirq_bit;
+ }
+
+- if (__this_cpu_read(ksoftirqd) == current)
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
++ __this_cpu_read(ksoftirqd) == current)
+ rcu_softirq_qs();
++
+ local_irq_disable();
+
+ pending = local_softirq_pending();
+--
+2.19.1
+
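For readers new to the local_lock API this patch builds its serialization on, a minimal sketch of the pattern may help; it uses a hypothetical per-CPU counter, not the kernel's actual softirq_ctrl code:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct bh_ctrl {
            local_lock_t lock;
            int cnt;
    };

    static DEFINE_PER_CPU(struct bh_ctrl, bh_ctrl) = {
            .lock = INIT_LOCAL_LOCK(bh_ctrl.lock),
    };

    static void enter_protected_section(void)
    {
            /* On PREEMPT_RT this takes a per-CPU sleeping lock, so the
             * section stays preemptible; on !RT it disables preemption. */
            local_lock(&bh_ctrl.lock);
            __this_cpu_inc(bh_ctrl.cnt);
    }

    static void leave_protected_section(void)
    {
            __this_cpu_dec(bh_ctrl.cnt);
            local_unlock(&bh_ctrl.lock);
    }

The patch applies the same shape to softirq_ctrl, with the additional per-task counter described in the comments above.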
diff --git a/features/rt/softirq-Move-various-protections-into-inline-helpers.patch b/features/rt/softirq-Move-various-protections-into-inline-helpers.patch
new file mode 100644
index 00000000..d8ee863b
--- /dev/null
+++ b/features/rt/softirq-Move-various-protections-into-inline-helpers.patch
@@ -0,0 +1,107 @@
+From 8ed4f8ea1246e9edcf2daad3538292712a22620d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:55:55 +0100
+Subject: [PATCH 057/191] softirq: Move various protections into inline helpers
+
+To allow reuse of the bulk of softirq processing code for RT and to avoid
+#ifdeffery all over the place, split protections for various code sections
+out into inline helpers so the RT variant can just replace them in one go.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/softirq.c | 39 ++++++++++++++++++++++++++++++++-------
+ 1 file changed, 32 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 47321dde8f32..373c6bb84f39 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -207,6 +207,32 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
+ }
+ EXPORT_SYMBOL(__local_bh_enable_ip);
+
++static inline void softirq_handle_begin(void)
++{
++ __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++}
++
++static inline void softirq_handle_end(void)
++{
++ __local_bh_enable(SOFTIRQ_OFFSET);
++ WARN_ON_ONCE(in_interrupt());
++}
++
++static inline void ksoftirqd_run_begin(void)
++{
++ local_irq_disable();
++}
++
++static inline void ksoftirqd_run_end(void)
++{
++ local_irq_enable();
++}
++
++static inline bool should_wake_ksoftirqd(void)
++{
++ return true;
++}
++
+ static inline void invoke_softirq(void)
+ {
+ if (ksoftirqd_running(local_softirq_pending()))
+@@ -319,7 +345,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+
+ pending = local_softirq_pending();
+
+- __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
++ softirq_handle_begin();
+ in_hardirq = lockdep_softirq_start();
+ account_softirq_enter(current);
+
+@@ -370,8 +396,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
+
+ account_softirq_exit(current);
+ lockdep_softirq_end(in_hardirq);
+- __local_bh_enable(SOFTIRQ_OFFSET);
+- WARN_ON_ONCE(in_interrupt());
++ softirq_handle_end();
+ current_restore_flags(old_flags, PF_MEMALLOC);
+ }
+
+@@ -466,7 +491,7 @@ inline void raise_softirq_irqoff(unsigned int nr)
+ * Otherwise we wake up ksoftirqd to make sure we
+ * schedule the softirq soon.
+ */
+- if (!in_interrupt())
++ if (!in_interrupt() && should_wake_ksoftirqd())
+ wakeup_softirqd();
+ }
+
+@@ -694,18 +719,18 @@ static int ksoftirqd_should_run(unsigned int cpu)
+
+ static void run_ksoftirqd(unsigned int cpu)
+ {
+- local_irq_disable();
++ ksoftirqd_run_begin();
+ if (local_softirq_pending()) {
+ /*
+ * We can safely run softirq on inline stack, as we are not deep
+ * in the task stack here.
+ */
+ __do_softirq();
+- local_irq_enable();
++ ksoftirqd_run_end();
+ cond_resched();
+ return;
+ }
+- local_irq_enable();
++ ksoftirqd_run_end();
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+--
+2.19.1
+
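The split-into-helpers approach generalizes beyond softirq.c; as a rough sketch with invented names (not the softirq code itself), the shape is:

    #include <linux/irqflags.h>

    /* Shared fast path; only the protection differs per configuration. */
    #ifdef CONFIG_PREEMPT_RT
    static inline void handler_protect(void)   { /* RT-specific locking */ }
    static inline void handler_unprotect(void) { /* RT-specific unlock  */ }
    #else
    static inline void handler_protect(void)   { local_irq_disable(); }
    static inline void handler_unprotect(void) { local_irq_enable(); }
    #endif

    static void run_handler(void)
    {
            handler_protect();
            /* ... processing common to both configurations ... */
            handler_unprotect();
    }

This is how the RT-aware softirq patch in this series swaps in its own variants without duplicating __do_softirq().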
diff --git a/features/rt/sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/features/rt/sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch
new file mode 100644
index 00000000..bb5be4d0
--- /dev/null
+++ b/features/rt/sunrpc-Make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -0,0 +1,59 @@
+From bea756647716135bea94d786e25a517fd9bb1331 Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Wed, 18 Feb 2015 16:05:28 +0100
+Subject: [PATCH 137/191] sunrpc: Make svc_xprt_do_enqueue() use
+ get_cpu_light()
+
+|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:915
+|in_atomic(): 1, irqs_disabled(): 0, pid: 3194, name: rpc.nfsd
+|Preemption disabled at:[<ffffffffa06bf0bb>] svc_xprt_received+0x4b/0xc0 [sunrpc]
+|CPU: 6 PID: 3194 Comm: rpc.nfsd Not tainted 3.18.7-rt1 #9
+|Hardware name: MEDION MS-7848/MS-7848, BIOS M7848W08.404 11/06/2014
+| ffff880409630000 ffff8800d9a33c78 ffffffff815bdeb5 0000000000000002
+| 0000000000000000 ffff8800d9a33c98 ffffffff81073c86 ffff880408dd6008
+| ffff880408dd6000 ffff8800d9a33cb8 ffffffff815c3d84 ffff88040b3ac000
+|Call Trace:
+| [<ffffffff815bdeb5>] dump_stack+0x4f/0x9e
+| [<ffffffff81073c86>] __might_sleep+0xe6/0x150
+| [<ffffffff815c3d84>] rt_spin_lock+0x24/0x50
+| [<ffffffffa06beec0>] svc_xprt_do_enqueue+0x80/0x230 [sunrpc]
+| [<ffffffffa06bf0bb>] svc_xprt_received+0x4b/0xc0 [sunrpc]
+| [<ffffffffa06c03ed>] svc_add_new_perm_xprt+0x6d/0x80 [sunrpc]
+| [<ffffffffa06b2693>] svc_addsock+0x143/0x200 [sunrpc]
+| [<ffffffffa072e69c>] write_ports+0x28c/0x340 [nfsd]
+| [<ffffffffa072d2ac>] nfsctl_transaction_write+0x4c/0x80 [nfsd]
+| [<ffffffff8117ee83>] vfs_write+0xb3/0x1d0
+| [<ffffffff8117f889>] SyS_write+0x49/0xb0
+| [<ffffffff815c4556>] system_call_fastpath+0x16/0x1b
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/sunrpc/svc_xprt.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index dcc50ae54550..e4a0dc8f8e40 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -422,7 +422,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+ if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
+ return;
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
+
+ atomic_long_inc(&pool->sp_stats.packets);
+@@ -446,7 +446,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+ rqstp = NULL;
+ out_unlock:
+ rcu_read_unlock();
+- put_cpu();
++ put_cpu_light();
+ trace_svc_xprt_do_enqueue(xprt, rqstp);
+ }
+ EXPORT_SYMBOL_GPL(svc_xprt_do_enqueue);
+--
+2.19.1
+
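A hypothetical caller following the same pattern as the svc_xprt change might look like the sketch below; get_cpu_light()/put_cpu_light() are assumed from elsewhere in this RT series (not mainline), where on RT they are expected to disable only migration rather than preemption:

    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    struct my_pool {
            spinlock_t lock;        /* a sleeping lock on PREEMPT_RT */
            int queued;
    };

    /* Hypothetical per-CPU pools; locks initialised elsewhere with
     * spin_lock_init(). */
    static DEFINE_PER_CPU(struct my_pool, my_pools);

    static void dispatch_to_local_pool(void)
    {
            struct my_pool *pool;
            int cpu;

            /* On RT only migration is disabled here, so taking the
             * sleeping pool lock below stays legal, unlike under
             * get_cpu()/preempt_disable(). */
            cpu = get_cpu_light();
            pool = per_cpu_ptr(&my_pools, cpu);
            spin_lock(&pool->lock);
            pool->queued++;
            spin_unlock(&pool->lock);
            put_cpu_light();
    }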
diff --git a/features/rt/sysfs-Add-sys-kernel-realtime-entry.patch b/features/rt/sysfs-Add-sys-kernel-realtime-entry.patch
new file mode 100644
index 00000000..a70552cd
--- /dev/null
+++ b/features/rt/sysfs-Add-sys-kernel-realtime-entry.patch
@@ -0,0 +1,53 @@
+From 0467137e4e4d875cc0711eaefbd0da8800274d86 Mon Sep 17 00:00:00 2001
+From: Clark Williams <williams@redhat.com>
+Date: Sat, 30 Jul 2011 21:55:53 -0500
+Subject: [PATCH 190/191] sysfs: Add /sys/kernel/realtime entry
+
+Add a /sys/kernel entry to indicate that the kernel is a
+realtime kernel.
+
+Clark says that he needs this for udev rules; udev needs to evaluate
+whether it's a PREEMPT_RT kernel a few thousand times, and parsing uname
+output is too slow for that.
+
+Are there better solutions? Should it exist and return 0 on !-rt?
+
+Signed-off-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+---
+ kernel/ksysfs.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
+index 35859da8bd4f..dfff31ed644a 100644
+--- a/kernel/ksysfs.c
++++ b/kernel/ksysfs.c
+@@ -138,6 +138,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
+
+ #endif /* CONFIG_CRASH_CORE */
+
++#if defined(CONFIG_PREEMPT_RT)
++static ssize_t realtime_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", 1);
++}
++KERNEL_ATTR_RO(realtime);
++#endif
++
+ /* whether file capabilities are enabled */
+ static ssize_t fscaps_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+@@ -228,6 +237,9 @@ static struct attribute * kernel_attrs[] = {
+ #ifndef CONFIG_TINY_RCU
+ &rcu_expedited_attr.attr,
+ &rcu_normal_attr.attr,
++#endif
++#ifdef CONFIG_PREEMPT_RT
++ &realtime_attr.attr,
+ #endif
+ NULL
+ };
+--
+2.19.1
+
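A hedged sketch of the userspace side the commit message alludes to (a hypothetical check; since the attribute is only created on PREEMPT_RT kernels, a missing file simply means "not realtime"):

    #include <stdio.h>

    static int kernel_is_realtime(void)
    {
            FILE *f = fopen("/sys/kernel/realtime", "r");
            int val = 0;

            if (!f)
                    return 0;       /* attribute absent: not an RT kernel */
            if (fscanf(f, "%d", &val) != 1)
                    val = 0;
            fclose(f);
            return val == 1;
    }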
diff --git a/features/rt/tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch b/features/rt/tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch
new file mode 100644
index 00000000..7f085a8d
--- /dev/null
+++ b/features/rt/tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch
@@ -0,0 +1,108 @@
+From 733206b1d60fe02ac81756b268804c19445362d2 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:10 +0100
+Subject: [PATCH 047/191] tasklets: Prevent tasklet_unlock_spin_wait() deadlock
+ on RT
+
+tasklet_unlock_spin_wait() spin waits for the TASKLET_STATE_SCHED bit in
+the tasklet state to be cleared. This works on !RT nicely because the
+corresponding execution can only happen on a different CPU.
+
+On RT softirq processing is preemptible; therefore a task preempting the
+softirq processing thread can spin forever.
+
+Prevent this by invoking local_bh_disable()/enable() inside the loop. In
+case that the softirq processing thread was preempted by the current task,
+current will block on the local lock which yields the CPU to the preempted
+softirq processing thread. If the tasklet is processed on a different CPU
+then the local_bh_disable()/enable() pair is just a waste of processor
+cycles.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 12 ++----------
+ kernel/softirq.c | 28 +++++++++++++++++++++++++++-
+ 2 files changed, 29 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index ee3ce4f852b7..d6876e1aa6c1 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -654,7 +654,7 @@ enum
+ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
+ };
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+ static inline int tasklet_trylock(struct tasklet_struct *t)
+ {
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+@@ -662,16 +662,8 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
+
+ void tasklet_unlock(struct tasklet_struct *t);
+ void tasklet_unlock_wait(struct tasklet_struct *t);
++void tasklet_unlock_spin_wait(struct tasklet_struct *t);
+
+-/*
+- * Do not use in new code. Waiting for tasklets from atomic contexts is
+- * error prone and should be avoided.
+- */
+-static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+-{
+- while (test_bit(TASKLET_STATE_RUN, &t->state))
+- cpu_relax();
+-}
+ #else
+ static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+ static inline void tasklet_unlock(struct tasklet_struct *t) { }
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 1d910294faf7..47321dde8f32 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -616,6 +616,32 @@ void tasklet_init(struct tasklet_struct *t,
+ }
+ EXPORT_SYMBOL(tasklet_init);
+
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
++/*
++ * Do not use in new code. Waiting for tasklets from atomic contexts is
++ * error prone and should be avoided.
++ */
++void tasklet_unlock_spin_wait(struct tasklet_struct *t)
++{
++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
++ /*
++ * Prevent a live lock when current preempted soft
++ * interrupt processing or prevents ksoftirqd from
++ * running. If the tasklet runs on a different CPU
++ * then this has no effect other than doing the BH
++ * disable/enable dance for nothing.
++ */
++ local_bh_disable();
++ local_bh_enable();
++ } else {
++ cpu_relax();
++ }
++ }
++}
++EXPORT_SYMBOL(tasklet_unlock_spin_wait);
++#endif
++
+ void tasklet_kill(struct tasklet_struct *t)
+ {
+ if (in_interrupt())
+@@ -629,7 +655,7 @@ void tasklet_kill(struct tasklet_struct *t)
+ }
+ EXPORT_SYMBOL(tasklet_kill);
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+ void tasklet_unlock(struct tasklet_struct *t)
+ {
+ smp_mb__before_atomic();
+--
+2.19.1
+
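The disable/enable "yield" trick generalizes to any RT spin wait on state owned by a possibly preempted BH-disabled section; a minimal sketch with a hypothetical condition callback (not the tasklet code):

    #include <linux/kconfig.h>
    #include <linux/types.h>
    #include <linux/bottom_half.h>
    #include <linux/processor.h>

    static void rt_safe_spin_wait(bool (*done)(void))
    {
            while (!done()) {
                    if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
                            /* Block briefly on the BH local lock so a
                             * preempted softirq section can finish. */
                            local_bh_disable();
                            local_bh_enable();
                    } else {
                            cpu_relax();
                    }
            }
    }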
diff --git a/features/rt/tasklets-Provide-tasklet_disable_in_atomic.patch b/features/rt/tasklets-Provide-tasklet_disable_in_atomic.patch
new file mode 100644
index 00000000..89d0730f
--- /dev/null
+++ b/features/rt/tasklets-Provide-tasklet_disable_in_atomic.patch
@@ -0,0 +1,67 @@
+From 21b5c4734bb2b5f6e37dad86b2857ac1b2ae1a16 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:06 +0100
+Subject: [PATCH 043/191] tasklets: Provide tasklet_disable_in_atomic()
+
+Replacing the spin wait loops in tasklet_unlock_wait() with
+wait_var_event() is not possible as a handful of tasklet_disable()
+invocations are happening in atomic context. All other invocations are in
+teardown paths which can sleep.
+
+Provide tasklet_disable_in_atomic() and tasklet_unlock_spin_wait() to
+convert the few atomic use cases over, which allows changing
+tasklet_disable() and tasklet_unlock_wait() in a later step.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index b49ac2639b3e..9d9475f7b89f 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -671,10 +671,21 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+ while (test_bit(TASKLET_STATE_RUN, &t->state))
+ cpu_relax();
+ }
++
++/*
++ * Do not use in new code. Waiting for tasklets from atomic contexts is
++ * error prone and should be avoided.
++ */
++static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
++{
++ while (test_bit(TASKLET_STATE_RUN, &t->state))
++ cpu_relax();
++}
+ #else
+ static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+ static inline void tasklet_unlock(struct tasklet_struct *t) { }
+ static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
++static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
+ #endif
+
+ extern void __tasklet_schedule(struct tasklet_struct *t);
+@@ -699,6 +710,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t)
+ smp_mb__after_atomic();
+ }
+
++/*
++ * Do not use in new code. Disabling tasklets from atomic contexts is
++ * error prone and should be avoided.
++ */
++static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
++{
++ tasklet_disable_nosync(t);
++ tasklet_unlock_spin_wait(t);
++ smp_mb();
++}
++
+ static inline void tasklet_disable(struct tasklet_struct *t)
+ {
+ tasklet_disable_nosync(t);
+--
+2.19.1
+
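A hypothetical caller of the new helper, sketching the atomic-context case the commit message describes (driver and field names invented for illustration):

    #include <linux/interrupt.h>
    #include <linux/types.h>

    struct rx_dev {
            struct tasklet_struct rx_tasklet;
            bool rx_stopped;
    };

    static void rx_reconfigure_in_atomic(struct rx_dev *dev)
    {
            /* Runs in hard interrupt context, so tasklet_disable(),
             * which may sleep after later patches in this series,
             * cannot be used here. */
            tasklet_disable_in_atomic(&dev->rx_tasklet);
            dev->rx_stopped = true;         /* tasklet guaranteed idle  */
            /* ... reprogram hardware while the tasklet cannot run ... */
            dev->rx_stopped = false;
            tasklet_enable(&dev->rx_tasklet);
    }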
diff --git a/features/rt/tasklets-Replace-barrier-with-cpu_relax-in-tasklet_u.patch b/features/rt/tasklets-Replace-barrier-with-cpu_relax-in-tasklet_u.patch
new file mode 100644
index 00000000..1a86c472
--- /dev/null
+++ b/features/rt/tasklets-Replace-barrier-with-cpu_relax-in-tasklet_u.patch
@@ -0,0 +1,34 @@
+From 5c591a9398a637929ea3593614fadff800e6009f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:04 +0100
+Subject: [PATCH 041/191] tasklets: Replace barrier() with cpu_relax() in
+ tasklet_unlock_wait()
+
+A barrier() in a tight loop which waits for something to happen on a remote
+CPU is a pointless exercise. Replace it with cpu_relax() which allows HT
+siblings to make progress.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 967e25767153..b4cf773638dc 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -668,7 +668,8 @@ static inline void tasklet_unlock(struct tasklet_struct *t)
+
+ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+ {
+- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
++ while (test_bit(TASKLET_STATE_RUN, &t->state))
++ cpu_relax();
+ }
+ #else
+ #define tasklet_trylock(t) 1
+--
+2.19.1
+
diff --git a/features/rt/tasklets-Replace-spin-wait-in-tasklet_kill.patch b/features/rt/tasklets-Replace-spin-wait-in-tasklet_kill.patch
new file mode 100644
index 00000000..1beafa8d
--- /dev/null
+++ b/features/rt/tasklets-Replace-spin-wait-in-tasklet_kill.patch
@@ -0,0 +1,73 @@
+From e7314e8cbafc258fde5e864eafd6661a847790ec Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 9 Mar 2021 09:42:09 +0100
+Subject: [PATCH 046/191] tasklets: Replace spin wait in tasklet_kill()
+
+tasklet_kill() spin waits for TASKLET_STATE_SCHED to be cleared invoking
+yield() from inside the loop. yield() is an ill-defined mechanism and the
+result might still be wasting CPU cycles in a tight loop which is
+especially painful in a guest when the CPU running the tasklet is scheduled
+out.
+
+tasklet_kill() is used in teardown paths and not performance critical at
+all. Replace the spin wait with wait_var_event().
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/softirq.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 7cd63df59e1c..1d910294faf7 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -532,6 +532,16 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule);
+
++static inline bool tasklet_clear_sched(struct tasklet_struct *t)
++{
++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
++ wake_up_var(&t->state);
++ return true;
++ }
++
++ return false;
++}
++
+ static void tasklet_action_common(struct softirq_action *a,
+ struct tasklet_head *tl_head,
+ unsigned int softirq_nr)
+@@ -551,8 +561,7 @@ static void tasklet_action_common(struct softirq_action *a,
+
+ if (tasklet_trylock(t)) {
+ if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+- &t->state))
++ if (!tasklet_clear_sched(t))
+ BUG();
+ if (t->use_callback)
+ t->callback(t);
+@@ -612,13 +621,11 @@ void tasklet_kill(struct tasklet_struct *t)
+ if (in_interrupt())
+ pr_notice("Attempt to kill tasklet from interrupt\n");
+
+- while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+- do {
+- yield();
+- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+- }
++ while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
++ wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
++
+ tasklet_unlock_wait(t);
+- clear_bit(TASKLET_STATE_SCHED, &t->state);
++ tasklet_clear_sched(t);
+ }
+ EXPORT_SYMBOL(tasklet_kill);
+
+--
+2.19.1
+
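The wait_var_event()/wake_up_var() pairing used here is generic; a stand-alone sketch with a hypothetical busy bit (not the tasklet state machinery):

    #include <linux/atomic.h>
    #include <linux/bitops.h>
    #include <linux/wait_bit.h>

    static unsigned long busy_state;
    #define MY_BUSY_BIT     0

    static void work_finished(void)
    {
            smp_mb__before_atomic();
            clear_bit(MY_BUSY_BIT, &busy_state);
            smp_mb__after_atomic();
            /* Wake anybody sleeping in wait_var_event(&busy_state, ...). */
            wake_up_var(&busy_state);
    }

    static void wait_until_idle(void)
    {
            /* Sleeps instead of spinning or calling yield(). */
            wait_var_event(&busy_state, !test_bit(MY_BUSY_BIT, &busy_state));
    }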
diff --git a/features/rt/tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch b/features/rt/tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
new file mode 100644
index 00000000..edca165f
--- /dev/null
+++ b/features/rt/tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
@@ -0,0 +1,89 @@
+From 9c90471b76134571fa2dfddb7423d48c521146ab Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 9 Mar 2021 09:42:08 +0100
+Subject: [PATCH 045/191] tasklets: Replace spin wait in tasklet_unlock_wait()
+
+tasklet_unlock_wait() spin waits for TASKLET_STATE_RUN to be cleared. This
+is wasting CPU cycles in a tight loop which is especially painful in a
+guest when the CPU running the tasklet is scheduled out.
+
+tasklet_unlock_wait() is invoked from tasklet_kill() which is used in
+teardown paths and not performance critical at all. Replace the spin wait
+with wait_var_event().
+
+There are no users of tasklet_unlock_wait() which are invoked from atomic
+contexts. The usage in tasklet_disable() has been replaced temporarily with
+the spin waiting variant until the atomic users are fixed up and will be
+converted to the sleep wait variant later.
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 13 ++-----------
+ kernel/softirq.c | 18 ++++++++++++++++++
+ 2 files changed, 20 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index f0b82429950c..ee3ce4f852b7 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -660,17 +660,8 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
+-static inline void tasklet_unlock(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic();
+- clear_bit(TASKLET_STATE_RUN, &(t)->state);
+-}
+-
+-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+-{
+- while (test_bit(TASKLET_STATE_RUN, &t->state))
+- cpu_relax();
+-}
++void tasklet_unlock(struct tasklet_struct *t);
++void tasklet_unlock_wait(struct tasklet_struct *t);
+
+ /*
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 9908ec4a9bfe..7cd63df59e1c 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -25,6 +25,7 @@
+ #include <linux/smpboot.h>
+ #include <linux/tick.h>
+ #include <linux/irq.h>
++#include <linux/wait_bit.h>
+
+ #include <asm/softirq_stack.h>
+
+@@ -621,6 +622,23 @@ void tasklet_kill(struct tasklet_struct *t)
+ }
+ EXPORT_SYMBOL(tasklet_kill);
+
++#ifdef CONFIG_SMP
++void tasklet_unlock(struct tasklet_struct *t)
++{
++ smp_mb__before_atomic();
++ clear_bit(TASKLET_STATE_RUN, &t->state);
++ smp_mb__after_atomic();
++ wake_up_var(&t->state);
++}
++EXPORT_SYMBOL_GPL(tasklet_unlock);
++
++void tasklet_unlock_wait(struct tasklet_struct *t)
++{
++ wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
++}
++EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
++#endif
++
+ void __init softirq_init(void)
+ {
+ int cpu;
+--
+2.19.1
+
diff --git a/features/rt/tasklets-Switch-tasklet_disable-to-the-sleep-wait-va.patch b/features/rt/tasklets-Switch-tasklet_disable-to-the-sleep-wait-va.patch
new file mode 100644
index 00000000..22004d44
--- /dev/null
+++ b/features/rt/tasklets-Switch-tasklet_disable-to-the-sleep-wait-va.patch
@@ -0,0 +1,34 @@
+From 608657964209120854b4be1d11f6d052f34465a9 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:17 +0100
+Subject: [PATCH 054/191] tasklets: Switch tasklet_disable() to the sleep wait
+ variant
+
+ -- NOT FOR IMMEDIATE MERGING --
+
+Now that all users of tasklet_disable() are invoked from sleepable context,
+convert it to use tasklet_unlock_wait() which might sleep.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index d6876e1aa6c1..3b56b73deca2 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -707,8 +707,7 @@ static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+ static inline void tasklet_disable(struct tasklet_struct *t)
+ {
+ tasklet_disable_nosync(t);
+- /* Spin wait until all atomic users are converted */
+- tasklet_unlock_spin_wait(t);
++ tasklet_unlock_wait(t);
+ smp_mb();
+ }
+
+--
+2.19.1
+
diff --git a/features/rt/tasklets-Use-spin-wait-in-tasklet_disable-temporaril.patch b/features/rt/tasklets-Use-spin-wait-in-tasklet_disable-temporaril.patch
new file mode 100644
index 00000000..0847915a
--- /dev/null
+++ b/features/rt/tasklets-Use-spin-wait-in-tasklet_disable-temporaril.patch
@@ -0,0 +1,32 @@
+From a627fa5abc3e55cddc704d352ac54b9cae090728 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:07 +0100
+Subject: [PATCH 044/191] tasklets: Use spin wait in tasklet_disable()
+ temporarily
+
+To ease the transition use spin waiting in tasklet_disable() until all
+usage sites from atomic context have been cleaned up.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 9d9475f7b89f..f0b82429950c 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -724,7 +724,8 @@ static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+ static inline void tasklet_disable(struct tasklet_struct *t)
+ {
+ tasklet_disable_nosync(t);
+- tasklet_unlock_wait(t);
++ /* Spin wait until all atomic users are converted */
++ tasklet_unlock_spin_wait(t);
+ smp_mb();
+ }
+
+--
+2.19.1
+
diff --git a/features/rt/tasklets-Use-static-inlines-for-stub-implementations.patch b/features/rt/tasklets-Use-static-inlines-for-stub-implementations.patch
new file mode 100644
index 00000000..8c933503
--- /dev/null
+++ b/features/rt/tasklets-Use-static-inlines-for-stub-implementations.patch
@@ -0,0 +1,34 @@
+From cc4367c43efdde19bd1a291f56a0327e2b1890ca Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:42:05 +0100
+Subject: [PATCH 042/191] tasklets: Use static inlines for stub implementations
+
+Inlines exist for a reason.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index b4cf773638dc..b49ac2639b3e 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -672,9 +672,9 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+ cpu_relax();
+ }
+ #else
+-#define tasklet_trylock(t) 1
+-#define tasklet_unlock_wait(t) do { } while (0)
+-#define tasklet_unlock(t) do { } while (0)
++static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
++static inline void tasklet_unlock(struct tasklet_struct *t) { }
++static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
+ #endif
+
+ extern void __tasklet_schedule(struct tasklet_struct *t);
+--
+2.19.1
+
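"Inlines exist for a reason": unlike the old do-nothing macros, the static inline stubs keep full type checking on configurations where the code is compiled out. A tiny illustration with hypothetical names:

    struct widget;

    /* Old style: a macro stub swallows any argument without complaint. */
    #define widget_quiesce_macro(w)         do { } while (0)

    /* New style: the stub still checks the argument's type. */
    static inline void widget_quiesce(struct widget *w) { }

    static void example(int not_a_widget)
    {
            widget_quiesce_macro(not_a_widget);     /* compiles, hides the bug */
            /* widget_quiesce(not_a_widget); would warn: int is not a pointer */
    }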
diff --git a/features/rt/tcp-Remove-superfluous-BH-disable-around-listening_h.patch b/features/rt/tcp-Remove-superfluous-BH-disable-around-listening_h.patch
new file mode 100644
index 00000000..0f158b80
--- /dev/null
+++ b/features/rt/tcp-Remove-superfluous-BH-disable-around-listening_h.patch
@@ -0,0 +1,108 @@
+From fcb2e8b29c71939fca604cb4e527c14fd83cc683 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 12 Oct 2020 17:33:54 +0200
+Subject: [PATCH 039/191] tcp: Remove superfluous BH-disable around
+ listening_hash
+
+Commit
+ 9652dc2eb9e40 ("tcp: relax listening_hash operations")
+
+removed the need to disable bottom half while acquiring
+listening_hash.lock. There are still two callers left which disable
+bottom half before the lock is acquired.
+
+Drop local_bh_disable() around __inet_hash() which acquires
+listening_hash->lock, invoke inet_ehash_nolisten() with disabled BH.
+inet_unhash() conditionally acquires listening_hash->lock.
+
+Reported-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/linux-rt-users/12d6f9879a97cd56c09fb53dee343cbb14f7f1f7.camel@gmx.de/
+Link: https://lkml.kernel.org/r/X9CheYjuXWc75Spa@hirez.programming.kicks-ass.net
+---
+ net/ipv4/inet_hashtables.c | 19 ++++++++++++-------
+ net/ipv6/inet6_hashtables.c | 5 +----
+ 2 files changed, 13 insertions(+), 11 deletions(-)
+
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index c96866a53a66..388e3ebb7f57 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -635,7 +635,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
+ int err = 0;
+
+ if (sk->sk_state != TCP_LISTEN) {
++ local_bh_disable();
+ inet_ehash_nolisten(sk, osk, NULL);
++ local_bh_enable();
+ return 0;
+ }
+ WARN_ON(!sk_unhashed(sk));
+@@ -667,11 +669,8 @@ int inet_hash(struct sock *sk)
+ {
+ int err = 0;
+
+- if (sk->sk_state != TCP_CLOSE) {
+- local_bh_disable();
++ if (sk->sk_state != TCP_CLOSE)
+ err = __inet_hash(sk, NULL);
+- local_bh_enable();
+- }
+
+ return err;
+ }
+@@ -682,17 +681,20 @@ void inet_unhash(struct sock *sk)
+ struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+ struct inet_listen_hashbucket *ilb = NULL;
+ spinlock_t *lock;
++ bool state_listen;
+
+ if (sk_unhashed(sk))
+ return;
+
+ if (sk->sk_state == TCP_LISTEN) {
++ state_listen = true;
+ ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+- lock = &ilb->lock;
++ spin_lock(&ilb->lock);
+ } else {
++ state_listen = false;
+ lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
++ spin_lock_bh(lock);
+ }
+- spin_lock_bh(lock);
+ if (sk_unhashed(sk))
+ goto unlock;
+
+@@ -705,7 +707,10 @@ void inet_unhash(struct sock *sk)
+ __sk_nulls_del_node_init_rcu(sk);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ unlock:
+- spin_unlock_bh(lock);
++ if (state_listen)
++ spin_unlock(&ilb->lock);
++ else
++ spin_unlock_bh(lock);
+ }
+ EXPORT_SYMBOL_GPL(inet_unhash);
+
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 55c290d55605..9bad345cba9a 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -333,11 +333,8 @@ int inet6_hash(struct sock *sk)
+ {
+ int err = 0;
+
+- if (sk->sk_state != TCP_CLOSE) {
+- local_bh_disable();
++ if (sk->sk_state != TCP_CLOSE)
+ err = __inet_hash(sk, NULL);
+- local_bh_enable();
+- }
+
+ return err;
+ }
+--
+2.19.1
+
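The underlying idea, shrinking the BH-disabled region to just the code that still needs it, in a stand-alone sketch (hypothetical hash table, not the inet hashing code):

    #include <linux/bottom_half.h>
    #include <linux/list.h>

    struct my_entry {
            struct hlist_node node;
    };

    struct my_table {
            struct hlist_head head;
    };

    static void my_table_add(struct my_table *tbl, struct my_entry *e)
    {
            /* Only the insert itself needs BH disabled; callers no longer
             * wrap the whole operation in local_bh_disable()/enable(). */
            local_bh_disable();
            hlist_add_head(&e->node, &tbl->head);
            local_bh_enable();
    }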
diff --git a/features/rt/tick-sched-Prevent-false-positive-softirq-pending-wa.patch b/features/rt/tick-sched-Prevent-false-positive-softirq-pending-wa.patch
new file mode 100644
index 00000000..fdb4a427
--- /dev/null
+++ b/features/rt/tick-sched-Prevent-false-positive-softirq-pending-wa.patch
@@ -0,0 +1,83 @@
+From 571cd3959f5bd78c55407d18eeb3b1f39d62aac2 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 9 Mar 2021 09:55:57 +0100
+Subject: [PATCH 059/191] tick/sched: Prevent false positive softirq pending
+ warnings on RT
+
+On RT a task which has soft interrupts disabled can block on a lock and
+schedule out to idle while soft interrupts are pending. This triggers the
+warning in the NOHZ idle code which complains about going idle with pending
+soft interrupts. But as the task is blocked soft interrupt processing is
+temporarily blocked as well which means that such a warning is a false
+positive.
+
+To prevent that check the per CPU state which indicates that a scheduled
+out task has soft interrupts disabled.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/bottom_half.h | 6 ++++++
+ kernel/softirq.c | 15 +++++++++++++++
+ kernel/time/tick-sched.c | 2 +-
+ 3 files changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
+index e4dd613a070e..eed86eb0a1de 100644
+--- a/include/linux/bottom_half.h
++++ b/include/linux/bottom_half.h
+@@ -32,4 +32,10 @@ static inline void local_bh_enable(void)
+ __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
+ }
+
++#ifdef CONFIG_PREEMPT_RT
++extern bool local_bh_blocked(void);
++#else
++static inline bool local_bh_blocked(void) { return false; }
++#endif
++
+ #endif /* _LINUX_BH_H */
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 54fd7cc97a4a..4e9077577043 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -141,6 +141,21 @@ static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
+ .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
+ };
+
++/**
++ * local_bh_blocked() - Check for idle whether BH processing is blocked
++ *
++ * Returns false if the per CPU softirq::cnt is 0 otherwise true.
++ *
++ * This is invoked from the idle task to guard against false positive
++ * softirq pending warnings, which would happen when the task which holds
++ * softirq_ctrl::lock was the only running task on the CPU and blocks on
++ * some other lock.
++ */
++bool local_bh_blocked(void)
++{
++ return __this_cpu_read(softirq_ctrl.cnt) != 0;
++}
++
+ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
+ {
+ unsigned long flags;
+diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
+index e10a4af88737..0cc55791b2b6 100644
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -973,7 +973,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+ if (unlikely(local_softirq_pending())) {
+ static int ratelimit;
+
+- if (ratelimit < 10 &&
++ if (ratelimit < 10 && !local_bh_blocked() &&
+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
+ pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n",
+ (unsigned int) local_softirq_pending());
+--
+2.19.1
+
diff --git a/features/rt/timers-Move-clearing-of-base-timer_running-under-bas.patch b/features/rt/timers-Move-clearing-of-base-timer_running-under-bas.patch
new file mode 100644
index 00000000..c95178d3
--- /dev/null
+++ b/features/rt/timers-Move-clearing-of-base-timer_running-under-bas.patch
@@ -0,0 +1,62 @@
+From 13ae7118b5e43a9fd44640f31eb1df12f580a13e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 6 Dec 2020 22:40:07 +0100
+Subject: [PATCH 002/191] timers: Move clearing of base::timer_running under
+ base::lock
+
+syzbot reported KCSAN data races vs. timer_base::timer_running being set to
+NULL without holding base::lock in expire_timers().
+
+This looks innocent and most reads are clearly not problematic but for a
+non-RT kernel it's completely irrelevant whether the store happens before
+or after taking the lock. For an RT kernel moving the store under the lock
+requires an extra unlock/lock pair in the case that there is a waiter for
+the timer. But that's not the end of the world and definitely not worth the
+trouble of adding boatloads of comments and annotations to the code. Famous
+last words...
+
+Reported-by: syzbot+aa7c2385d46c5eba0b89@syzkaller.appspotmail.com
+Reported-by: syzbot+abea4558531bae1ba9fe@syzkaller.appspotmail.com
+Link: https://lkml.kernel.org/r/87lfea7gw8.fsf@nanos.tec.linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ kernel/time/timer.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timer.c b/kernel/time/timer.c
+index f475f1a027c8..a0ec4450b1d8 100644
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1277,8 +1277,10 @@ static inline void timer_base_unlock_expiry(struct timer_base *base)
+ static void timer_sync_wait_running(struct timer_base *base)
+ {
+ if (atomic_read(&base->timer_waiters)) {
++ raw_spin_unlock_irq(&base->lock);
+ spin_unlock(&base->expiry_lock);
+ spin_lock(&base->expiry_lock);
++ raw_spin_lock_irq(&base->lock);
+ }
+ }
+
+@@ -1469,14 +1471,14 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
+ if (timer->flags & TIMER_IRQSAFE) {
+ raw_spin_unlock(&base->lock);
+ call_timer_fn(timer, fn, baseclk);
+- base->running_timer = NULL;
+ raw_spin_lock(&base->lock);
++ base->running_timer = NULL;
+ } else {
+ raw_spin_unlock_irq(&base->lock);
+ call_timer_fn(timer, fn, baseclk);
++ raw_spin_lock_irq(&base->lock);
+ base->running_timer = NULL;
+ timer_sync_wait_running(base);
+- raw_spin_lock_irq(&base->lock);
+ }
+ }
+ }
+--
+2.19.1
+
diff --git a/features/rt/tpm_tis-fix-stall-after-iowrite-s.patch b/features/rt/tpm_tis-fix-stall-after-iowrite-s.patch
new file mode 100644
index 00000000..f18ee11b
--- /dev/null
+++ b/features/rt/tpm_tis-fix-stall-after-iowrite-s.patch
@@ -0,0 +1,83 @@
+From fb6a0698c965b81df94b9d9abe384dfdcd7789e7 Mon Sep 17 00:00:00 2001
+From: Haris Okanovic <haris.okanovic@ni.com>
+Date: Tue, 15 Aug 2017 15:13:08 -0500
+Subject: [PATCH 187/191] tpm_tis: fix stall after iowrite*()s
+
+ioread8() operations to TPM MMIO addresses can stall the cpu when
+immediately following a sequence of iowrite*()'s to the same region.
+
+For example, cyclictest measures ~400us latency spikes when a non-RT
+usermode application communicates with an SPI-based TPM chip (Intel Atom
+E3940 system, PREEMPT_RT kernel). The spikes are caused by a
+stalling ioread8() operation following a sequence of 30+ iowrite8()s to
+the same address. I believe this happens because the write sequence is
+buffered (in cpu or somewhere along the bus), and gets flushed on the
+first LOAD instruction (ioread*()) that follows.
+
+The enclosed change appears to fix this issue: read the TPM chip's
+access register (status code) after every iowrite*() operation to
+amortize the cost of flushing data to chip across multiple instructions.
+
+Signed-off-by: Haris Okanovic <haris.okanovic@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/char/tpm/tpm_tis.c | 29 +++++++++++++++++++++++++++--
+ 1 file changed, 27 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 4ed6e660273a..c2bd0d40b5fc 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -50,6 +50,31 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
+ return container_of(data, struct tpm_tis_tcg_phy, priv);
+ }
+
++#ifdef CONFIG_PREEMPT_RT
++/*
++ * Flushes previous write operations to chip so that a subsequent
++ * ioread*()s won't stall a cpu.
++ */
++static inline void tpm_tis_flush(void __iomem *iobase)
++{
++ ioread8(iobase + TPM_ACCESS(0));
++}
++#else
++#define tpm_tis_flush(iobase) do { } while (0)
++#endif
++
++static inline void tpm_tis_iowrite8(u8 b, void __iomem *iobase, u32 addr)
++{
++ iowrite8(b, iobase + addr);
++ tpm_tis_flush(iobase);
++}
++
++static inline void tpm_tis_iowrite32(u32 b, void __iomem *iobase, u32 addr)
++{
++ iowrite32(b, iobase + addr);
++ tpm_tis_flush(iobase);
++}
++
+ static int interrupts = -1;
+ module_param(interrupts, int, 0444);
+ MODULE_PARM_DESC(interrupts, "Enable interrupts");
+@@ -169,7 +194,7 @@ static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+ while (len--)
+- iowrite8(*value++, phy->iobase + addr);
++ tpm_tis_iowrite8(*value++, phy->iobase, addr);
+
+ return 0;
+ }
+@@ -196,7 +221,7 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
+ {
+ struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
+
+- iowrite32(value, phy->iobase + addr);
++ tpm_tis_iowrite32(value, phy->iobase, addr);
+
+ return 0;
+ }
+--
+2.19.1
+
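The read-back trick is not TPM specific; in generic form it looks like the sketch below, with a hypothetical device and register offsets chosen only for illustration:

    #include <linux/io.h>
    #include <linux/types.h>

    #define DEV_STATUS_REG  0x00    /* any harmless, side-effect-free register */
    #define DEV_DATA_REG    0x04

    static void dev_write_flushed(void __iomem *base, u32 val)
    {
            iowrite32(val, base + DEV_DATA_REG);
            /* Read back a harmless register so the buffered writes are
             * pushed to the device now, instead of stalling a later,
             * latency-critical read. */
            (void)ioread32(base + DEV_STATUS_REG);
    }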
diff --git a/features/rt/trace-Add-migrate-disabled-counter-to-tracing-output.patch b/features/rt/trace-Add-migrate-disabled-counter-to-tracing-output.patch
new file mode 100644
index 00000000..06042832
--- /dev/null
+++ b/features/rt/trace-Add-migrate-disabled-counter-to-tracing-output.patch
@@ -0,0 +1,122 @@
+From 928f16ff0761da409ca9379638e74251da9ab39c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 21:56:42 +0200
+Subject: [PATCH 102/191] trace: Add migrate-disabled counter to tracing output
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/trace_events.h | 2 ++
+ kernel/trace/trace.c | 26 +++++++++++++++++++-------
+ kernel/trace/trace_events.c | 1 +
+ kernel/trace/trace_output.c | 5 +++++
+ 4 files changed, 27 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 28e7af1406f2..1048965a8750 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -69,6 +69,7 @@ struct trace_entry {
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
++ unsigned char migrate_disable;
+ };
+
+ #define TRACE_EVENT_TYPE_MAX \
+@@ -157,6 +158,7 @@ static inline void tracing_generic_entry_update(struct trace_entry *entry,
+ unsigned int trace_ctx)
+ {
+ entry->preempt_count = trace_ctx & 0xff;
++ entry->migrate_disable = (trace_ctx >> 8) & 0xff;
+ entry->pid = current->pid;
+ entry->type = type;
+ entry->flags = trace_ctx >> 16;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index a6baa9b174e4..55bcdaedf7a8 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2587,6 +2587,15 @@ enum print_line_t trace_handle_return(struct trace_seq *s)
+ }
+ EXPORT_SYMBOL_GPL(trace_handle_return);
+
++static unsigned short migration_disable_value(void)
++{
++#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
++ return current->migration_disabled;
++#else
++ return 0;
++#endif
++}
++
+ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
+ {
+ unsigned int trace_flags = irqs_status;
+@@ -2605,7 +2614,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
+ trace_flags |= TRACE_FLAG_NEED_RESCHED;
+ if (test_preempt_need_resched())
+ trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+- return (trace_flags << 16) | (pc & 0xff);
++ return (trace_flags << 16) | (pc & 0xff) |
++ (migration_disable_value() & 0xff) << 8;
+ }
+
+ struct ring_buffer_event *
+@@ -3870,9 +3880,10 @@ static void print_lat_help_header(struct seq_file *m)
+ "# | / _----=> need-resched \n"
+ "# || / _---=> hardirq/softirq \n"
+ "# ||| / _--=> preempt-depth \n"
+- "# |||| / delay \n"
+- "# cmd pid ||||| time | caller \n"
+- "# \\ / ||||| \\ | / \n");
++ "# |||| / _-=> migrate-disable \n"
++ "# ||||| / delay \n"
++ "# cmd pid |||||| time | caller \n"
++ "# \\ / |||||| \\ | / \n");
+ }
+
+ static void print_event_info(struct array_buffer *buf, struct seq_file *m)
+@@ -3910,9 +3921,10 @@ static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file
+ seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
+ seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
+ seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
+- seq_printf(m, "# %.*s||| / delay\n", prec, space);
+- seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
+- seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
++ seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
++ seq_printf(m, "# %.*s|||| / delay\n", prec, space);
++ seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
++ seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
+ }
+
+ void
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index a3563afd412d..ba0b9edd652d 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -183,6 +183,7 @@ static int trace_define_common_fields(void)
+ __common_field(unsigned char, flags);
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
++ __common_field(unsigned char, migrate_disable);
+
+ return ret;
+ }
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index 61255bad7e01..5e32edb8a90a 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -497,6 +497,11 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
+ else
+ trace_seq_putc(s, '.');
+
++ if (entry->migrate_disable)
++ trace_seq_printf(s, "%x", entry->migrate_disable);
++ else
++ trace_seq_putc(s, '.');
++
+ return !trace_seq_has_overflowed(s);
+ }
+
+--
+2.19.1
+
diff --git a/features/rt/tty-serial-omap-Make-the-locking-RT-aware.patch b/features/rt/tty-serial-omap-Make-the-locking-RT-aware.patch
new file mode 100644
index 00000000..a78020da
--- /dev/null
+++ b/features/rt/tty-serial-omap-Make-the-locking-RT-aware.patch
@@ -0,0 +1,48 @@
+From d818b28e0298adb1eeb3b3d722d3bf10318fa90d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 28 Jul 2011 13:32:57 +0200
+Subject: [PATCH 171/191] tty/serial/omap: Make the locking RT aware
+
+The lock is a sleeping lock and local_irq_save() is not the
+optimisation we are looking for. Redo it to make it work on -RT and
+non-RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/tty/serial/omap-serial.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 76b94d0ff586..80371598efea 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -1301,13 +1301,10 @@ serial_omap_console_write(struct console *co, const char *s,
+
+ pm_runtime_get_sync(up->dev);
+
+- local_irq_save(flags);
+- if (up->port.sysrq)
+- locked = 0;
+- else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ if (up->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+- spin_lock(&up->port.lock);
++ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -1336,8 +1333,7 @@ serial_omap_console_write(struct console *co, const char *s,
+ pm_runtime_mark_last_busy(up->dev);
+ pm_runtime_put_autosuspend(up->dev);
+ if (locked)
+- spin_unlock(&up->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+
+ static int __init
+--
+2.19.1
+
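
As context for this patch and the pl011 patch that follows, here is a minimal sketch of the resulting console-write locking pattern. The function name example_console_write() and the direct uart_port parameter are assumptions for illustration, not the driver code itself: in sysrq/oops context the lock is only tried, never spun on, and the irqsave variants keep the code correct both on mainline and on RT, where port->lock is a sleeping lock.

#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>

/* Illustrative sketch only; real console callbacks derive the port from
 * their struct console argument.
 */
static void example_console_write(struct uart_port *port, const char *s,
				  unsigned int count)
{
	unsigned long flags;
	int locked = 1;

	/* Never spin for the lock when called from sysrq or oops paths. */
	if (port->sysrq || oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* ... write the 'count' bytes at 's' to the UART FIFO here ... */

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}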
diff --git a/features/rt/tty-serial-pl011-Make-the-locking-work-on-RT.patch b/features/rt/tty-serial-pl011-Make-the-locking-work-on-RT.patch
new file mode 100644
index 00000000..8d9cbe4a
--- /dev/null
+++ b/features/rt/tty-serial-pl011-Make-the-locking-work-on-RT.patch
@@ -0,0 +1,59 @@
+From c93bc4171de555594d642d4f673d68377fbcc274 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 8 Jan 2013 21:36:51 +0100
+Subject: [PATCH 172/191] tty/serial/pl011: Make the locking work on RT
+
+The lock is a sleeping lock and local_irq_save() is not the optimisation
+we are looking for. Redo it to make it work on -RT and non-RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/tty/serial/amba-pl011.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 4ead0c9048a8..1bbc56263c2d 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -2201,18 +2201,24 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+ {
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int old_cr = 0, new_cr;
+- unsigned long flags;
++ unsigned long flags = 0;
+ int locked = 1;
+
+ clk_enable(uap->clk);
+
+- local_irq_save(flags);
++ /*
++ * local_irq_save(flags);
++ *
++ * This local_irq_save() is nonsense. If we come in via sysrq
++ * handling then interrupts are already disabled. Aside of
++ * that the port.sysrq check is racy on SMP regardless.
++ */
+ if (uap->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+- locked = spin_trylock(&uap->port.lock);
++ locked = spin_trylock_irqsave(&uap->port.lock, flags);
+ else
+- spin_lock(&uap->port.lock);
++ spin_lock_irqsave(&uap->port.lock, flags);
+
+ /*
+ * First save the CR then disable the interrupts
+@@ -2238,8 +2244,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
+ pl011_write(old_cr, uap, REG_CR);
+
+ if (locked)
+- spin_unlock(&uap->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ clk_disable(uap->clk);
+ }
+--
+2.19.1
+
diff --git a/features/rt/u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch b/features/rt/u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
new file mode 100644
index 00000000..af52aa75
--- /dev/null
+++ b/features/rt/u64_stats-Disable-preemption-on-32bit-UP-SMP-with-RT.patch
@@ -0,0 +1,151 @@
+From f134d18cc24a5f4bbd6fa3b658749d22a00136c6 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 17 Aug 2020 12:28:10 +0200
+Subject: [PATCH 088/191] u64_stats: Disable preemption on 32bit-UP/SMP with RT
+ during updates
+
+On RT the seqcount_t is required even on UP because the softirq can be
+preempted. The IRQ handler is threaded so it is also preemptible.
+
+Disable preemption on 32bit-RT during value updates. There is no need to
+disable interrupts on RT because the handler runs threaded. Therefore
+disabling preemption is enough to guarantee that the update is not
+interrupted.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/u64_stats_sync.h | 42 ++++++++++++++++++++++------------
+ 1 file changed, 28 insertions(+), 14 deletions(-)
+
+diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
+index e81856c0ba13..66eb968a09d4 100644
+--- a/include/linux/u64_stats_sync.h
++++ b/include/linux/u64_stats_sync.h
+@@ -66,7 +66,7 @@
+ #include <linux/seqlock.h>
+
+ struct u64_stats_sync {
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++#if BITS_PER_LONG==32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+ seqcount_t seq;
+ #endif
+ };
+@@ -115,7 +115,7 @@ static inline void u64_stats_inc(u64_stats_t *p)
+ }
+ #endif
+
+-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+ #define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
+ #else
+ static inline void u64_stats_init(struct u64_stats_sync *syncp)
+@@ -125,15 +125,19 @@ static inline void u64_stats_init(struct u64_stats_sync *syncp)
+
+ static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
+ {
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_disable();
+ write_seqcount_begin(&syncp->seq);
+ #endif
+ }
+
+ static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
+ {
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+ write_seqcount_end(&syncp->seq);
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_enable();
+ #endif
+ }
+
+@@ -142,8 +146,11 @@ u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
+ {
+ unsigned long flags = 0;
+
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+- local_irq_save(flags);
++#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_disable();
++ else
++ local_irq_save(flags);
+ write_seqcount_begin(&syncp->seq);
+ #endif
+ return flags;
+@@ -153,15 +160,18 @@ static inline void
+ u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
+ unsigned long flags)
+ {
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+ write_seqcount_end(&syncp->seq);
+- local_irq_restore(flags);
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ preempt_enable();
++ else
++ local_irq_restore(flags);
+ #endif
+ }
+
+ static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+ {
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+ return read_seqcount_begin(&syncp->seq);
+ #else
+ return 0;
+@@ -170,7 +180,7 @@ static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *
+
+ static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+ {
+-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
+ preempt_disable();
+ #endif
+ return __u64_stats_fetch_begin(syncp);
+@@ -179,7 +189,7 @@ static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *sy
+ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+ {
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+ return read_seqcount_retry(&syncp->seq, start);
+ #else
+ return false;
+@@ -189,7 +199,7 @@ static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ unsigned int start)
+ {
+-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
+ preempt_enable();
+ #endif
+ return __u64_stats_fetch_retry(syncp, start);
+@@ -203,7 +213,9 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+ */
+ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
+ {
+-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
++ preempt_disable();
++#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
+ local_irq_disable();
+ #endif
+ return __u64_stats_fetch_begin(syncp);
+@@ -212,7 +224,9 @@ static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync
+ static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
+ unsigned int start)
+ {
+-#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
++#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
++ preempt_enable();
++#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
+ local_irq_enable();
+ #endif
+ return __u64_stats_fetch_retry(syncp, start);
+--
+2.19.1
+
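
For orientation, a minimal sketch of how a caller uses these helpers. The pcpu_stats structure and function names are assumptions for illustration; the u64_stats_* calls are the real API touched by this patch. On 32bit-RT the writer side now disables preemption rather than interrupts, while readers loop until they observe a consistent snapshot.

#include <linux/u64_stats_sync.h>

struct pcpu_stats {
	u64_stats_t		packets;
	struct u64_stats_sync	syncp;	/* u64_stats_init() at setup time */
};

/* Writer side: must be serialized per CPU; on 32bit-RT this path now
 * disables preemption instead of IRQs.
 */
static void stats_add_packet(struct pcpu_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->packets);
	u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until the sequence count is stable. */
static u64 stats_read_packets(const struct pcpu_stats *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		val = u64_stats_read(&s->packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}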
diff --git a/features/rt/um-synchronize-kmsg_dumper.patch b/features/rt/um-synchronize-kmsg_dumper.patch
new file mode 100644
index 00000000..e966e64f
--- /dev/null
+++ b/features/rt/um-synchronize-kmsg_dumper.patch
@@ -0,0 +1,60 @@
+From f3906ea0eeb323f758000d9ac28d7c08d769aee5 Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 21 Dec 2020 11:10:03 +0106
+Subject: [PATCH 016/191] um: synchronize kmsg_dumper
+
+The kmsg_dumper can be called from any context and CPU, possibly
+from multiple CPUs simultaneously. Since a static buffer is used
+to retrieve the kernel logs, this buffer must be protected against
+simultaneous dumping.
+
+Cc: Richard Weinberger <richard@nod.at>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/um/kernel/kmsg_dump.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
+index 78befecb79d2..deab9b56b51f 100644
+--- a/arch/um/kernel/kmsg_dump.c
++++ b/arch/um/kernel/kmsg_dump.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <linux/kmsg_dump.h>
++#include <linux/spinlock.h>
+ #include <linux/console.h>
+ #include <linux/string.h>
+ #include <shared/init.h>
+@@ -10,8 +11,10 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason,
+ struct kmsg_dumper_iter *iter)
+ {
++ static DEFINE_SPINLOCK(lock);
+ static char line[1024];
+ struct console *con;
++ unsigned long flags;
+ size_t len = 0;
+
+ /* only dump kmsg when no console is available */
+@@ -30,11 +33,16 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
+ if (con)
+ return;
+
++ if (!spin_trylock_irqsave(&lock, flags))
++ return;
++
+ printf("kmsg_dump:\n");
+ while (kmsg_dump_get_line(iter, true, line, sizeof(line), &len)) {
+ line[len] = '\0';
+ printf("%s", line);
+ }
++
++ spin_unlock_irqrestore(&lock, flags);
+ }
+
+ static struct kmsg_dumper kmsg_dumper = {
+--
+2.19.1
+
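
The idiom used here, a trylock guarding a static buffer with contending callers simply dropping their work, is reusable; below is a small, purely illustrative sketch of the pattern in isolation. The function dump_something() and its message argument are assumptions, not the arch/um code.

#include <linux/kernel.h>
#include <linux/spinlock.h>

static void dump_something(const char *msg)
{
	static DEFINE_SPINLOCK(lock);
	static char buf[1024];
	unsigned long flags;

	/*
	 * May be entered from any context on several CPUs at once; if
	 * another CPU is already dumping into the shared buffer, give
	 * up instead of corrupting it.
	 */
	if (!spin_trylock_irqsave(&lock, flags))
		return;

	snprintf(buf, sizeof(buf), "dump: %s\n", msg);
	/* ... emit buf to the console/backend here ... */

	spin_unlock_irqrestore(&lock, flags);
}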
diff --git a/features/rt/wait.h-include-atomic.h.patch b/features/rt/wait.h-include-atomic.h.patch
new file mode 100644
index 00000000..3b04974c
--- /dev/null
+++ b/features/rt/wait.h-include-atomic.h.patch
@@ -0,0 +1,41 @@
+From 185942fe0a67d2f3018d9384faa6f2e3ac0ed9ea Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 28 Oct 2013 12:19:57 +0100
+Subject: [PATCH 118/191] wait.h: include atomic.h
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+| CC init/main.o
+|In file included from include/linux/mmzone.h:9:0,
+| from include/linux/gfp.h:4,
+| from include/linux/kmod.h:22,
+| from include/linux/module.h:13,
+| from init/main.c:15:
+|include/linux/wait.h: In function ‘wait_on_atomic_t’:
+|include/linux/wait.h:982:2: error: implicit declaration of function ‘atomic_read’ [-Werror=implicit-function-declaration]
+| if (atomic_read(val) == 0)
+| ^
+
+This pops up on ARM. Non-RT kernels get their atomic.h include from spinlock.h.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/wait.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index fe10e8570a52..e9ce878a4906 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -10,6 +10,7 @@
+
+ #include <asm/current.h>
+ #include <uapi/linux/wait.h>
++#include <linux/atomic.h>
+
+ typedef struct wait_queue_entry wait_queue_entry_t;
+
+--
+2.19.1
+
diff --git a/features/rt/x86-Allow-to-enable-RT.patch b/features/rt/x86-Allow-to-enable-RT.patch
new file mode 100644
index 00000000..804952e6
--- /dev/null
+++ b/features/rt/x86-Allow-to-enable-RT.patch
@@ -0,0 +1,27 @@
+From 923160aecfc3fc15a461c6b706c6f2df869474b0 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 7 Aug 2019 18:15:38 +0200
+Subject: [PATCH 161/191] x86: Allow to enable RT
+
+Allow RT to be selected.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 2792879d398e..bb7b237aaa3d 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -27,6 +27,7 @@ config X86_64
+ # Options that are inherently 64-bit kernel only:
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
++ select ARCH_SUPPORTS_RT
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select HAVE_ARCH_SOFT_DIRTY
+ select MODULES_USE_ELF_RELA
+--
+2.19.1
+
diff --git a/features/rt/x86-Enable-RT-also-on-32bit.patch b/features/rt/x86-Enable-RT-also-on-32bit.patch
new file mode 100644
index 00000000..dbf5db26
--- /dev/null
+++ b/features/rt/x86-Enable-RT-also-on-32bit.patch
@@ -0,0 +1,33 @@
+From e3fd694fc2bc4be6d9688ba7fe81cafed748d3ae Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 7 Nov 2019 17:49:20 +0100
+Subject: [PATCH 177/191] x86: Enable RT also on 32bit
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 98ba6c71f93b..2f4fd6a7e82e 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -27,7 +27,6 @@ config X86_64
+ # Options that are inherently 64-bit kernel only:
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
+- select ARCH_SUPPORTS_RT
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select HAVE_ARCH_SOFT_DIRTY
+ select MODULES_USE_ELF_RELA
+@@ -100,6 +99,7 @@ config X86
+ select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
+ select ARCH_SUPPORTS_LTO_CLANG if X86_64
+ select ARCH_SUPPORTS_LTO_CLANG_THIN if X86_64
++ select ARCH_SUPPORTS_RT
+ select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS
+--
+2.19.1
+
diff --git a/features/rt/x86-Support-for-lazy-preemption.patch b/features/rt/x86-Support-for-lazy-preemption.patch
new file mode 100644
index 00000000..41923c12
--- /dev/null
+++ b/features/rt/x86-Support-for-lazy-preemption.patch
@@ -0,0 +1,155 @@
+From b9863a8455b93ee2b1fa568a57a16c67fb38bea5 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 1 Nov 2012 11:03:47 +0100
+Subject: [PATCH 165/191] x86: Support for lazy preemption
+
+Implement the x86 pieces for lazy preempt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/Kconfig | 1 +
+ arch/x86/include/asm/preempt.h | 33 +++++++++++++++++++++++++++++-
+ arch/x86/include/asm/thread_info.h | 7 +++++++
+ include/linux/entry-common.h | 2 +-
+ kernel/entry/common.c | 2 +-
+ 5 files changed, 42 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index bb7b237aaa3d..98ba6c71f93b 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -221,6 +221,7 @@ config X86
+ select HAVE_PCI
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
++ select HAVE_PREEMPT_LAZY
+ select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT
+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK
+ select HAVE_REGS_AND_STACK_ACCESS_API
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index 7e0358f99d22..dde8f20cb702 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -90,17 +90,48 @@ static __always_inline void __preempt_count_sub(int val)
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
+-static __always_inline bool __preempt_count_dec_and_test(void)
++static __always_inline bool ____preempt_count_dec_and_test(void)
+ {
+ return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
+ }
+
++static __always_inline bool __preempt_count_dec_and_test(void)
++{
++ if (____preempt_count_dec_and_test())
++ return true;
++#ifdef CONFIG_PREEMPT_LAZY
++ if (preempt_count())
++ return false;
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
++ return false;
++#endif
++}
++
+ /*
+ * Returns true when we need to resched and can (barring IRQ state).
+ */
+ static __always_inline bool should_resched(int preempt_offset)
+ {
++#ifdef CONFIG_PREEMPT_LAZY
++ u32 tmp;
++ tmp = raw_cpu_read_4(__preempt_count);
++ if (tmp == preempt_offset)
++ return true;
++
++ /* preempt count == 0 ? */
++ tmp &= ~PREEMPT_NEED_RESCHED;
++ if (tmp != preempt_offset)
++ return false;
++ /* XXX PREEMPT_LOCK_OFFSET */
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
++#else
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
++#endif
+ }
+
+ #ifdef CONFIG_PREEMPTION
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index 0d751d5da702..2e62434951fa 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -57,11 +57,14 @@ struct thread_info {
+ unsigned long flags; /* low level flags */
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
+ u32 status; /* thread synchronous flags */
++ int preempt_lazy_count; /* 0 => lazy preemptable
++ <0 => BUG */
+ };
+
+ #define INIT_THREAD_INFO(tsk) \
+ { \
+ .flags = 0, \
++ .preempt_lazy_count = 0, \
+ }
+
+ #else /* !__ASSEMBLY__ */
+@@ -90,6 +93,7 @@ struct thread_info {
+ #define TIF_NOTSC 16 /* TSC is not accessible in userland */
+ #define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */
+ #define TIF_SLD 18 /* Restore split lock detection on context switch */
++#define TIF_NEED_RESCHED_LAZY 19 /* lazy rescheduling necessary */
+ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */
+ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
+ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */
+@@ -113,6 +117,7 @@ struct thread_info {
+ #define _TIF_NOTSC (1 << TIF_NOTSC)
+ #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+ #define _TIF_SLD (1 << TIF_SLD)
++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
+ #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
+ #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
+ #define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
+@@ -143,6 +148,8 @@ struct thread_info {
+
+ #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
+
++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
++
+ #define STACK_WARN (THREAD_SIZE/8)
+
+ /*
+diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
+index 883acef895bc..0aa546532b31 100644
+--- a/include/linux/entry-common.h
++++ b/include/linux/entry-common.h
+@@ -59,7 +59,7 @@
+
+ #define EXIT_TO_USER_MODE_WORK \
+ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+- _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
++ _TIF_NEED_RESCHED_MASK | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \
+ ARCH_EXIT_TO_USER_MODE_WORK)
+
+ /**
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index 162c49016f0f..16b4356494bc 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -158,7 +158,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
+
+ local_irq_enable_exit_to_user(ti_work);
+
+- if (ti_work & _TIF_NEED_RESCHED)
++ if (ti_work & _TIF_NEED_RESCHED_MASK)
+ schedule();
+
+ #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+--
+2.19.1
+
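
To summarize the decision implemented above, here is an illustrative paraphrase of the lazy-preempt check. The helper name lazy_resched_wanted() and its boolean argument are assumptions, the decrement side effect of the real __preempt_count_dec_and_test() is ignored, and the flag and counter only exist once this series is applied.

#include <linux/preempt.h>
#include <linux/thread_info.h>

/* Illustrative only: mirrors the logic of the hunk above. */
static bool lazy_resched_wanted(bool hard_need_resched)
{
	/* The regular NEED_RESCHED path always wins. */
	if (hard_need_resched)
		return true;
	/* Lazy rescheduling only when preemption is not disabled ... */
	if (preempt_count())
		return false;
	/* ... and no lazy-preempt-disabled section is active ... */
	if (current_thread_info()->preempt_lazy_count)
		return false;
	/* ... and the scheduler set the lazy request flag. */
	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
}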
diff --git a/features/rt/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch b/features/rt/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
new file mode 100644
index 00000000..2d757aa5
--- /dev/null
+++ b/features/rt/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
@@ -0,0 +1,34 @@
+From ea85707dd291bf6df28d3d791e1169e8c9317c24 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 30 Jun 2020 11:45:14 +0200
+Subject: [PATCH 164/191] x86/entry: Use should_resched() in
+ idtentry_exit_cond_resched()
+
+The TIF_NEED_RESCHED bit is inlined on x86 into the preemption counter.
+By using should_resched(0) instead of need_resched(), the same check can
+be performed against the same variable that the preceding preempt_count()
+check already reads.
+
+Use should_resched(0) instead of need_resched().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/entry/common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/entry/common.c b/kernel/entry/common.c
+index 2a7bcc744033..162c49016f0f 100644
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -396,7 +396,7 @@ void irqentry_exit_cond_resched(void)
+ rcu_irq_exit_check_preempt();
+ if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
+ WARN_ON_ONCE(!on_thread_stack());
+- if (need_resched())
++ if (should_resched(0))
+ preempt_schedule_irq();
+ }
+ }
+--
+2.19.1
+
diff --git a/features/rt/x86-kvm-Require-const-tsc-for-RT.patch b/features/rt/x86-kvm-Require-const-tsc-for-RT.patch
new file mode 100644
index 00000000..0b9a6b96
--- /dev/null
+++ b/features/rt/x86-kvm-Require-const-tsc-for-RT.patch
@@ -0,0 +1,37 @@
+From bc5cb37aa8e610d93aada2612abf77e85d034c75 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 6 Nov 2011 12:26:18 +0100
+Subject: [PATCH 117/191] x86: kvm Require const tsc for RT
+
+A non-constant TSC is a nightmare on bare metal already, but with
+virtualization it becomes a complete disaster because the workarounds
+are horrible latency-wise. This is also a prerequisite for running RT in
+a guest on top of an RT host.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kvm/x86.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 47e021bdcc94..891c3239e2b0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7979,6 +7979,14 @@ int kvm_arch_init(void *opaque)
+ goto out;
+ }
+
++#ifdef CONFIG_PREEMPT_RT
++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
++ pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n");
++ r = -EOPNOTSUPP;
++ goto out;
++ }
++#endif
++
+ r = -ENOMEM;
+ x86_fpu_cache = kmem_cache_create("x86_fpu", sizeof(struct fpu),
+ __alignof__(struct fpu), SLAB_ACCOUNT,
+--
+2.19.1
+
diff --git a/features/rt/x86-stackprotector-Avoid-random-pool-on-rt.patch b/features/rt/x86-stackprotector-Avoid-random-pool-on-rt.patch
new file mode 100644
index 00000000..6bb99f71
--- /dev/null
+++ b/features/rt/x86-stackprotector-Avoid-random-pool-on-rt.patch
@@ -0,0 +1,50 @@
+From f40609f2fbc20bb8d816c6d6a193f66f3d49d267 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 16 Dec 2010 14:25:18 +0100
+Subject: [PATCH 148/191] x86: stackprotector: Avoid random pool on rt
+
+CPU bringup calls into the random pool to initialize the stack
+canary. During boot that works nicely even on RT as the might_sleep()
+checks are disabled. During CPU hotplug the might_sleep() checks
+trigger. Making the locks in random raw is a major PITA, so avoiding the
+call on RT is the only sensible solution. This is basically the same
+randomness which we get during boot where the random pool has no
+entropy and we rely on the TSC randomness.
+
+Reported-by: Carsten Emde <carsten.emde@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/include/asm/stackprotector.h | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
+index 7fb482f0f25b..3df0a95c9e13 100644
+--- a/arch/x86/include/asm/stackprotector.h
++++ b/arch/x86/include/asm/stackprotector.h
+@@ -65,7 +65,7 @@
+ */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+- u64 canary;
++ u64 canary = 0;
+ u64 tsc;
+
+ #ifdef CONFIG_X86_64
+@@ -76,8 +76,14 @@ static __always_inline void boot_init_stack_canary(void)
+ * of randomness. The TSC only matters for very early init,
+ * there it already has some randomness on most systems. Later
+ * on during the bootup the random pool has true entropy too.
++ * For preempt-rt we need to weaken the randomness a bit, as
++ * we can't call into the random generator from atomic context
++ * due to locking constraints. We just leave canary
++ * uninitialized and use the TSC based randomness on top of it.
+ */
++#ifndef CONFIG_PREEMPT_RT
+ get_random_bytes(&canary, sizeof(canary));
++#endif
+ tsc = rdtsc();
+ canary += tsc + (tsc << 32UL);
+ canary &= CANARY_MASK;
+--
+2.19.1
+
diff --git a/features/rt/xfrm-Use-sequence-counter-with-associated-spinlock.patch b/features/rt/xfrm-Use-sequence-counter-with-associated-spinlock.patch
new file mode 100644
index 00000000..78fb2552
--- /dev/null
+++ b/features/rt/xfrm-Use-sequence-counter-with-associated-spinlock.patch
@@ -0,0 +1,65 @@
+From bdb062ede4003ee86bd6c2665b92856d69b14c6a Mon Sep 17 00:00:00 2001
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Wed, 10 Jun 2020 12:53:22 +0200
+Subject: [PATCH 087/191] xfrm: Use sequence counter with associated spinlock
+
+A sequence counter write side critical section must be protected by some
+form of locking to serialize writers. A plain seqcount_t does not
+contain the information of which lock must be held when entering a write
+side critical section.
+
+Use the new seqcount_spinlock_t data type, which allows to associate a
+spinlock with the sequence counter. This enables lockdep to verify that
+the spinlock used for writer serialization is held when the write side
+critical section is entered.
+
+If lockdep is disabled this lock association is compiled out and has
+neither storage size nor runtime overhead.
+
+Upstream-status: The xfrm locking used for seqcount writer serialization
+appears to be broken. If that's the case, a proper fix will need to be
+submitted upstream. (e.g. make the seqcount per network namespace?)
+
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/xfrm/xfrm_state.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index d01ca1a18418..14059a9051b8 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -44,7 +44,7 @@ static void xfrm_state_gc_task(struct work_struct *work);
+ */
+
+ static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
+-static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
++static __read_mostly seqcount_spinlock_t xfrm_state_hash_generation;
+ static struct kmem_cache *xfrm_state_cache __ro_after_init;
+
+ static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
+@@ -139,6 +139,11 @@ static void xfrm_hash_resize(struct work_struct *work)
+ return;
+ }
+
++ /* XXX - the locking which protects the sequence counter appears
++ * to be broken here. The sequence counter is global, but the
++ * spinlock used for the sequence counter write serialization is
++ * per network namespace...
++ */
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ write_seqcount_begin(&xfrm_state_hash_generation);
+
+@@ -2666,6 +2671,8 @@ int __net_init xfrm_state_init(struct net *net)
+ net->xfrm.state_num = 0;
+ INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
+ spin_lock_init(&net->xfrm.xfrm_state_lock);
++ seqcount_spinlock_init(&xfrm_state_hash_generation,
++ &net->xfrm.xfrm_state_lock);
+ return 0;
+
+ out_byspi:
+--
+2.19.1
+
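
For reference, the general shape of a seqcount_spinlock_t association, independent of the xfrm code. All demo_* names are assumptions for illustration: the init call records which spinlock serializes writers, and lockdep can then verify that this lock is held around the write-side critical section.

#include <linux/spinlock.h>
#include <linux/seqlock.h>

static DEFINE_SPINLOCK(demo_lock);
static seqcount_spinlock_t demo_seq;
static u64 demo_value;

static void demo_init(void)
{
	/* Associate the seqcount with the lock protecting its writers. */
	seqcount_spinlock_init(&demo_seq, &demo_lock);
}

static void demo_update(u64 v)
{
	spin_lock_bh(&demo_lock);
	write_seqcount_begin(&demo_seq);	/* lockdep: demo_lock must be held */
	demo_value = v;
	write_seqcount_end(&demo_seq);
	spin_unlock_bh(&demo_lock);
}

static u64 demo_read(void)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqcount_begin(&demo_seq);
		v = demo_value;
	} while (read_seqcount_retry(&demo_seq, seq));

	return v;
}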