Diffstat (limited to 'features/rt/locking-rtmutex-wire-up-RT-s-locking.patch')
-rw-r--r--  features/rt/locking-rtmutex-wire-up-RT-s-locking.patch  346
1 file changed, 346 insertions(+), 0 deletions(-)
diff --git a/features/rt/locking-rtmutex-wire-up-RT-s-locking.patch b/features/rt/locking-rtmutex-wire-up-RT-s-locking.patch
new file mode 100644
index 00000000..c6b001b5
--- /dev/null
+++ b/features/rt/locking-rtmutex-wire-up-RT-s-locking.patch
@@ -0,0 +1,346 @@
+From cd8326d4ea23e407e1168cc2c6f1005d5c0ec743 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 12 Oct 2017 17:31:14 +0200
+Subject: [PATCH 080/191] locking/rtmutex: wire up RT's locking
+
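+On a PREEMPT_RT enabled kernel the regular mutex, rwsem and rwlock
+implementations are substituted by rtmutex based variants. Wire them
+up: let the lock headers pull in the RT specific headers instead of
+the regular implementations, build the RT substitution code in
+kernel/locking instead of the replaced files, make PREEMPT_RT select
+RT_MUTEXES, and compile out the rwlock parts of the spinlock and
+spinlock debug code which are replaced on RT. The raw_spinlock
+machinery is not affected and keeps its spinning semantics.
+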
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/mutex.h | 25 +++++++++++++++----------
+ include/linux/rwsem.h | 12 ++++++++++++
+ include/linux/spinlock.h | 12 +++++++++++-
+ include/linux/spinlock_api_smp.h | 4 +++-
+ include/linux/spinlock_types.h | 11 ++++++++---
+ include/linux/spinlock_types_up.h | 2 +-
+ kernel/Kconfig.preempt | 1 +
+ kernel/locking/Makefile | 10 +++++++---
+ kernel/locking/rwsem.c | 6 ++++++
+ kernel/locking/spinlock.c | 7 +++++++
+ kernel/locking/spinlock_debug.c | 5 +++++
+ 11 files changed, 76 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 0cd631a19727..50c873744ab0 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -22,6 +22,20 @@
+
+ struct ww_acquire_ctx;
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
++ , .dep_map = { \
++ .name = #lockname, \
++ .wait_type_inner = LD_WAIT_SLEEP, \
++ }
++#else
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT
++# include <linux/mutex_rt.h>
++#else
++
+ /*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+@@ -119,16 +133,6 @@ do { \
+ __mutex_init((mutex), #mutex, &__key); \
+ } while (0)
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+- , .dep_map = { \
+- .name = #lockname, \
+- .wait_type_inner = LD_WAIT_SLEEP, \
+- }
+-#else
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+-#endif
+-
+ #define __MUTEX_INITIALIZER(lockname) \
+ { .owner = ATOMIC_LONG_INIT(0) \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+@@ -199,4 +203,5 @@ extern void mutex_unlock(struct mutex *lock);
+
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
++#endif /* !PREEMPT_RT */
+ #endif /* __LINUX_MUTEX_H */
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index 4c715be48717..9323af8a9244 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -16,6 +16,11 @@
+ #include <linux/spinlock.h>
+ #include <linux/atomic.h>
+ #include <linux/err.h>
++
++#ifdef CONFIG_PREEMPT_RT
++#include <linux/rwsem-rt.h>
++#else /* PREEMPT_RT */
++
+ #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+ #include <linux/osq_lock.h>
+ #endif
+@@ -119,6 +124,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
+ return !list_empty(&sem->wait_list);
+ }
+
++#endif /* !PREEMPT_RT */
++
++/*
++ * The functions below are the same for all rwsem implementations including
++ * the RT specific variant.
++ */
++
+ /*
+ * lock for reading
+ */
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 79897841a2cc..c3c70291b46c 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -309,7 +309,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ })
+
+ /* Include rwlock functions */
+-#include <linux/rwlock.h>
++#ifdef CONFIG_PREEMPT_RT
++# include <linux/rwlock_rt.h>
++#else
++# include <linux/rwlock.h>
++#endif
+
+ /*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+@@ -320,6 +324,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
+ # include <linux/spinlock_api_up.h>
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT
++# include <linux/spinlock_rt.h>
++#else /* PREEMPT_RT */
++
+ /*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+@@ -454,6 +462,8 @@ static __always_inline int spin_is_contended(spinlock_t *lock)
+
+ #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock)
+
++#endif /* !PREEMPT_RT */
++
+ /*
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
+diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
+index 19a9be9d97ee..da38149f2843 100644
+--- a/include/linux/spinlock_api_smp.h
++++ b/include/linux/spinlock_api_smp.h
+@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
+ return 0;
+ }
+
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT
++# include <linux/rwlock_api_smp.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
+index 5c8664d57fb8..8d896d3e1a01 100644
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
+@@ -11,8 +11,13 @@
+
+ #include <linux/spinlock_types_raw.h>
+
+-#include <linux/spinlock_types_nort.h>
+-
+-#include <linux/rwlock_types.h>
++#ifndef CONFIG_PREEMPT_RT
++# include <linux/spinlock_types_nort.h>
++# include <linux/rwlock_types.h>
++#else
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_TYPES_H */
+diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
+index c09b6407ae1b..d9b371fa13e0 100644
+--- a/include/linux/spinlock_types_up.h
++++ b/include/linux/spinlock_types_up.h
+@@ -1,7 +1,7 @@
+ #ifndef __LINUX_SPINLOCK_TYPES_UP_H
+ #define __LINUX_SPINLOCK_TYPES_UP_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
++#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__LINUX_RT_MUTEX_H)
+ # error "please don't include this file directly"
+ #endif
+
+diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
+index 416017301660..c8e516f6db20 100644
+--- a/kernel/Kconfig.preempt
++++ b/kernel/Kconfig.preempt
+@@ -60,6 +60,7 @@ config PREEMPT_RT
+ bool "Fully Preemptible Kernel (Real-Time)"
+ depends on EXPERT && ARCH_SUPPORTS_RT
+ select PREEMPTION
++ select RT_MUTEXES
+ help
+ This option turns the kernel into a real-time kernel by replacing
+ various locking primitives (spinlocks, rwlocks, etc.) with
+diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
+index 8838f1d7c4a2..b4bdab93e2fc 100644
+--- a/kernel/locking/Makefile
++++ b/kernel/locking/Makefile
+@@ -3,7 +3,7 @@
+ # and is generally not a function of system call inputs.
+ KCOV_INSTRUMENT := n
+
+-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
++obj-y += semaphore.o rwsem.o percpu-rwsem.o
+
+ # Avoid recursion lockdep -> KCSAN -> ... -> lockdep.
+ KCSAN_SANITIZE_lockdep.o := n
+@@ -16,19 +16,23 @@ CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
+ endif
+
+ obj-$(CONFIG_DEBUG_IRQFLAGS) += irqflag-debug.o
+-obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+ endif
+ obj-$(CONFIG_SMP) += spinlock.o
+-obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
+ obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+ obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
+ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
++ifneq ($(CONFIG_PREEMPT_RT),y)
++obj-y += mutex.o
++obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
++obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
++endif
++obj-$(CONFIG_PREEMPT_RT) += mutex-rt.o rwsem-rt.o rwlock-rt.o
+ obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
+ obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+ obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index abba5df50006..bed9a5e32a16 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -28,6 +28,7 @@
+ #include <linux/rwsem.h>
+ #include <linux/atomic.h>
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "lock_events.h"
+
+ /*
+@@ -1343,6 +1344,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
+ if (tmp & RWSEM_FLAG_WAITERS)
+ rwsem_downgrade_wake(sem);
+ }
++#endif
+
+ /*
+ * lock for reading
+@@ -1506,7 +1508,9 @@ void down_read_non_owner(struct rw_semaphore *sem)
+ {
+ might_sleep();
+ __down_read(sem);
++#ifndef CONFIG_PREEMPT_RT
+ __rwsem_set_reader_owned(sem, NULL);
++#endif
+ }
+ EXPORT_SYMBOL(down_read_non_owner);
+
+@@ -1535,7 +1539,9 @@ EXPORT_SYMBOL(down_write_killable_nested);
+
+ void up_read_non_owner(struct rw_semaphore *sem)
+ {
++#ifndef CONFIG_PREEMPT_RT
+ DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
++#endif
+ __up_read(sem);
+ }
+ EXPORT_SYMBOL(up_read_non_owner);
+diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
+index 0ff08380f531..45445a2f1799 100644
+--- a/kernel/locking/spinlock.c
++++ b/kernel/locking/spinlock.c
+@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
+ * __[spin|read|write]_lock_bh()
+ */
+ BUILD_LOCK_OPS(spin, raw_spinlock);
++
++#ifndef CONFIG_PREEMPT_RT
+ BUILD_LOCK_OPS(read, rwlock);
+ BUILD_LOCK_OPS(write, rwlock);
++#endif
+
+ #endif
+
+@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
+ EXPORT_SYMBOL(_raw_spin_unlock_bh);
+ #endif
+
++#ifndef CONFIG_PREEMPT_RT
++
+ #ifndef CONFIG_INLINE_READ_TRYLOCK
+ int __lockfunc _raw_read_trylock(rwlock_t *lock)
+ {
+@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
+ EXPORT_SYMBOL(_raw_write_unlock_bh);
+ #endif
+
++#endif /* !PREEMPT_RT */
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
+index b9d93087ee66..72e306e0e8a3 100644
+--- a/kernel/locking/spinlock_debug.c
++++ b/kernel/locking/spinlock_debug.c
+@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+
+ EXPORT_SYMBOL(__raw_spin_lock_init);
+
++#ifndef CONFIG_PREEMPT_RT
+ void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key)
+ {
+@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
+ }
+
+ EXPORT_SYMBOL(__rwlock_init);
++#endif
+
+ static void spin_dump(raw_spinlock_t *lock, const char *msg)
+ {
+@@ -139,6 +141,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock)
+ arch_spin_unlock(&lock->raw_lock);
+ }
+
++#ifndef CONFIG_PREEMPT_RT
+ static void rwlock_bug(rwlock_t *lock, const char *msg)
+ {
+ if (!debug_locks_off())
+@@ -228,3 +231,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+ debug_write_unlock(lock);
+ arch_write_unlock(&lock->raw_lock);
+ }
++
++#endif
+--
+2.19.1
+
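
A minimal sketch (not part of the patch) of what the wiring above means
for lock users: spinlock_t and struct mutex keep their regular API but
become sleeping, rtmutex based locks on PREEMPT_RT, while raw_spinlock_t
always spins. The identifiers used are standard kernel API; the demo
function itself is hypothetical.

    #include <linux/spinlock.h>
    #include <linux/mutex.h>

    static DEFINE_SPINLOCK(demo_lock);         /* sleeps on PREEMPT_RT */
    static DEFINE_RAW_SPINLOCK(demo_raw_lock); /* always spins */
    static DEFINE_MUTEX(demo_mutex);           /* rtmutex based on RT */

    static int demo_counter;

    static void demo(void)
    {
    	/*
    	 * Same API on both configurations, but on PREEMPT_RT
    	 * spin_lock() may sleep, so it must not be taken from
    	 * hard interrupt or preempt-disabled context.
    	 */
    	spin_lock(&demo_lock);
    	demo_counter++;
    	spin_unlock(&demo_lock);

    	/*
    	 * raw_spinlock_t keeps the non-sleeping semantics on RT
    	 * and is the choice for truly atomic sections.
    	 */
    	raw_spin_lock(&demo_raw_lock);
    	demo_counter++;
    	raw_spin_unlock(&demo_raw_lock);

    	/* Substituted via mutex_rt.h on RT, same caller API. */
    	mutex_lock(&demo_mutex);
    	demo_counter++;
    	mutex_unlock(&demo_mutex);
    }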