Diffstat (limited to 'features/rt/rt-Add-local-irq-locks.patch')
-rw-r--r--  features/rt/rt-Add-local-irq-locks.patch | 210
1 file changed, 210 insertions(+), 0 deletions(-)
diff --git a/features/rt/rt-Add-local-irq-locks.patch b/features/rt/rt-Add-local-irq-locks.patch
new file mode 100644
index 00000000..0ffdcac6
--- /dev/null
+++ b/features/rt/rt-Add-local-irq-locks.patch
@@ -0,0 +1,210 @@
+From 751ab616fb58a462f6d5a1ac6fc31662d58e33e4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 20 Jun 2011 09:03:47 +0200
+Subject: [PATCH 099/191] rt: Add local irq locks
+
+Introduce locallock. For !RT this maps to preempt_disable()/
+local_irq_disable(), so not much changes. For RT this will map to a
+spinlock. This makes preemption possible, and the locked "resource"
+gets the lockdep annotation it wouldn't have otherwise. The locks are
+recursive for owner == current. Also, all locks use migrate_disable(),
+which ensures that the task is not migrated to another CPU while the
+lock is held and the owner is preempted.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/local_lock_internal.h | 129 +++++++++++++++++++++++++---
+ 1 file changed, 115 insertions(+), 14 deletions(-)
+
+diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
+index ded90b097e6e..7162dcd0a847 100644
+--- a/include/linux/local_lock_internal.h
++++ b/include/linux/local_lock_internal.h
+@@ -7,36 +7,94 @@
+ #include <linux/lockdep.h>
+
+ typedef struct {
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
++#ifdef CONFIG_PREEMPT_RT
++ spinlock_t lock;
++ struct task_struct *owner;
++ int nestcnt;
++
++#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
+ struct lockdep_map dep_map;
+ struct task_struct *owner;
+ #endif
+ } local_lock_t;
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LL_DEP_MAP_INIT(lockname) \
++#ifdef CONFIG_PREEMPT_RT
++
++#define INIT_LOCAL_LOCK(lockname) { \
++ __SPIN_LOCK_UNLOCKED((lockname).lock), \
++ .owner = NULL, \
++ .nestcnt = 0, \
++ }
++#else
++
++# ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define LL_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+- .lock_type = LD_LOCK_PERCPU, \
++ .lock_type = LD_LOCK_PERCPU, \
+ }
+-#else
+-# define LL_DEP_MAP_INIT(lockname)
+-#endif
++# else
++# define LL_DEP_MAP_INIT(lockname)
++# endif
+
+ #define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) }
+
+-#define __local_lock_init(lock) \
++#endif
++
++#ifdef CONFIG_PREEMPT_RT
++
++static inline void ___local_lock_init(local_lock_t *l)
++{
++ l->owner = NULL;
++ l->nestcnt = 0;
++}
++
++#define __local_lock_init(l) \
++do { \
++ spin_lock_init(&(l)->lock); \
++ ___local_lock_init(l); \
++} while (0)
++
++#else
++
++#define __local_lock_init(l) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+- debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+- lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
++ debug_check_no_locks_freed((void *)l, sizeof(*l)); \
++ lockdep_init_map_type(&(l)->dep_map, #l, &__key, 0, \
+ LD_WAIT_CONFIG, LD_WAIT_INV, \
+ LD_LOCK_PERCPU); \
+ } while (0)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT
++
++static inline void local_lock_acquire(local_lock_t *l)
++{
++ if (l->owner != current) {
++ spin_lock(&l->lock);
++ DEBUG_LOCKS_WARN_ON(l->owner);
++ DEBUG_LOCKS_WARN_ON(l->nestcnt);
++ l->owner = current;
++ }
++ l->nestcnt++;
++}
++
++static inline void local_lock_release(local_lock_t *l)
++{
++ DEBUG_LOCKS_WARN_ON(l->nestcnt == 0);
++ DEBUG_LOCKS_WARN_ON(l->owner != current);
++ if (--l->nestcnt)
++ return;
++
++ l->owner = NULL;
++ spin_unlock(&l->lock);
++}
++
++#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ static inline void local_lock_acquire(local_lock_t *l)
+ {
+ lock_map_acquire(&l->dep_map);
+@@ -56,21 +114,50 @@ static inline void local_lock_acquire(local_lock_t *l) { }
+ static inline void local_lock_release(local_lock_t *l) { }
+ #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+
++#ifdef CONFIG_PREEMPT_RT
++
+ #define __local_lock(lock) \
+ do { \
+- preempt_disable(); \
++ migrate_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
++#define __local_unlock(lock) \
++ do { \
++ local_lock_release(this_cpu_ptr(lock)); \
++ migrate_enable(); \
++ } while (0)
++
+ #define __local_lock_irq(lock) \
+ do { \
+- local_irq_disable(); \
++ migrate_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+ #define __local_lock_irqsave(lock, flags) \
+ do { \
+- local_irq_save(flags); \
++ migrate_disable(); \
++ flags = 0; \
++ local_lock_acquire(this_cpu_ptr(lock)); \
++ } while (0)
++
++#define __local_unlock_irq(lock) \
++ do { \
++ local_lock_release(this_cpu_ptr(lock)); \
++ migrate_enable(); \
++ } while (0)
++
++#define __local_unlock_irqrestore(lock, flags) \
++ do { \
++ local_lock_release(this_cpu_ptr(lock)); \
++ migrate_enable(); \
++ } while (0)
++
++#else
++
++#define __local_lock(lock) \
++ do { \
++ preempt_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+@@ -80,6 +167,18 @@ static inline void local_lock_release(local_lock_t *l) { }
+ preempt_enable(); \
+ } while (0)
+
++#define __local_lock_irq(lock) \
++ do { \
++ local_irq_disable(); \
++ local_lock_acquire(this_cpu_ptr(lock)); \
++ } while (0)
++
++#define __local_lock_irqsave(lock, flags) \
++ do { \
++ local_irq_save(flags); \
++ local_lock_acquire(this_cpu_ptr(lock)); \
++ } while (0)
++
+ #define __local_unlock_irq(lock) \
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+@@ -91,3 +190,5 @@ static inline void local_lock_release(local_lock_t *l) { }
+ local_lock_release(this_cpu_ptr(lock)); \
+ local_irq_restore(flags); \
+ } while (0)
++
++#endif
+--
+2.19.1
+
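For context, a minimal usage sketch of the local_lock API this patch reworks, modeled on the per-CPU pattern used elsewhere in the kernel (e.g. mm/swap.c); the dummy_data and dummy_update names are hypothetical and not part of the patch:

/*
 * Editor's illustration, not part of the patch: protecting a per-CPU
 * resource with a local_lock_t. Names are hypothetical.
 */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct dummy_data {
	local_lock_t lock;
	int count;
};

static DEFINE_PER_CPU(struct dummy_data, dummy_data) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void dummy_update(void)
{
	unsigned long flags;

	/*
	 * On !RT this disables interrupts on the local CPU (plus the
	 * debug-only lockdep annotation). On RT, with this patch, it
	 * calls migrate_disable() and takes the per-CPU spinlock
	 * instead, so the critical section stays preemptible.
	 */
	local_lock_irqsave(&dummy_data.lock, flags);
	this_cpu_inc(dummy_data.count);
	local_unlock_irqrestore(&dummy_data.lock, flags);
}

Note that the RT variant of __local_lock_irqsave() sets flags to 0 rather than saving interrupt state: interrupts are never disabled on RT, so local_unlock_irqrestore() has nothing to restore. The owner == current check makes the lock recursive, so code that would have nested via interrupt disabling on !RT can safely re-acquire the lock when it runs in task context on RT.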