Diffstat (limited to 'kernel/locking/lglock.c')
-rw-r--r--  kernel/locking/lglock.c |  91
1 file changed, 67 insertions(+), 24 deletions(-)
diff --git a/kernel/locking/lglock.c b/kernel/locking/lglock.c
index 951cfcd10b4a..57e0ea72c28a 100644
--- a/kernel/locking/lglock.c
+++ b/kernel/locking/lglock.c
@@ -4,6 +4,15 @@
#include <linux/cpu.h>
#include <linux/string.h>
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define lg_lock_ptr arch_spinlock_t
+# define lg_do_lock(l) arch_spin_lock(l)
+# define lg_do_unlock(l) arch_spin_unlock(l)
+#else
+# define lg_lock_ptr struct rt_mutex
+# define lg_do_lock(l) __rt_spin_lock__no_mg(l)
+# define lg_do_unlock(l) __rt_spin_unlock(l)
+#endif
/*
* Note there is no uninit, so lglocks cannot be defined in
* modules (but it's fine to use them from there)
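
The block added above funnels every lock and unlock through three macros, so each call site compiles to a raw arch_spinlock_t on non-RT kernels and to a sleeping struct rt_mutex on PREEMPT_RT_FULL. A minimal userspace sketch of the same compile-time substitution pattern, with pthread primitives as stand-ins (the USE_SLEEPING_LOCK switch and my_* names are illustrative, not from the patch):

#include <pthread.h>

#ifndef USE_SLEEPING_LOCK
# define my_lock_t       pthread_spinlock_t   /* busy-waiting, like arch_spinlock_t */
# define my_do_lock(l)   pthread_spin_lock(l)
# define my_do_unlock(l) pthread_spin_unlock(l)
#else
# define my_lock_t       pthread_mutex_t      /* sleeping, like struct rt_mutex */
# define my_do_lock(l)   pthread_mutex_lock(l)
# define my_do_unlock(l) pthread_mutex_unlock(l)
#endif

static my_lock_t my_lock;

static void my_lock_init(void)
{
#ifndef USE_SLEEPING_LOCK
        pthread_spin_init(&my_lock, PTHREAD_PROCESS_PRIVATE);
#else
        pthread_mutex_init(&my_lock, NULL);
#endif
}

/* Callers never see which primitive was selected. */
static void my_critical(void (*work)(void))
{
        my_do_lock(&my_lock);
        work();
        my_do_unlock(&my_lock);
}
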
@@ -12,51 +21,60 @@
void lg_lock_init(struct lglock *lg, char *name)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
+
+ rt_mutex_init(lock);
+ }
+#endif
LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
}
EXPORT_SYMBOL(lg_lock_init);
void lg_local_lock(struct lglock *lg)
{
- arch_spinlock_t *lock;
+ lg_lock_ptr *lock;
- preempt_disable();
+ migrate_disable();
lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
- arch_spin_lock(lock);
+ lg_do_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock);
void lg_local_unlock(struct lglock *lg)
{
- arch_spinlock_t *lock;
+ lg_lock_ptr *lock;
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
- arch_spin_unlock(lock);
- preempt_enable();
+ lg_do_unlock(lock);
+ migrate_enable();
}
EXPORT_SYMBOL(lg_local_unlock);
void lg_local_lock_cpu(struct lglock *lg, int cpu)
{
- arch_spinlock_t *lock;
+ lg_lock_ptr *lock;
- preempt_disable();
+ preempt_disable_nort();
lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
- arch_spin_lock(lock);
+ lg_do_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock_cpu);
void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
- arch_spinlock_t *lock;
+ lg_lock_ptr *lock;
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
- arch_spin_unlock(lock);
- preempt_enable();
+ lg_do_unlock(lock);
+ preempt_enable_nort();
}
EXPORT_SYMBOL(lg_local_unlock_cpu);
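
Two conversions are in play above: the this-CPU variants trade preempt_disable() for migrate_disable(), which pins the task to its current CPU so this_cpu_ptr() stays stable but leaves it preemptible (a sleeping rt_mutex must not be taken with preemption disabled), while the explicit-cpu variants use the _nort() helpers, which still disable preemption on non-RT builds and compile away on RT. For context, a typical lglock caller follows this sketch (the example_* names and per-CPU list are hypothetical; DEFINE_LGLOCK and the lg_* calls are the API this file implements):

#include <linux/init.h>
#include <linux/lglock.h>
#include <linux/list.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU list protected by an lglock. */
static DEFINE_LGLOCK(example_lock);
static DEFINE_PER_CPU(struct list_head, example_list);

static int __init example_init(void)
{
        int cpu;

        lg_lock_init(&example_lock, "example_lock");
        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(per_cpu_ptr(&example_list, cpu));
        return 0;
}
core_initcall(example_init);

/* Fast path: touch only this CPU's list under this CPU's lock. */
static void example_add(struct list_head *entry)
{
        lg_local_lock(&example_lock);
        list_add(entry, this_cpu_ptr(&example_list));
        lg_local_unlock(&example_lock);
}

/* Slow path: walk every CPU's list under the all-CPU lock. */
static int example_count(void)
{
        struct list_head *pos;
        int cpu, n = 0;

        lg_global_lock(&example_lock);
        for_each_possible_cpu(cpu)
                list_for_each(pos, per_cpu_ptr(&example_list, cpu))
                        n++;
        lg_global_unlock(&example_lock);
        return n;
}
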
@@ -68,30 +86,30 @@ void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
if (cpu2 < cpu1)
swap(cpu1, cpu2);
- preempt_disable();
+ preempt_disable_nort();
lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
- arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
+ lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
+ lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
}
void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
{
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
- preempt_enable();
+ lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
+ lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
+ preempt_enable_nort();
}
void lg_global_lock(struct lglock *lg)
{
int i;
- preempt_disable();
+ preempt_disable_nort();
lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
for_each_possible_cpu(i) {
- arch_spinlock_t *lock;
+ lg_lock_ptr *lock;
lock = per_cpu_ptr(lg->lock, i);
- arch_spin_lock(lock);
+ lg_do_lock(lock);
}
}
EXPORT_SYMBOL(lg_global_lock);
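
lg_double_lock() above keeps the existing deadlock-avoidance rule: the lower-numbered CPU's lock is always taken first, so two tasks double-locking the same pair can never end up holding one lock each while waiting for the other (the classic ABBA deadlock). The ordering discipline in isolation, as a standalone sketch (names hypothetical):

#include <pthread.h>

#define NCPU 8

static pthread_mutex_t cpu_lock[NCPU];   /* one lock per "CPU" */

static void locks_init(void)
{
        for (int i = 0; i < NCPU; i++)
                pthread_mutex_init(&cpu_lock[i], NULL);
}

/* A global order (lowest index first) rules out ABBA deadlock. */
static void double_lock(int a, int b)
{
        if (b < a) { int t = a; a = b; b = t; }   /* cf. swap(cpu1, cpu2) */
        pthread_mutex_lock(&cpu_lock[a]);
        pthread_mutex_lock(&cpu_lock[b]);
}

static void double_unlock(int a, int b)
{
        /* Unlock order is irrelevant for deadlock avoidance. */
        pthread_mutex_unlock(&cpu_lock[a]);
        pthread_mutex_unlock(&cpu_lock[b]);
}
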
@@ -102,10 +120,35 @@ void lg_global_unlock(struct lglock *lg)
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
for_each_possible_cpu(i) {
- arch_spinlock_t *lock;
+ lg_lock_ptr *lock;
lock = per_cpu_ptr(lg->lock, i);
- arch_spin_unlock(lock);
+ lg_do_unlock(lock);
}
- preempt_enable();
+ preempt_enable_nort();
}
EXPORT_SYMBOL(lg_global_unlock);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * HACK: If you use this, you get to keep the pieces.
+ * Used in queue_stop_cpus_work() when stop machinery
+ * is called from inactive CPU, so we can't schedule.
+ */
+# define lg_do_trylock_relax(l) \
+ do { \
+ while (!__rt_spin_trylock(l)) \
+ cpu_relax(); \
+ } while (0)
+
+void lg_global_trylock_relax(struct lglock *lg)
+{
+ int i;
+
+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+ for_each_possible_cpu(i) {
+ lg_lock_ptr *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+ lg_do_trylock_relax(lock);
+ }
+}
+#endif
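
lg_global_trylock_relax() exists because the rt_mutex slow path schedules, which the stop-machine context named in the comment cannot tolerate; spinning on a trylock avoids the scheduler at the cost of burning cycles until each per-CPU lock falls free. The same trylock-and-relax shape in portable C11, with a toy test-and-set lock standing in for the kernel primitives (illustrative only, not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_flag held; } toy_lock_t;   /* init with ATOMIC_FLAG_INIT */

/* Never blocks: succeeds only if the flag was clear. */
static bool toy_trylock(toy_lock_t *l)
{
        return !atomic_flag_test_and_set_explicit(&l->held,
                                                  memory_order_acquire);
}

/* The lg_do_trylock_relax() shape: poll rather than sleep. The pause
 * instruction plays the role of cpu_relax(); on other architectures
 * the bare loop still makes progress, just less politely. */
static void toy_lock_relax(toy_lock_t *l)
{
        while (!toy_trylock(l)) {
#if defined(__x86_64__) || defined(__i386__)
                __builtin_ia32_pause();
#endif
        }
}

static void toy_unlock(toy_lock_t *l)
{
        atomic_flag_clear_explicit(&l->held, memory_order_release);
}

Note that no trylock-side unlock is added above: in the RT tree, a global lock taken this way is dropped with the ordinary lg_global_unlock().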