/* See include/linux/lglock.h for description */
#include <linux/module.h>
#include <linux/lglock.h>
#include <linux/cpu.h>
#include <linux/string.h>

#ifndef CONFIG_PREEMPT_RT_FULL
# define lg_lock_ptr		arch_spinlock_t
# define lg_do_lock(l)		arch_spin_lock(l)
# define lg_do_unlock(l)	arch_spin_unlock(l)
#else
# define lg_lock_ptr		struct rt_mutex
# define lg_do_lock(l)		__rt_spin_lock__no_mg(l)
# define lg_do_unlock(l)	__rt_spin_unlock(l)
#endif
/*
 * Note there is no uninit, so lglocks cannot be defined in
 * modules (though it is fine to use them from module code).
 * One could be added; it would just have to undo lg_lock_init().
 */

void lg_lock_init(struct lglock *lg, char *name)
{
#ifdef CONFIG_PREEMPT_RT_FULL
	int i;

	for_each_possible_cpu(i) {
		struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);

		rt_mutex_init(lock);
	}
#endif
	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
}
EXPORT_SYMBOL(lg_lock_init);
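
/*
 * Example usage (a minimal sketch; "files_lglock" mirrors the classic
 * user in fs/file_table.c, but any name declared with DEFINE_LGLOCK()
 * works the same way):
 *
 *	DEFINE_LGLOCK(files_lglock);
 *
 *	static int __init files_init(void)
 *	{
 *		lg_lock_init(&files_lglock, "files_lglock");
 *		return 0;
 *	}
 *
 * DEFINE_LGLOCK() sets up the per-CPU lock storage statically; on
 * PREEMPT_RT_FULL the rt_mutex instances still need the runtime
 * rt_mutex_init() pass that lg_lock_init() performs above.
 */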

void lg_local_lock(struct lglock *lg)
{
	lg_lock_ptr *lock;

	migrate_disable();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lock = this_cpu_ptr(lg->lock);
	lg_do_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock);

void lg_local_unlock(struct lglock *lg)
{
	lg_lock_ptr *lock;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lock = this_cpu_ptr(lg->lock);
	lg_do_unlock(lock);
	migrate_enable();
}
EXPORT_SYMBOL(lg_local_unlock);
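
/*
 * Typical reader-side pattern (an illustrative sketch; the per-CPU
 * list head "files_list" is hypothetical):
 *
 *	lg_local_lock(&files_lglock);
 *	list_add(&file->f_list, this_cpu_ptr(&files_list));
 *	lg_local_unlock(&files_lglock);
 *
 * Only the current CPU's lock is taken, so lg_local_lock() callers on
 * other CPUs do not contend with each other; lg_global_lock() below is
 * what excludes all of them at once.
 */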

void lg_local_lock_cpu(struct lglock *lg, int cpu)
{
	lg_lock_ptr *lock;

	preempt_disable_nort();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	lg_do_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock_cpu);

void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
	lg_lock_ptr *lock;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	lg_do_unlock(lock);
	preempt_enable_nort();
}
EXPORT_SYMBOL(lg_local_unlock_cpu);
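
/*
 * The _cpu variants take an explicit CPU's lock, e.g. (hypothetical
 * hotplug cleanup) to drain a dead CPU's per-CPU list from another CPU:
 *
 *	lg_local_lock_cpu(&files_lglock, cpu);
 *	... move entries off per_cpu_ptr(&files_list, cpu) ...
 *	lg_local_unlock_cpu(&files_lglock, cpu);
 */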

void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
{
	BUG_ON(cpu1 == cpu2);

	/* lock in cpu order, just like lg_global_lock */
	if (cpu2 < cpu1)
		swap(cpu1, cpu2);

	preempt_disable_nort();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
	lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
}

void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
{
	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
	lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
	preempt_enable_nort();
}
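
/*
 * Example (a sketch modelled on the stop_machine caller, which takes
 * two specific CPUs' locks to queue paired work items atomically with
 * respect to lg_global_lock()):
 *
 *	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
 *	... queue one cpu_stop work item on each CPU ...
 *	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
 *
 * lg_double_lock() sorts the two CPU ids itself, so callers may pass
 * them in either order; passing the same CPU twice is a bug (BUG_ON).
 * Unlock order does not matter, hence no swap in lg_double_unlock().
 */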

void lg_global_lock(struct lglock *lg)
{
	int i;

	preempt_disable_nort();
	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	for_each_possible_cpu(i) {
		lg_lock_ptr *lock;
		lock = per_cpu_ptr(lg->lock, i);
		lg_do_lock(lock);
	}
}
EXPORT_SYMBOL(lg_global_lock);

void lg_global_unlock(struct lglock *lg)
{
	int i;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	for_each_possible_cpu(i) {
		lg_lock_ptr *lock;
		lock = per_cpu_ptr(lg->lock, i);
		lg_do_unlock(lock);
	}
	preempt_enable_nort();
}
EXPORT_SYMBOL(lg_global_unlock);
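
/*
 * Writer-side pattern (sketch): a global traversal takes every CPU's
 * lock and thereby excludes all lg_local_lock() sections at once:
 *
 *	lg_global_lock(&files_lglock);
 *	for_each_possible_cpu(i)
 *		... walk per_cpu_ptr(&files_list, i) ...
 *	lg_global_unlock(&files_lglock);
 *
 * The cost scales with the number of possible CPUs, so this side is
 * meant to be rare; the common, cheap case is lg_local_lock().
 */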

#ifdef CONFIG_PREEMPT_RT_FULL
/*
 * HACK: If you use this, you get to keep the pieces.
 * Used in queue_stop_cpus_work() when the stop machinery is
 * invoked from an inactive CPU, where scheduling is not allowed.
 */
# define lg_do_trylock_relax(l)			\
	do {					\
		while (!__rt_spin_trylock(l))	\
			cpu_relax();		\
	} while (0)

void lg_global_trylock_relax(struct lglock *lg)
{
	int i;

	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	for_each_possible_cpu(i) {
		lg_lock_ptr *lock;
		lock = per_cpu_ptr(lg->lock, i);
		lg_do_trylock_relax(lock);
	}
}
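
/*
 * Note there is no trylock-specific unlock: once the loop above has
 * taken every per-CPU lock, lg_global_unlock() releases them as usual
 * (as the queue_stop_cpus_work() caller does). The missing
 * preempt_disable_nort() here balances out because both _nort helpers
 * compile away on PREEMPT_RT_FULL, the only configuration in which
 * this function exists.
 */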
#endif