GitHub Repository: torvalds/linux
Path: blob/master/kernel/locking/spinlock_rt.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non RT semantics:
 *
 * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wake up is
 *   missed.
 *
 * - Non RT spin/rwlocks disable preemption and eventually interrupts.
 *   Disabling preemption has the side effect of disabling migration and
 *   preventing RCU grace periods.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock held section.
 */
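/*
 * Editorial sketch (not part of the upstream file): under the semantics
 * described above, a caller's critical section keeps its usual shape on
 * PREEMPT_RT, but the lock may sleep under contention instead of spinning.
 * 'some_lock' and do_something() below are hypothetical, used only for
 * illustration:
 *
 *	spin_lock(&some_lock);		// rtlock_lock() + rcu_read_lock() +
 *					// migrate_disable(), see __rt_spin_lock()
 *	do_something();			// preemptible, but pinned to this CPU
 *	spin_unlock(&some_lock);	// migrate_enable() + rcu_read_unlock() +
 *					// rtmutex release, see rt_spin_unlock()
 */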
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"

/*
 * __might_resched() skips the state check as rtlocks are state
 * preserving. Take RCU nesting into account as spin/read/write_lock() can
 * legitimately nest into an RCU read side critical section.
 */
#define RTLOCK_RESCHED_OFFSETS						\
	(rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)

#define rtlock_might_resched()						\
	__might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)
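/*
 * Editorial sketch (not part of the upstream file): the RCU offset above
 * allows spin_lock() to nest inside an RCU read side critical section
 * without triggering a __might_resched() warning. 'some_lock' is a
 * hypothetical spinlock_t used only for illustration:
 *
 *	rcu_read_lock();
 *	spin_lock(&some_lock);		// rcu_preempt_depth() is non-zero here,
 *					// but rtlock_might_resched() passes that
 *					// depth as an allowed offset
 *	...
 *	spin_unlock(&some_lock);
 *	rcu_read_unlock();
 */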
static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	lockdep_assert(!current->pi_blocked_on);

	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}

static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
	rtlock_might_resched();
	rtlock_lock(&lock->lock);
	rcu_read_lock();
	migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock) __acquires(RCU)
{
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
				    struct lockdep_map *nest_lock)
{
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

void __sched rt_spin_unlock(spinlock_t *lock) __releases(RCU)
{
	spin_release(&lock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();

	if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
		rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);

static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
	int ret = 1;

	if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
		ret = rt_mutex_slowtrylock(&lock->lock);

	if (ret) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
	return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);

int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
	int ret;

	local_bh_disable();
	ret = __rt_spin_trylock(lock);
	if (!ret)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
			 struct lock_class_key *key, bool percpu)
{
	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
			      LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif

/*
 * RT-specific reader/writer locks
 */
#define rwbase_set_and_save_current_state(state)	\
	current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state()			\
	current_restore_rtlock_saved_state()

static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
	return 0;
}

static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state,
			       struct wake_q_head *wake_q)
{
	rtlock_slowlock_locked(rtm, wake_q);
	return 0;
}

static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
		return;

	rt_mutex_slowunlock(rtm);
}

static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(rtm);
}

#define rwbase_signal_pending_state(state, current)	(0)

#define rwbase_pre_schedule()

#define rwbase_schedule()				\
	schedule_rtlock()

#define rwbase_post_schedule()

#include "rwbase_rt.c"
/*
 * The common functions which get wrapped into the rwlock API.
 */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_read_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

int __sched rt_write_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_write_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

void __sched rt_read_lock(rwlock_t *rwlock) __acquires(RCU)
{
	rtlock_might_resched();
	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);

void __sched rt_write_lock(rwlock_t *rwlock) __acquires(RCU)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(RCU)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif

void __sched rt_read_unlock(rwlock_t *rwlock) __releases(RCU)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();
	rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);

void __sched rt_write_unlock(rwlock_t *rwlock) __releases(RCU)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	rcu_read_unlock();
	migrate_enable();
	rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
		      struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif