GitHub Repository: torvalds/linux
Path: blob/master/kernel/locking/rwbase_rt.c
// SPDX-License-Identifier: GPL-2.0-only

/*
 * RT-specific reader/writer semaphores and reader/writer locks
 *
 * down_write/write_lock()
 *  1) Lock rtmutex
 *  2) Remove the reader BIAS to force readers into the slow path
 *  3) Wait until all readers have left the critical section
 *  4) Mark it write locked
 *
 * up_write/write_unlock()
 *  1) Remove the write locked marker
 *  2) Set the reader BIAS, so readers can use the fast path again
 *  3) Unlock rtmutex, to release blocked readers
 *
 * down_read/read_lock()
 *  1) Try fast path acquisition (reader BIAS is set)
 *  2) Take rtmutex::wait_lock, which protects the writelocked flag
 *  3) If !writelocked, acquire it for read
 *  4) If writelocked, block on rtmutex
 *  5) Unlock rtmutex, goto 1)
 *
 * up_read/read_unlock()
 *  1) Try fast path release (reader count != 1)
 *  2) Wake the writer waiting in down_write()/write_lock() #3
 *
 * down_read/read_lock() #3 has the consequence that rw semaphores and rw
 * locks on RT are not writer fair. Writers, which should be avoided in
 * RT tasks (think mmap_sem), are subject to the rtmutex priority/DL
 * inheritance mechanism.
 *
 * It's possible to make the rw primitives writer fair by keeping a list of
 * active readers. A blocked writer would force all newly incoming readers
 * to block on the rtmutex, but the rtmutex would have to be proxy locked
 * for one reader after the other. We can't use multi-reader inheritance
 * because there is no way to support that with SCHED_DEADLINE.
 * Implementing the one-by-one reader boosting/handover mechanism would be
 * major surgery for very dubious value.
 *
 * The risk of writer starvation is there, but the pathological use cases
 * which trigger it are not necessarily the typical RT workloads.
 *
 * Fast-path orderings:
 * The lock/unlock of readers can run in fast paths: lock and unlock are only
 * atomic ops, and there is no inner lock to provide the ACQUIRE and RELEASE
 * semantics of rwbase_rt. Atomic ops should thus provide _acquire() and
 * _release() (or stronger); the pairings are summarized below.
 *
 * Common code shared between RT rw_semaphore and rwlock
 */

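/*
 * Ordering pairings used by the reader fast paths (see the per-site
 * comments below):
 *
 *  - The atomic_try_cmpxchg_acquire() in rwbase_read_trylock() pairs with
 *    the atomic_add_return_release() in __rwbase_write_unlock(), so a
 *    reader entering via the fast path observes the preceding writer's
 *    critical section.
 *
 *  - The fully ordered atomic_dec_and_test() in rwbase_read_unlock() pairs
 *    with the atomic_read_acquire() in __rwbase_write_trylock(), so a
 *    writer observes the last exiting reader's critical section.
 */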
static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
{
        int r;

        /*
         * Increment the reader count, if rwb->readers < 0, i.e. READER_BIAS
         * is set.
         */
        for (r = atomic_read(&rwb->readers); r < 0;) {
                if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
                        return 1;
        }
        return 0;
}

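/*
 * Reader slow path: serialize against a writer via rtmutex::wait_lock and,
 * if the lock is write locked, block on the rtmutex until the writer
 * releases it.
 */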
static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
                                      unsigned int state)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        DEFINE_WAKE_Q(wake_q);
        int ret;

        rwbase_pre_schedule();
        raw_spin_lock_irq(&rtm->wait_lock);

        /*
         * Call into the slow lock path with the rtmutex->wait_lock
         * held, so this can't result in the following race:
         *
         * Reader1               Reader2               Writer
         *                       down_read()
         *                                             down_write()
         *                                             rtmutex_lock(m)
         *                                             wait()
         * down_read()
         * unlock(m->wait_lock)
         *                       up_read()
         *                       wake(Writer)
         *                                             lock(m->wait_lock)
         *                                             sem->writelocked=true
         *                                             unlock(m->wait_lock)
         *
         *                                             up_write()
         *                                             sem->writelocked=false
         *                                             rtmutex_unlock(m)
         *                       down_read()
         *                                             down_write()
         *                                             rtmutex_lock(m)
         *                                             wait()
         * rtmutex_lock(m)
         *
         * That would put Reader1 behind the writer waiting on
         * Reader2 to call up_read(), which might be unbounded.
         */

        trace_contention_begin(rwb, LCB_F_RT | LCB_F_READ);

        /*
         * For rwlocks this returns 0 unconditionally, so the below
         * !ret conditionals are optimized out.
         */
        ret = rwbase_rtmutex_slowlock_locked(rtm, state, &wake_q);

        /*
         * On success the rtmutex is held, so there can't be a writer
         * active. Increment the reader count and immediately drop the
         * rtmutex again.
         *
         * rtmutex->wait_lock has to be unlocked in any case of course.
         */
        if (!ret)
                atomic_inc(&rwb->readers);

        preempt_disable();
        raw_spin_unlock_irq(&rtm->wait_lock);
        wake_up_q(&wake_q);
        preempt_enable();

        if (!ret)
                rwbase_rtmutex_unlock(rtm);

        trace_contention_end(rwb, ret);
        rwbase_post_schedule();
        return ret;
}

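/* Reader lock: try the fast path first, fall back to the slow path. */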
static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
                                            unsigned int state)
{
        lockdep_assert(!current->pi_blocked_on);

        if (rwbase_read_trylock(rwb))
                return 0;

        return __rwbase_read_lock(rwb, state);
}

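/*
 * Reader slow-path unlock: invoked by the last reader leaving the critical
 * section to wake the writer waiting in rwbase_write_lock().
 */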
static void __sched __rwbase_read_unlock(struct rwbase_rt *rwb,
                                         unsigned int state)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        struct task_struct *owner;
        DEFINE_RT_WAKE_Q(wqh);

        raw_spin_lock_irq(&rtm->wait_lock);
        /*
         * Wake the writer, i.e. the rtmutex owner. It might release the
         * rtmutex concurrently in the fast path (due to a signal), but to
         * clean up rwb->readers it needs to acquire rtm->wait_lock. The
         * worst case which can happen is a spurious wakeup.
         */
        owner = rt_mutex_owner(rtm);
        if (owner)
                rt_mutex_wake_q_add_task(&wqh, owner, state);

        /* Pairs with the preempt_enable in rt_mutex_wake_up_q() */
        preempt_disable();
        raw_spin_unlock_irq(&rtm->wait_lock);
        rt_mutex_wake_up_q(&wqh);
}

static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
                                               unsigned int state)
{
        /*
         * rwb->readers can only hit 0 when a writer is waiting for the
         * active readers to leave the critical section.
         *
         * dec_and_test() is fully ordered, provides RELEASE.
         */
        if (unlikely(atomic_dec_and_test(&rwb->readers)))
                __rwbase_read_unlock(rwb, state);
}

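/*
 * Common write-side unlock: add READER_BIAS back (minus @bias), then drop
 * rtmutex::wait_lock and release the rtmutex.
 */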
static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
                                         unsigned long flags)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;

        /*
         * _release() is needed in case a reader is in the fast path, pairing
         * with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
         */
        (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        rwbase_rtmutex_unlock(rtm);
}

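/*
 * up_write()/write_unlock(): remove WRITER_BIAS, restore READER_BIAS and
 * release the rtmutex.
 */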
static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        __rwbase_write_unlock(rwb, WRITER_BIAS, flags);
}

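/*
 * Downgrade a write lock to a read lock: release the writer side but keep
 * one reader count for the current task (hence WRITER_BIAS - 1).
 */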
static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        /* Release it and account current as reader */
        __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
}

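/*
 * Try to mark the lock write locked. The caller holds rtmutex::wait_lock,
 * so this can only succeed while no reader holds the lock.
 */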
static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
{
        /* Can do without CAS because we're serialized by wait_lock. */
        lockdep_assert_held(&rwb->rtmutex.wait_lock);

        /*
         * _acquire is needed in case the reader is in the fast path, pairing
         * with rwbase_read_unlock(), provides ACQUIRE.
         */
        if (!atomic_read_acquire(&rwb->readers)) {
                atomic_set(&rwb->readers, WRITER_BIAS);
                return 1;
        }

        return 0;
}

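/*
 * Writer lock: acquire the rtmutex, remove the reader BIAS to divert new
 * readers into the slow path, then wait under rtmutex::wait_lock until all
 * active readers have left and the lock can be marked write locked.
 */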
static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
                                     unsigned int state)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        /* Take the rtmutex as a first step */
        if (rwbase_rtmutex_lock_state(rtm, state))
                return -EINTR;

        /* Force readers into slow path */
        atomic_sub(READER_BIAS, &rwb->readers);

        rwbase_pre_schedule();

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        if (__rwbase_write_trylock(rwb))
                goto out_unlock;

        rwbase_set_and_save_current_state(state);
        trace_contention_begin(rwb, LCB_F_RT | LCB_F_WRITE);
        for (;;) {
                /* Optimized out for rwlocks */
                if (rwbase_signal_pending_state(state, current)) {
                        rwbase_restore_current_state();
                        __rwbase_write_unlock(rwb, 0, flags);
                        rwbase_post_schedule();
                        trace_contention_end(rwb, -EINTR);
                        return -EINTR;
                }

                if (__rwbase_write_trylock(rwb))
                        break;

                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                rwbase_schedule();
                raw_spin_lock_irqsave(&rtm->wait_lock, flags);

                set_current_state(state);
        }
        rwbase_restore_current_state();
        trace_contention_end(rwb, 0);

out_unlock:
        raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
        rwbase_post_schedule();
        return 0;
}

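/*
 * Writer trylock: succeeds only if the rtmutex can be taken without
 * blocking and no reader is active; otherwise the reader BIAS is restored.
 */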
static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
{
        struct rt_mutex_base *rtm = &rwb->rtmutex;
        unsigned long flags;

        if (!rwbase_rtmutex_trylock(rtm))
                return 0;

        atomic_sub(READER_BIAS, &rwb->readers);

        raw_spin_lock_irqsave(&rtm->wait_lock, flags);
        if (__rwbase_write_trylock(rwb)) {
                raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
                return 1;
        }
        __rwbase_write_unlock(rwb, 0, flags);
        return 0;
}