GitHub Repository: awilliam/linux-vfio
Path: blob/master/fs/dlm/ast.c
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

#define WAKE_ASTS  0

static uint64_t ast_seq_count;
static struct list_head ast_queue;
static spinlock_t ast_queue_lock;
static struct task_struct * astd_task;
static unsigned long astd_wakeflags;
static struct mutex astd_running;

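/*
 * Overview of the callback delivery path in this file: dlm_add_ast() records
 * a pending callback in the lkb's fixed-size lkb_callbacks[] array and puts
 * the lkb on the global ast_queue.  The dlm_astd kernel thread later walks
 * ast_queue and delivers the completion (cast) and blocking (bast) callbacks
 * to their owners via the lkb's castfn/bastfn pointers.
 */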
static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
{
        int i;

        log_print("last_bast %x %llu flags %x mode %d sb %d %x",
                  lkb->lkb_id,
                  (unsigned long long)lkb->lkb_last_bast.seq,
                  lkb->lkb_last_bast.flags,
                  lkb->lkb_last_bast.mode,
                  lkb->lkb_last_bast.sb_status,
                  lkb->lkb_last_bast.sb_flags);

        log_print("last_cast %x %llu flags %x mode %d sb %d %x",
                  lkb->lkb_id,
                  (unsigned long long)lkb->lkb_last_cast.seq,
                  lkb->lkb_last_cast.flags,
                  lkb->lkb_last_cast.mode,
                  lkb->lkb_last_cast.sb_status,
                  lkb->lkb_last_cast.sb_flags);

        for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                log_print("cb %x %llu flags %x mode %d sb %d %x",
                          lkb->lkb_id,
                          (unsigned long long)lkb->lkb_callbacks[i].seq,
                          lkb->lkb_callbacks[i].flags,
                          lkb->lkb_callbacks[i].mode,
                          lkb->lkb_callbacks[i].sb_status,
                          lkb->lkb_callbacks[i].sb_flags);
        }
}

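/* Remove an lkb from the global ast_queue if it is queued there. */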
void dlm_del_ast(struct dlm_lkb *lkb)
{
        spin_lock(&ast_queue_lock);
        if (!list_empty(&lkb->lkb_astqueue))
                list_del_init(&lkb->lkb_astqueue);
        spin_unlock(&ast_queue_lock);
}

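/*
 * Record a pending callback in the first free slot of lkb->lkb_callbacks[].
 * A new bast is dropped here if the callback queued just before it is a bast
 * for the same or a more restrictive mode; returns -1 if every slot is
 * already in use.
 */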
int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
                         int status, uint32_t sbflags, uint64_t seq)
{
        struct dlm_ls *ls = lkb->lkb_resource->res_ls;
        uint64_t prev_seq;
        int prev_mode;
        int i;

        for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                if (lkb->lkb_callbacks[i].seq)
                        continue;

                /*
                 * Suppress some redundant basts here, do more on removal.
                 * Don't even add a bast if the callback just before it
                 * is a bast for the same mode or a more restrictive mode.
                 * (the additional > PR check is needed for PR/CW inversion)
                 */

                if ((i > 0) && (flags & DLM_CB_BAST) &&
                    (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {

                        prev_seq = lkb->lkb_callbacks[i-1].seq;
                        prev_mode = lkb->lkb_callbacks[i-1].mode;

                        if ((prev_mode == mode) ||
                            (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {

                                log_debug(ls, "skip %x add bast %llu mode %d "
                                          "for bast %llu mode %d",
                                          lkb->lkb_id,
                                          (unsigned long long)seq,
                                          mode,
                                          (unsigned long long)prev_seq,
                                          prev_mode);
                                return 0;
                        }
                }

                lkb->lkb_callbacks[i].seq = seq;
                lkb->lkb_callbacks[i].flags = flags;
                lkb->lkb_callbacks[i].mode = mode;
                lkb->lkb_callbacks[i].sb_status = status;
                lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
                break;
        }

        if (i == DLM_CALLBACKS_SIZE) {
                log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
                          lkb->lkb_id, (unsigned long long)seq,
                          flags, mode, status, sbflags);
                dlm_dump_lkb_callbacks(lkb);
                return -1;
        }

        return 0;
}

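/*
 * Pop the oldest queued callback (slot 0) into *cb and shift the rest down;
 * *resid is the number of callbacks still queued afterwards.  A bast whose
 * blocking mode is compatible with the last granted (cast) mode is marked
 * DLM_CB_SKIP so the caller will not deliver it.
 */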
int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
                         struct dlm_callback *cb, int *resid)
{
        int i;

        *resid = 0;

        if (!lkb->lkb_callbacks[0].seq)
                return -ENOENT;

        /* oldest undelivered cb is callbacks[0] */

        memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
        memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));

        /* shift others down */

        for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
                if (!lkb->lkb_callbacks[i].seq)
                        break;
                memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
                       sizeof(struct dlm_callback));
                memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
                (*resid)++;
        }

        /* if cb is a bast, it should be skipped if the blocking mode is
           compatible with the last granted mode */

        if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
                if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
                        cb->flags |= DLM_CB_SKIP;

                        log_debug(ls, "skip %x bast %llu mode %d "
                                  "for cast %llu mode %d",
                                  lkb->lkb_id,
                                  (unsigned long long)cb->seq,
                                  cb->mode,
                                  (unsigned long long)lkb->lkb_last_cast.seq,
                                  lkb->lkb_last_cast.mode);
                        return 0;
                }
        }

        if (cb->flags & DLM_CB_CAST) {
                memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));
                lkb->lkb_last_cast_time = ktime_get();
        }

        if (cb->flags & DLM_CB_BAST) {
                memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
                lkb->lkb_last_bast_time = ktime_get();
        }

        return 0;
}

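/*
 * Entry point from the lock code: assign the next global sequence number,
 * hand user-space locks to dlm_user_add_ast(), otherwise store the callback
 * on the lkb and put the lkb on ast_queue (taking a reference), then wake
 * the dlm_astd thread.
 */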
void dlm_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
                 uint32_t sbflags)
{
        uint64_t seq;
        int rv;

        spin_lock(&ast_queue_lock);

        seq = ++ast_seq_count;

        if (lkb->lkb_flags & DLM_IFL_USER) {
                spin_unlock(&ast_queue_lock);
                dlm_user_add_ast(lkb, flags, mode, status, sbflags, seq);
                return;
        }

        rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
        if (rv < 0) {
                spin_unlock(&ast_queue_lock);
                return;
        }

        if (list_empty(&lkb->lkb_astqueue)) {
                kref_get(&lkb->lkb_ref);
                list_add_tail(&lkb->lkb_astqueue, &ast_queue);
        }
        spin_unlock(&ast_queue_lock);

        set_bit(WAKE_ASTS, &astd_wakeflags);
        wake_up_process(astd_task);
}

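/*
 * Drain ast_queue: for each queued lkb, pull all of its pending callbacks
 * into a local array under ast_queue_lock, then deliver them without the
 * lock held (bastfn for basts, castfn for casts after copying status and
 * flags into the lksb).  The loop restarts from the head of the list after
 * each lkb because the lock is dropped during delivery.
 */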
static void process_asts(void)
{
        struct dlm_ls *ls = NULL;
        struct dlm_rsb *r = NULL;
        struct dlm_lkb *lkb;
        void (*castfn) (void *astparam);
        void (*bastfn) (void *astparam, int mode);
        struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
        int i, rv, resid;

repeat:
        spin_lock(&ast_queue_lock);
        list_for_each_entry(lkb, &ast_queue, lkb_astqueue) {
                r = lkb->lkb_resource;
                ls = r->res_ls;

                if (dlm_locking_stopped(ls))
                        continue;

                /* we remove from astqueue list and remove everything in
                   lkb_callbacks before releasing the spinlock so empty
                   lkb_astqueue is always consistent with empty lkb_callbacks */

                list_del_init(&lkb->lkb_astqueue);

                castfn = lkb->lkb_astfn;
                bastfn = lkb->lkb_bastfn;

                memset(&callbacks, 0, sizeof(callbacks));

                for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                        rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
                        if (rv < 0)
                                break;
                }
                spin_unlock(&ast_queue_lock);

                if (resid) {
                        /* shouldn't happen, for loop should have removed all */
                        log_error(ls, "callback resid %d lkb %x",
                                  resid, lkb->lkb_id);
                }

                for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
                        if (!callbacks[i].seq)
                                break;
                        if (callbacks[i].flags & DLM_CB_SKIP) {
                                continue;
                        } else if (callbacks[i].flags & DLM_CB_BAST) {
                                bastfn(lkb->lkb_astparam, callbacks[i].mode);
                        } else if (callbacks[i].flags & DLM_CB_CAST) {
                                lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
                                lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
                                castfn(lkb->lkb_astparam);
                        }
                }

                /* removes ref for ast_queue, may cause lkb to be freed */
                dlm_put_lkb(lkb);

                cond_resched();
                goto repeat;
        }
        spin_unlock(&ast_queue_lock);
}

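/* Check whether ast_queue is empty, taking the lock for a consistent view. */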
static inline int no_asts(void)
{
        int ret;

        spin_lock(&ast_queue_lock);
        ret = list_empty(&ast_queue);
        spin_unlock(&ast_queue_lock);
        return ret;
}

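/*
 * Main loop of the dlm_astd kernel thread: sleep until WAKE_ASTS is set,
 * then process the queue while holding astd_running, the mutex that
 * dlm_astd_suspend()/dlm_astd_resume() use to temporarily block callback
 * delivery.
 */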
static int dlm_astd(void *data)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!test_bit(WAKE_ASTS, &astd_wakeflags))
                        schedule();
                set_current_state(TASK_RUNNING);

                mutex_lock(&astd_running);
                if (test_and_clear_bit(WAKE_ASTS, &astd_wakeflags))
                        process_asts();
                mutex_unlock(&astd_running);
        }
        return 0;
}

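/* Wake the dlm_astd thread if there is anything queued for delivery. */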
void dlm_astd_wake(void)
{
        if (!no_asts()) {
                set_bit(WAKE_ASTS, &astd_wakeflags);
                wake_up_process(astd_task);
        }
}

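/*
 * Initialize the queue, spinlock and mutex, then start the dlm_astd kthread.
 * Returns 0 on success or the kthread_run() error code.
 */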
int dlm_astd_start(void)
{
        struct task_struct *p;
        int error = 0;

        INIT_LIST_HEAD(&ast_queue);
        spin_lock_init(&ast_queue_lock);
        mutex_init(&astd_running);

        p = kthread_run(dlm_astd, NULL, "dlm_astd");
        if (IS_ERR(p))
                error = PTR_ERR(p);
        else
                astd_task = p;
        return error;
}

void dlm_astd_stop(void)
{
        kthread_stop(astd_task);
}

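/*
 * Suspend/resume callback delivery by holding astd_running, the same mutex
 * dlm_astd takes around process_asts().
 */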
void dlm_astd_suspend(void)
{
        mutex_lock(&astd_running);
}

void dlm_astd_resume(void)
{
        mutex_unlock(&astd_running);
}
