// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include "internal.h"

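/*
 * Values for the fl_u.afs.state field of a file_lock; a negative errno is
 * stored there instead if the lock attempt failed.
 */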
#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1
#define AFS_LOCK_YOUR_TRY	2

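/* Workqueue on which each vnode's lock_work is run to extend, retry or
 * release its lock on the server.
 */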
struct workqueue_struct *afs_lock_manager;

static void afs_next_locker(struct afs_vnode *vnode, int error);
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
{
	_debug("STATE %u -> %u", vnode->lock_state, state);
	vnode->lock_state = state;
}

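/* Source of the debug IDs assigned to file_locks for tracing purposes */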
static atomic_t afs_file_lock_debug_id;

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);
	if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
		afs_next_locker(vnode, 0);
	trace_afs_flock_ev(vnode, NULL, afs_flock_callback_break, 0);
	spin_unlock(&vnode->lock);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	ktime_t expires_at, now, duration;
	u64 duration_j;

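	/* AFS_LOCKWAIT is the nominal server-side lock lifetime in seconds
	 * (the five minutes noted above); aim to renew at half-life.
	 */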
	expires_at = ktime_add_ms(vnode->locked_at, AFS_LOCKWAIT * 1000 / 2);
	now = ktime_get_real();
	duration = ktime_sub(expires_at, now);
	if (duration <= 0)
		duration_j = 0;
	else
		duration_j = nsecs_to_jiffies(ktime_to_ns(duration));

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, duration_j);
}

/*
 * In the case of successful completion of a lock operation, record the time
 * the reply appeared and start the lock extension timer.
 */
void afs_lock_op_done(struct afs_call *call)
{
	struct afs_operation *op = call->op;
	struct afs_vnode *vnode = op->file[0].vnode;

	if (call->error == 0) {
		spin_lock(&vnode->lock);
		trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
		vnode->locked_at = call->issue_time;
		afs_schedule_lock_extension(vnode);
		spin_unlock(&vnode->lock);
	}
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode)
{
	struct file_lock *p, *_p;
	bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (!exclusive && lock_is_write(p))
			continue;

		list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
		p->fl_u.afs.state = AFS_LOCK_GRANTED;
		trace_afs_flock_op(vnode, p, afs_flock_op_grant);
		locks_wake_up(p);
	}
}

/*
 * If an error is specified, reject every pending lock that matches the
 * authentication and type of the lock we failed to get. If there are any
 * remaining lockers, try to wake up one of them to have a go.
 */
static void afs_next_locker(struct afs_vnode *vnode, int error)
{
	struct file_lock *p, *_p, *next = NULL;
	struct key *key = vnode->lock_key;
	unsigned int type = F_RDLCK;

	_enter("");

	if (vnode->lock_type == AFS_LOCK_WRITE)
		type = F_WRLCK;

	list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
		if (error &&
		    p->c.flc_type == type &&
		    afs_file_key(p->c.flc_file) == key) {
			list_del_init(&p->fl_u.afs.link);
			p->fl_u.afs.state = error;
			locks_wake_up(p);
		}

		/* Select the next locker to hand off to. */
		if (next && (lock_is_write(next) || lock_is_read(p)))
			continue;
		next = p;
	}

	vnode->lock_key = NULL;
	key_put(key);

	if (next) {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
		next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
		trace_afs_flock_op(vnode, next, afs_flock_op_wake);
		locks_wake_up(next);
	} else {
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
		trace_afs_flock_ev(vnode, NULL, afs_flock_no_lockers, 0);
	}

	_leave("");
}

/*
 * Kill off all waiters in the pending lock queue due to the vnode being
 * deleted.
 */
static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
{
	struct file_lock *p;

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_DELETED);

	while (!list_empty(&vnode->pending_locks)) {
		p = list_entry(vnode->pending_locks.next,
			       struct file_lock, fl_u.afs.link);
		list_del_init(&p->fl_u.afs.link);
		p->fl_u.afs.state = -ENOENT;
		locks_wake_up(p);
	}

	key_put(vnode->lock_key);
	vnode->lock_key = NULL;
}

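/*
 * Handle the successful completion of a lock-related RPC by committing the
 * file status returned by the server.
 */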
static void afs_lock_success(struct afs_operation *op)
{
	_enter("op=%08x", op->debug_id);
	afs_vnode_commit_status(op, &op->file[0]);
}

static const struct afs_operation_ops afs_set_lock_operation = {
	.issue_afs_rpc	= afs_fs_set_lock,
	.issue_yfs_rpc	= yfs_fs_set_lock,
	.success	= afs_lock_success,
	.aborted	= afs_check_for_remote_deletion,
};

/*
 * Get a lock on a file
 */
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
			afs_lock_type_t type)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x,%u",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key), type);

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->lock.type	= type;
	op->ops		= &afs_set_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_extend_lock_operation = {
	.issue_afs_rpc	= afs_fs_extend_lock,
	.issue_yfs_rpc	= yfs_fs_extend_lock,
	.success	= afs_lock_success,
};

/*
 * Extend a lock on a file
 */
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->flags |= AFS_OPERATION_UNINTR;	/* Don't let signals interrupt this */
	op->ops		= &afs_extend_lock_operation;
	return afs_do_sync_operation(op);
}

static const struct afs_operation_ops afs_release_lock_operation = {
	.issue_afs_rpc	= afs_fs_release_lock,
	.issue_yfs_rpc	= yfs_fs_release_lock,
	.success	= afs_lock_success,
};

/*
 * Release a lock on a file
 */
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

	op->flags |= AFS_OPERATION_UNINTR;	/* Don't let signals interrupt this */
	op->ops		= &afs_release_lock_operation;
	return afs_do_sync_operation(op);
}

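/*
 * Rough sketch of the vnode->lock_state transitions driven from here and
 * from afs_do_setlk() (not exhaustive):
 *
 *	NONE -> SETTING -> GRANTED <-> EXTENDING
 *	SETTING -> WAITING_FOR_CB		(server gave -EWOULDBLOCK)
 *	GRANTED -> NEED_UNLOCK -> UNLOCKING -> NONE
 *	any state -> DELETED			(file removed on the server)
 */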
/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct key *key;
	int ret;

	_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

again:
	_debug("wstate %u for %p", vnode->lock_state, vnode);
	switch (vnode->lock_state) {
	case AFS_VNODE_LOCK_NEED_UNLOCK:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_unlocking, 0);
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll expire anyway */
		ret = afs_release_lock(vnode, vnode->lock_key);
		if (ret < 0 && vnode->lock_state != AFS_VNODE_LOCK_DELETED) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_release_fail,
					   ret);
			printk(KERN_WARNING "AFS:"
			       " Failed to release lock on {%llx:%llx} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);
		if (ret == -ENOENT)
			afs_kill_lockers_enoent(vnode);
		else
			afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	/* If we've already got a lock, then it must be time to extend that
	 * lock as AFS locks time out after 5 minutes.
	 */
	case AFS_VNODE_LOCK_GRANTED:
		_debug("extend");

		ASSERT(!list_empty(&vnode->granted_locks));

		key = key_get(vnode->lock_key);
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
		trace_afs_flock_ev(vnode, NULL, afs_flock_work_extending, 0);
		spin_unlock(&vnode->lock);

		ret = afs_extend_lock(vnode, key); /* RPC */
		key_put(key);

		if (ret < 0) {
			trace_afs_flock_ev(vnode, NULL, afs_flock_extend_fail,
					   ret);
			pr_warn("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
				vnode->fid.vid, vnode->fid.vnode, ret);
		}

		spin_lock(&vnode->lock);

		if (ret == -ENOENT) {
			afs_kill_lockers_enoent(vnode);
			spin_unlock(&vnode->lock);
			return;
		}

		if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
			goto again;
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);

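		/* If the extension failed for some other reason, retry in ten
		 * seconds rather than waiting out the remaining lock lifetime.
		 */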
		if (ret != 0)
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
		spin_unlock(&vnode->lock);
		_leave(" [ext]");
		return;

	/* If we're waiting for a callback to indicate lock release, we can't
	 * actually rely on this, so need to recheck at regular intervals. The
	 * problem is that the server might not notify us if the lock just
	 * expires (say because a client died) rather than being explicitly
	 * released.
	 */
	case AFS_VNODE_LOCK_WAITING_FOR_CB:
		_debug("retry");
		afs_next_locker(vnode, 0);
		spin_unlock(&vnode->lock);
		return;

	case AFS_VNODE_LOCK_DELETED:
		afs_kill_lockers_enoent(vnode);
		spin_unlock(&vnode->lock);
		return;

	default:
		/* Looks like a lock request was withdrawn. */
		spin_unlock(&vnode->lock);
		_leave(" [no]");
		return;
	}
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode)
{
	_enter("%u", vnode->lock_state);

	if (list_empty(&vnode->granted_locks) &&
	    (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
	     vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
		cancel_delayed_work(&vnode->lock_work);

		afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
		trace_afs_flock_ev(vnode, NULL, afs_flock_defer_unlock, 0);
		queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
	}
}

/*
 * Check that our view of the file metadata is up to date and check to see
 * whether we think that we have a locking permit.
 */
static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
			      enum afs_flock_mode mode, afs_lock_type_t type)
{
	afs_access_t access;
	int ret;

	/* Make sure we've got a callback on this file and that our view of the
	 * data version is up to date.
	 */
	ret = afs_validate(vnode, key);
	if (ret < 0)
		return ret;

	/* Check the permission set to see if we're actually going to be
	 * allowed to get a lock on this file.
	 */
	ret = afs_check_permit(vnode, key, &access);
	if (ret < 0)
		return ret;

	/* At a rough estimation, you need LOCK, WRITE or INSERT perm to
	 * read-lock a file and WRITE or INSERT perm to write-lock a file.
	 *
	 * We can't rely on the server to do this for us since if we want to
	 * share a read lock that we already have, we won't go to the server.
	 */
	if (type == AFS_LOCK_READ) {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
			return -EACCES;
	} else {
		if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
			return -EACCES;
	}

	return 0;
}

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	enum afs_flock_mode mode = AFS_FS_S(inode->i_sb)->flock_mode;
	afs_lock_type_t type;
	struct key *key = afs_file_key(file);
	bool partial, no_server_lock = false;
	int ret;

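	/* The mode is presumably selected by the "flock=" mount parameter;
	 * default to OpenAFS-style lock emulation if it wasn't set.
	 */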
	if (mode == afs_flock_mode_unset)
		mode = afs_flock_mode_openafs;

	_enter("{%llx:%llu},%llu-%llu,%u,%u",
	       vnode->fid.vid, vnode->fid.vnode,
	       fl->fl_start, fl->fl_end, fl->c.flc_type, mode);

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	partial = (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX);
	type = lock_is_read(fl) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
	if (mode == afs_flock_mode_write && partial)
		type = AFS_LOCK_WRITE;

	ret = afs_do_setlk_check(vnode, key, mode, type);
	if (ret < 0)
		return ret;

	trace_afs_flock_op(vnode, fl, afs_flock_op_set_lock);

	/* AFS3 protocol only supports full-file locks and doesn't provide any
	 * method of upgrade/downgrade, so we need to emulate for partial-file
	 * locks.
	 *
	 * The OpenAFS client only gets a server lock for a full-file lock and
	 * keeps partial-file locks local. Allow this behaviour to be emulated
	 * (as the default).
	 */
	if (mode == afs_flock_mode_local ||
	    (partial && mode == afs_flock_mode_openafs)) {
		no_server_lock = true;
		goto skip_server_lock;
	}

	spin_lock(&vnode->lock);
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);

	ret = -ENOENT;
	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		goto error_unlock;

	/* If we've already got a lock on the server then try to move to having
	 * the VFS grant the requested lock. Note that this means that other
	 * clients may get starved out.
	 */
	_debug("try %u", vnode->lock_state);
	if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
		if (type == AFS_LOCK_READ) {
			_debug("instant readlock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}

		if (vnode->lock_type == AFS_LOCK_WRITE) {
			_debug("instant writelock");
			list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
			fl->fl_u.afs.state = AFS_LOCK_GRANTED;
			goto vnode_is_locked_u;
		}
	}

	if (vnode->lock_state == AFS_VNODE_LOCK_NONE &&
	    !(fl->c.flc_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		if (type == AFS_LOCK_READ) {
			if (vnode->status.lock_count == -1)
				goto lock_is_contended; /* Write locked */
		} else {
			if (vnode->status.lock_count != 0)
				goto lock_is_contended; /* Locked */
		}
	}

	if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
		goto need_to_wait;

try_to_lock:
	/* We don't have a lock on this vnode and we aren't currently waiting
	 * for one either, so ask the server for a lock.
	 *
	 * Note that we need to be careful if we get interrupted by a signal
	 * after dispatching the request as we may still get the lock, even
	 * though we don't wait for the reply (it's not too bad a problem - the
	 * lock will expire in 5 mins anyway).
	 */
	trace_afs_flock_ev(vnode, fl, afs_flock_try_to_lock, 0);
	vnode->lock_key = key_get(key);
	vnode->lock_type = type;
	afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
	spin_unlock(&vnode->lock);

	ret = afs_set_lock(vnode, key, type); /* RPC */

	spin_lock(&vnode->lock);
	switch (ret) {
	case -EKEYREJECTED:
	case -EKEYEXPIRED:
	case -EKEYREVOKED:
	case -EPERM:
	case -EACCES:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_perm, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, ret);
		goto error_unlock;

	case -ENOENT:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_kill_lockers_enoent(vnode);
		goto error_unlock;

	default:
		fl->fl_u.afs.state = ret;
		trace_afs_flock_ev(vnode, fl, afs_flock_fail_other, ret);
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		goto error_unlock;

	case -EWOULDBLOCK:
		/* The server doesn't have a lock-waiting queue, so the client
		 * will have to retry. The server will break the outstanding
		 * callbacks on a file when a lock is released.
		 */
		ASSERT(list_empty(&vnode->granted_locks));
		ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
		goto lock_is_contended;

	case 0:
		afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
		trace_afs_flock_ev(vnode, fl, afs_flock_acquired, type);
		afs_grant_locks(vnode);
		goto vnode_is_locked_u;
	}

vnode_is_locked_u:
	spin_unlock(&vnode->lock);
vnode_is_locked:
	/* the lock has been granted by the server... */
	ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);

skip_server_lock:
	/* ... but the VFS still needs to distribute access on this client. */
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_locking, 0);
	ret = locks_lock_file_wait(file, fl);
	trace_afs_flock_ev(vnode, fl, afs_flock_vfs_lock, ret);
	if (ret < 0)
		goto vfs_rejected_lock;

	/* Again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere).
	 */
	afs_validate(vnode, key);
	_leave(" = 0");
	return 0;

lock_is_contended:
	if (!(fl->c.flc_flags & FL_SLEEP)) {
		list_del_init(&fl->fl_u.afs.link);
		afs_next_locker(vnode, 0);
		ret = -EAGAIN;
		goto error_unlock;
	}

	afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
	trace_afs_flock_ev(vnode, fl, afs_flock_would_block, ret);
	queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);

need_to_wait:
	/* We're going to have to wait. Either this client doesn't have a lock
	 * on the server yet and we need to wait for a callback to occur, or
	 * the client does have a lock on the server, but it's shared and we
	 * need an exclusive lock.
	 */
	spin_unlock(&vnode->lock);

	trace_afs_flock_ev(vnode, fl, afs_flock_waiting, 0);
	ret = wait_event_interruptible(fl->c.flc_wait,
				       fl->fl_u.afs.state != AFS_LOCK_PENDING);
	trace_afs_flock_ev(vnode, fl, afs_flock_waited, ret);

	if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
		spin_lock(&vnode->lock);

		switch (fl->fl_u.afs.state) {
		case AFS_LOCK_YOUR_TRY:
			fl->fl_u.afs.state = AFS_LOCK_PENDING;
			goto try_to_lock;
		case AFS_LOCK_PENDING:
			if (ret > 0) {
				/* We need to retry the lock. We may not be
				 * notified by the server if it just expired
				 * rather than being released.
				 */
				ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
				afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
				fl->fl_u.afs.state = AFS_LOCK_PENDING;
				goto try_to_lock;
			}
			goto error_unlock;
		case AFS_LOCK_GRANTED:
		default:
			break;
		}

		spin_unlock(&vnode->lock);
	}

	if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
		goto vnode_is_locked;
	ret = fl->fl_u.afs.state;
	goto error;

vfs_rejected_lock:
	/* The VFS rejected the lock we just obtained, so we have to discard
	 * what we just got. We defer this to the lock manager work item to
	 * deal with.
	 */
	_debug("vfs refused %d", ret);
	if (no_server_lock)
		goto error;
	spin_lock(&vnode->lock);
	list_del_init(&fl->fl_u.afs.link);
	afs_defer_unlock(vnode);

error_unlock:
	spin_unlock(&vnode->lock);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * unlock on a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	int ret;

	_enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode,
	       fl->c.flc_type);

	trace_afs_flock_op(vnode, fl, afs_flock_op_unlock);

	/* Flush all pending writes before doing anything with locks. */
	vfs_fsync(file, 0);

	ret = locks_lock_file_wait(file, fl);
	_leave(" = %d [%u]", ret, vnode->lock_state);
	return ret;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	int ret, lock_count;

	_enter("");

	if (vnode->lock_state == AFS_VNODE_LOCK_DELETED)
		return -ENOENT;

	fl->c.flc_type = F_UNLCK;

	/* check local lock records first */
	posix_test_lock(file, fl);
	if (lock_is_unlock(fl)) {
		/* no local locks; consult the server */
		ret = afs_fetch_status(vnode, key, false, NULL);
		if (ret < 0)
			goto error;

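		/* A positive lock_count counts shared locks; a negative count
		 * (-1) indicates an exclusive lock (cf. the contention checks
		 * in afs_do_setlk()).
		 */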
		lock_count = READ_ONCE(vnode->status.lock_count);
		if (lock_count != 0) {
			if (lock_count > 0)
				fl->c.flc_type = F_RDLCK;
			else
				fl->c.flc_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
			fl->c.flc_pid = 0;
		}
	}

	ret = 0;
error:
	_leave(" = %d [%hd]", ret, fl->c.flc_type);
	return ret;
}

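/*
 * An illustrative (hypothetical) userspace path into the handler below: a
 * POSIX byte-range lock taken on a file in an AFS mount, e.g.:
 *
 *	struct flock fl = {
 *		.l_type		= F_WRLCK,
 *		.l_whence	= SEEK_SET,
 *		.l_start	= 0,
 *		.l_len		= 0,		// 0 = to end of file
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// blocking form arrives with FL_SLEEP
 *
 * F_GETLK requests are diverted to afs_do_getlk() above.
 */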
/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->c.flc_type, fl->c.flc_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_lock);

	if (lock_is_unlock(fl))
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

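/*
 * Illustrative (hypothetical) userspace path into the handler below:
 * BSD-style whole-file locks, e.g.:
 *
 *	flock(fd, LOCK_SH);	// shared lock, arrives with FL_FLOCK set
 *
 * which this client then simulates with POSIX locks on the server.
 */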
/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	enum afs_flock_operation op;
	int ret;

	_enter("{%llx:%llu},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->c.flc_type, fl->c.flc_flags);

	/*
	 * No BSD flocks over NFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->c.flc_flags & FL_FLOCK))
		return -ENOLCK;

	fl->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);
	trace_afs_flock_op(vnode, fl, afs_flock_op_flock);

	/* we're simulating flock() locks using posix locks on the server */
	if (lock_is_unlock(fl))
		ret = afs_do_unlk(file, fl);
	else
		ret = afs_do_setlk(file, fl);

	switch (ret) {
	case 0:		op = afs_flock_op_return_ok; break;
	case -EAGAIN:	op = afs_flock_op_return_eagain; break;
	case -EDEADLK:	op = afs_flock_op_return_edeadlk; break;
	default:	op = afs_flock_op_return_error; break;
	}
	trace_afs_flock_op(vnode, fl, op);
	return ret;
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file));

	_enter("");

	new->fl_u.afs.debug_id = atomic_inc_return(&afs_file_lock_debug_id);

	spin_lock(&vnode->lock);
	trace_afs_flock_op(vnode, new, afs_flock_op_copy_lock);
	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
	spin_unlock(&vnode->lock);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(fl->c.flc_file));

	_enter("");

	spin_lock(&vnode->lock);

	trace_afs_flock_op(vnode, fl, afs_flock_op_release_lock);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode);

	_debug("state %u for %p", vnode->lock_state, vnode);
	spin_unlock(&vnode->lock);
}