// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS cell and server record management
 *
 * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */

#include <linux/slab.h>
#include <linux/key.h>
#include <linux/ctype.h>
#include <linux/dns_resolver.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/namei.h>
#include <keys/rxrpc-type.h>
#include "internal.h"

static unsigned __read_mostly afs_cell_gc_delay = 10;
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
static atomic_t cell_debug_id;

static void afs_cell_timer(struct timer_list *timer);
static void afs_destroy_cell_work(struct work_struct *work);
static void afs_manage_cell_work(struct work_struct *work);

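/* Drop a count on net->cells_outstanding and wake up anyone waiting for it to
 * reach zero (e.g. afs_cell_purge()).
 */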
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}

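/* Change the cell's state and wake up anyone waiting on the state (see
 * afs_lookup_cell()).
 */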
static void afs_set_cell_state(struct afs_cell *cell, enum afs_cell_state state)
{
	smp_store_release(&cell->state, state); /* Commit cell changes before state */
	smp_wmb(); /* Set cell state before task state */
	wake_up_var(&cell->state);
}

/*
 * Look up and get an activation reference on a cell record. The caller must
 * hold net->cells_lock at least read-locked.
 */
static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
					     const char *name, unsigned int namesz,
					     enum afs_cell_trace reason)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	if (!name) {
		cell = rcu_dereference_protected(net->ws_cell,
						 lockdep_is_held(&net->cells_lock));
		if (!cell)
			return ERR_PTR(-EDESTADDRREQ);
		goto found;
	}

	p = net->cells.rb_node;
	while (p) {
		cell = rb_entry(p, struct afs_cell, net_node);

		n = strncasecmp(cell->name, name,
				min_t(size_t, cell->name_len, namesz));
		if (n == 0)
			n = cell->name_len - namesz;
		if (n < 0)
			p = p->rb_left;
		else if (n > 0)
			p = p->rb_right;
		else
			goto found;
	}

	return ERR_PTR(-ENOENT);

found:
	return afs_use_cell(cell, reason);
}

/*
 * Look up and get an activation reference on a cell record.
 */
struct afs_cell *afs_find_cell(struct afs_net *net,
			       const char *name, unsigned int namesz,
			       enum afs_cell_trace reason)
{
	struct afs_cell *cell;

	down_read(&net->cells_lock);
	cell = afs_find_cell_locked(net, name, namesz, reason);
	up_read(&net->cells_lock);
	return cell;
}

/*
 * Set up a cell record and fill in its name, VL server address list and
 * allocate an anonymous key
 */
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
				       const char *name, unsigned int namelen,
				       const char *addresses)
{
	struct afs_vlserver_list *vllist = NULL;
	struct afs_cell *cell;
	int i, ret;

	ASSERT(name);
	if (namelen == 0)
		return ERR_PTR(-EINVAL);
	if (namelen > AFS_MAXCELLNAME) {
		_leave(" = -ENAMETOOLONG");
		return ERR_PTR(-ENAMETOOLONG);
	}

	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
	 * that begin with a dot. This also precludes "@cell".
	 */
	if (name[0] == '.')
		return ERR_PTR(-EINVAL);
	for (i = 0; i < namelen; i++) {
		char ch = name[i];
		if (!isprint(ch) || ch == '/' || ch == '@')
			return ERR_PTR(-EINVAL);
	}

	_enter("%*.*s,%s", namelen, namelen, name, addresses);

	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
	if (!cell) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	/* Allocate the cell name and the key name in one go. */
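	/* The buffer is laid out as ".<name>\0afs@<name>\0"; cell->name is
	 * then advanced past the leading dot, so cell->name - 1 recovers the
	 * start of the allocation for kfree().
	 */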
	cell->name = kmalloc(1 + namelen + 1 +
			     4 + namelen + 1, GFP_KERNEL);
	if (!cell->name) {
		kfree(cell);
		return ERR_PTR(-ENOMEM);
	}

	cell->name[0] = '.';
	cell->name++;
	cell->name_len = namelen;
	for (i = 0; i < namelen; i++)
		cell->name[i] = tolower(name[i]);
	cell->name[i++] = 0;

	cell->key_desc = cell->name + i;
	memcpy(cell->key_desc, "afs@", 4);
	memcpy(cell->key_desc + 4, cell->name, cell->name_len + 1);

	cell->net = net;
	refcount_set(&cell->ref, 1);
	atomic_set(&cell->active, 0);
	INIT_WORK(&cell->destroyer, afs_destroy_cell_work);
	INIT_WORK(&cell->manager, afs_manage_cell_work);
	timer_setup(&cell->management_timer, afs_cell_timer, 0);
	init_rwsem(&cell->vs_lock);
	cell->volumes = RB_ROOT;
	INIT_HLIST_HEAD(&cell->proc_volumes);
	seqlock_init(&cell->volume_lock);
	cell->fs_servers = RB_ROOT;
	init_rwsem(&cell->fs_lock);
	rwlock_init(&cell->vl_servers_lock);
	cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);

	/* Provide a VL server list, filling it in if we were given a list of
	 * addresses to use.
	 */
	if (addresses) {
		vllist = afs_parse_text_addrs(net,
					      addresses, strlen(addresses), ':',
					      VL_SERVICE, AFS_VL_PORT);
		if (IS_ERR(vllist)) {
			ret = PTR_ERR(vllist);
			vllist = NULL;
			goto parse_failed;
		}

		vllist->source = DNS_RECORD_FROM_CONFIG;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = TIME64_MAX;
	} else {
		ret = -ENOMEM;
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist)
			goto error;
		vllist->source = DNS_RECORD_UNAVAILABLE;
		vllist->status = DNS_LOOKUP_NOT_DONE;
		cell->dns_expiry = ktime_get_real_seconds();
	}

	rcu_assign_pointer(cell->vl_servers, vllist);

	cell->dns_source = vllist->source;
	cell->dns_status = vllist->status;
	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
	atomic_inc(&net->cells_outstanding);
	ret = idr_alloc_cyclic(&net->cells_dyn_ino, cell,
			       2, INT_MAX / 2, GFP_KERNEL);
	if (ret < 0)
		goto error;
	cell->dynroot_ino = ret;
	cell->debug_id = atomic_inc_return(&cell_debug_id);

	trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);

	_leave(" = %p", cell);
	return cell;

parse_failed:
	if (ret == -EINVAL)
		printk(KERN_ERR "kAFS: bad VL server IP address\n");
error:
	afs_put_vlserverlist(cell->net, vllist);
	kfree(cell->name - 1);
	kfree(cell);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * afs_lookup_cell - Look up or create a cell record.
 * @net: The network namespace
 * @name: The name of the cell.
 * @namesz: The strlen of the cell name.
 * @vllist: A colon/comma separated list of numeric IP addresses or NULL.
 * @reason: The reason we're doing the lookup
 * @trace: The reason to be logged if the lookup is successful.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed. Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist,
				 enum afs_lookup_cell_for reason,
				 enum afs_cell_trace trace)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s,%u", name, vllist, reason);

	if (reason != AFS_LOOKUP_CELL_PRELOAD) {
		cell = afs_find_cell(net, name, namesz, trace);
		if (!IS_ERR(cell)) {
			if (reason == AFS_LOOKUP_CELL_DYNROOT)
				goto no_wait;
			if (cell->state == AFS_CELL_SETTING_UP ||
			    cell->state == AFS_CELL_UNLOOKED)
				goto lookup_cell;
			goto wait_for_cell;
		}
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record. We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	down_write(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	afs_use_cell(cell, trace);
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	up_write(&net->cells_lock);

lookup_cell:
	if (reason != AFS_LOOKUP_CELL_PRELOAD &&
	    reason != AFS_LOOKUP_CELL_ROOTCELL) {
		set_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags);
		afs_queue_cell(cell, afs_cell_trace_queue_new);
	}

wait_for_cell:
	state = smp_load_acquire(&cell->state); /* vs error */
	switch (state) {
	case AFS_CELL_ACTIVE:
	case AFS_CELL_DEAD:
		break;
	case AFS_CELL_UNLOOKED:
	default:
		if (reason == AFS_LOOKUP_CELL_PRELOAD ||
		    reason == AFS_LOOKUP_CELL_ROOTCELL)
			break;
		_debug("wait_for_cell");
		afs_see_cell(cell, afs_cell_trace_wait);
		wait_var_event(&cell->state,
			       ({
				       state = smp_load_acquire(&cell->state); /* vs error */
				       state == AFS_CELL_ACTIVE || state == AFS_CELL_DEAD;
			       }));
		_debug("waited_for_cell %d %d", cell->state, cell->error);
	}

no_wait:
	/* Check the state obtained from the wait check. */
	state = smp_load_acquire(&cell->state); /* vs error */
	if (state == AFS_CELL_DEAD) {
		ret = cell->error;
		goto error;
	}
	if (state == AFS_CELL_ACTIVE) {
		switch (cell->dns_status) {
		case DNS_LOOKUP_NOT_DONE:
			if (cell->dns_source == DNS_RECORD_FROM_CONFIG) {
				ret = 0;
				break;
			}
			fallthrough;
		default:
			ret = -EIO;
			goto error;
		case DNS_LOOKUP_GOOD:
		case DNS_LOOKUP_GOOD_WITH_BAD:
			ret = 0;
			break;
		case DNS_LOOKUP_GOT_NOT_FOUND:
			ret = -ENOENT;
			goto error;
		case DNS_LOOKUP_BAD:
			ret = -EREMOTEIO;
			goto error;
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
			ret = -EDESTADDRREQ;
			goto error;
		}
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (reason == AFS_LOOKUP_CELL_PRELOAD) {
		ret = -EEXIST;
	} else {
		afs_use_cell(cursor, trace);
		ret = 0;
	}
	up_write(&net->cells_lock);
	if (candidate)
		afs_put_cell(candidate, afs_cell_trace_put_candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_error);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}

/*
 * set the root cell information
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

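	/* The rootcell string is of the form "<cellname>[:<vlserver-addrs>]";
	 * everything after the first colon is passed on as the VL server
	 * address list.
	 */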
	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	if (len == 0 || !rootcell[0] || rootcell[0] == '.' || rootcell[len - 1] == '.')
		return -EINVAL;
	if (memchr(rootcell, '/', len))
		return -EINVAL;
	cp = strstr(rootcell, "..");
	if (cp && cp < rootcell + len)
		return -EINVAL;

	/* allocate a cell record for the root/workstation cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist,
				   AFS_LOOKUP_CELL_ROOTCELL,
				   afs_cell_trace_use_lookup_ws);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_use_cell(new_root, afs_cell_trace_use_pin);

	/* install the new cell */
	down_write(&net->cells_lock);
	old_root = rcu_replace_pointer(net->ws_cell, new_root,
				       lockdep_is_held(&net->cells_lock));
	up_write(&net->cells_lock);

	afs_unuse_cell(old_root, afs_cell_trace_unuse_ws);
	_leave(" = 0");
	return 0;
}

/*
 * Update a cell's VL server address list from the DNS.
 */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			if (ret >= 0)
				ret = -ENOMEM;
			goto out_wake;
		}

		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

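	/* Clamp the expiry time returned by the DNS query to lie between the
	 * configured minimum and maximum TTLs.
	 */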
	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Destroy a cell record
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
	struct afs_net *net = cell->net;
	int r;

	_enter("%p{%s}", cell, cell->name);

	r = refcount_read(&cell->ref);
	ASSERTCMP(r, ==, 0);
	trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);

	afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
	afs_unuse_cell(cell->alias_of, afs_cell_trace_unuse_alias);
	key_put(cell->anonymous_key);
	idr_remove(&net->cells_dyn_ino, cell->dynroot_ino);
	kfree(cell->name - 1);
	kfree(cell);

	afs_dec_cells_outstanding(net);
	_leave(" [destroyed]");
}

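/* Final cell destruction: stop the management timer and the manager work item
 * and then free the record after an RCU grace period.
 */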
static void afs_destroy_cell_work(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, destroyer);

	afs_see_cell(cell, afs_cell_trace_destroy);
	timer_delete_sync(&cell->management_timer);
	cancel_work_sync(&cell->manager);
	call_rcu(&cell->rcu, afs_cell_destroy);
}

/*
 * Get a reference on a cell record.
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r;

	__refcount_inc(&cell->ref, &r);
	trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
	return cell;
}

/*
 * Drop a reference on a cell record.
 */
void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	if (cell) {
		unsigned int debug_id = cell->debug_id;
		unsigned int a;
		bool zero;
		int r;

		a = atomic_read(&cell->active);
		zero = __refcount_dec_and_test(&cell->ref, &r);
		trace_afs_cell(debug_id, r - 1, a, reason);
		if (zero) {
			a = atomic_read(&cell->active);
			WARN(a != 0, "Cell active count %u > 0\n", a);
			WARN_ON(!queue_work(afs_wq, &cell->destroyer));
		}
	}
}

/*
 * Note a cell becoming more active.
 */
struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	__refcount_inc(&cell->ref, &r);
	a = atomic_inc_return(&cell->active);
	trace_afs_cell(cell->debug_id, r + 1, a, reason);
	return cell;
}

/*
 * Record a cell becoming less active. When the active counter reaches 0, the
 * cell is scheduled for destruction, but may get reactivated.
 */
void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	unsigned int debug_id;
	time64_t now, expire_delay;
	bool zero;
	int r, a;

	if (!cell)
		return;

	_enter("%s", cell->name);

	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	debug_id = cell->debug_id;
	a = atomic_dec_return(&cell->active);
	if (!a)
		/* 'cell' may now be garbage collected. */
		afs_set_cell_timer(cell, expire_delay);

	zero = __refcount_dec_and_test(&cell->ref, &r);
	trace_afs_cell(debug_id, r - 1, a, reason);
	if (zero)
		WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}

/*
 * Note that a cell has been seen.
 */
void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	r = refcount_read(&cell->ref);
	a = atomic_read(&cell->active);
	trace_afs_cell(cell->debug_id, r, a, reason);
}

/*
 * Queue a cell for management, giving the workqueue a ref to hold.
 */
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	queue_work(afs_wq, &cell->manager);
}

/*
 * Cell-specific management timer.
 */
static void afs_cell_timer(struct timer_list *timer)
{
	struct afs_cell *cell = container_of(timer, struct afs_cell, management_timer);

	afs_see_cell(cell, afs_cell_trace_see_mgmt_timer);
	if (refcount_read(&cell->ref) > 0 && cell->net->live)
		queue_work(afs_wq, &cell->manager);
}

/*
 * Set/reduce the cell timer.
 */
void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs)
{
	timer_reduce(&cell->management_timer, jiffies + delay_secs * HZ);
}

/*
 * Activate a cell.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	mutex_lock(&net->proc_cells_lock);
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	mutex_unlock(&net->proc_cells_lock);
	return 0;
}

/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	if (!hlist_unhashed(&cell->proc_link))
		hlist_del_rcu(&cell->proc_link);
	mutex_unlock(&net->proc_cells_lock);

	_leave("");
}

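/* Work out whether an unused cell has passed its garbage-collection delay and
 * so should be removed, recording the next management time otherwise.
 */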
static bool afs_has_cell_expired(struct afs_cell *cell, time64_t *_next_manage)
{
	const struct afs_vlserver_list *vllist;
	time64_t expire_at = cell->last_inactive;
	time64_t now = ktime_get_real_seconds();

	if (atomic_read(&cell->active))
		return false;
	if (!cell->net->live)
		return true;

	vllist = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist && vllist->nr_servers > 0)
		expire_at += afs_cell_gc_delay;

	if (expire_at <= now)
		return true;
	if (expire_at < *_next_manage)
		*_next_manage = expire_at;
	return false;
}

/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 */
static bool afs_manage_cell(struct afs_cell *cell)
{
	struct afs_net *net = cell->net;
	time64_t next_manage = TIME64_MAX;
	int ret;

	_enter("%s", cell->name);

	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_SETTING_UP:
		goto set_up_cell;
	case AFS_CELL_UNLOOKED:
	case AFS_CELL_ACTIVE:
		goto cell_is_active;
	case AFS_CELL_REMOVING:
		WARN_ON_ONCE(1);
		return false;
	case AFS_CELL_DEAD:
		return false;
	default:
		_debug("bad state %u", cell->state);
		WARN_ON_ONCE(1); /* Unhandled state */
		return false;
	}

set_up_cell:
	ret = afs_activate_cell(net, cell);
	if (ret < 0) {
		cell->error = ret;
		goto remove_cell;
	}

	afs_set_cell_state(cell, AFS_CELL_UNLOOKED);

cell_is_active:
	if (afs_has_cell_expired(cell, &next_manage))
		goto remove_cell;

	if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
		ret = afs_update_cell(cell);
		if (ret < 0)
			cell->error = ret;
		if (cell->state == AFS_CELL_UNLOOKED)
			afs_set_cell_state(cell, AFS_CELL_ACTIVE);
	}

	if (next_manage < TIME64_MAX && cell->net->live) {
		time64_t now = ktime_get_real_seconds();

		if (next_manage - now <= 0)
			afs_queue_cell(cell, afs_cell_trace_queue_again);
		else
			afs_set_cell_timer(cell, next_manage - now);
	}
	_leave(" [done %u]", cell->state);
	return false;

remove_cell:
	down_write(&net->cells_lock);

	if (atomic_read(&cell->active)) {
		up_write(&net->cells_lock);
		goto cell_is_active;
	}

	/* Make sure that the expiring server records are going to see the fact
	 * that the cell is kaput.
	 */
	afs_set_cell_state(cell, AFS_CELL_REMOVING);

	afs_deactivate_cell(net, cell);
	afs_purge_servers(cell);

	rb_erase(&cell->net_node, &net->cells);
	afs_see_cell(cell, afs_cell_trace_unuse_delete);
	up_write(&net->cells_lock);

	/* The root volume is pinning the cell */
	afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
	cell->root_volume = NULL;

	afs_set_cell_state(cell, AFS_CELL_DEAD);
	return true;
}

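/* Manager work item: drive the cell management state machine and, if the cell
 * was taken down, drop the final reference on it.
 */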
static void afs_manage_cell_work(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	bool final_put;

	afs_see_cell(cell, afs_cell_trace_manage);
	final_put = afs_manage_cell(cell);
	afs_see_cell(cell, afs_cell_trace_managed);
	if (final_put)
		afs_put_cell(cell, afs_cell_trace_put_final);
}

/*
 * Purge in-memory cell database.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;
	struct rb_node *cursor;

	_enter("");

	down_write(&net->cells_lock);
	ws = rcu_replace_pointer(net->ws_cell, NULL,
				 lockdep_is_held(&net->cells_lock));
	up_write(&net->cells_lock);
	afs_unuse_cell(ws, afs_cell_trace_unuse_ws);

	_debug("kick cells");
	down_read(&net->cells_lock);
	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell = rb_entry(cursor, struct afs_cell, net_node);

		afs_see_cell(cell, afs_cell_trace_purge);

		if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
			afs_unuse_cell(cell, afs_cell_trace_unuse_pin);

		afs_queue_cell(cell, afs_cell_trace_queue_purge);
	}
	up_read(&net->cells_lock);

	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}