Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/fs/afs/cell.c
26289 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/* AFS cell and server record management
3
*
4
* Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
5
* Written by David Howells ([email protected])
6
*/
7
8
#include <linux/slab.h>
9
#include <linux/key.h>
10
#include <linux/ctype.h>
11
#include <linux/dns_resolver.h>
12
#include <linux/sched.h>
13
#include <linux/inet.h>
14
#include <linux/namei.h>
15
#include <keys/rxrpc-type.h>
16
#include "internal.h"
17
18
static unsigned __read_mostly afs_cell_gc_delay = 10;	/* Secs an unused cell lingers before GC */
static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;	/* Lower clamp on DNS TTL (secs) */
static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;	/* Upper clamp on DNS TTL (secs) */
static atomic_t cell_debug_id;	/* Monotonic source of per-cell trace/debug IDs */
22
23
/* Forward declarations for the cell management machinery defined below. */
static void afs_cell_timer(struct timer_list *timer);
static void afs_destroy_cell_work(struct work_struct *work);
static void afs_manage_cell_work(struct work_struct *work);
26
27
/*
 * Drop one count from net->cells_outstanding and, if it hits zero, wake
 * anyone (i.e. afs_cell_purge()) waiting for all cells to be torn down.
 */
static void afs_dec_cells_outstanding(struct afs_net *net)
{
	if (atomic_dec_and_test(&net->cells_outstanding))
		wake_up_var(&net->cells_outstanding);
}
32
33
/*
 * Publish a new cell state and wake waiters in afs_lookup_cell(), which pair
 * with this via smp_load_acquire(&cell->state).
 */
static void afs_set_cell_state(struct afs_cell *cell, enum afs_cell_state state)
{
	smp_store_release(&cell->state, state); /* Commit cell changes before state */
	smp_wmb(); /* Set cell state before task state */
	wake_up_var(&cell->state);
}
39
40
/*
 * Look up and get an activation reference on a cell record.  The caller must
 * hold net->cells_lock at least read-locked.
 *
 * A NULL @name means "the workstation cell": net->ws_cell is returned, or
 * -EDESTADDRREQ if none has been set.
 */
static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
					     const char *name, unsigned int namesz,
					     enum afs_cell_trace reason)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	if (!name) {
		cell = rcu_dereference_protected(net->ws_cell,
						 lockdep_is_held(&net->cells_lock));
		if (!cell)
			return ERR_PTR(-EDESTADDRREQ);
		goto found;
	}

	/* Walk the rbtree, which is keyed case-insensitively on the cell name
	 * and then on name length to break ties between prefixes.
	 */
	p = net->cells.rb_node;
	while (p) {
		cell = rb_entry(p, struct afs_cell, net_node);

		n = strncasecmp(cell->name, name,
				min_t(size_t, cell->name_len, namesz));
		if (n == 0)
			n = cell->name_len - namesz;
		if (n < 0)
			p = p->rb_left;
		else if (n > 0)
			p = p->rb_right;
		else
			goto found;
	}

	return ERR_PTR(-ENOENT);

found:
	return afs_use_cell(cell, reason);
}
88
89
/*
90
* Look up and get an activation reference on a cell record.
91
*/
92
struct afs_cell *afs_find_cell(struct afs_net *net,
93
const char *name, unsigned int namesz,
94
enum afs_cell_trace reason)
95
{
96
struct afs_cell *cell;
97
98
down_read(&net->cells_lock);
99
cell = afs_find_cell_locked(net, name, namesz, reason);
100
up_read(&net->cells_lock);
101
return cell;
102
}
103
104
/*
105
* Set up a cell record and fill in its name, VL server address list and
106
* allocate an anonymous key
107
*/
108
static struct afs_cell *afs_alloc_cell(struct afs_net *net,
109
const char *name, unsigned int namelen,
110
const char *addresses)
111
{
112
struct afs_vlserver_list *vllist = NULL;
113
struct afs_cell *cell;
114
int i, ret;
115
116
ASSERT(name);
117
if (namelen == 0)
118
return ERR_PTR(-EINVAL);
119
if (namelen > AFS_MAXCELLNAME) {
120
_leave(" = -ENAMETOOLONG");
121
return ERR_PTR(-ENAMETOOLONG);
122
}
123
124
/* Prohibit cell names that contain unprintable chars, '/' and '@' or
125
* that begin with a dot. This also precludes "@cell".
126
*/
127
if (name[0] == '.')
128
return ERR_PTR(-EINVAL);
129
for (i = 0; i < namelen; i++) {
130
char ch = name[i];
131
if (!isprint(ch) || ch == '/' || ch == '@')
132
return ERR_PTR(-EINVAL);
133
}
134
135
_enter("%*.*s,%s", namelen, namelen, name, addresses);
136
137
cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
138
if (!cell) {
139
_leave(" = -ENOMEM");
140
return ERR_PTR(-ENOMEM);
141
}
142
143
cell->name = kmalloc(1 + namelen + 1, GFP_KERNEL);
144
if (!cell->name) {
145
kfree(cell);
146
return ERR_PTR(-ENOMEM);
147
}
148
149
cell->name[0] = '.';
150
cell->name++;
151
cell->name_len = namelen;
152
for (i = 0; i < namelen; i++)
153
cell->name[i] = tolower(name[i]);
154
cell->name[i] = 0;
155
156
cell->net = net;
157
refcount_set(&cell->ref, 1);
158
atomic_set(&cell->active, 0);
159
INIT_WORK(&cell->destroyer, afs_destroy_cell_work);
160
INIT_WORK(&cell->manager, afs_manage_cell_work);
161
timer_setup(&cell->management_timer, afs_cell_timer, 0);
162
init_rwsem(&cell->vs_lock);
163
cell->volumes = RB_ROOT;
164
INIT_HLIST_HEAD(&cell->proc_volumes);
165
seqlock_init(&cell->volume_lock);
166
cell->fs_servers = RB_ROOT;
167
init_rwsem(&cell->fs_lock);
168
rwlock_init(&cell->vl_servers_lock);
169
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
170
171
/* Provide a VL server list, filling it in if we were given a list of
172
* addresses to use.
173
*/
174
if (addresses) {
175
vllist = afs_parse_text_addrs(net,
176
addresses, strlen(addresses), ':',
177
VL_SERVICE, AFS_VL_PORT);
178
if (IS_ERR(vllist)) {
179
ret = PTR_ERR(vllist);
180
vllist = NULL;
181
goto parse_failed;
182
}
183
184
vllist->source = DNS_RECORD_FROM_CONFIG;
185
vllist->status = DNS_LOOKUP_NOT_DONE;
186
cell->dns_expiry = TIME64_MAX;
187
} else {
188
ret = -ENOMEM;
189
vllist = afs_alloc_vlserver_list(0);
190
if (!vllist)
191
goto error;
192
vllist->source = DNS_RECORD_UNAVAILABLE;
193
vllist->status = DNS_LOOKUP_NOT_DONE;
194
cell->dns_expiry = ktime_get_real_seconds();
195
}
196
197
rcu_assign_pointer(cell->vl_servers, vllist);
198
199
cell->dns_source = vllist->source;
200
cell->dns_status = vllist->status;
201
smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
202
atomic_inc(&net->cells_outstanding);
203
ret = idr_alloc_cyclic(&net->cells_dyn_ino, cell,
204
2, INT_MAX / 2, GFP_KERNEL);
205
if (ret < 0)
206
goto error;
207
cell->dynroot_ino = ret;
208
cell->debug_id = atomic_inc_return(&cell_debug_id);
209
210
trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);
211
212
_leave(" = %p", cell);
213
return cell;
214
215
parse_failed:
216
if (ret == -EINVAL)
217
printk(KERN_ERR "kAFS: bad VL server IP address\n");
218
error:
219
afs_put_vlserverlist(cell->net, vllist);
220
kfree(cell->name - 1);
221
kfree(cell);
222
_leave(" = %d", ret);
223
return ERR_PTR(ret);
224
}
225
226
/**
 * afs_lookup_cell - Look up or create a cell record.
 * @net: The network namespace
 * @name: The name of the cell.
 * @namesz: The strlen of the cell name.
 * @vllist: A colon/comma separated list of numeric IP addresses or NULL.
 * @excl: T if an error should be given if the cell name already exists.
 * @trace: The reason to be logged if the lookup is successful.
 *
 * Look up a cell record by name and query the DNS for VL server addresses if
 * needed.  Note that the actual DNS query is punted off to the manager thread
 * so that this function can return immediately if interrupted whilst allowing
 * cell records to be shared even if not yet fully constructed.
 *
 * Returns the cell with an activation ref held, or an ERR_PTR.
 */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl,
				 enum afs_cell_trace trace)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	if (!excl) {
		cell = afs_find_cell(net, name, namesz, trace);
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	down_write(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		/* Same ordering as afs_find_cell_locked(): case-insensitive
		 * name, then name length.
		 */
		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	cell = candidate;
	candidate = NULL;
	afs_use_cell(cell, trace);
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	up_write(&net->cells_lock);

	/* Kick the manager to run the cell state machine (DNS lookup etc.). */
	afs_queue_cell(cell, afs_cell_trace_queue_new);

wait_for_cell:
	/* Wait for the cell to leave its setup state; pairs with
	 * afs_set_cell_state()'s store-release.
	 */
	_debug("wait_for_cell");
	state = smp_load_acquire(&cell->state); /* vs error */
	if (state != AFS_CELL_ACTIVE &&
	    state != AFS_CELL_DEAD) {
		afs_see_cell(cell, afs_cell_trace_wait);
		wait_var_event(&cell->state,
			       ({
				       state = smp_load_acquire(&cell->state); /* vs error */
				       state == AFS_CELL_ACTIVE || state == AFS_CELL_DEAD;
			       }));
	}

	/* Check the state obtained from the wait check. */
	if (state == AFS_CELL_DEAD) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_use_cell(cursor, trace);
		ret = 0;
	}
	up_write(&net->cells_lock);
	if (candidate)
		afs_put_cell(candidate, afs_cell_trace_put_candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_error);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}
346
347
/*
 * Set the root cell information.
 * - can be called with a module parameter string
 * - can be called from a write to /proc/fs/afs/rootcell
 *
 * @rootcell is of the form "<cellname>[:<vlserver-addrs>]"; the cell is
 * looked up (or created) and installed as net->ws_cell.
 */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	/* Split an optional VL server address list off the cell name. */
	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* Reject empty names and names with leading/trailing/double dots or a
	 * slash, which would be unusable as directory names.
	 */
	if (len == 0 || !rootcell[0] || rootcell[0] == '.' || rootcell[len - 1] == '.')
		return -EINVAL;
	if (memchr(rootcell, '/', len))
		return -EINVAL;
	cp = strstr(rootcell, "..");
	if (cp && cp < rootcell + len)
		return -EINVAL;

	/* allocate a cell record for the root/workstation cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false,
				   afs_cell_trace_use_lookup_ws);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	/* Pin the root cell against garbage collection with an extra active
	 * count; afs_cell_purge() drops it when clearing the NO_GC bit.
	 */
	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_use_cell(new_root, afs_cell_trace_use_pin);

	/* install the new cell */
	down_write(&net->cells_lock);
	old_root = rcu_replace_pointer(net->ws_cell, new_root,
				       lockdep_is_held(&net->cells_lock));
	up_write(&net->cells_lock);

	afs_unuse_cell(old_root, afs_cell_trace_unuse_ws);
	_leave(" = 0");
	return 0;
}
407
408
/*
 * Update a cell's VL server address list from the DNS.
 *
 * The new list replaces cell->vl_servers unless the lookup produced no
 * servers and the old list still has some.  The DNS expiry time is clamped
 * to [afs_cell_min_ttl, afs_cell_max_ttl] from now.
 */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		/* Substitute an empty list that records why the lookup
		 * failed, so the failure is cached with a short expiry.
		 */
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			if (ret >= 0)
				ret = -ENOMEM;
			goto out_wake;
		}

		/* Map the lookup error onto a DNS status code. */
		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

	/* Clamp the expiry time between min and max TTL from now. */
	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			/* Transient/local failure: retry again shortly. */
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	/* Bump the lookup generation and wake anyone waiting on it. */
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}
504
505
/*
 * Destroy a cell record.  This is the RCU callback queued by
 * afs_destroy_cell_work() once the refcount has hit zero.
 */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
	struct afs_net *net = cell->net;
	int r;

	_enter("%p{%s}", cell, cell->name);

	r = refcount_read(&cell->ref);
	ASSERTCMP(r, ==, 0);
	trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);

	afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
	afs_unuse_cell(cell->alias_of, afs_cell_trace_unuse_alias);
	key_put(cell->anonymous_key);
	idr_remove(&net->cells_dyn_ino, cell->dynroot_ino);
	kfree(cell->name - 1);	/* The allocation starts at the '.' prefix */
	kfree(cell);

	afs_dec_cells_outstanding(net);
	_leave(" [destroyed]");
}
530
531
/*
 * Worker queued by afs_put_cell()/afs_unuse_cell() when the last ref is
 * dropped: quiesce the timer and manager, then free the cell after an RCU
 * grace period.
 */
static void afs_destroy_cell_work(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, destroyer);

	afs_see_cell(cell, afs_cell_trace_destroy);
	timer_delete_sync(&cell->management_timer);
	cancel_work_sync(&cell->manager);
	call_rcu(&cell->rcu, afs_cell_destroy);
}
540
541
/*
 * Get a reference on a cell record (without bumping the active count).
 */
struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r;

	__refcount_inc(&cell->ref, &r);
	trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
	return cell;
}
552
553
/*
 * Drop a reference on a cell record.  When the refcount reaches zero, the
 * destroyer work item is queued to tear the cell down.
 */
void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	if (cell) {
		unsigned int debug_id = cell->debug_id;
		unsigned int a;
		bool zero;
		int r;

		/* Snapshot values for tracing before the decrement, as the
		 * cell may be destroyed once the ref is gone.
		 */
		a = atomic_read(&cell->active);
		zero = __refcount_dec_and_test(&cell->ref, &r);
		trace_afs_cell(debug_id, r - 1, a, reason);
		if (zero) {
			a = atomic_read(&cell->active);
			WARN(a != 0, "Cell active count %u > 0\n", a);
			WARN_ON(!queue_work(afs_wq, &cell->destroyer));
		}
	}
}
574
575
/*
 * Note a cell becoming more active.  Takes both a reference and an
 * activation count; the counterpart is afs_unuse_cell().
 */
struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	int r, a;

	__refcount_inc(&cell->ref, &r);
	a = atomic_inc_return(&cell->active);
	trace_afs_cell(cell->debug_id, r + 1, a, reason);
	return cell;
}
587
588
/*
 * Record a cell becoming less active.  When the active counter reaches 1, it
 * is scheduled for destruction, but may get reactivated.
 */
void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	unsigned int debug_id;
	time64_t now, expire_delay;
	bool zero;
	int r, a;

	if (!cell)
		return;

	_enter("%s", cell->name);

	/* If the cell still has VL servers, let it linger for the GC delay so
	 * it can be cheaply reactivated; otherwise expire it immediately.
	 */
	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	/* Arm the expiry timer before dropping our ref, after which the cell
	 * may be freed at any time.
	 */
	debug_id = cell->debug_id;
	a = atomic_dec_return(&cell->active);
	if (!a)
		/* 'cell' may now be garbage collected. */
		afs_set_cell_timer(cell, expire_delay);

	zero = __refcount_dec_and_test(&cell->ref, &r);
	trace_afs_cell(debug_id, r - 1, a, reason);
	if (zero)
		WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
621
622
/*
623
* Note that a cell has been seen.
624
*/
625
void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
626
{
627
int r, a;
628
629
r = refcount_read(&cell->ref);
630
a = atomic_read(&cell->active);
631
trace_afs_cell(cell->debug_id, r, a, reason);
632
}
633
634
/*
 * Queue a cell for management, giving the workqueue a ref to hold.
 *
 * NOTE(review): @reason is currently unused here — presumably kept for
 * tracing symmetry with the other cell helpers; confirm with callers.
 */
void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	queue_work(afs_wq, &cell->manager);
}
641
642
/*
 * Cell-specific management timer.  Fires when the GC/DNS-refresh delay
 * elapses and kicks the manager work item, unless the cell is already dead
 * or the namespace is being torn down.
 */
static void afs_cell_timer(struct timer_list *timer)
{
	struct afs_cell *cell = container_of(timer, struct afs_cell, management_timer);

	afs_see_cell(cell, afs_cell_trace_see_mgmt_timer);
	if (refcount_read(&cell->ref) > 0 && cell->net->live)
		queue_work(afs_wq, &cell->manager);
}
653
654
/*
 * Set/reduce the cell timer.  timer_reduce() only brings the expiry time
 * forward, never pushes it back.
 */
void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs)
{
	timer_reduce(&cell->management_timer, jiffies + delay_secs * HZ);
}
661
662
/*
 * Allocate a key to use as a placeholder for anonymous user security.
 * The key is named "afs@<cellname>" with the cell name lowercased.
 */
static int afs_alloc_anon_key(struct afs_cell *cell)
{
	struct key *key;
	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;

	/* Create a key to represent an anonymous user. */
	memcpy(keyname, "afs@", 4);
	dp = keyname + 4;
	cp = cell->name;
	do {
		*dp++ = tolower(*cp);
	} while (*cp++);	/* Loop copies the terminating NUL too */

	key = rxrpc_get_null_key(keyname);
	if (IS_ERR(key))
		return PTR_ERR(key);

	cell->anonymous_key = key;

	_debug("anon key %p{%x}",
	       cell->anonymous_key, key_serial(cell->anonymous_key));
	return 0;
}
688
689
/*
 * Activate a cell: allocate its anonymous key if needed, set up its procfs
 * entries and splice it into the name-sorted /proc cell list.
 */
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
	struct hlist_node **p;
	struct afs_cell *pcell;
	int ret;

	if (!cell->anonymous_key) {
		ret = afs_alloc_anon_key(cell);
		if (ret < 0)
			return ret;
	}

	ret = afs_proc_cell_setup(cell);
	if (ret < 0)
		return ret;

	mutex_lock(&net->proc_cells_lock);

	/* Find the insertion point that keeps proc_cells sorted by name. */
	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
		pcell = hlist_entry(*p, struct afs_cell, proc_link);
		if (strcmp(cell->name, pcell->name) < 0)
			break;
	}

	/* Open-coded RCU hlist insertion before *p. */
	cell->proc_link.pprev = p;
	cell->proc_link.next = *p;
	rcu_assign_pointer(*p, &cell->proc_link.next);
	if (cell->proc_link.next)
		cell->proc_link.next->pprev = &cell->proc_link.next;

	mutex_unlock(&net->proc_cells_lock);
	return 0;
}
724
725
/*
 * Deactivate a cell: remove its procfs entries and unhook it from the
 * /proc cell list.  The inverse of afs_activate_cell().
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	if (!hlist_unhashed(&cell->proc_link))
		hlist_del_rcu(&cell->proc_link);
	mutex_unlock(&net->proc_cells_lock);

	_leave("");
}
741
742
/*
 * Determine whether an inactive cell's grace period has run out and it
 * should be removed.  If not yet expired, *_next_manage is pulled forward to
 * the expiry time so the manager can rearm its timer.
 */
static bool afs_has_cell_expired(struct afs_cell *cell, time64_t *_next_manage)
{
	const struct afs_vlserver_list *vllist;
	time64_t expire_at = cell->last_inactive;
	time64_t now = ktime_get_real_seconds();

	if (atomic_read(&cell->active))
		return false;	/* Still in use */
	if (!cell->net->live)
		return true;	/* Namespace going away: expire immediately */

	/* Cells with known VL servers get the GC grace period. */
	vllist = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist && vllist->nr_servers > 0)
		expire_at += afs_cell_gc_delay;

	if (expire_at <= now)
		return true;
	if (expire_at < *_next_manage)
		*_next_manage = expire_at;
	return false;
}
763
764
/*
 * Manage a cell record, initialising and destroying it, maintaining its DNS
 * records.
 *
 * Returns true if the cell was removed (state now AFS_CELL_DEAD), in which
 * case the caller (afs_manage_cell_work) must drop the final ref.
 */
static bool afs_manage_cell(struct afs_cell *cell)
{
	struct afs_net *net = cell->net;
	time64_t next_manage = TIME64_MAX;
	int ret;

	_enter("%s", cell->name);

	/* Dispatch on the cell's lifecycle state. */
	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_SETTING_UP:
		goto set_up_cell;
	case AFS_CELL_ACTIVE:
		goto cell_is_active;
	case AFS_CELL_REMOVING:
		/* Removal happens synchronously below; we should never be
		 * re-entered in this state.
		 */
		WARN_ON_ONCE(1);
		return false;
	case AFS_CELL_DEAD:
		return false;
	default:
		_debug("bad state %u", cell->state);
		WARN_ON_ONCE(1); /* Unhandled state */
		return false;
	}

set_up_cell:
	ret = afs_activate_cell(net, cell);
	if (ret < 0) {
		cell->error = ret;
		goto remove_cell;
	}

	afs_set_cell_state(cell, AFS_CELL_ACTIVE);

cell_is_active:
	if (afs_has_cell_expired(cell, &next_manage))
		goto remove_cell;

	/* Refresh the VL server list from the DNS if requested. */
	if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
		ret = afs_update_cell(cell);
		if (ret < 0)
			cell->error = ret;
	}

	/* Rearm the timer (or requeue ourselves) for the next management
	 * event, if any is pending.
	 */
	if (next_manage < TIME64_MAX && cell->net->live) {
		time64_t now = ktime_get_real_seconds();

		if (next_manage - now <= 0)
			afs_queue_cell(cell, afs_cell_trace_queue_again);
		else
			afs_set_cell_timer(cell, next_manage - now);
	}
	_leave(" [done %u]", cell->state);
	return false;

remove_cell:
	down_write(&net->cells_lock);

	/* Re-check under the lock: the cell may have been reactivated whilst
	 * we were deciding to remove it.
	 */
	if (atomic_read(&cell->active)) {
		up_write(&net->cells_lock);
		goto cell_is_active;
	}

	/* Make sure that the expiring server records are going to see the fact
	 * that the cell is caput.
	 */
	afs_set_cell_state(cell, AFS_CELL_REMOVING);

	afs_deactivate_cell(net, cell);
	afs_purge_servers(cell);

	rb_erase(&cell->net_node, &net->cells);
	afs_see_cell(cell, afs_cell_trace_unuse_delete);
	up_write(&net->cells_lock);

	/* The root volume is pinning the cell */
	afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
	cell->root_volume = NULL;

	afs_set_cell_state(cell, AFS_CELL_DEAD);
	return true;
}
850
851
/*
 * Worker wrapper around afs_manage_cell(): runs the state machine and drops
 * the final reference if the cell was removed.
 */
static void afs_manage_cell_work(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
	bool final_put;

	afs_see_cell(cell, afs_cell_trace_manage);
	final_put = afs_manage_cell(cell);
	afs_see_cell(cell, afs_cell_trace_managed);
	if (final_put)
		afs_put_cell(cell, afs_cell_trace_put_final);
}
862
863
/*
 * Purge in-memory cell database: drop the workstation cell, unpin and kick
 * every remaining cell, then wait for them all to be destroyed.
 */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;
	struct rb_node *cursor;

	_enter("");

	/* Detach the workstation cell and drop its activation. */
	down_write(&net->cells_lock);
	ws = rcu_replace_pointer(net->ws_cell, NULL,
				 lockdep_is_held(&net->cells_lock));
	up_write(&net->cells_lock);
	afs_unuse_cell(ws, afs_cell_trace_unuse_ws);

	/* Unpin any NO_GC cells and queue each one so its manager can see
	 * that net->live is false and remove it.
	 */
	_debug("kick cells");
	down_read(&net->cells_lock);
	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell = rb_entry(cursor, struct afs_cell, net_node);

		afs_see_cell(cell, afs_cell_trace_purge);

		if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
			afs_unuse_cell(cell, afs_cell_trace_unuse_pin);

		afs_queue_cell(cell, afs_cell_trace_queue_purge);
	}
	up_read(&net->cells_lock);

	/* Wait for afs_cell_destroy() to account for every cell. */
	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}
898
899