GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/ipv6/ip6_flowlabel.c
/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <[email protected]>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>

#define FL_MIN_LINGER	6	/* Minimal linger. It is set to 6 sec,
				   the value specified in the old IPv6
				   RFC. Well, it was a reasonable value. */
#define FL_MAX_LINGER	60	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)

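/*
 * Illustrative note (not part of the original file): FL_HASH() spreads
 * labels over the 256 buckets of fl_ht[] by masking the low eight bits
 * of the host-order label.  For a label whose host-order value is
 * 0x12345:
 *
 *	FL_HASH(htonl(0x12345)) == 0x12345 & FL_HASH_MASK == 0x45
 *
 * so that flowlabel lands in bucket 69.
 */
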
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only the GC */

static DEFINE_RWLOCK(ip6_fl_lock);

/* Per-socket flowlabel list lock */

static DEFINE_RWLOCK(ip6_sk_fl_lock);

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for (fl = fl_ht[FL_HASH(label)]; fl; fl = fl->next) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	read_lock_bh(&ip6_fl_lock);
	fl = __fl_lookup(net, label);
	if (fl)
		atomic_inc(&fl->users);
	read_unlock_bh(&ip6_fl_lock);
	return fl;
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		release_net(fl->fl_net);
		kfree(fl->opt);
	}
	kfree(fl);
}

static void fl_release(struct ip6_flowlabel *fl)
{
	write_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	write_unlock_bh(&ip6_fl_lock);
}

static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	write_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl, **flp;
		flp = &fl_ht[i];
		while ((fl = *flp) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	write_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	write_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl, **flp;
		flp = &fl_ht[i];
		while ((fl = *flp) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	write_unlock(&ip6_fl_lock);
}

static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	write_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			write_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	fl_ht[FL_HASH(fl->label)] = fl;
	atomic_inc(&fl_size);
	write_unlock_bh(&ip6_fl_lock);
	return NULL;
}

/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	read_lock_bh(&ip6_sk_fl_lock);
	for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			read_unlock_bh(&ip6_sk_fl_lock);
			return fl;
		}
	}
	read_unlock_bh(&ip6_sk_fl_lock);
	return NULL;
}

EXPORT_SYMBOL_GPL(fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	while ((sfl = np->ipv6_fl_list) != NULL) {
		np->ipv6_fl_list = sfl->next;
		fl_release(sfl->fl);
		kfree(sfl);
	}
}

/* Service routines */

/*
 * This is the only difficult place: the flow label requires that all
 * headers up to and including the routing header be identical, but the
 * user may still supply options that follow the routing header.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}

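/*
 * Net effect of the merge above: the merged result never takes
 * hop-by-hop options, pre-routing-header destination options, or the
 * routing header itself from the per-packet fopt; those slots come from
 * the flow label's stored option set (or are cleared when it has none),
 * while dst1opt and the opt_flen accounting always come from fopt.
 */
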
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}

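/*
 * check_linger() clamps a user-supplied timeout (in seconds) to the
 * [FL_MIN_LINGER, FL_MAX_LINGER] policy and converts it to jiffies.
 * For example: 3 becomes 6*HZ, 30 becomes 30*HZ, and 600 is refused
 * (returns 0, which fl6_renew() maps to -EPERM) unless the caller has
 * CAP_NET_ADMIN.
 */
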
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	return 0;
}

static struct ip6_flowlabel *
fl_create(struct net *net, struct in6_flowlabel_req *freq, char __user *optval,
	  int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = datagram_send_ctl(net, &msg, &flowi6, fl->opt, &junk,
					&junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	ipv6_addr_copy(&fl->dst, &freq->flr_dst);
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner = current->pid;
		break;
	case IPV6_FL_S_USER:
		fl->owner = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}

static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next)
		count++;

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}

static int ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
{
	if (h1 == h2)
		return 0;
	if (h1 == NULL || h2 == NULL)
		return 1;
	if (h1->hdrlen != h2->hdrlen)
		return 1;
	return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
}

static int ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
{
	if (o1 == o2)
		return 0;
	if (o1 == NULL || o2 == NULL)
		return 1;
	if (o1->opt_nflen != o2->opt_nflen)
		return 1;
	if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
		return 1;
	if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
		return 1;
	if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
		return 1;
	return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	write_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	np->ipv6_fl_list = sfl;
	write_unlock_bh(&ip6_sk_fl_lock);
}

int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl, **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		write_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list; (sfl = *sflp) != NULL; sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = sfl->next;
				write_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree(sfl);
				return 0;
			}
		}
		write_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		read_lock_bh(&ip6_sk_fl_lock);
		for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				read_unlock_bh(&ip6_sk_fl_lock);
				return err;
			}
		}
		read_unlock_bh(&ip6_sk_fl_lock);

		if (freq.flr_share == IPV6_FL_S_NONE && capable(CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			read_lock_bh(&ip6_sk_fl_lock);
			for (sfl = np->ipv6_fl_list; sfl; sfl = sfl->next) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						read_unlock_bh(&ip6_sk_fl_lock);
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			read_unlock_bh(&ip6_sk_fl_lock);

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    fl1->owner != fl->owner)
					goto release;

				err = -EINVAL;
				if (!ipv6_addr_equal(&fl1->dst, &fl->dst) ||
				    ipv6_opt_cmp(fl1->opt, fl->opt))
					goto release;

				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}

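/*
 * Usage sketch (illustrative, not part of the original file): userspace
 * reaches the handler above through the IPV6_FLOWLABEL_MGR socket
 * option.  The fragment below asks the kernel to create (or attach to)
 * label 0x12345 for a destination held in a hypothetical "dst"
 * sockaddr_in6; error handling is omitted.
 *
 *	struct in6_flowlabel_req freq;
 *	int on = 1;
 *
 *	memset(&freq, 0, sizeof(freq));
 *	freq.flr_label  = htonl(0x12345);
 *	freq.flr_action = IPV6_FL_A_GET;
 *	freq.flr_flags  = IPV6_FL_F_CREATE;
 *	freq.flr_share  = IPV6_FL_S_EXCL;
 *	memcpy(&freq.flr_dst, &dst.sin6_addr, sizeof(freq.flr_dst));
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *
 *	// Outgoing packets carry the label once IPV6_FLOWINFO_SEND is
 *	// enabled and sin6_flowinfo contains it.
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWINFO_SEND, &on, sizeof(on));
 */
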
#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		fl = fl_ht[state->bucket];

		while (fl && !net_eq(fl->fl_net, net))
			fl = fl->next;
		if (fl)
			break;
	}
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	fl = fl->next;
try_again:
	while (fl && !net_eq(fl->fl_net, net))
		fl = fl->next;

	while (!fl) {
		if (++state->bucket <= FL_HASH_MASK) {
			fl = fl_ht[state->bucket];
			goto try_again;
		} else
			break;
	}
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(ip6_fl_lock)
{
	read_lock_bh(&ip6_fl_lock);
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(ip6_fl_lock)
{
	read_unlock_bh(&ip6_fl_lock);
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
	else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned)ntohl(fl->label),
			   fl->share,
			   (unsigned)fl->owner,
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}

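/*
 * The routine above renders /proc/net/ip6_flowlabel.  With the format
 * strings shown, a line for a user-shared label might look like this
 * (illustrative values only):
 *
 *	Label S Owner  Users  Linger Expires  Dst                              Opt
 *	12345 3 1000   1      60     58       20010db8000000000000000000000001 0
 *
 * S is the share mode (3 == IPV6_FL_S_USER, so Owner is an euid),
 * Linger and Expires are in seconds, and %pi6 prints the destination
 * address without separators.
 */
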
static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6fl_seq_ops,
			    sizeof(struct ip6fl_iter_state));
}

static const struct file_operations ip6fl_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	ip6fl_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_net_fops_create(net, "ip6_flowlabel",
				  S_IRUGO, &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	proc_net_remove(net, "ip6_flowlabel");
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}