GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/core/fib_rules.c
/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <[email protected]>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

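/*
 * Allocate a rule of ops->rule_size, initialize it to jump to @table
 * with preference @pref, and append it to ops->rules_list. Used by a
 * protocol at registration time to install its initial rules.
 */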
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = hold_net(ops->fro_net);

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

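/*
 * Pick a default preference for a new rule: one less than the
 * preference of the second rule in the list, or 0 if that cannot be
 * determined.
 */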
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

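/*
 * Find the fib_rules_ops registered for @family in @net and take a
 * reference on its owning module. Returns NULL if no ops are
 * registered or the module is going away; release with rules_ops_put().
 */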
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

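/* Drop the module reference taken by lookup_rules_ops(). */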
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

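/* Invoke the per-family route cache flush hook, if one is provided. */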
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

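/*
 * Validate a new ops structure (sane rule_size, all mandatory
 * callbacks present) and add it to the per-namespace list, failing
 * with -EEXIST if the family is already registered.
 */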
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

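/*
 * Duplicate the template ops, bind the copy to @net and register it.
 * Returns the new ops on success or an ERR_PTR() on failure.
 */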
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

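/* Unlink and drop every rule still on ops->rules_list. */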
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}

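/* RCU callback: release the namespace and free the ops structure. */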
static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}

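/*
 * Remove @ops from the per-namespace list, drop all of its rules and
 * free it after an RCU grace period, so concurrent lookups can finish.
 */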
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

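/*
 * Check the generic selectors (input/output interface, fwmark) and
 * then the family-specific ->match() hook. The result is negated if
 * FIB_RULE_INVERT is set on the rule.
 */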
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

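/*
 * Walk the rule list under RCU in preference order, following
 * resolved goto targets, until a matching rule's ->action() returns
 * something other than -EAGAIN. On success the matched rule is
 * returned in arg->rule, with a reference held unless the caller
 * passed FIB_LOOKUP_NOREF; -ESRCH means no rule matched.
 */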
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

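/*
 * Sanity-check the source/destination prefix lengths in the netlink
 * header against the family's address size and the attribute payload.
 */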
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

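/*
 * RTM_NEWRULE handler: parse and validate the request, allocate and
 * configure the rule, insert it in preference order, resolve any
 * pending goto targets that point at it, and notify listeners.
 */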
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(rtnl_dereference(r->ctarget) != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

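/*
 * RTM_DELRULE handler: find the first rule matching every selector
 * present in the request, unlink it, invalidate goto rules that
 * target it, notify listeners and drop the reference.
 */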
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this rule is a target of any goto rule. If so,
		 * disable those rules. As this operation is potentially very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

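/*
 * Estimate the notification message size for @rule: the generic
 * header and attributes, plus any family-specific payload reported
 * by ->nlmsg_payload().
 */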
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

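/*
 * Encode @rule into a netlink message: fill the fib_rule_hdr and the
 * generic attributes, then let the family add its own via ->fill().
 */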
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_dereference_raw(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);

		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);

		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

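/*
 * Dump all rules of one family to a netlink dump callback, resuming
 * at the index saved in cb->args[1].
 */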
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}

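/*
 * RTM_GETRULE dump handler: dump a single family if one was
 * requested, otherwise iterate over all registered families.
 */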
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

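/*
 * Broadcast an RTM_NEWRULE/RTM_DELRULE notification for @rule to the
 * family's netlink group, reporting an error on the socket if the
 * message cannot be built or sent.
 */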
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

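/*
 * Re-bind rules that reference @dev by name to its new ifindex when
 * the device appears; detach_rules() below does the reverse on
 * unregistration.
 */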
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}


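/*
 * Netdevice notifier: keep the cached ifindex of every rule in every
 * family in sync as devices register and unregister.
 */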
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

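/* Per-namespace init: empty ops list plus the registration lock. */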
static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

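/*
 * Subsystem init: register the rtnetlink handlers, the per-namespace
 * operations and the netdevice notifier, unwinding on failure.
 */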
static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);