GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/decnet/dn_fib.c
/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Forwarding Information Base (Glue/Info List)
 *
 * Author:      Steve Whitehouse <[email protected]>
 *
 *
 * Changes:
 *              Alexey Kuznetsov : SMP locking changes
 *              Steve Whitehouse : Rewrote it... Well to be more correct, I
 *                                 copied most of it from the ipv4 fib code.
 *              Steve Whitehouse : Updated it in style and fixed a few bugs
 *                                 which were fixed in the ipv4 code since
 *                                 this code was copied from it.
 *
 */
#include <linux/string.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_route.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>

#define RT_MIN_TABLE 1

#define for_fib_info() { struct dn_fib_info *fi;\
	for(fi = dn_fib_info_list; fi; fi = fi->fib_next)
#define endfor_fib_info() }

#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
	for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)

#define change_nexthops(fi) { int nhsel; struct dn_fib_nh *nh;\
	for(nhsel = 0, nh = (struct dn_fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)

#define endfor_nexthops(fi) }

static DEFINE_SPINLOCK(dn_fib_multipath_lock);
static struct dn_fib_info *dn_fib_info_list;
static DEFINE_SPINLOCK(dn_fib_info_lock);

static struct
{
	int error;
	u8 scope;
} dn_fib_props[RTN_MAX+1] = {
	[RTN_UNSPEC] =      { .error = 0,             .scope = RT_SCOPE_NOWHERE },
	[RTN_UNICAST] =     { .error = 0,             .scope = RT_SCOPE_UNIVERSE },
	[RTN_LOCAL] =       { .error = 0,             .scope = RT_SCOPE_HOST },
	[RTN_BROADCAST] =   { .error = -EINVAL,       .scope = RT_SCOPE_NOWHERE },
	[RTN_ANYCAST] =     { .error = -EINVAL,       .scope = RT_SCOPE_NOWHERE },
	[RTN_MULTICAST] =   { .error = -EINVAL,       .scope = RT_SCOPE_NOWHERE },
	[RTN_BLACKHOLE] =   { .error = -EINVAL,       .scope = RT_SCOPE_UNIVERSE },
	[RTN_UNREACHABLE] = { .error = -EHOSTUNREACH, .scope = RT_SCOPE_UNIVERSE },
	[RTN_PROHIBIT] =    { .error = -EACCES,       .scope = RT_SCOPE_UNIVERSE },
	[RTN_THROW] =       { .error = -EAGAIN,       .scope = RT_SCOPE_UNIVERSE },
	[RTN_NAT] =         { .error = 0,             .scope = RT_SCOPE_NOWHERE },
	[RTN_XRESOLVE] =    { .error = -EINVAL,       .scope = RT_SCOPE_NOWHERE },
};

static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force);
static int dn_fib_sync_up(struct net_device *dev);
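/*
 * dn_fib_free_info() and dn_fib_release_info() manage the global
 * dn_fib_info_list: an entry is only freed once it has been marked
 * dead, and dropping the last tree reference unlinks it from the list
 * under dn_fib_info_lock before dn_fib_info_put() drops the remaining
 * client reference.
 */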
void dn_fib_free_info(struct dn_fib_info *fi)
{
	if (fi->fib_dead == 0) {
		printk(KERN_DEBUG "DECnet: BUG! Attempt to free alive dn_fib_info\n");
		return;
	}

	change_nexthops(fi) {
		if (nh->nh_dev)
			dev_put(nh->nh_dev);
		nh->nh_dev = NULL;
	} endfor_nexthops(fi);
	kfree(fi);
}

void dn_fib_release_info(struct dn_fib_info *fi)
{
	spin_lock(&dn_fib_info_lock);
	if (fi && --fi->fib_treeref == 0) {
		if (fi->fib_next)
			fi->fib_next->fib_prev = fi->fib_prev;
		if (fi->fib_prev)
			fi->fib_prev->fib_next = fi->fib_next;
		if (fi == dn_fib_info_list)
			dn_fib_info_list = fi->fib_next;
		fi->fib_dead = 1;
		dn_fib_info_put(fi);
	}
	spin_unlock(&dn_fib_info_lock);
}
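/*
 * Compare the nexthop arrays of two dn_fib_info entries and scan the
 * global list for an existing entry matching a newly built one, so
 * that identical routes can share a single dn_fib_info.
 */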
static inline int dn_fib_nh_comp(const struct dn_fib_info *fi, const struct dn_fib_info *ofi)
{
	const struct dn_fib_nh *onh = ofi->fib_nh;

	for_nexthops(fi) {
		if (nh->nh_oif != onh->nh_oif ||
		    nh->nh_gw != onh->nh_gw ||
		    nh->nh_scope != onh->nh_scope ||
		    nh->nh_weight != onh->nh_weight ||
		    ((nh->nh_flags^onh->nh_flags)&~RTNH_F_DEAD))
			return -1;
		onh++;
	} endfor_nexthops(fi);
	return 0;
}

static inline struct dn_fib_info *dn_fib_find_info(const struct dn_fib_info *nfi)
{
	for_fib_info() {
		if (fi->fib_nhs != nfi->fib_nhs)
			continue;
		if (nfi->fib_protocol == fi->fib_protocol &&
		    nfi->fib_prefsrc == fi->fib_prefsrc &&
		    nfi->fib_priority == fi->fib_priority &&
		    memcmp(nfi->fib_metrics, fi->fib_metrics, sizeof(fi->fib_metrics)) == 0 &&
		    ((nfi->fib_flags^fi->fib_flags)&~RTNH_F_DEAD) == 0 &&
		    (nfi->fib_nhs == 0 || dn_fib_nh_comp(fi, nfi) == 0))
			return fi;
	} endfor_fib_info();
	return NULL;
}
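/*
 * Helpers for parsing netlink route attributes: pull a 16-bit DECnet
 * address out of an rtattr chain, count the entries in an
 * RTA_MULTIPATH attribute, and fill the nexthop array of a
 * dn_fib_info from such an attribute.
 */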
__le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type)
{
	while(RTA_OK(attr,attrlen)) {
		if (attr->rta_type == type)
			return *(__le16*)RTA_DATA(attr);
		attr = RTA_NEXT(attr, attrlen);
	}

	return 0;
}

static int dn_fib_count_nhs(struct rtattr *rta)
{
	int nhs = 0;
	struct rtnexthop *nhp = RTA_DATA(rta);
	int nhlen = RTA_PAYLOAD(rta);

	while(nhlen >= (int)sizeof(struct rtnexthop)) {
		if ((nhlen -= nhp->rtnh_len) < 0)
			return 0;
		nhs++;
		nhp = RTNH_NEXT(nhp);
	}

	return nhs;
}

static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct rtattr *rta, const struct rtmsg *r)
{
	struct rtnexthop *nhp = RTA_DATA(rta);
	int nhlen = RTA_PAYLOAD(rta);

	change_nexthops(fi) {
		int attrlen = nhlen - sizeof(struct rtnexthop);
		if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
			return -EINVAL;

		nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
		nh->nh_oif = nhp->rtnh_ifindex;
		nh->nh_weight = nhp->rtnh_hops + 1;

		if (attrlen) {
			nh->nh_gw = dn_fib_get_attr16(RTNH_DATA(nhp), attrlen, RTA_GATEWAY);
		}
		nhp = RTNH_NEXT(nhp);
	} endfor_nexthops(fi);

	return 0;
}
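/*
 * Validate a single nexthop while a route is being added.  An ONLINK
 * nexthop only needs its output device to exist and be up; otherwise
 * the gateway is resolved with dn_fib_lookup() and the scope, output
 * interface and device are taken from the result.  A device-only
 * nexthop must point at an up, DECnet-capable interface.  A reference
 * is held on the chosen device in every case.
 */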
static int dn_fib_check_nh(const struct rtmsg *r, struct dn_fib_info *fi, struct dn_fib_nh *nh)
{
	int err;

	if (nh->nh_gw) {
		struct flowidn fld;
		struct dn_fib_res res;

		if (nh->nh_flags&RTNH_F_ONLINK) {
			struct net_device *dev;

			if (r->rtm_scope >= RT_SCOPE_LINK)
				return -EINVAL;
			if (dnet_addr_type(nh->nh_gw) != RTN_UNICAST)
				return -EINVAL;
			if ((dev = __dev_get_by_index(&init_net, nh->nh_oif)) == NULL)
				return -ENODEV;
			if (!(dev->flags&IFF_UP))
				return -ENETDOWN;
			nh->nh_dev = dev;
			dev_hold(dev);
			nh->nh_scope = RT_SCOPE_LINK;
			return 0;
		}

		memset(&fld, 0, sizeof(fld));
		fld.daddr = nh->nh_gw;
		fld.flowidn_oif = nh->nh_oif;
		fld.flowidn_scope = r->rtm_scope + 1;

		if (fld.flowidn_scope < RT_SCOPE_LINK)
			fld.flowidn_scope = RT_SCOPE_LINK;

		if ((err = dn_fib_lookup(&fld, &res)) != 0)
			return err;

		err = -EINVAL;
		if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
			goto out;
		nh->nh_scope = res.scope;
		nh->nh_oif = DN_FIB_RES_OIF(res);
		nh->nh_dev = DN_FIB_RES_DEV(res);
		if (nh->nh_dev == NULL)
			goto out;
		dev_hold(nh->nh_dev);
		err = -ENETDOWN;
		if (!(nh->nh_dev->flags & IFF_UP))
			goto out;
		err = 0;
out:
		dn_fib_res_put(&res);
		return err;
	} else {
		struct net_device *dev;

		if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK))
			return -EINVAL;

		dev = __dev_get_by_index(&init_net, nh->nh_oif);
		if (dev == NULL || dev->dn_ptr == NULL)
			return -ENODEV;
		if (!(dev->flags&IFF_UP))
			return -ENETDOWN;
		nh->nh_dev = dev;
		dev_hold(nh->nh_dev);
		nh->nh_scope = RT_SCOPE_HOST;
	}

	return 0;
}
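/*
 * Build a dn_fib_info from an rtmsg and its parsed attributes: allocate
 * room for the nexthops, copy priority, metrics and preferred source,
 * then either fill a single nexthop or parse an RTA_MULTIPATH list and
 * validate each hop with dn_fib_check_nh().  If an identical entry
 * already exists it is reused, otherwise the new entry is linked onto
 * dn_fib_info_list.
 */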
struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta *rta, const struct nlmsghdr *nlh, int *errp)
{
	int err;
	struct dn_fib_info *fi = NULL;
	struct dn_fib_info *ofi;
	int nhs = 1;

	if (r->rtm_type > RTN_MAX)
		goto err_inval;

	if (dn_fib_props[r->rtm_type].scope > r->rtm_scope)
		goto err_inval;

	if (rta->rta_mp) {
		nhs = dn_fib_count_nhs(rta->rta_mp);
		if (nhs == 0)
			goto err_inval;
	}

	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
	err = -ENOBUFS;
	if (fi == NULL)
		goto failure;

	fi->fib_protocol = r->rtm_protocol;
	fi->fib_nhs = nhs;
	fi->fib_flags = r->rtm_flags;
	if (rta->rta_priority)
		fi->fib_priority = *rta->rta_priority;
	if (rta->rta_mx) {
		int attrlen = RTA_PAYLOAD(rta->rta_mx);
		struct rtattr *attr = RTA_DATA(rta->rta_mx);

		while(RTA_OK(attr, attrlen)) {
			unsigned flavour = attr->rta_type;
			if (flavour) {
				if (flavour > RTAX_MAX)
					goto err_inval;
				fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr);
			}
			attr = RTA_NEXT(attr, attrlen);
		}
	}
	if (rta->rta_prefsrc)
		memcpy(&fi->fib_prefsrc, rta->rta_prefsrc, 2);

	if (rta->rta_mp) {
		if ((err = dn_fib_get_nhs(fi, rta->rta_mp, r)) != 0)
			goto failure;
		if (rta->rta_oif && fi->fib_nh->nh_oif != *rta->rta_oif)
			goto err_inval;
		if (rta->rta_gw && memcmp(&fi->fib_nh->nh_gw, rta->rta_gw, 2))
			goto err_inval;
	} else {
		struct dn_fib_nh *nh = fi->fib_nh;
		if (rta->rta_oif)
			nh->nh_oif = *rta->rta_oif;
		if (rta->rta_gw)
			memcpy(&nh->nh_gw, rta->rta_gw, 2);
		nh->nh_flags = r->rtm_flags;
		nh->nh_weight = 1;
	}

	if (r->rtm_type == RTN_NAT) {
		if (rta->rta_gw == NULL || nhs != 1 || rta->rta_oif)
			goto err_inval;
		memcpy(&fi->fib_nh->nh_gw, rta->rta_gw, 2);
		goto link_it;
	}

	if (dn_fib_props[r->rtm_type].error) {
		if (rta->rta_gw || rta->rta_oif || rta->rta_mp)
			goto err_inval;
		goto link_it;
	}

	if (r->rtm_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (r->rtm_scope == RT_SCOPE_HOST) {
		struct dn_fib_nh *nh = fi->fib_nh;

		/* Local address is added */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(&init_net, fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		change_nexthops(fi) {
			if ((err = dn_fib_check_nh(r, fi, nh)) != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	if (fi->fib_prefsrc) {
		if (r->rtm_type != RTN_LOCAL || rta->rta_dst == NULL ||
		    memcmp(&fi->fib_prefsrc, rta->rta_dst, 2))
			if (dnet_addr_type(fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

link_it:
	if ((ofi = dn_fib_find_info(fi)) != NULL) {
		fi->fib_dead = 1;
		dn_fib_free_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	spin_lock(&dn_fib_info_lock);
	fi->fib_next = dn_fib_info_list;
	fi->fib_prev = NULL;
	if (dn_fib_info_list)
		dn_fib_info_list->fib_prev = fi;
	dn_fib_info_list = fi;
	spin_unlock(&dn_fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	*errp = err;
	if (fi) {
		fi->fib_dead = 1;
		dn_fib_free_info(fi);
	}

	return NULL;
}
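/*
 * Check whether a matched route of the given type can actually be used
 * for this lookup.  Returns 0 and fills in the result (taking a client
 * reference) on success, 1 if the route is dead or no usable nexthop
 * matches the requested output interface, or the error code associated
 * with the route type.
 */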
int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn *fld, struct dn_fib_res *res)
{
	int err = dn_fib_props[type].error;

	if (err == 0) {
		if (fi->fib_flags & RTNH_F_DEAD)
			return 1;

		res->fi = fi;

		switch(type) {
		case RTN_NAT:
			DN_FIB_RES_RESET(*res);
			atomic_inc(&fi->fib_clntref);
			return 0;
		case RTN_UNICAST:
		case RTN_LOCAL:
			for_nexthops(fi) {
				if (nh->nh_flags & RTNH_F_DEAD)
					continue;
				if (!fld->flowidn_oif ||
				    fld->flowidn_oif == nh->nh_oif)
					break;
			}
			if (nhsel < fi->fib_nhs) {
				res->nh_sel = nhsel;
				atomic_inc(&fi->fib_clntref);
				return 0;
			}
			endfor_nexthops(fi);
			res->fi = NULL;
			return 1;
		default:
			if (net_ratelimit())
				printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", type);
			res->fi = NULL;
			return -EINVAL;
		}
	}
	return err;
}
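/*
 * Weighted selection among the live nexthops of a multipath route.
 * fib_power is replenished from the nexthop weights when it runs out,
 * and each selection spends one unit of the chosen nexthop's power, so
 * hops are picked roughly in proportion to their configured weight.
 */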
void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res)
{
	struct dn_fib_info *fi = res->fi;
	int w;

	spin_lock_bh(&dn_fib_multipath_lock);
	if (fi->fib_power <= 0) {
		int power = 0;
		change_nexthops(fi) {
			if (!(nh->nh_flags&RTNH_F_DEAD)) {
				power += nh->nh_weight;
				nh->nh_power = nh->nh_weight;
			}
		} endfor_nexthops(fi);
		fi->fib_power = power;
		if (power < 0) {
			spin_unlock_bh(&dn_fib_multipath_lock);
			res->nh_sel = 0;
			return;
		}
	}

	w = jiffies % fi->fib_power;

	change_nexthops(fi) {
		if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) {
			if ((w -= nh->nh_power) <= 0) {
				nh->nh_power--;
				fi->fib_power--;
				res->nh_sel = nhsel;
				spin_unlock_bh(&dn_fib_multipath_lock);
				return;
			}
		}
	} endfor_nexthops(fi);
	res->nh_sel = 0;
	spin_unlock_bh(&dn_fib_multipath_lock);
}
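/*
 * Sanity check the rtnetlink attribute array of a route request and
 * replace each plain attribute pointer with a pointer to its payload,
 * so the array can be viewed as a struct dn_kern_rta by the callers.
 */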
static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta)
{
	int i;

	for(i = 1; i <= RTA_MAX; i++) {
		struct rtattr *attr = rta[i-1];
		if (attr) {
			if (RTA_PAYLOAD(attr) < 4 && RTA_PAYLOAD(attr) != 2)
				return -EINVAL;
			if (i != RTA_MULTIPATH && i != RTA_METRICS &&
			    i != RTA_TABLE)
				rta[i-1] = (struct rtattr *)RTA_DATA(attr);
		}
	}

	return 0;
}
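/*
 * rtnetlink handlers for RTM_DELROUTE and RTM_NEWROUTE: validate the
 * attributes, look up (or create) the target table and hand the
 * request to the table's delete/insert operation.
 */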
static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct dn_fib_table *tb;
	struct rtattr **rta = arg;
	struct rtmsg *r = NLMSG_DATA(nlh);

	if (!net_eq(net, &init_net))
		return -EINVAL;

	if (dn_fib_check_attr(r, rta))
		return -EINVAL;

	tb = dn_fib_get_table(rtm_get_table(rta, r->rtm_table), 0);
	if (tb)
		return tb->delete(tb, r, (struct dn_kern_rta *)rta, nlh, &NETLINK_CB(skb));

	return -ESRCH;
}

static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct dn_fib_table *tb;
	struct rtattr **rta = arg;
	struct rtmsg *r = NLMSG_DATA(nlh);

	if (!net_eq(net, &init_net))
		return -EINVAL;

	if (dn_fib_check_attr(r, rta))
		return -EINVAL;

	tb = dn_fib_get_table(rtm_get_table(rta, r->rtm_table), 1);
	if (tb)
		return tb->insert(tb, r, (struct dn_kern_rta *)rta, nlh, &NETLINK_CB(skb));

	return -ENOBUFS;
}
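/*
 * Insert or delete a kernel-generated route (the routes that accompany
 * local interface addresses) by constructing a fake netlink request and
 * feeding it to the appropriate table.
 */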
static void fib_magic(int cmd, int type, __le16 dst, int dst_len, struct dn_ifaddr *ifa)
{
	struct dn_fib_table *tb;
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;
	struct dn_kern_rta rta;

	memset(&req.rtm, 0, sizeof(req.rtm));
	memset(&rta, 0, sizeof(rta));

	if (type == RTN_UNICAST)
		tb = dn_fib_get_table(RT_MIN_TABLE, 1);
	else
		tb = dn_fib_get_table(RT_TABLE_LOCAL, 1);

	if (tb == NULL)
		return;

	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_type = cmd;
	req.nlh.nlmsg_flags = NLM_F_REQUEST|NLM_F_CREATE|NLM_F_APPEND;
	req.nlh.nlmsg_pid = 0;
	req.nlh.nlmsg_seq = 0;

	req.rtm.rtm_dst_len = dst_len;
	req.rtm.rtm_table = tb->n;
	req.rtm.rtm_protocol = RTPROT_KERNEL;
	req.rtm.rtm_scope = (type != RTN_LOCAL ? RT_SCOPE_LINK : RT_SCOPE_HOST);
	req.rtm.rtm_type = type;

	rta.rta_dst = &dst;
	rta.rta_prefsrc = &ifa->ifa_local;
	rta.rta_oif = &ifa->ifa_dev->dev->ifindex;

	if (cmd == RTM_NEWROUTE)
		tb->insert(tb, &req.rtm, &rta, &req.nlh, NULL);
	else
		tb->delete(tb, &req.rtm, &rta, &req.nlh, NULL);
}
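/*
 * Keep the FIB in step with interface address changes:
 * dn_fib_add_ifaddr() adds the host route for a new local address,
 * dn_fib_del_ifaddr() removes it once no other interface still carries
 * that address, and dn_fib_disable_addr() kills the routes through a
 * device that is going away.
 */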
static void dn_fib_add_ifaddr(struct dn_ifaddr *ifa)
{

	fib_magic(RTM_NEWROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa);

#if 0
	if (!(dev->flags&IFF_UP))
		return;
	/* In the future, we will want to add default routes here */

#endif
}

static void dn_fib_del_ifaddr(struct dn_ifaddr *ifa)
{
	int found_it = 0;
	struct net_device *dev;
	struct dn_dev *dn_db;
	struct dn_ifaddr *ifa2;

	ASSERT_RTNL();

	/* Scan device list */
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		dn_db = rcu_dereference(dev->dn_ptr);
		if (dn_db == NULL)
			continue;
		for (ifa2 = rcu_dereference(dn_db->ifa_list);
		     ifa2 != NULL;
		     ifa2 = rcu_dereference(ifa2->ifa_next)) {
			if (ifa2->ifa_local == ifa->ifa_local) {
				found_it = 1;
				break;
			}
		}
	}
	rcu_read_unlock();

	if (found_it == 0) {
		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 16, ifa);

		if (dnet_addr_type(ifa->ifa_local) != RTN_LOCAL) {
			if (dn_fib_sync_down(ifa->ifa_local, NULL, 0))
				dn_fib_flush();
		}
	}
}

static void dn_fib_disable_addr(struct net_device *dev, int force)
{
	if (dn_fib_sync_down(0, dev, force))
		dn_fib_flush();
	dn_rt_cache_flush(0);
	neigh_ifdown(&dn_neigh_table, dev);
}
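/*
 * Notifier callback invoked when a DECnet address is added to or
 * removed from an interface; it updates the FIB and flushes the route
 * cache accordingly.
 */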
static int dn_fib_dnaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct dn_ifaddr *ifa = (struct dn_ifaddr *)ptr;

	switch(event) {
	case NETDEV_UP:
		dn_fib_add_ifaddr(ifa);
		dn_fib_sync_up(ifa->ifa_dev->dev);
		dn_rt_cache_flush(-1);
		break;
	case NETDEV_DOWN:
		dn_fib_del_ifaddr(ifa);
		if (ifa->ifa_dev && ifa->ifa_dev->ifa_list == NULL) {
			dn_fib_disable_addr(ifa->ifa_dev->dev, 1);
		} else {
			dn_rt_cache_flush(-1);
		}
		break;
	}
	return NOTIFY_DONE;
}
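/*
 * Mark nexthops (and whole routes, once every nexthop is gone) dead
 * when a device or local address goes away, and bring them back to
 * life when the device comes up again.  Both functions return the
 * number of dn_fib_info entries that changed state.
 */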
static int dn_fib_sync_down(__le16 local, struct net_device *dev, int force)
{
	int ret = 0;
	int scope = RT_SCOPE_NOWHERE;

	if (force)
		scope = -1;

	for_fib_info() {
		/*
		 * This makes no sense for DECnet.... we will almost
		 * certainly have more than one local address the same
		 * over all our interfaces. It needs thinking about
		 * some more.
		 */
		if (local && fi->fib_prefsrc == local) {
			fi->fib_flags |= RTNH_F_DEAD;
			ret++;
		} else if (dev && fi->fib_nhs) {
			int dead = 0;

			change_nexthops(fi) {
				if (nh->nh_flags&RTNH_F_DEAD)
					dead++;
				else if (nh->nh_dev == dev &&
					 nh->nh_scope != scope) {
					spin_lock_bh(&dn_fib_multipath_lock);
					nh->nh_flags |= RTNH_F_DEAD;
					fi->fib_power -= nh->nh_power;
					nh->nh_power = 0;
					spin_unlock_bh(&dn_fib_multipath_lock);
					dead++;
				}
			} endfor_nexthops(fi)
			if (dead == fi->fib_nhs) {
				fi->fib_flags |= RTNH_F_DEAD;
				ret++;
			}
		}
	} endfor_fib_info();
	return ret;
}

static int dn_fib_sync_up(struct net_device *dev)
{
	int ret = 0;

	if (!(dev->flags&IFF_UP))
		return 0;

	for_fib_info() {
		int alive = 0;

		change_nexthops(fi) {
			if (!(nh->nh_flags&RTNH_F_DEAD)) {
				alive++;
				continue;
			}
			if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
				continue;
			if (nh->nh_dev != dev || dev->dn_ptr == NULL)
				continue;
			alive++;
			spin_lock_bh(&dn_fib_multipath_lock);
			nh->nh_power = 0;
			nh->nh_flags &= ~RTNH_F_DEAD;
			spin_unlock_bh(&dn_fib_multipath_lock);
		} endfor_nexthops(fi);

		if (alive > 0) {
			fi->fib_flags &= ~RTNH_F_DEAD;
			ret++;
		}
	} endfor_fib_info();
	return ret;
}
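/*
 * Module setup and teardown: initialise the tables and rules, register
 * the DECnet address notifier and hook the RTM_NEWROUTE/RTM_DELROUTE
 * handlers into rtnetlink.
 */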
static struct notifier_block dn_fib_dnaddr_notifier = {
	.notifier_call = dn_fib_dnaddr_event,
};

void __exit dn_fib_cleanup(void)
{
	dn_fib_table_cleanup();
	dn_fib_rules_cleanup();

	unregister_dnaddr_notifier(&dn_fib_dnaddr_notifier);
}

void __init dn_fib_init(void)
{
	dn_fib_table_init();
	dn_fib_rules_init();

	register_dnaddr_notifier(&dn_fib_dnaddr_notifier);

	rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL);
	rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL);
}