GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/batman-adv/hard-interface.c
1
/*
2
* Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
3
*
4
* Marek Lindner, Simon Wunderlich
5
*
6
* This program is free software; you can redistribute it and/or
7
* modify it under the terms of version 2 of the GNU General Public
8
* License as published by the Free Software Foundation.
9
*
10
* This program is distributed in the hope that it will be useful, but
11
* WITHOUT ANY WARRANTY; without even the implied warranty of
12
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13
* General Public License for more details.
14
*
15
* You should have received a copy of the GNU General Public License
16
* along with this program; if not, write to the Free Software
17
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18
* 02110-1301, USA
19
*
20
*/

#include "main.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "send.h"
#include "translation-table.h"
#include "routing.h"
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"

#include <linux/if_arp.h>


static int batman_skb_recv(struct sk_buff *skb,
			   struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev);

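/* free a hard_iface after the RCU grace period: drop the reference on the
 * underlying net_device and release the memory */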
void hardif_free_rcu(struct rcu_head *rcu)
{
	struct hard_iface *hard_iface;

	hard_iface = container_of(rcu, struct hard_iface, rcu);
	dev_put(hard_iface->net_dev);
	kfree(hard_iface);
}

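/* find the hard_iface belonging to net_dev; returns it with an increased
 * refcount, or NULL if none exists */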
struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
{
	struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->net_dev == net_dev &&
		    atomic_inc_not_zero(&hard_iface->refcount))
			goto out;
	}

	hard_iface = NULL;

out:
	rcu_read_unlock();
	return hard_iface;
}

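/* check whether net_dev is suitable as a batman-adv hard interface */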
static int is_valid_iface(struct net_device *net_dev)
{
	if (net_dev->flags & IFF_LOOPBACK)
		return 0;

	if (net_dev->type != ARPHRD_ETHER)
		return 0;

	if (net_dev->addr_len != ETH_ALEN)
		return 0;

	/* no batman over batman */
	if (softif_is_valid(net_dev))
		return 0;

	/* Device is being bridged */
	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
		return 0; */

	return 1;
}

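/* return an active hard interface attached to soft_iface (refcount
 * increased), or NULL if there is none */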
static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
{
	struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (hard_iface->if_status == IF_ACTIVE &&
		    atomic_inc_not_zero(&hard_iface->refcount))
			goto out;
	}

	hard_iface = NULL;

out:
	rcu_read_unlock();
	return hard_iface;
}

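/* copy the MAC address of the selected primary interface into the locally
 * generated vis packet */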
static void primary_if_update_addr(struct bat_priv *bat_priv)
{
	struct vis_packet *vis_packet;
	struct hard_iface *primary_if;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	vis_packet = (struct vis_packet *)
				bat_priv->my_vis_info->skb_packet->data;
	memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(vis_packet->sender_orig,
	       primary_if->net_dev->dev_addr, ETH_ALEN);

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

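/* select new_hard_iface as the primary interface and drop the reference
 * held on the previous selection; the caller must hold RTNL */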
static void primary_if_select(struct bat_priv *bat_priv,
			      struct hard_iface *new_hard_iface)
{
	struct hard_iface *curr_hard_iface;
	struct batman_packet *batman_packet;

	ASSERT_RTNL();

	if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
		new_hard_iface = NULL;

	curr_hard_iface = bat_priv->primary_if;
	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);

	if (curr_hard_iface)
		hardif_free_ref(curr_hard_iface);

	if (!new_hard_iface)
		return;

	batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff);
	batman_packet->flags = PRIMARIES_FIRST_HOP;
	batman_packet->ttl = TTL;

	primary_if_update_addr(bat_priv);

	/***
	 * hacky trick to make sure that we send the TT information via
	 * our new primary interface
	 */
	atomic_set(&bat_priv->tt_local_changed, 1);
}

static bool hardif_is_iface_up(struct hard_iface *hard_iface)
{
	if (hard_iface->net_dev->flags & IFF_UP)
		return true;

	return false;
}

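/* refresh the originator and prev_sender addresses in the interface's
 * packet buffer with the current MAC of the underlying net_device */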
static void update_mac_addresses(struct hard_iface *hard_iface)
{
	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->orig,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(((struct batman_packet *)(hard_iface->packet_buff))->prev_sender,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
}

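/* warn if another active hard interface already uses the same MAC address
 * as net_dev */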
static void check_known_mac_addr(struct net_device *net_dev)
{
	struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if ((hard_iface->if_status != IF_ACTIVE) &&
		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (hard_iface->net_dev == net_dev)
			continue;

		if (!compare_eth(hard_iface->net_dev->dev_addr,
				 net_dev->dev_addr))
			continue;

		pr_warning("The newly added mac address (%pM) already exists "
			   "on: %s\n", net_dev->dev_addr,
			   hard_iface->net_dev->name);
		pr_warning("It is strongly recommended to keep mac addresses "
			   "unique to avoid problems!\n");
	}
	rcu_read_unlock();
}

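/* calculate the MTU the soft interface can use, based on the hard
 * interfaces attached to it */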
int hardif_min_mtu(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct hard_iface *hard_iface;
	/* allow big frames if all devices are capable of doing so
	 * (have MTU > 1500 + BAT_HEADER_LEN) */
	int min_mtu = ETH_DATA_LEN;

	if (atomic_read(&bat_priv->fragmentation))
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if ((hard_iface->if_status != IF_ACTIVE) &&
		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (hard_iface->soft_iface != soft_iface)
			continue;

		min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
				min_mtu);
	}
	rcu_read_unlock();
out:
	return min_mtu;
}

/* adjusts the MTU if a new interface with a smaller MTU appeared. */
void update_min_mtu(struct net_device *soft_iface)
{
	int min_mtu;

	min_mtu = hardif_min_mtu(soft_iface);
	if (soft_iface->mtu != min_mtu)
		soft_iface->mtu = min_mtu;
}

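/* move an inactive interface towards the active state and select it as
 * primary interface if no primary interface is selected yet */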
static void hardif_activate_interface(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;

	if (hard_iface->if_status != IF_INACTIVE)
		goto out;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	update_mac_addresses(hard_iface);
	hard_iface->if_status = IF_TO_BE_ACTIVATED;

	/**
	 * the first active interface becomes our primary interface or
	 * the next active interface after the old primary interface was removed
	 */
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		primary_if_select(bat_priv, hard_iface);

	bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
		 hard_iface->net_dev->name);

	update_min_mtu(hard_iface->soft_iface);

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

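/* put an active interface back into the inactive state and update the
 * soft interface MTU accordingly */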
static void hardif_deactivate_interface(struct hard_iface *hard_iface)
{
	if ((hard_iface->if_status != IF_ACTIVE) &&
	    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
		return;

	hard_iface->if_status = IF_INACTIVE;

	bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
		 hard_iface->net_dev->name);

	update_min_mtu(hard_iface->soft_iface);
}

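/* attach hard_iface to the mesh interface named iface_name, creating the
 * soft interface if it does not exist yet */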
int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;
	struct net_device *soft_iface;
	int ret;

	if (hard_iface->if_status != IF_NOT_IN_USE)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount))
		goto out;

	soft_iface = dev_get_by_name(&init_net, iface_name);

	if (!soft_iface) {
		soft_iface = softif_create(iface_name);

		if (!soft_iface) {
			ret = -ENOMEM;
			goto err;
		}

		/* dev_get_by_name() increases the reference counter for us */
		dev_hold(soft_iface);
	}

	if (!softif_is_valid(soft_iface)) {
		pr_err("Can't create batman mesh interface %s: "
		       "already exists as regular interface\n",
		       soft_iface->name);
		dev_put(soft_iface);
		ret = -EINVAL;
		goto err;
	}

	hard_iface->soft_iface = soft_iface;
	bat_priv = netdev_priv(hard_iface->soft_iface);
	hard_iface->packet_len = BAT_PACKET_LEN;
	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);

	if (!hard_iface->packet_buff) {
		bat_err(hard_iface->soft_iface, "Can't add interface packet "
			"(%s): out of memory\n", hard_iface->net_dev->name);
		ret = -ENOMEM;
		goto err;
	}

	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
	batman_packet->packet_type = BAT_PACKET;
	batman_packet->version = COMPAT_VERSION;
	batman_packet->flags = 0;
	batman_packet->ttl = 2;
	batman_packet->tq = TQ_MAX_VALUE;
	batman_packet->num_tt = 0;

	hard_iface->if_num = bat_priv->num_ifaces;
	bat_priv->num_ifaces++;
	hard_iface->if_status = IF_INACTIVE;
	orig_hash_add_if(hard_iface, bat_priv->num_ifaces);

	hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
	hard_iface->batman_adv_ptype.func = batman_skb_recv;
	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
	dev_add_pack(&hard_iface->batman_adv_ptype);

	atomic_set(&hard_iface->seqno, 1);
	atomic_set(&hard_iface->frag_seqno, 1);
	bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
		 hard_iface->net_dev->name);

	if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(hard_iface->soft_iface,
			 "The MTU of interface %s is too small (%i) to handle "
			 "the transport of batman-adv packets. Packets going "
			 "over this interface will be fragmented on layer2 "
			 "which could impact the performance. Setting the MTU "
			 "to %zi would solve the problem.\n",
			 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
			 ETH_DATA_LEN + BAT_HEADER_LEN);

	if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(hard_iface->soft_iface,
			 "The MTU of interface %s is too small (%i) to handle "
			 "the transport of batman-adv packets. If you experience"
			 " problems getting traffic through try increasing the "
			 "MTU to %zi.\n",
			 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
			 ETH_DATA_LEN + BAT_HEADER_LEN);

	if (hardif_is_iface_up(hard_iface))
		hardif_activate_interface(hard_iface);
	else
		bat_err(hard_iface->soft_iface, "Not using interface %s "
			"(retrying later): interface not active\n",
			hard_iface->net_dev->name);

	/* begin scheduling originator messages on that interface */
	schedule_own_packet(hard_iface);

out:
	return 0;

err:
	hardif_free_ref(hard_iface);
	return ret;
}

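/* detach hard_iface from its mesh: release the packet buffer, select a new
 * primary interface if needed and destroy the soft interface if this was
 * the last attached interface */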
void hardif_disable_interface(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if = NULL;

	if (hard_iface->if_status == IF_ACTIVE)
		hardif_deactivate_interface(hard_iface);

	if (hard_iface->if_status != IF_INACTIVE)
		goto out;

	bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
		 hard_iface->net_dev->name);
	dev_remove_pack(&hard_iface->batman_adv_ptype);

	bat_priv->num_ifaces--;
	orig_hash_del_if(hard_iface, bat_priv->num_ifaces);

	primary_if = primary_if_get_selected(bat_priv);
	if (hard_iface == primary_if) {
		struct hard_iface *new_if;

		new_if = hardif_get_active(hard_iface->soft_iface);
		primary_if_select(bat_priv, new_if);

		if (new_if)
			hardif_free_ref(new_if);
	}

	kfree(hard_iface->packet_buff);
	hard_iface->packet_buff = NULL;
	hard_iface->if_status = IF_NOT_IN_USE;

	/* delete all references to this hard_iface */
	purge_orig_ref(bat_priv);
	purge_outstanding_packets(bat_priv, hard_iface);
	dev_put(hard_iface->soft_iface);

	/* nobody uses this interface anymore */
	if (!bat_priv->num_ifaces)
		softif_destroy(hard_iface->soft_iface);

	hard_iface->soft_iface = NULL;
	hardif_free_ref(hard_iface);

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

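/* allocate and register a hard_iface for net_dev; returns it holding an
 * extra reference for the caller, or NULL on failure */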
static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
{
	struct hard_iface *hard_iface;
	int ret;

	ASSERT_RTNL();

	ret = is_valid_iface(net_dev);
	if (ret != 1)
		goto out;

	dev_hold(net_dev);

	hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
	if (!hard_iface) {
		pr_err("Can't add interface (%s): out of memory\n",
		       net_dev->name);
		goto release_dev;
	}

	ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
	if (ret)
		goto free_if;

	hard_iface->if_num = -1;
	hard_iface->net_dev = net_dev;
	hard_iface->soft_iface = NULL;
	hard_iface->if_status = IF_NOT_IN_USE;
	INIT_LIST_HEAD(&hard_iface->list);
	/* extra reference for return */
	atomic_set(&hard_iface->refcount, 2);

	check_known_mac_addr(hard_iface->net_dev);
	list_add_tail_rcu(&hard_iface->list, &hardif_list);

	return hard_iface;

free_if:
	kfree(hard_iface);
release_dev:
	dev_put(net_dev);
out:
	return NULL;
}

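/* disable the interface if it is still in use and remove its sysfs entry
 * before dropping its reference */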
static void hardif_remove_interface(struct hard_iface *hard_iface)
{
	ASSERT_RTNL();

	/* first deactivate interface */
	if (hard_iface->if_status != IF_NOT_IN_USE)
		hardif_disable_interface(hard_iface);

	if (hard_iface->if_status != IF_NOT_IN_USE)
		return;

	hard_iface->if_status = IF_TO_BE_REMOVED;
	sysfs_del_hardif(&hard_iface->hardif_obj);
	hardif_free_ref(hard_iface);
}

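/* remove all hard interfaces currently registered with batman-adv */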
void hardif_remove_interfaces(void)
{
	struct hard_iface *hard_iface, *hard_iface_tmp;

	rtnl_lock();
	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
				 &hardif_list, list) {
		list_del_rcu(&hard_iface->list);
		hardif_remove_interface(hard_iface);
	}
	rtnl_unlock();
}

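/* netdevice notifier callback: keep the hard interface state in sync with
 * netdev events (register, up/down, unregister, MTU and address changes) */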
static int hard_if_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *net_dev = (struct net_device *)ptr;
	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
	struct hard_iface *primary_if = NULL;
	struct bat_priv *bat_priv;

	if (!hard_iface && event == NETDEV_REGISTER)
		hard_iface = hardif_add_interface(net_dev);

	if (!hard_iface)
		goto out;

	switch (event) {
	case NETDEV_UP:
		hardif_activate_interface(hard_iface);
		break;
	case NETDEV_GOING_DOWN:
	case NETDEV_DOWN:
		hardif_deactivate_interface(hard_iface);
		break;
	case NETDEV_UNREGISTER:
		list_del_rcu(&hard_iface->list);

		hardif_remove_interface(hard_iface);
		break;
	case NETDEV_CHANGEMTU:
		if (hard_iface->soft_iface)
			update_min_mtu(hard_iface->soft_iface);
		break;
	case NETDEV_CHANGEADDR:
		if (hard_iface->if_status == IF_NOT_IN_USE)
			goto hardif_put;

		check_known_mac_addr(hard_iface->net_dev);
		update_mac_addresses(hard_iface);

		bat_priv = netdev_priv(hard_iface->soft_iface);
		primary_if = primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto hardif_put;

		if (hard_iface == primary_if)
			primary_if_update_addr(bat_priv);
		break;
	default:
		break;
	}

hardif_put:
	hardif_free_ref(hard_iface);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NOTIFY_DONE;
}

/* receive a packet with the batman ethertype coming on a hard
 * interface */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;
	struct hard_iface *hard_iface;
	int ret;

	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != sizeof(struct ethhdr)
				|| !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != IF_ACTIVE)
		goto err_free;

	batman_packet = (struct batman_packet *)skb->data;

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb. */

	switch (batman_packet->packet_type) {
		/* batman originator packet */
	case BAT_PACKET:
		ret = recv_bat_packet(skb, hard_iface);
		break;

		/* batman icmp packet */
	case BAT_ICMP:
		ret = recv_icmp_packet(skb, hard_iface);
		break;

		/* unicast packet */
	case BAT_UNICAST:
		ret = recv_unicast_packet(skb, hard_iface);
		break;

		/* fragmented unicast packet */
	case BAT_UNICAST_FRAG:
		ret = recv_ucast_frag_packet(skb, hard_iface);
		break;

		/* broadcast packet */
	case BAT_BCAST:
		ret = recv_bcast_packet(skb, hard_iface);
		break;

		/* vis packet */
	case BAT_VIS:
		ret = recv_vis_packet(skb, hard_iface);
		break;
	default:
		ret = NET_RX_DROP;
	}

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons. */

	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

struct notifier_block hard_if_notifier = {
	.notifier_call = hard_if_event,
};