GitHub Repository: torvalds/linux
Path: blob/master/net/8021q/vlan.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET 802.1Q VLAN
 * Ethernet-type device handling.
 *
 * Authors:     Ben Greear <[email protected]>
 *              Please send support related email to: [email protected]
 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *              Fix for packet capture - Nick Eggleston <[email protected]>;
 *              Add HW acceleration hooks - David S. Miller <[email protected]>;
 *              Correct all the locking - David S. Miller <[email protected]>;
 *              Use hash table for VLAN groups - David S. Miller <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"

#define DRV_VERSION "1.8"

/* Global VLAN variables */

unsigned int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */

static int vlan_group_prealloc_vid(struct vlan_group *vg,
                                   __be16 vlan_proto, u16 vlan_id)
{
        struct net_device **array;
        unsigned int vidx;
        unsigned int size;
        int pidx;

        ASSERT_RTNL();

        pidx = vlan_proto_idx(vlan_proto);
        if (pidx < 0)
                return -EINVAL;

        vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
        array = vg->vlan_devices_arrays[pidx][vidx];
        if (array != NULL)
                return 0;

        size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
        array = kzalloc(size, GFP_KERNEL_ACCOUNT);
        if (array == NULL)
                return -ENOBUFS;

        /* paired with smp_rmb() in __vlan_group_get_device() */
        smp_wmb();

        vg->vlan_devices_arrays[pidx][vidx] = array;
        return 0;
}
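
/*
 * The smp_wmb() above orders the zeroed array contents against the store of
 * the array pointer.  A minimal sketch of the matching reader, assuming the
 * lookup helper in vlan.h looks roughly like this (illustrative only, kept
 * compiled out; not the definitive implementation):
 */
#if 0
static inline struct net_device *__vlan_group_get_device(struct vlan_group *vg,
                                                          unsigned int pidx,
                                                          u16 vlan_id)
{
        struct net_device **array;

        array = vg->vlan_devices_arrays[pidx][vlan_id / VLAN_GROUP_ARRAY_PART_LEN];

        /* paired with smp_wmb() in vlan_group_prealloc_vid() */
        smp_rmb();

        return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
}
#endif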
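
/* Only propagate the lower device's operstate when bridge binding is not in
 * use; with VLAN_FLAG_BRIDGE_BINDING set, the VLAN device's operstate is
 * driven by bridge state rather than by the underlying device.
 */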
static void vlan_stacked_transfer_operstate(const struct net_device *rootdev,
                                            struct net_device *dev,
                                            struct vlan_dev_priv *vlan)
{
        if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
                netif_stacked_transfer_operstate(rootdev, dev);
}

void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
        struct vlan_info *vlan_info;
        struct vlan_group *grp;
        u16 vlan_id = vlan->vlan_id;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(real_dev->vlan_info);
        BUG_ON(!vlan_info);

        grp = &vlan_info->grp;

        grp->nr_vlan_devs--;

        if (vlan->flags & VLAN_FLAG_MVRP)
                vlan_mvrp_request_leave(dev);
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_leave(dev);

        vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

        netdev_upper_dev_unlink(real_dev, dev);
        /* Because unregister_netdevice_queue() makes sure at least one rcu
         * grace period is respected before device freeing,
         * we don't need to call synchronize_net() here.
         */
        unregister_netdevice_queue(dev, head);

        if (grp->nr_vlan_devs == 0) {
                vlan_mvrp_uninit_applicant(real_dev);
                vlan_gvrp_uninit_applicant(real_dev);
        }

        vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
}

int vlan_check_real_dev(struct net_device *real_dev,
                        __be16 protocol, u16 vlan_id,
                        struct netlink_ext_ack *extack)
{
        const char *name = real_dev->name;

        if (real_dev->features & NETIF_F_VLAN_CHALLENGED ||
            real_dev->type != ARPHRD_ETHER) {
                pr_info("VLANs not supported on %s\n", name);
                NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
                return -EOPNOTSUPP;
        }

        if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) {
                NL_SET_ERR_MSG_MOD(extack, "VLAN device already exists");
                return -EEXIST;
        }

        return 0;
}
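
/*
 * A device opts out of VLAN stacking by advertising NETIF_F_VLAN_CHALLENGED,
 * which makes the check above fail.  Minimal sketch of how a driver might set
 * the flag at setup time (hypothetical driver code, kept compiled out):
 */
#if 0
static void example_setup(struct net_device *dev)
{
        /* refuse 802.1Q stacking on top of this device */
        dev->features |= NETIF_F_VLAN_CHALLENGED;
}
#endif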

int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
        u16 vlan_id = vlan->vlan_id;
        struct vlan_info *vlan_info;
        struct vlan_group *grp;
        int err;

        err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
        if (err)
                return err;

        vlan_info = rtnl_dereference(real_dev->vlan_info);
        /* vlan_info should be there now. vlan_vid_add took care of it */
        BUG_ON(!vlan_info);

        grp = &vlan_info->grp;
        if (grp->nr_vlan_devs == 0) {
                err = vlan_gvrp_init_applicant(real_dev);
                if (err < 0)
                        goto out_vid_del;
                err = vlan_mvrp_init_applicant(real_dev);
                if (err < 0)
                        goto out_uninit_gvrp;
        }

        err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
        if (err < 0)
                goto out_uninit_mvrp;

        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;

        err = netdev_upper_dev_link(real_dev, dev, extack);
        if (err)
                goto out_unregister_netdev;

        vlan_stacked_transfer_operstate(real_dev, dev, vlan);
        linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

        /* So, got the sucker initialized, now let's place
         * it into our local structure.
         */
        vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
        grp->nr_vlan_devs++;

        netdev_update_features(dev);

        return 0;

out_unregister_netdev:
        unregister_netdevice(dev);
out_uninit_mvrp:
        if (grp->nr_vlan_devs == 0)
                vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
        if (grp->nr_vlan_devs == 0)
                vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
        vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
        return err;
}

/* Attach a VLAN device to a MAC address (i.e., an Ethernet card).
 * Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
        struct net_device *new_dev;
        struct vlan_dev_priv *vlan;
        struct net *net = dev_net(real_dev);
        struct vlan_net *vn = net_generic(net, vlan_net_id);
        char name[IFNAMSIZ];
        int err;

        if (vlan_id >= VLAN_VID_MASK)
                return -ERANGE;

        err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id,
                                  NULL);
        if (err < 0)
                return err;

        /* Gotta set up the fields for the device. */
        switch (vn->name_type) {
        case VLAN_NAME_TYPE_RAW_PLUS_VID:
                /* name will look like: eth1.0005 */
                snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
                break;
        case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
                /* Put our vlan.VID in the name.
                 * Name will look like: vlan5
                 */
                snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
                break;
        case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
                /* Put our vlan.VID in the name.
                 * Name will look like: eth0.5
                 */
                snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
                break;
        case VLAN_NAME_TYPE_PLUS_VID:
                /* Put our vlan.VID in the name.
                 * Name will look like: vlan0005
                 */
        default:
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }

        new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
                               NET_NAME_UNKNOWN, vlan_setup);

        if (new_dev == NULL)
                return -ENOBUFS;

        dev_net_set(new_dev, net);
        /* need 4 bytes for extra VLAN header info,
         * hope the underlying device can handle it.
         */
        new_dev->mtu = real_dev->mtu;

        vlan = vlan_dev_priv(new_dev);
        vlan->vlan_proto = htons(ETH_P_8021Q);
        vlan->vlan_id = vlan_id;
        vlan->real_dev = real_dev;
        vlan->dent = NULL;
        vlan->flags = VLAN_FLAG_REORDER_HDR;

        new_dev->rtnl_link_ops = &vlan_link_ops;
        err = register_vlan_dev(new_dev, NULL);
        if (err < 0)
                goto out_free_newdev;

        return 0;

out_free_newdev:
        free_netdev(new_dev);
        return err;
}
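
/* The function above serves the legacy ADD_VLAN_CMD ioctl path; VLAN devices
 * created through rtnetlink (vlan_link_ops, see vlan_netlink.c) end up in
 * register_vlan_dev() as well.
 */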

static void vlan_sync_address(struct net_device *dev,
                              struct net_device *vlandev)
{
        struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

        /* May be called without an actual change */
        if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
                return;

        /* vlan continues to inherit address of lower device */
        if (vlan_dev_inherit_address(vlandev, dev))
                goto out;

        /* vlan address was different from the old address and is equal to
         * the new address */
        if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
            ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
                dev_uc_del(dev, vlandev->dev_addr);

        /* vlan address was equal to the old address and is different from
         * the new address */
        if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
            !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
                dev_uc_add(dev, vlandev->dev_addr);

out:
        ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}

static void vlan_transfer_features(struct net_device *dev,
                                   struct net_device *vlandev)
{
        struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

        netif_inherit_tso_max(vlandev, dev);

        if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
                vlandev->hard_header_len = dev->hard_header_len;
        else
                vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
        vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

        vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
        vlandev->hw_enc_features = vlan_tnl_features(vlan->real_dev);

        netdev_update_features(vlandev);
}

static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
        int err = 0;

        switch (event) {
        case NETDEV_CHANGENAME:
                vlan_proc_rem_dev(dev);
                err = vlan_proc_add_dev(dev);
                break;
        case NETDEV_REGISTER:
                err = vlan_proc_add_dev(dev);
                break;
        case NETDEV_UNREGISTER:
                vlan_proc_rem_dev(dev);
                break;
        }

        return err;
}
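
/* VLAN ID 0 carries 802.1p priority-tagged frames.  When the device filters
 * VLANs in hardware, VID 0 is added to the filter on NETDEV_UP so that
 * priority-tagged traffic keeps flowing, and removed again on NETDEV_DOWN.
 */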
static void vlan_vid0_add(struct net_device *dev)
{
        struct vlan_info *vlan_info;
        int err;

        if (!(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
                return;

        pr_info("adding VLAN 0 to HW filter on device %s\n", dev->name);

        err = vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
        if (err)
                return;

        vlan_info = rtnl_dereference(dev->vlan_info);
        vlan_info->auto_vid0 = true;
}

static void vlan_vid0_del(struct net_device *dev)
{
        struct vlan_info *vlan_info = rtnl_dereference(dev->vlan_info);

        if (!vlan_info || !vlan_info->auto_vid0)
                return;

        vlan_info->auto_vid0 = false;
        vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
}

static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                             void *ptr)
{
        struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct vlan_group *grp;
        struct vlan_info *vlan_info;
        int i, flgs;
        struct net_device *vlandev;
        struct vlan_dev_priv *vlan;
        bool last = false;
        LIST_HEAD(list);
        int err;

        if (is_vlan_dev(dev)) {
                int err = __vlan_device_event(dev, event);

                if (err)
                        return notifier_from_errno(err);
        }

        if (event == NETDEV_UP)
                vlan_vid0_add(dev);
        else if (event == NETDEV_DOWN)
                vlan_vid0_del(dev);

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                goto out;
        grp = &vlan_info->grp;

        /* It is OK that we do not hold the group lock right now,
         * as we run under the RTNL lock.
         */

        switch (event) {
        case NETDEV_CHANGE:
                /* Propagate real device state to vlan devices */
                vlan_group_for_each_dev(grp, i, vlandev)
                        vlan_stacked_transfer_operstate(dev, vlandev,
                                                        vlan_dev_priv(vlandev));
                break;

        case NETDEV_CHANGEADDR:
                /* Adjust unicast filters on underlying device */
                vlan_group_for_each_dev(grp, i, vlandev) {
                        flgs = vlandev->flags;
                        if (!(flgs & IFF_UP))
                                continue;

                        vlan_sync_address(dev, vlandev);
                }
                break;

        case NETDEV_CHANGEMTU:
                vlan_group_for_each_dev(grp, i, vlandev) {
                        if (vlandev->mtu <= dev->mtu)
                                continue;

                        dev_set_mtu(vlandev, dev->mtu);
                }
                break;

        case NETDEV_FEAT_CHANGE:
                /* Propagate device features to underlying device */
                vlan_group_for_each_dev(grp, i, vlandev)
                        vlan_transfer_features(dev, vlandev);
                break;

        case NETDEV_DOWN: {
                struct net_device *tmp;
                LIST_HEAD(close_list);

                /* Put all VLANs for this dev in the down state too. */
                vlan_group_for_each_dev(grp, i, vlandev) {
                        flgs = vlandev->flags;
                        if (!(flgs & IFF_UP))
                                continue;

                        vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                list_add(&vlandev->close_list, &close_list);
                }

                netif_close_many(&close_list, false);

                list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
                        vlan_stacked_transfer_operstate(dev, vlandev,
                                                        vlan_dev_priv(vlandev));
                        list_del_init(&vlandev->close_list);
                }
                list_del(&close_list);
                break;
        }
        case NETDEV_UP:
                /* Put all VLANs for this dev in the up state too. */
                vlan_group_for_each_dev(grp, i, vlandev) {
                        flgs = netif_get_flags(vlandev);
                        if (flgs & IFF_UP)
                                continue;

                        vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs | IFF_UP,
                                                 extack);
                        vlan_stacked_transfer_operstate(dev, vlandev, vlan);
                }
                break;

        case NETDEV_UNREGISTER:
                /* twiddle thumbs on netns device moves */
                if (dev->reg_state != NETREG_UNREGISTERING)
                        break;

                vlan_group_for_each_dev(grp, i, vlandev) {
                        /* removal of last vid destroys vlan_info, abort
                         * afterwards */
                        if (vlan_info->nr_vids == 1)
                                last = true;

                        unregister_vlan_dev(vlandev, &list);
                        if (last)
                                break;
                }
                unregister_netdevice_many(&list);
                break;

        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid the underlying device from changing its type. */
                if (vlan_uses_dev(dev))
                        return NOTIFY_BAD;
                break;

        case NETDEV_NOTIFY_PEERS:
        case NETDEV_BONDING_FAILOVER:
        case NETDEV_RESEND_IGMP:
                /* Propagate to vlan devices */
                vlan_group_for_each_dev(grp, i, vlandev)
                        call_netdevice_notifiers(event, vlandev);
                break;

        case NETDEV_CVLAN_FILTER_PUSH_INFO:
                err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
                if (err)
                        return notifier_from_errno(err);
                break;

        case NETDEV_CVLAN_FILTER_DROP_INFO:
                vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
                break;

        case NETDEV_SVLAN_FILTER_PUSH_INFO:
                err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
                if (err)
                        return notifier_from_errno(err);
                break;

        case NETDEV_SVLAN_FILTER_DROP_INFO:
                vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
                break;
        }

out:
        return NOTIFY_DONE;
}

static struct notifier_block vlan_notifier_block __read_mostly = {
        .notifier_call = vlan_device_event,
};

/*
 * VLAN IOCTL handler.
 * o execute requested action or pass command to the device driver
 *   arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
        int err;
        struct vlan_ioctl_args args;
        struct net_device *dev = NULL;

        if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
                return -EFAULT;

        /* Null terminate this sucker, just in case. */
        args.device1[sizeof(args.device1) - 1] = 0;
        args.u.device2[sizeof(args.u.device2) - 1] = 0;

        rtnl_lock();

        switch (args.cmd) {
        case SET_VLAN_INGRESS_PRIORITY_CMD:
        case SET_VLAN_EGRESS_PRIORITY_CMD:
        case SET_VLAN_FLAG_CMD:
        case ADD_VLAN_CMD:
        case DEL_VLAN_CMD:
        case GET_VLAN_REALDEV_NAME_CMD:
        case GET_VLAN_VID_CMD:
                err = -ENODEV;
                dev = __dev_get_by_name(net, args.device1);
                if (!dev)
                        goto out;

                err = -EINVAL;
                if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
                        goto out;
        }

        switch (args.cmd) {
        case SET_VLAN_INGRESS_PRIORITY_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                vlan_dev_set_ingress_priority(dev,
                                              args.u.skb_priority,
                                              args.vlan_qos);
                err = 0;
                break;

        case SET_VLAN_EGRESS_PRIORITY_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                err = vlan_dev_set_egress_priority(dev,
                                                   args.u.skb_priority,
                                                   args.vlan_qos);
                break;

        case SET_VLAN_FLAG_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                err = vlan_dev_change_flags(dev,
                                            args.vlan_qos ? args.u.flag : 0,
                                            args.u.flag);
                break;

        case SET_VLAN_NAME_TYPE_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
                        struct vlan_net *vn;

                        vn = net_generic(net, vlan_net_id);
                        vn->name_type = args.u.name_type;
                        err = 0;
                } else {
                        err = -EINVAL;
                }
                break;

        case ADD_VLAN_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                err = register_vlan_device(dev, args.u.VID);
                break;

        case DEL_VLAN_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                unregister_vlan_dev(dev, NULL);
                err = 0;
                break;

        case GET_VLAN_REALDEV_NAME_CMD:
                err = 0;
                vlan_dev_get_realdev_name(dev, args.u.device2,
                                          sizeof(args.u.device2));
                if (copy_to_user(arg, &args,
                                 sizeof(struct vlan_ioctl_args)))
                        err = -EFAULT;
                break;

        case GET_VLAN_VID_CMD:
                err = 0;
                args.u.VID = vlan_dev_vlan_id(dev);
                if (copy_to_user(arg, &args,
                                 sizeof(struct vlan_ioctl_args)))
                        err = -EFAULT;
                break;

        default:
                err = -EOPNOTSUPP;
                break;
        }
out:
        rtnl_unlock();
        return err;
}
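
/*
 * The handler above is reached via the SIOCSIFVLAN ioctl on an ordinary
 * socket.  A minimal user-space sketch that creates eth0.100 through this
 * legacy interface, kept compiled out (illustrative only; the device name and
 * VLAN ID are made up, and rtnetlink is the usual way to do this today):
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_vlan.h>
#include <linux/sockios.h>

int main(void)
{
        struct vlan_ioctl_args args;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&args, 0, sizeof(args));
        args.cmd = ADD_VLAN_CMD;            /* dispatched to vlan_ioctl_handler() */
        strncpy(args.device1, "eth0", sizeof(args.device1) - 1);
        args.u.VID = 100;

        if (ioctl(fd, SIOCSIFVLAN, &args) < 0)
                perror("SIOCSIFVLAN");      /* needs CAP_NET_ADMIN */

        close(fd);
        return 0;
}
#endif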

static int __net_init vlan_init_net(struct net *net)
{
        struct vlan_net *vn = net_generic(net, vlan_net_id);
        int err;

        vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

        err = vlan_proc_init(net);

        return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
        vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
        .init = vlan_init_net,
        .exit = vlan_exit_net,
        .id   = &vlan_net_id,
        .size = sizeof(struct vlan_net),
};

static int __init vlan_proto_init(void)
{
        int err;

        pr_info("%s v%s\n", vlan_fullname, vlan_version);

        err = register_pernet_subsys(&vlan_net_ops);
        if (err < 0)
                goto err0;

        err = register_netdevice_notifier(&vlan_notifier_block);
        if (err < 0)
                goto err2;

        err = vlan_gvrp_init();
        if (err < 0)
                goto err3;

        err = vlan_mvrp_init();
        if (err < 0)
                goto err4;

        err = vlan_netlink_init();
        if (err < 0)
                goto err5;

        vlan_ioctl_set(vlan_ioctl_handler);
        return 0;

err5:
        vlan_mvrp_uninit();
err4:
        vlan_gvrp_uninit();
err3:
        unregister_netdevice_notifier(&vlan_notifier_block);
err2:
        unregister_pernet_subsys(&vlan_net_ops);
err0:
        return err;
}

static void __exit vlan_cleanup_module(void)
{
        vlan_ioctl_set(NULL);

        vlan_netlink_fini();

        unregister_netdevice_notifier(&vlan_notifier_block);

        unregister_pernet_subsys(&vlan_net_ops);
        rcu_barrier(); /* Wait for completion of call_rcu()'s */

        vlan_mvrp_uninit();
        vlan_gvrp_uninit();
}

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_DESCRIPTION("802.1Q/802.1ad VLAN Protocol");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_IMPORT_NS("NETDEV_INTERNAL");