GitHub Repository: torvalds/linux
Path: blob/master/net/8021q/vlan.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET 802.1Q VLAN
 * Ethernet-type device handling.
 *
 * Authors:     Ben Greear <[email protected]>
 *              Please send support related email to: [email protected]
 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *              Fix for packet capture - Nick Eggleston <[email protected]>;
 *              Add HW acceleration hooks - David S. Miller <[email protected]>;
 *              Correct all the locking - David S. Miller <[email protected]>;
 *              Use hash table for VLAN groups - David S. Miller <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"

#define DRV_VERSION "1.8"

/* Global VLAN variables */

unsigned int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */

static int vlan_group_prealloc_vid(struct vlan_group *vg,
                                   __be16 vlan_proto, u16 vlan_id)
{
        struct net_device **array;
        unsigned int vidx;
        unsigned int size;
        int pidx;

        ASSERT_RTNL();

        pidx = vlan_proto_idx(vlan_proto);
        if (pidx < 0)
                return -EINVAL;

        vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
        array = vg->vlan_devices_arrays[pidx][vidx];
        if (array != NULL)
                return 0;

        size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
        array = kzalloc(size, GFP_KERNEL_ACCOUNT);
        if (array == NULL)
                return -ENOBUFS;

        /* paired with smp_rmb() in __vlan_group_get_device() */
        smp_wmb();

        vg->vlan_devices_arrays[pidx][vidx] = array;
        return 0;
}

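/* Mirror the lower device's operational state onto the VLAN device, unless
 * the VLAN uses bridge binding (VLAN_FLAG_BRIDGE_BINDING), in which case its
 * state is not tied to the real device.
 */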
static void vlan_stacked_transfer_operstate(const struct net_device *rootdev,
                                            struct net_device *dev,
                                            struct vlan_dev_priv *vlan)
{
        if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
                netif_stacked_transfer_operstate(rootdev, dev);
}

void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
        struct vlan_info *vlan_info;
        struct vlan_group *grp;
        u16 vlan_id = vlan->vlan_id;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(real_dev->vlan_info);
        BUG_ON(!vlan_info);

        grp = &vlan_info->grp;

        grp->nr_vlan_devs--;

        if (vlan->flags & VLAN_FLAG_MVRP)
                vlan_mvrp_request_leave(dev);
        if (vlan->flags & VLAN_FLAG_GVRP)
                vlan_gvrp_request_leave(dev);

        vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

        netdev_upper_dev_unlink(real_dev, dev);
        /* Because unregister_netdevice_queue() makes sure at least one rcu
         * grace period is respected before device freeing,
         * we don't need to call synchronize_net() here.
         */
        unregister_netdevice_queue(dev, head);

        if (grp->nr_vlan_devs == 0) {
                vlan_mvrp_uninit_applicant(real_dev);
                vlan_gvrp_uninit_applicant(real_dev);
        }

        vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
}

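/* Check that real_dev can carry VLANs at all and that no VLAN device with
 * this protocol/VID pair already exists on top of it.
 */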
int vlan_check_real_dev(struct net_device *real_dev,
                        __be16 protocol, u16 vlan_id,
                        struct netlink_ext_ack *extack)
{
        const char *name = real_dev->name;

        if (real_dev->features & NETIF_F_VLAN_CHALLENGED ||
            real_dev->type != ARPHRD_ETHER) {
                pr_info("VLANs not supported on %s\n", name);
                NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
                return -EOPNOTSUPP;
        }

        if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) {
                NL_SET_ERR_MSG_MOD(extack, "VLAN device already exists");
                return -EEXIST;
        }

        return 0;
}

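/* Complete registration of a VLAN device: reserve the VID on the lower
 * device, start the GVRP/MVRP applicants for the first VLAN on that device,
 * register the netdevice, and link it above real_dev.
 */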
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev = vlan->real_dev;
        u16 vlan_id = vlan->vlan_id;
        struct vlan_info *vlan_info;
        struct vlan_group *grp;
        int err;

        err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
        if (err)
                return err;

        vlan_info = rtnl_dereference(real_dev->vlan_info);
        /* vlan_info should be there now. vlan_vid_add took care of it */
        BUG_ON(!vlan_info);

        grp = &vlan_info->grp;
        if (grp->nr_vlan_devs == 0) {
                err = vlan_gvrp_init_applicant(real_dev);
                if (err < 0)
                        goto out_vid_del;
                err = vlan_mvrp_init_applicant(real_dev);
                if (err < 0)
                        goto out_uninit_gvrp;
        }

        err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
        if (err < 0)
                goto out_uninit_mvrp;

        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;

        err = netdev_upper_dev_link(real_dev, dev, extack);
        if (err)
                goto out_unregister_netdev;

        vlan_stacked_transfer_operstate(real_dev, dev, vlan);
        linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

        /* So, got the sucker initialized, now let's place
         * it into our local structure.
         */
        vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
        grp->nr_vlan_devs++;

        return 0;

out_unregister_netdev:
        unregister_netdevice(dev);
out_uninit_mvrp:
        if (grp->nr_vlan_devs == 0)
                vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
        if (grp->nr_vlan_devs == 0)
                vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
        vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
        return err;
}

/* Attach a VLAN device to a MAC address (i.e. an Ethernet card).
 * Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
        struct net_device *new_dev;
        struct vlan_dev_priv *vlan;
        struct net *net = dev_net(real_dev);
        struct vlan_net *vn = net_generic(net, vlan_net_id);
        char name[IFNAMSIZ];
        int err;

        if (vlan_id >= VLAN_VID_MASK)
                return -ERANGE;

        err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id,
                                  NULL);
        if (err < 0)
                return err;

        /* Gotta set up the fields for the device. */
        switch (vn->name_type) {
        case VLAN_NAME_TYPE_RAW_PLUS_VID:
                /* name will look like: eth1.0005 */
                snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
                break;
        case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
                /* Put our vlan.VID in the name.
                 * Name will look like: vlan5
                 */
                snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
                break;
        case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
                /* Put our vlan.VID in the name.
                 * Name will look like: eth0.5
                 */
                snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
                break;
        case VLAN_NAME_TYPE_PLUS_VID:
                /* Put our vlan.VID in the name.
                 * Name will look like: vlan0005
                 */
        default:
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }

        new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
                               NET_NAME_UNKNOWN, vlan_setup);

        if (new_dev == NULL)
                return -ENOBUFS;

        dev_net_set(new_dev, net);
        /* need 4 bytes for extra VLAN header info,
         * hope the underlying device can handle it.
         */
        new_dev->mtu = real_dev->mtu;

        vlan = vlan_dev_priv(new_dev);
        vlan->vlan_proto = htons(ETH_P_8021Q);
        vlan->vlan_id = vlan_id;
        vlan->real_dev = real_dev;
        vlan->dent = NULL;
        vlan->flags = VLAN_FLAG_REORDER_HDR;

        new_dev->rtnl_link_ops = &vlan_link_ops;
        err = register_vlan_dev(new_dev, NULL);
        if (err < 0)
                goto out_free_newdev;

        return 0;

out_free_newdev:
        free_netdev(new_dev);
        return err;
}

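/* Keep the VLAN device's unicast address handling in sync when the lower
 * device's MAC address changes.
 */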
static void vlan_sync_address(struct net_device *dev,
                              struct net_device *vlandev)
{
        struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

        /* May be called without an actual change */
        if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
                return;

        /* vlan continues to inherit address of lower device */
        if (vlan_dev_inherit_address(vlandev, dev))
                goto out;

        /* vlan address was different from the old address and is equal to
         * the new address */
        if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
            ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
                dev_uc_del(dev, vlandev->dev_addr);

        /* vlan address was equal to the old address and is different from
         * the new address */
        if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
            !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
                dev_uc_add(dev, vlandev->dev_addr);

out:
        ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}

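/* Recompute the VLAN device's header length and offload features after the
 * lower device's feature set changed.
 */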
static void vlan_transfer_features(struct net_device *dev,
                                   struct net_device *vlandev)
{
        struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

        netif_inherit_tso_max(vlandev, dev);

        if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
                vlandev->hard_header_len = dev->hard_header_len;
        else
                vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
        vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

        vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
        vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
        vlandev->hw_enc_features = vlan_tnl_features(vlan->real_dev);

        netdev_update_features(vlandev);
}

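/* Keep the /proc/net/vlan entry of a VLAN device in sync with register,
 * unregister and rename events on that device.
 */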
static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
        int err = 0;

        switch (event) {
        case NETDEV_CHANGENAME:
                vlan_proc_rem_dev(dev);
                err = vlan_proc_add_dev(dev);
                break;
        case NETDEV_REGISTER:
                err = vlan_proc_add_dev(dev);
                break;
        case NETDEV_UNREGISTER:
                vlan_proc_rem_dev(dev);
                break;
        }

        return err;
}

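/* On NETDEV_UP, add VID 0 to the hardware filter of devices that filter
 * C-TAGs in hardware, and remember that it was added automatically so that
 * vlan_vid0_del() only removes it again on NETDEV_DOWN if we added it here.
 */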
static void vlan_vid0_add(struct net_device *dev)
{
        struct vlan_info *vlan_info;
        int err;

        if (!(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
                return;

        pr_info("adding VLAN 0 to HW filter on device %s\n", dev->name);

        err = vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
        if (err)
                return;

        vlan_info = rtnl_dereference(dev->vlan_info);
        vlan_info->auto_vid0 = true;
}

static void vlan_vid0_del(struct net_device *dev)
{
        struct vlan_info *vlan_info = rtnl_dereference(dev->vlan_info);

        if (!vlan_info || !vlan_info->auto_vid0)
                return;

        vlan_info->auto_vid0 = false;
        vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
}

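/* Notifier for events on the real device: propagate state, address, MTU and
 * feature changes to the VLAN devices stacked on top of it.
 */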
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
                             void *ptr)
{
        struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct vlan_group *grp;
        struct vlan_info *vlan_info;
        int i, flgs;
        struct net_device *vlandev;
        struct vlan_dev_priv *vlan;
        bool last = false;
        LIST_HEAD(list);
        int err;

        if (is_vlan_dev(dev)) {
                int err = __vlan_device_event(dev, event);

                if (err)
                        return notifier_from_errno(err);
        }

        if (event == NETDEV_UP)
                vlan_vid0_add(dev);
        else if (event == NETDEV_DOWN)
                vlan_vid0_del(dev);

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                goto out;
        grp = &vlan_info->grp;

        /* It is OK that we do not hold the group lock right now,
         * as we run under the RTNL lock.
         */

        switch (event) {
        case NETDEV_CHANGE:
                /* Propagate real device state to vlan devices */
                vlan_group_for_each_dev(grp, i, vlandev)
                        vlan_stacked_transfer_operstate(dev, vlandev,
                                                        vlan_dev_priv(vlandev));
                break;

        case NETDEV_CHANGEADDR:
                /* Adjust unicast filters on underlying device */
                vlan_group_for_each_dev(grp, i, vlandev) {
                        flgs = vlandev->flags;
                        if (!(flgs & IFF_UP))
                                continue;

                        vlan_sync_address(dev, vlandev);
                }
                break;

        case NETDEV_CHANGEMTU:
                vlan_group_for_each_dev(grp, i, vlandev) {
                        if (vlandev->mtu <= dev->mtu)
                                continue;

                        dev_set_mtu(vlandev, dev->mtu);
                }
                break;

        case NETDEV_FEAT_CHANGE:
                /* Propagate real device features to the vlan devices */
                vlan_group_for_each_dev(grp, i, vlandev)
                        vlan_transfer_features(dev, vlandev);
                break;

        case NETDEV_DOWN: {
                struct net_device *tmp;
                LIST_HEAD(close_list);

                /* Put all VLANs for this dev in the down state too. */
                vlan_group_for_each_dev(grp, i, vlandev) {
                        flgs = vlandev->flags;
                        if (!(flgs & IFF_UP))
                                continue;

                        vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                list_add(&vlandev->close_list, &close_list);
                }

                netif_close_many(&close_list, false);

                list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
                        vlan_stacked_transfer_operstate(dev, vlandev,
                                                        vlan_dev_priv(vlandev));
                        list_del_init(&vlandev->close_list);
                }
                list_del(&close_list);
                break;
        }
        case NETDEV_UP:
                /* Put all VLANs for this dev in the up state too. */
                vlan_group_for_each_dev(grp, i, vlandev) {
                        flgs = netif_get_flags(vlandev);
                        if (flgs & IFF_UP)
                                continue;

                        vlan = vlan_dev_priv(vlandev);
                        if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
                                dev_change_flags(vlandev, flgs | IFF_UP,
                                                 extack);
                        vlan_stacked_transfer_operstate(dev, vlandev, vlan);
                }
                break;

        case NETDEV_UNREGISTER:
                /* twiddle thumbs on netns device moves */
                if (dev->reg_state != NETREG_UNREGISTERING)
                        break;

                vlan_group_for_each_dev(grp, i, vlandev) {
                        /* removal of last vid destroys vlan_info, abort
                         * afterwards */
                        if (vlan_info->nr_vids == 1)
                                last = true;

                        unregister_vlan_dev(vlandev, &list);
                        if (last)
                                break;
                }
                unregister_netdevice_many(&list);
                break;

        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid the underlying device from changing its type. */
                if (vlan_uses_dev(dev))
                        return NOTIFY_BAD;
                break;

        case NETDEV_NOTIFY_PEERS:
        case NETDEV_BONDING_FAILOVER:
        case NETDEV_RESEND_IGMP:
                /* Propagate to vlan devices */
                vlan_group_for_each_dev(grp, i, vlandev)
                        call_netdevice_notifiers(event, vlandev);
                break;

        case NETDEV_CVLAN_FILTER_PUSH_INFO:
                err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
                if (err)
                        return notifier_from_errno(err);
                break;

        case NETDEV_CVLAN_FILTER_DROP_INFO:
                vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
                break;

        case NETDEV_SVLAN_FILTER_PUSH_INFO:
                err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
                if (err)
                        return notifier_from_errno(err);
                break;

        case NETDEV_SVLAN_FILTER_DROP_INFO:
                vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
                break;
        }

out:
        return NOTIFY_DONE;
}

static struct notifier_block vlan_notifier_block __read_mostly = {
        .notifier_call = vlan_device_event,
};

/*
 *      VLAN IOCTL handler.
 *      o execute requested action or pass command to the device driver
 *        arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
        int err;
        struct vlan_ioctl_args args;
        struct net_device *dev = NULL;

        if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
                return -EFAULT;

        /* Null terminate this sucker, just in case. */
        args.device1[sizeof(args.device1) - 1] = 0;
        args.u.device2[sizeof(args.u.device2) - 1] = 0;

        rtnl_lock();

        switch (args.cmd) {
        case SET_VLAN_INGRESS_PRIORITY_CMD:
        case SET_VLAN_EGRESS_PRIORITY_CMD:
        case SET_VLAN_FLAG_CMD:
        case ADD_VLAN_CMD:
        case DEL_VLAN_CMD:
        case GET_VLAN_REALDEV_NAME_CMD:
        case GET_VLAN_VID_CMD:
                err = -ENODEV;
                dev = __dev_get_by_name(net, args.device1);
                if (!dev)
                        goto out;

                err = -EINVAL;
                if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
                        goto out;
        }

        switch (args.cmd) {
        case SET_VLAN_INGRESS_PRIORITY_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                vlan_dev_set_ingress_priority(dev,
                                              args.u.skb_priority,
                                              args.vlan_qos);
                err = 0;
                break;

        case SET_VLAN_EGRESS_PRIORITY_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                err = vlan_dev_set_egress_priority(dev,
                                                   args.u.skb_priority,
                                                   args.vlan_qos);
                break;

        case SET_VLAN_FLAG_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                err = vlan_dev_change_flags(dev,
                                            args.vlan_qos ? args.u.flag : 0,
                                            args.u.flag);
                break;

        case SET_VLAN_NAME_TYPE_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
                        struct vlan_net *vn;

                        vn = net_generic(net, vlan_net_id);
                        vn->name_type = args.u.name_type;
                        err = 0;
                } else {
                        err = -EINVAL;
                }
                break;

        case ADD_VLAN_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                err = register_vlan_device(dev, args.u.VID);
                break;

        case DEL_VLAN_CMD:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        break;
                unregister_vlan_dev(dev, NULL);
                err = 0;
                break;

        case GET_VLAN_REALDEV_NAME_CMD:
                err = 0;
                vlan_dev_get_realdev_name(dev, args.u.device2,
                                          sizeof(args.u.device2));
                if (copy_to_user(arg, &args,
                                 sizeof(struct vlan_ioctl_args)))
                        err = -EFAULT;
                break;

        case GET_VLAN_VID_CMD:
                err = 0;
                args.u.VID = vlan_dev_vlan_id(dev);
                if (copy_to_user(arg, &args,
                                 sizeof(struct vlan_ioctl_args)))
                        err = -EFAULT;
                break;

        default:
                err = -EOPNOTSUPP;
                break;
        }
out:
        rtnl_unlock();
        return err;
}

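/* Per-network-namespace init: pick the default interface naming scheme and
 * create the /proc/net/vlan entries for this namespace.
 */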
static int __net_init vlan_init_net(struct net *net)
{
        struct vlan_net *vn = net_generic(net, vlan_net_id);
        int err;

        vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

        err = vlan_proc_init(net);

        return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
        vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
        .init = vlan_init_net,
        .exit = vlan_exit_net,
        .id = &vlan_net_id,
        .size = sizeof(struct vlan_net),
};

static int __init vlan_proto_init(void)
{
        int err;

        pr_info("%s v%s\n", vlan_fullname, vlan_version);

        err = register_pernet_subsys(&vlan_net_ops);
        if (err < 0)
                goto err0;

        err = register_netdevice_notifier(&vlan_notifier_block);
        if (err < 0)
                goto err2;

        err = vlan_gvrp_init();
        if (err < 0)
                goto err3;

        err = vlan_mvrp_init();
        if (err < 0)
                goto err4;

        err = vlan_netlink_init();
        if (err < 0)
                goto err5;

        vlan_ioctl_set(vlan_ioctl_handler);
        return 0;

err5:
        vlan_mvrp_uninit();
err4:
        vlan_gvrp_uninit();
err3:
        unregister_netdevice_notifier(&vlan_notifier_block);
err2:
        unregister_pernet_subsys(&vlan_net_ops);
err0:
        return err;
}

static void __exit vlan_cleanup_module(void)
{
        vlan_ioctl_set(NULL);

        vlan_netlink_fini();

        unregister_netdevice_notifier(&vlan_notifier_block);

        unregister_pernet_subsys(&vlan_net_ops);
        rcu_barrier(); /* Wait for completion of call_rcu()'s */

        vlan_mvrp_uninit();
        vlan_gvrp_uninit();
}

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_DESCRIPTION("802.1Q/802.1ad VLAN Protocol");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_IMPORT_NS("NETDEV_INTERNAL");