// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DSA topology and switch handling
 *
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <[email protected]>
 * Copyright (c) 2016 Andrew Lunn <[email protected]>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/if_hsr.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/dsa_stubs.h>
#include <net/sch_generic.h>

#include "conduit.h"
#include "devlink.h"
#include "dsa.h"
#include "netlink.h"
#include "port.h"
#include "switch.h"
#include "tag.h"
#include "user.h"

#define DSA_MAX_NUM_OFFLOADING_BRIDGES		BITS_PER_LONG

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

static struct workqueue_struct *dsa_owq;

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
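
/* dsa_owq is allocated as an ordered workqueue in dsa_init_module() below,
 * so items queued through dsa_schedule_work() run one at a time, in
 * submission order; dsa_flush_workqueue() waits for all of them to finish.
 */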

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}
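
/* Illustrative sketch (not part of this file): a driver that offloads LAGs
 * advertises its ID space before registration, e.g. from its probe path;
 * the names below are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		struct dsa_switch *ds = foo_alloc_switch(dev);
 *
 *		ds->num_lag_ids = 16;	// hardware supports 16 LAGs
 *		return dsa_register_switch(ds);
 *	}
 *
 * dsa_lag_map() then hands out IDs 1..dst->lags_len on a first-free basis,
 * and dsa_lag_id()/dsa_lag_by_id() translate between a LAG structure and
 * its linear ID.
 */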

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_by_id(dst, id) == lag) {
			dst->lags[id - 1] = NULL;
			lag->id = 0;
			break;
		}
	}
}

struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_lag_dev_get(dp) == lag_dev)
			return dp->lag;

	return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_bridge_dev_get(dp) == br)
			return dp->bridge;

	return NULL;
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}

unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num > max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}
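
/* Worked example: with max = 4 (a switch whose FDB isolation supports four
 * bridges), the first four offloaded bridges receive numbers 1 through 4
 * from the dsa_fwd_offloading_bridges bitmap (the search starts at bit 1,
 * so numbering is 1-based and 0 means "no dedicated number"). A fifth
 * concurrent bridge would be refused a number and gets 0, i.e. no TX
 * forwarding offload.
 */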

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}
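
/* Tree lifetime in short: dsa_tree_touch() either finds an existing tree by
 * index (taking a reference via dsa_tree_get()) or allocates a new one with
 * a refcount of one. Every switch that joins the tree holds a reference,
 * and dsa_tree_release() frees the tree when the last dsa_tree_put() drops
 * the refcount to zero.
 */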

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}
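
/* Illustrative devicetree fragment (adapted from the DSA bindings, not from
 * this file): an inter-switch DSA port points at its peers through "link"
 * phandles, which dsa_port_setup_routing_table() resolves above:
 *
 *	switch0port5: port@5 {
 *		reg = <5>;
 *		link = <&switch1port6>;
 *	};
 *
 * The routing table stays incomplete, and tree setup is deferred, until
 * every referenced port has probed and can be found by its OF node.
 */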

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	return cpu_dp->conduit;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

static struct dsa_port *
dsa_switch_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
	struct dsa_port *cpu_dp;

	if (!ds->ops->preferred_default_local_cpu_port)
		return NULL;

	cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);
	if (!cpu_dp)
		return NULL;

	if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
		return NULL;

	return cpu_dp;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
		if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}
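
/* Minimal sketch of the driver hook consulted above, assuming a
 * hypothetical switch whose port 0 is the best CPU port (the name "foo" is
 * illustrative):
 *
 *	static struct dsa_port *
 *	foo_preferred_default_local_cpu_port(struct dsa_switch *ds)
 *	{
 *		return dsa_to_port(ds, 0);
 *	}
 *
 * The returned port must be a CPU port of the same switch, which the
 * WARN_ON() in dsa_switch_preferred_default_local_cpu_port() enforces.
 */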

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

static int dsa_port_setup(struct dsa_port *dp)
{
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for CPU port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		if (dp->dn) {
			err = dsa_shared_port_link_register_of(dp);
			if (err)
				break;
			dsa_port_link_registered = true;
		} else {
			dev_warn(ds->dev,
				 "skipping link registration for DSA port %d\n",
				 dp->index);
		}

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_user_create(dp);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_shared_port_link_unregister_of(dp);
	if (err) {
		dsa_port_devlink_teardown(dp);
		return err;
	}

	dp->setup = true;

	return 0;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	if (!dp->setup)
		return;

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		if (dp->dn)
			dsa_shared_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->user) {
			dsa_user_destroy(dp->user);
			dp->user = NULL;
		}
		break;
	}

	dsa_port_devlink_teardown(dp);

	dp->setup = false;
}

static int dsa_port_setup_as_unused(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_UNUSED;
	return dsa_port_setup(dp);
}

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}

static void dsa_switch_teardown_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;

	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);
}

static int dsa_switch_setup(struct dsa_switch *ds)
{
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the user MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the user MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	err = dsa_switch_devlink_alloc(ds);
	if (err)
		return err;

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto devlink_free;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->user_mii_bus && ds->ops->phy_read) {
		ds->user_mii_bus = mdiobus_alloc();
		if (!ds->user_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_user_mii_bus_init(ds);

		err = mdiobus_register(ds->user_mii_bus);
		if (err < 0)
			goto free_user_mii_bus;
	}

	dsa_switch_devlink_register(ds);

	ds->setup = true;
	return 0;

free_user_mii_bus:
	if (ds->user_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->user_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
devlink_free:
	dsa_switch_devlink_free(ds);
	return err;
}
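
/* Note on the function above: a driver that implements ->phy_read (and did
 * not already provide ds->user_mii_bus itself) automatically gets an MDIO
 * bus allocated and registered here, so that its user ports can probe
 * their PHYs through the switch's internal MDIO controller.
 */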

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	if (!ds->setup)
		return;

	dsa_switch_devlink_unregister(ds);

	if (ds->user_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->user_mii_bus);
		mdiobus_free(ds->user_mii_bus);
		ds->user_mii_bus = NULL;
	}

	dsa_switch_teardown_tag_protocol(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	dsa_switch_devlink_free(ds);

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_setup_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}

static int dsa_tree_setup_conduit(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;
	int err = 0;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *conduit = cpu_dp->conduit;
		bool admin_up = (conduit->flags & IFF_UP) &&
				!qdisc_tx_is_noop(conduit);

		err = dsa_conduit_setup(conduit, cpu_dp);
		if (err)
			break;

		/* Replay conduit state event */
		dsa_tree_conduit_admin_state_change(dst, conduit, admin_up);
		dsa_tree_conduit_oper_state_change(dst, conduit,
						   netif_oper_up(conduit));
	}

	rtnl_unlock();

	return err;
}

static void dsa_tree_teardown_conduit(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp;

	rtnl_lock();

	dsa_tree_for_each_cpu_port(cpu_dp, dst) {
		struct net_device *conduit = cpu_dp->conduit;

		/* Synthesizing an "admin down" state is sufficient for
		 * the switches to get a notification if the conduit is
		 * currently up and running.
		 */
		dsa_tree_conduit_admin_state_change(dst, conduit, false);

		dsa_conduit_teardown(conduit);
	}

	rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

static void dsa_tree_teardown_routing_table(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}
}

static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		goto teardown_rtable;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_conduit(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_conduit;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_conduit:
	dsa_tree_teardown_conduit(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);
teardown_rtable:
	dsa_tree_teardown_routing_table(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_conduit(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	dsa_tree_teardown_routing_table(dst);

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}

/* Since the dsa/tagging sysfs device attribute is per conduit, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	dsa_tree_for_each_user_port(dp, dst) {
		if (dsa_port_to_conduit(dp)->flags & IFF_UP)
			goto out_unlock;

		if (dp->user->flags & IFF_UP)
			goto out_unlock;
	}

	/* Notify the tag protocol change */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	err = dsa_tree_bind_tag_proto(dst, tag_ops);
	if (err)
		goto out_unwind_tagger;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
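
/* For reference: the sysfs attribute mentioned above lives on the conduit
 * interface, and the -EBUSY above means the tagger can only be changed
 * while all affected interfaces are down. A typical sequence (interface
 * and tagger names illustrative) is:
 *
 *	ip link set eth0 down		# conduit
 *	ip link set lan1 down		# every user port
 *	echo ocelot > /sys/class/net/eth0/dsa/tagging
 */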

static void dsa_tree_conduit_state_change(struct dsa_switch_tree *dst,
					  struct net_device *conduit)
{
	struct dsa_notifier_conduit_state_info info;
	struct dsa_port *cpu_dp = conduit->dsa_ptr;

	info.conduit = conduit;
	info.operational = dsa_port_conduit_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_CONDUIT_STATE_CHANGE, &info);
}

void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst,
					 struct net_device *conduit,
					 bool up)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	bool notify = false;

	/* Don't keep track of admin state on LAG DSA conduits,
	 * but rather just of physical DSA conduits
	 */
	if (netif_is_lag_master(conduit))
		return;

	if ((dsa_port_conduit_is_operational(cpu_dp)) !=
	    (up && cpu_dp->conduit_oper_up))
		notify = true;

	cpu_dp->conduit_admin_up = up;

	if (notify)
		dsa_tree_conduit_state_change(dst, conduit);
}

void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst,
					struct net_device *conduit,
					bool up)
{
	struct dsa_port *cpu_dp = conduit->dsa_ptr;
	bool notify = false;

	/* Don't keep track of oper state on LAG DSA conduits,
	 * but rather just of physical DSA conduits
	 */
	if (netif_is_lag_master(conduit))
		return;

	if ((dsa_port_conduit_is_operational(cpu_dp)) !=
	    (cpu_dp->conduit_admin_up && up))
		notify = true;

	cpu_dp->conduit_oper_up = up;

	if (notify)
		dsa_tree_conduit_state_change(dst, conduit);
}
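
/* In both helpers above, "operational" means conduit_admin_up &&
 * conduit_oper_up on the CPU port; a CONDUIT_STATE_CHANGE notification is
 * emitted only when that combined state actually flips, not on every admin
 * or oper event.
 */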

static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *conduit)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_user_dev_check(conduit)) {
		mdp = dsa_user_to_port(conduit);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the conduit device is not itself a DSA user in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *conduit,
			      const char *user_protocol)
{
	const struct dsa_device_ops *tag_ops = NULL;
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, conduit);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_tag_driver_get_by_name(user_protocol);
		if (IS_ERR(tag_ops)) {
			dev_warn(ds->dev,
				 "Failed to find a tagging driver for protocol %s, using default\n",
				 user_protocol);
			tag_ops = NULL;
		}
	}

	if (!tag_ops)
		tag_ops = dsa_tag_driver_get_by_id(default_proto);

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->conduit = conduit;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *conduit;
		const char *user_protocol;
		int err;

		rtnl_lock();
		conduit = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!conduit) {
			rtnl_unlock();
			return -EPROBE_DEFER;
		}

		netdev_hold(conduit, &dp->conduit_tracker, GFP_KERNEL);
		put_device(&conduit->dev);
		rtnl_unlock();

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		err = dsa_port_parse_cpu(dp, conduit, user_protocol);
		if (err)
			netdev_put(conduit, &dp->conduit_tracker);
		return err;
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}
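
/* Illustrative "ports" children as handled by dsa_port_parse_of() (fragment
 * adapted from the DSA bindings, not taken from this file):
 *
 *	port@0 {
 *		reg = <0>;
 *		label = "lan1";			// user port
 *	};
 *
 *	port@5 {
 *		reg = <5>;
 *		ethernet = <&gmac0>;		// CPU port, conduit is gmac0
 *		dsa-tag-protocol = "edsa";	// optional tagger override
 *	};
 */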

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}
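
/* Example: a two-switch fabric marks both switches as members of the same
 * tree (fragment adapted from the DSA bindings):
 *
 *	dsa,member = <0 0>;	// tree 0, switch 0
 *	dsa,member = <0 1>;	// tree 0, switch 1
 *
 * The property is optional: when absent, the switch becomes index 0 of
 * tree 0, as initialized by m[2] above.
 */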

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dev_is_class(struct device *dev, const void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *conduit;
		struct device *d;
		int err;

		rtnl_lock();
		d = dev_find_class(dev, "net");
		if (!d) {
			rtnl_unlock();
			return -EPROBE_DEFER;
		}

		conduit = to_net_dev(d);
		netdev_hold(conduit, &dp->conduit_tracker, GFP_KERNEL);
		put_device(d);
		rtnl_unlock();

		err = dsa_port_parse_cpu(dp, conduit, NULL);
		if (err)
			netdev_put(conduit, &dp->conduit_tracker);
		return err;
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_mac_addr *a, *tmp;
	struct dsa_port *dp, *next;
	struct dsa_vlan *v, *n;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		if (dsa_port_is_cpu(dp) && dp->conduit)
			netdev_put(dp->conduit, &dp->conduit_tracker);

		/* These are either entries that upper layers lost track of
		 * (probably due to bugs), or installed through interfaces
		 * where one does not necessarily have to remove them, like
		 * ndo_dflt_fdb_add().
		 */
		list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
			dev_info(ds->dev,
				 "Cleaning up unicast address %pM vid %u from port %d\n",
				 a->addr, a->vid, dp->index);
			list_del(&a->list);
			kfree(a);
		}

		list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
			dev_info(ds->dev,
				 "Cleaning up multicast address %pM vid %u from port %d\n",
				 a->addr, a->vid, dp->index);
			list_del(&a->list);
			kfree(a);
		}

		/* These are entries that upper layers have lost track of,
		 * probably due to bugs, but also due to dsa_port_do_vlan_del()
		 * having failed and the VLAN entry still lingering on.
		 */
		list_for_each_entry_safe(v, n, &dp->vlans, list) {
			dev_info(ds->dev,
				 "Cleaning up vid %u from port %d\n",
				 v->vid, dp->index);
			list_del(&v->list);
			kfree(v);
		}

		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
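
/* Minimal sketch of how a switch driver registers with this core (names
 * are hypothetical; dsa_switch_probe() above rejects a missing ->dev and a
 * zero ->num_ports):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dsa_switch *ds;
 *
 *		ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
 *		if (!ds)
 *			return -ENOMEM;
 *
 *		ds->dev = &pdev->dev;
 *		ds->num_ports = 7;
 *		ds->ops = &foo_switch_ops;
 *
 *		return dsa_register_switch(ds);
 *	}
 */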

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA conduit chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA user interfaces from being uppers of
 * the DSA conduit, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *conduit, *user_dev;
	LIST_HEAD(close_list);
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_cpu_port(dp, ds)
		list_add(&dp->conduit->close_list, &close_list);

	netif_close_many(&close_list, true);

	dsa_switch_for_each_user_port(dp, ds) {
		conduit = dsa_port_to_conduit(dp);
		user_dev = dp->user;

		netif_device_detach(user_dev);
		netdev_upper_dev_unlink(conduit, user_dev);
	}

	/* Disconnect from further netdevice notifiers on the conduit,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds) {
		dp->conduit->dsa_ptr = NULL;
		netdev_put(dp->conduit, &dp->conduit_tracker);
	}

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->user;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend user network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_user_suspend(dp->user);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume user network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_user_resume(dp->user);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_user_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_user_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);

bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

/* Helpers for switches without specific HSR offloads, but which can implement
 * NETIF_F_HW_HSR_DUP because their tagger uses dsa_xmit_port_mask()
 */
int dsa_port_simple_hsr_validate(struct dsa_switch *ds, int port,
				 struct net_device *hsr,
				 struct netlink_ext_ack *extack)
{
	enum hsr_port_type type;
	int err;

	err = hsr_get_port_type(hsr, dsa_to_port(ds, port)->user, &type);
	if (err)
		return err;

	if (type != HSR_PT_SLAVE_A && type != HSR_PT_SLAVE_B) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only HSR slave ports can be offloaded");
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_validate);

int dsa_port_simple_hsr_join(struct dsa_switch *ds, int port,
			     struct net_device *hsr,
			     struct netlink_ext_ack *extack)
{
	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
	int err;

	err = dsa_port_simple_hsr_validate(ds, port, hsr, extack);
	if (err)
		return err;

	dsa_hsr_foreach_port(other_dp, ds, hsr) {
		if (other_dp != dp) {
			dp->user->features |= NETIF_F_HW_HSR_DUP;
			other_dp->user->features |= NETIF_F_HW_HSR_DUP;
			break;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_join);

int dsa_port_simple_hsr_leave(struct dsa_switch *ds, int port,
			      struct net_device *hsr)
{
	struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;

	dsa_hsr_foreach_port(other_dp, ds, hsr) {
		if (other_dp != dp) {
			dp->user->features &= ~NETIF_F_HW_HSR_DUP;
			other_dp->user->features &= ~NETIF_F_HW_HSR_DUP;
			break;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_port_simple_hsr_leave);

static const struct dsa_stubs __dsa_stubs = {
	.conduit_hwtstamp_validate = __dsa_conduit_hwtstamp_validate,
};

static void dsa_register_stubs(void)
{
	dsa_stubs = &__dsa_stubs;
}

static void dsa_unregister_stubs(void)
{
	dsa_stubs = NULL;
}

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_user_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	rc = rtnl_link_register(&dsa_link_ops);
	if (rc)
		goto netlink_register_fail;

	dsa_register_stubs();

	return 0;

netlink_register_fail:
	dsa_user_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_unregister_stubs();

	rtnl_link_unregister(&dsa_link_ops);

	dsa_user_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <[email protected]>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");
MODULE_IMPORT_NS("NETDEV_INTERNAL");