GitHub Repository: torvalds/linux
Path: blob/master/include/net/dsa.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __LINUX_NET_DSA_H
#define __LINUX_NET_DSA_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/platform_data/dsa.h>
#include <linux/phylink.h>
#include <net/devlink.h>
#include <net/switchdev.h>

struct dsa_8021q_context;
struct tc_action;

#define DSA_TAG_PROTO_NONE_VALUE 0
#define DSA_TAG_PROTO_BRCM_VALUE 1
#define DSA_TAG_PROTO_BRCM_PREPEND_VALUE 2
#define DSA_TAG_PROTO_DSA_VALUE 3
#define DSA_TAG_PROTO_EDSA_VALUE 4
#define DSA_TAG_PROTO_GSWIP_VALUE 5
#define DSA_TAG_PROTO_KSZ9477_VALUE 6
#define DSA_TAG_PROTO_KSZ9893_VALUE 7
#define DSA_TAG_PROTO_LAN9303_VALUE 8
#define DSA_TAG_PROTO_MTK_VALUE 9
#define DSA_TAG_PROTO_QCA_VALUE 10
#define DSA_TAG_PROTO_TRAILER_VALUE 11
#define DSA_TAG_PROTO_8021Q_VALUE 12
#define DSA_TAG_PROTO_SJA1105_VALUE 13
#define DSA_TAG_PROTO_KSZ8795_VALUE 14
#define DSA_TAG_PROTO_OCELOT_VALUE 15
#define DSA_TAG_PROTO_AR9331_VALUE 16
#define DSA_TAG_PROTO_RTL4_A_VALUE 17
#define DSA_TAG_PROTO_HELLCREEK_VALUE 18
#define DSA_TAG_PROTO_XRS700X_VALUE 19
#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE 20
#define DSA_TAG_PROTO_SEVILLE_VALUE 21
#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22
#define DSA_TAG_PROTO_SJA1110_VALUE 23
#define DSA_TAG_PROTO_RTL8_4_VALUE 24
#define DSA_TAG_PROTO_RTL8_4T_VALUE 25
#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26
#define DSA_TAG_PROTO_LAN937X_VALUE 27
#define DSA_TAG_PROTO_VSC73XX_8021Q_VALUE 28
#define DSA_TAG_PROTO_BRCM_LEGACY_FCS_VALUE 29

enum dsa_tag_protocol {
	DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
	DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
	DSA_TAG_PROTO_BRCM_LEGACY = DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
	DSA_TAG_PROTO_BRCM_LEGACY_FCS = DSA_TAG_PROTO_BRCM_LEGACY_FCS_VALUE,
	DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
	DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
	DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
	DSA_TAG_PROTO_GSWIP = DSA_TAG_PROTO_GSWIP_VALUE,
	DSA_TAG_PROTO_KSZ9477 = DSA_TAG_PROTO_KSZ9477_VALUE,
	DSA_TAG_PROTO_KSZ9893 = DSA_TAG_PROTO_KSZ9893_VALUE,
	DSA_TAG_PROTO_LAN9303 = DSA_TAG_PROTO_LAN9303_VALUE,
	DSA_TAG_PROTO_MTK = DSA_TAG_PROTO_MTK_VALUE,
	DSA_TAG_PROTO_QCA = DSA_TAG_PROTO_QCA_VALUE,
	DSA_TAG_PROTO_TRAILER = DSA_TAG_PROTO_TRAILER_VALUE,
	DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE,
	DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
	DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
	DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
	DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
	DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE,
	DSA_TAG_PROTO_HELLCREEK = DSA_TAG_PROTO_HELLCREEK_VALUE,
	DSA_TAG_PROTO_XRS700X = DSA_TAG_PROTO_XRS700X_VALUE,
	DSA_TAG_PROTO_OCELOT_8021Q = DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
	DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE,
	DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE,
	DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE,
	DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE,
	DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
	DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE,
	DSA_TAG_PROTO_VSC73XX_8021Q = DSA_TAG_PROTO_VSC73XX_8021Q_VALUE,
};

struct dsa_switch;

struct dsa_device_ops {
	struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
	void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
			     int *offset);
	int (*connect)(struct dsa_switch *ds);
	void (*disconnect)(struct dsa_switch *ds);
	unsigned int needed_headroom;
	unsigned int needed_tailroom;
	const char *name;
	enum dsa_tag_protocol proto;
	/* Some tagging protocols either mangle or shift the destination MAC
	 * address, in which case the DSA conduit would drop packets on ingress
	 * if what it understands out of the destination MAC address is not in
	 * its RX filter.
	 */
	bool promisc_on_conduit;
};
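
/* Editorial illustration (hypothetical tagger, not defined in this header):
 * a tail tagger would describe itself roughly as below, with
 * @needed_tailroom reserving room for the tag its xmit() appends:
 *
 *	static const struct dsa_device_ops example_trailer_ops = {
 *		.name			= "example-trailer",
 *		.proto			= DSA_TAG_PROTO_TRAILER,
 *		.xmit			= example_trailer_xmit,
 *		.rcv			= example_trailer_rcv,
 *		.needed_tailroom	= 4,
 *	};
 */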

struct dsa_lag {
	struct net_device *dev;
	unsigned int id;
	struct mutex fdb_lock;
	struct list_head fdbs;
	refcount_t refcount;
};

struct dsa_switch_tree {
	struct list_head list;

	/* List of switch ports */
	struct list_head ports;

	/* Notifier chain for switch-wide events */
	struct raw_notifier_head nh;

	/* Tree identifier */
	unsigned int index;

	/* Number of switches attached to this tree */
	struct kref refcount;

	/* Maps offloaded LAG netdevs to a zero-based linear ID for
	 * drivers that need it.
	 */
	struct dsa_lag **lags;

	/* Tagging protocol operations */
	const struct dsa_device_ops *tag_ops;

	/* Default tagging protocol preferred by the switches in this
	 * tree.
	 */
	enum dsa_tag_protocol default_proto;

	/* Has this tree been applied to the hardware? */
	bool setup;

	/*
	 * Configuration data for the platform device that owns
	 * this dsa switch tree instance.
	 */
	struct dsa_platform_data *pd;

	/* List of DSA links composing the routing table */
	struct list_head rtable;

	/* Length of "lags" array */
	unsigned int lags_len;

	/* Track the largest switch index within a tree */
	unsigned int last_switch;
};

/* LAG IDs are one-based, the dst->lags array is zero-based */
#define dsa_lags_foreach_id(_id, _dst)				\
	for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++)	\
		if ((_dst)->lags[(_id) - 1])

#define dsa_lag_foreach_port(_dp, _dst, _lag)			\
	list_for_each_entry((_dp), &(_dst)->ports, list)	\
		if (dsa_port_offloads_lag((_dp), (_lag)))

#define dsa_hsr_foreach_port(_dp, _ds, _hsr)			\
	list_for_each_entry((_dp), &(_ds)->dst->ports, list)	\
		if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))

static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
					    unsigned int id)
{
	/* DSA LAG IDs are one-based, dst->lags is zero-based */
	return dst->lags[id - 1];
}

static inline int dsa_lag_id(struct dsa_switch_tree *dst,
			     struct net_device *lag_dev)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		struct dsa_lag *lag = dsa_lag_by_id(dst, id);

		if (lag->dev == lag_dev)
			return lag->id;
	}

	return -ENODEV;
}
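
/* Editorial illustration (hypothetical driver code, not part of this header):
 * a driver that offloads LAGs typically resolves a bonding/team netdev to the
 * linear ID maintained by DSA with dsa_lag_id():
 *
 *	int id = dsa_lag_id(ds->dst, lag_dev);
 *
 *	if (id < 0)
 *		return id;
 *	example_program_trunk(ds, port, id);
 */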

/* TC matchall action types */
enum dsa_port_mall_action_type {
	DSA_PORT_MALL_MIRROR,
	DSA_PORT_MALL_POLICER,
};

/* TC mirroring entry */
struct dsa_mall_mirror_tc_entry {
	u8 to_local_port;
	bool ingress;
};

/* TC port policer entry */
struct dsa_mall_policer_tc_entry {
	u32 burst;
	u64 rate_bytes_per_sec;
};

/* TC matchall entry */
struct dsa_mall_tc_entry {
	struct list_head list;
	unsigned long cookie;
	enum dsa_port_mall_action_type type;
	union {
		struct dsa_mall_mirror_tc_entry mirror;
		struct dsa_mall_policer_tc_entry policer;
	};
};

struct dsa_bridge {
	struct net_device *dev;
	unsigned int num;
	bool tx_fwd_offload;
	refcount_t refcount;
};

struct dsa_port {
	/* A CPU port is physically connected to a conduit device. A user port
	 * exposes a network device to user-space, called 'user' here.
	 */
	union {
		struct net_device *conduit;
		struct net_device *user;
	};

	/* Copy of the tagging protocol operations, for quicker access
	 * in the data path. Valid only for the CPU ports.
	 */
	const struct dsa_device_ops *tag_ops;

	/* Copies for faster access in conduit receive hot path */
	struct dsa_switch_tree *dst;
	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);

	struct dsa_switch *ds;

	unsigned int index;

	enum {
		DSA_PORT_TYPE_UNUSED = 0,
		DSA_PORT_TYPE_CPU,
		DSA_PORT_TYPE_DSA,
		DSA_PORT_TYPE_USER,
	} type;

	const char *name;
	struct dsa_port *cpu_dp;
	u8 mac[ETH_ALEN];

	u8 stp_state;

	/* Warning: the following bit fields are not atomic, and updating them
	 * can only be done from code paths where concurrency is not possible
	 * (probe time or under rtnl_lock).
	 */
	u8 vlan_filtering:1;

	/* Managed by DSA on user ports and by drivers on CPU and DSA ports */
	u8 learning:1;

	u8 lag_tx_enabled:1;

	/* conduit state bits, valid only on CPU ports */
	u8 conduit_admin_up:1;
	u8 conduit_oper_up:1;

	/* Valid only on user ports */
	u8 cpu_port_in_lag:1;

	u8 setup:1;

	struct device_node *dn;
	unsigned int ageing_time;

	struct dsa_bridge *bridge;
	struct devlink_port devlink_port;
	struct phylink *pl;
	struct phylink_config pl_config;
	struct dsa_lag *lag;
	struct net_device *hsr_dev;

	struct list_head list;

	/*
	 * Original copy of the conduit netdev ethtool_ops
	 */
	const struct ethtool_ops *orig_ethtool_ops;

	/* List of MAC addresses that must be forwarded on this port.
	 * These are only valid on CPU ports and DSA links.
	 */
	struct mutex addr_lists_lock;
	struct list_head fdbs;
	struct list_head mdbs;

	struct mutex vlans_lock;
	union {
		/* List of VLANs that CPU and DSA ports are members of.
		 * Access to this is serialized by the sleepable @vlans_lock.
		 */
		struct list_head vlans;
		/* List of VLANs that user ports are members of.
		 * Access to this is serialized by netif_addr_lock_bh().
		 */
		struct list_head user_vlans;
	};
};

static inline struct dsa_port *
dsa_phylink_to_port(struct phylink_config *config)
{
	return container_of(config, struct dsa_port, pl_config);
}
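
/* Editorial illustration (hypothetical callback, not defined here): drivers
 * providing their own phylink_mac_ops recover the port from the
 * phylink_config embedded in struct dsa_port:
 *
 *	static void example_mac_link_down(struct phylink_config *config,
 *					  unsigned int mode,
 *					  phy_interface_t interface)
 *	{
 *		struct dsa_port *dp = dsa_phylink_to_port(config);
 *
 *		example_port_down(dp->ds, dp->index);
 *	}
 */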

/* TODO: ideally DSA ports would have a single dp->link_dp member,
 * and no dst->rtable nor this struct dsa_link would be needed,
 * but this would require some more complex tree walking,
 * so keep it stupid at the moment and list them all.
 */
struct dsa_link {
	struct dsa_port *dp;
	struct dsa_port *link_dp;
	struct list_head list;
};

enum dsa_db_type {
	DSA_DB_PORT,
	DSA_DB_LAG,
	DSA_DB_BRIDGE,
};

struct dsa_db {
	enum dsa_db_type type;

	union {
		const struct dsa_port *dp;
		struct dsa_lag lag;
		struct dsa_bridge bridge;
	};
};

struct dsa_mac_addr {
	unsigned char addr[ETH_ALEN];
	u16 vid;
	refcount_t refcount;
	struct list_head list;
	struct dsa_db db;
};

struct dsa_vlan {
	u16 vid;
	refcount_t refcount;
	struct list_head list;
};

struct dsa_switch {
	struct device *dev;

	/*
	 * Parent switch tree, and switch index.
	 */
	struct dsa_switch_tree *dst;
	unsigned int index;

	/* Warning: the following bit fields are not atomic, and updating them
	 * can only be done from code paths where concurrency is not possible
	 * (probe time or under rtnl_lock).
	 */
	u32 setup:1;

	/* Disallow bridge core from requesting different VLAN awareness
	 * settings on ports if not hardware-supported
	 */
	u32 vlan_filtering_is_global:1;

	/* Keep VLAN filtering enabled on ports not offloading any upper */
	u32 needs_standalone_vlan_filtering:1;

	/* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
	 * that have vlan_filtering=0. All drivers should ideally set this (and
	 * then the option would get removed), but it is unknown whether this
	 * would break things or not.
	 */
	u32 configure_vlan_while_not_filtering:1;

	/* Pop the default_pvid of VLAN-unaware bridge ports from tagged frames.
	 * DEPRECATED: Do NOT set this field in new drivers. Instead look at
	 * the dsa_software_vlan_untag() comments.
	 */
	u32 untag_bridge_pvid:1;
	/* Pop the default_pvid of VLAN-aware bridge ports from tagged frames.
	 * Useful if the switch cannot preserve the VLAN tag as seen on the
	 * wire for user port ingress, and chooses to send all frames as
	 * VLAN-tagged to the CPU, including those which were originally
	 * untagged.
	 */
	u32 untag_vlan_aware_bridge_pvid:1;

	/* Let DSA manage the FDB entries towards the
	 * CPU, based on the software bridge database.
	 */
	u32 assisted_learning_on_cpu_port:1;

	/* In case vlan_filtering_is_global is set, the VLAN awareness state
	 * should be retrieved from here and not from the per-port settings.
	 */
	u32 vlan_filtering:1;

	/* For switches that only have the MRU configurable. To ensure the
	 * configured MTU is not exceeded, normalization of MRU on all bridged
	 * interfaces is needed.
	 */
	u32 mtu_enforcement_ingress:1;

	/* Drivers that isolate the FDBs of multiple bridges must set this
	 * to true to receive the bridge as an argument in .port_fdb_{add,del}
	 * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be
	 * passed as zero.
	 */
	u32 fdb_isolation:1;

	/* Drivers that have global DSCP mapping settings must set this to
	 * true to automatically apply the settings to all ports.
	 */
	u32 dscp_prio_mapping_is_global:1;

	/* Listener for switch fabric events */
	struct notifier_block nb;

	/*
	 * Give the switch driver somewhere to hang its private data
	 * structure.
	 */
	void *priv;

	void *tagger_data;

	/*
	 * Configuration data for this switch.
	 */
	struct dsa_chip_data *cd;

	/*
	 * The switch operations.
	 */
	const struct dsa_switch_ops *ops;

	/*
	 * Allow a DSA switch driver to override the phylink MAC ops
	 */
	const struct phylink_mac_ops *phylink_mac_ops;

	/*
	 * User mii_bus and devices for the individual ports.
	 */
	u32 phys_mii_mask;
	struct mii_bus *user_mii_bus;

	/* Ageing Time limits in msecs */
	unsigned int ageing_time_min;
	unsigned int ageing_time_max;

	/* Storage for drivers using tag_8021q */
	struct dsa_8021q_context *tag_8021q_ctx;

	/* devlink used to represent this switch device */
	struct devlink *devlink;

	/* Number of switch port queues */
	unsigned int num_tx_queues;

	/* Drivers that benefit from having an ID associated with each
	 * offloaded LAG should set this to the maximum number of
	 * supported IDs. DSA will then maintain a mapping of _at
	 * least_ these many IDs, accessible to drivers via
	 * dsa_lag_id().
	 */
	unsigned int num_lag_ids;

	/* Drivers that support bridge forwarding offload or FDB isolation
	 * should set this to the maximum number of bridges spanning the same
	 * switch tree (or all trees, in the case of cross-tree bridging
	 * support) that can be offloaded.
	 */
	unsigned int max_num_bridges;

	unsigned int num_ports;
};
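
/* Editorial illustration (hypothetical probe code, not part of this header):
 * drivers allocate and fill a struct dsa_switch, then hand it to DSA:
 *
 *	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
 *	if (!ds)
 *		return -ENOMEM;
 *
 *	ds->dev = dev;
 *	ds->num_ports = EXAMPLE_NUM_PORTS;
 *	ds->ops = &example_switch_ops;
 *	ds->priv = priv;
 *
 *	return dsa_register_switch(ds);
 *
 * dsa_register_switch() is declared towards the end of this header.
 */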

static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds == ds && dp->index == p)
			return dp;

	return NULL;
}

static inline bool dsa_port_is_dsa(struct dsa_port *port)
{
	return port->type == DSA_PORT_TYPE_DSA;
}

static inline bool dsa_port_is_cpu(struct dsa_port *port)
{
	return port->type == DSA_PORT_TYPE_CPU;
}

static inline bool dsa_port_is_user(struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER;
}

static inline bool dsa_port_is_unused(struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_UNUSED;
}

static inline bool dsa_port_conduit_is_operational(struct dsa_port *dp)
{
	return dsa_port_is_cpu(dp) && dp->conduit_admin_up &&
	       dp->conduit_oper_up;
}

static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
{
	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
}

static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
{
	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
}

static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
{
	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
}

static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
{
	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
}

#define dsa_tree_for_each_user_port(_dp, _dst) \
	list_for_each_entry((_dp), &(_dst)->ports, list) \
		if (dsa_port_is_user((_dp)))

#define dsa_tree_for_each_user_port_continue_reverse(_dp, _dst) \
	list_for_each_entry_continue_reverse((_dp), &(_dst)->ports, list) \
		if (dsa_port_is_user((_dp)))

#define dsa_tree_for_each_cpu_port(_dp, _dst) \
	list_for_each_entry((_dp), &(_dst)->ports, list) \
		if (dsa_port_is_cpu((_dp)))

#define dsa_switch_for_each_port(_dp, _ds) \
	list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
		if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
	list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
		if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
	list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
		if ((_dp)->ds == (_ds))

#define dsa_switch_for_each_available_port(_dp, _ds) \
	dsa_switch_for_each_port((_dp), (_ds)) \
		if (!dsa_port_is_unused((_dp)))

#define dsa_switch_for_each_user_port(_dp, _ds) \
	dsa_switch_for_each_port((_dp), (_ds)) \
		if (dsa_port_is_user((_dp)))

#define dsa_switch_for_each_user_port_continue_reverse(_dp, _ds) \
	dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
		if (dsa_port_is_user((_dp)))

#define dsa_switch_for_each_cpu_port(_dp, _ds) \
	dsa_switch_for_each_port((_dp), (_ds)) \
		if (dsa_port_is_cpu((_dp)))

#define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \
	dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
		if (dsa_port_is_cpu((_dp)))

static inline u32 dsa_user_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	u32 mask = 0;

	dsa_switch_for_each_user_port(dp, ds)
		mask |= BIT(dp->index);

	return mask;
}

static inline u32 dsa_cpu_ports(struct dsa_switch *ds)
{
	struct dsa_port *cpu_dp;
	u32 mask = 0;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		mask |= BIT(cpu_dp->index);

	return mask;
}

/* Return the local port used to reach an arbitrary switch device */
static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_link *dl;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
			return dl->dp->index;

	return ds->num_ports;
}

/* Return the local port used to reach an arbitrary switch port */
static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
					    int port)
{
	if (device == ds->index)
		return port;
	else
		return dsa_routing_port(ds, device);
}

/* Return the local port used to reach the dedicated CPU port */
static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	const struct dsa_port *cpu_dp = dp->cpu_dp;

	if (!cpu_dp)
		return port;

	return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
}
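
/* Editorial note, as a worked example on a hypothetical topology: if switch 1
 * has no CPU port of its own and reaches the CPU through a DSA link on its
 * port 5 towards switch 0, then dsa_upstream_port() resolves to 5 for every
 * port of switch 1, while on switch 0 it returns the index of the CPU port
 * itself.
 */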

/* Return true if this is the local port used to reach the CPU port */
static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
{
	if (dsa_is_unused_port(ds, port))
		return false;

	return port == dsa_upstream_port(ds, port);
}

/* Return true if this is a DSA port leading away from the CPU */
static inline bool dsa_is_downstream_port(struct dsa_switch *ds, int port)
{
	return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port);
}

/* Return the local port used to reach the CPU port */
static inline unsigned int dsa_switch_upstream_port(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	dsa_switch_for_each_available_port(dp, ds) {
		return dsa_upstream_port(ds, dp->index);
	}

	return ds->num_ports;
}

/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
 * that the routing port from @downstream_ds to @upstream_ds is also the port
 * which @downstream_ds uses to reach its dedicated CPU.
 */
static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
					     struct dsa_switch *downstream_ds)
{
	int routing_port;

	if (upstream_ds == downstream_ds)
		return true;

	routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);

	return dsa_is_upstream_port(downstream_ds, routing_port);
}

static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
{
	const struct dsa_switch *ds = dp->ds;

	if (ds->vlan_filtering_is_global)
		return ds->vlan_filtering;
	else
		return dp->vlan_filtering;
}

static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
{
	return dp->lag ? dp->lag->id : 0;
}

static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
{
	return dp->lag ? dp->lag->dev : NULL;
}

static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
					 const struct dsa_lag *lag)
{
	return dsa_port_lag_dev_get(dp) == lag->dev;
}

static inline struct net_device *dsa_port_to_conduit(const struct dsa_port *dp)
{
	if (dp->cpu_port_in_lag)
		return dsa_port_lag_dev_get(dp->cpu_dp);

	return dp->cpu_dp->conduit;
}

static inline
struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
{
	if (!dp->bridge)
		return NULL;

	if (dp->lag)
		return dp->lag->dev;
	else if (dp->hsr_dev)
		return dp->hsr_dev;

	return dp->user;
}

static inline struct net_device *
dsa_port_bridge_dev_get(const struct dsa_port *dp)
{
	return dp->bridge ? dp->bridge->dev : NULL;
}

static inline unsigned int dsa_port_bridge_num_get(struct dsa_port *dp)
{
	return dp->bridge ? dp->bridge->num : 0;
}

static inline bool dsa_port_bridge_same(const struct dsa_port *a,
					const struct dsa_port *b)
{
	struct net_device *br_a = dsa_port_bridge_dev_get(a);
	struct net_device *br_b = dsa_port_bridge_dev_get(b);

	/* Standalone ports are not in the same bridge with one another */
	return (!br_a || !br_b) ? false : (br_a == br_b);
}

static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
						 const struct net_device *dev)
{
	return dsa_port_to_bridge_port(dp) == dev;
}

static inline bool
dsa_port_offloads_bridge_dev(struct dsa_port *dp,
			     const struct net_device *bridge_dev)
{
	/* DSA ports connected to a bridge, and event was emitted
	 * for the bridge.
	 */
	return dsa_port_bridge_dev_get(dp) == bridge_dev;
}

static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
					    const struct dsa_bridge *bridge)
{
	return dsa_port_bridge_dev_get(dp) == bridge->dev;
}

/* Returns true if any port of this tree offloads the given net_device */
static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
						 const struct net_device *dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_offloads_bridge_port(dp, dev))
			return true;

	return false;
}

/* Returns true if any port of this tree offloads the given bridge */
static inline bool
dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
			     const struct net_device *bridge_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_offloads_bridge_dev(dp, bridge_dev))
			return true;

	return false;
}

static inline bool dsa_port_tree_same(const struct dsa_port *a,
				      const struct dsa_port *b)
{
	return a->ds->dst == b->ds->dst;
}

typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
			      bool is_static, void *data);
struct dsa_switch_ops {
	/*
	 * Tagging protocol helpers called for the CPU ports and DSA links.
	 * @get_tag_protocol retrieves the initial tagging protocol and is
	 * mandatory. Switches which can operate using multiple tagging
	 * protocols should implement @change_tag_protocol and report in
	 * @get_tag_protocol the tagger in current use.
	 */
	enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
						  int port,
						  enum dsa_tag_protocol mprot);
	int (*change_tag_protocol)(struct dsa_switch *ds,
				   enum dsa_tag_protocol proto);
	/*
	 * Method for switch drivers to connect to the tagging protocol driver
	 * in current use. The switch driver can provide handlers for certain
	 * types of packets for switch management.
	 */
	int (*connect_tag_protocol)(struct dsa_switch *ds,
				    enum dsa_tag_protocol proto);

	int (*port_change_conduit)(struct dsa_switch *ds, int port,
				   struct net_device *conduit,
				   struct netlink_ext_ack *extack);

	/* Optional switch-wide initialization and destruction methods */
	int (*setup)(struct dsa_switch *ds);
	void (*teardown)(struct dsa_switch *ds);

	/* Per-port initialization and destruction methods. Mandatory if the
	 * driver registers devlink port regions, optional otherwise.
	 */
	int (*port_setup)(struct dsa_switch *ds, int port);
	void (*port_teardown)(struct dsa_switch *ds, int port);

	u32 (*get_phy_flags)(struct dsa_switch *ds, int port);

	/*
	 * Access to the switch's PHY registers.
	 */
	int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
	int (*phy_write)(struct dsa_switch *ds, int port,
			 int regnum, u16 val);

	/*
	 * PHYLINK integration
	 */
	void (*phylink_get_caps)(struct dsa_switch *ds, int port,
				 struct phylink_config *config);
	void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
				    struct phylink_link_state *state);
	/*
	 * Port statistics counters.
	 */
	void (*get_strings)(struct dsa_switch *ds, int port,
			    u32 stringset, uint8_t *data);
	void (*get_ethtool_stats)(struct dsa_switch *ds,
				  int port, uint64_t *data);
	int (*get_sset_count)(struct dsa_switch *ds, int port, int sset);
	void (*get_ethtool_phy_stats)(struct dsa_switch *ds,
				      int port, uint64_t *data);
	void (*get_eth_phy_stats)(struct dsa_switch *ds, int port,
				  struct ethtool_eth_phy_stats *phy_stats);
	void (*get_eth_mac_stats)(struct dsa_switch *ds, int port,
				  struct ethtool_eth_mac_stats *mac_stats);
	void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
				   struct ethtool_eth_ctrl_stats *ctrl_stats);
	void (*get_rmon_stats)(struct dsa_switch *ds, int port,
			       struct ethtool_rmon_stats *rmon_stats,
			       const struct ethtool_rmon_hist_range **ranges);
	void (*get_ts_stats)(struct dsa_switch *ds, int port,
			     struct ethtool_ts_stats *ts_stats);
	void (*get_stats64)(struct dsa_switch *ds, int port,
			    struct rtnl_link_stats64 *s);
	void (*get_pause_stats)(struct dsa_switch *ds, int port,
				struct ethtool_pause_stats *pause_stats);
	void (*self_test)(struct dsa_switch *ds, int port,
			  struct ethtool_test *etest, u64 *data);

	/*
	 * ethtool Wake-on-LAN
	 */
	void (*get_wol)(struct dsa_switch *ds, int port,
			struct ethtool_wolinfo *w);
	int (*set_wol)(struct dsa_switch *ds, int port,
		       struct ethtool_wolinfo *w);

	/*
	 * ethtool timestamp info
	 */
	int (*get_ts_info)(struct dsa_switch *ds, int port,
			   struct kernel_ethtool_ts_info *ts);

	/*
	 * ethtool MAC merge layer
	 */
	int (*get_mm)(struct dsa_switch *ds, int port,
		      struct ethtool_mm_state *state);
	int (*set_mm)(struct dsa_switch *ds, int port,
		      struct ethtool_mm_cfg *cfg,
		      struct netlink_ext_ack *extack);
	void (*get_mm_stats)(struct dsa_switch *ds, int port,
			     struct ethtool_mm_stats *stats);

	/*
	 * DCB ops
	 */
	int (*port_get_default_prio)(struct dsa_switch *ds, int port);
	int (*port_set_default_prio)(struct dsa_switch *ds, int port,
				     u8 prio);
	int (*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp);
	int (*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
				  u8 prio);
	int (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
				  u8 prio);
	int (*port_set_apptrust)(struct dsa_switch *ds, int port,
				 const u8 *sel, int nsel);
	int (*port_get_apptrust)(struct dsa_switch *ds, int port, u8 *sel,
				 int *nsel);

	/*
	 * Suspend and resume
	 */
	int (*suspend)(struct dsa_switch *ds);
	int (*resume)(struct dsa_switch *ds);

	/*
	 * Port enable/disable
	 */
	int (*port_enable)(struct dsa_switch *ds, int port,
			   struct phy_device *phy);
	void (*port_disable)(struct dsa_switch *ds, int port);


	/*
	 * Notification for MAC address changes on user ports. Drivers can
	 * currently only veto operations. They should not use the method to
	 * program the hardware, since the operation is not rolled back in case
	 * of other errors.
	 */
	int (*port_set_mac_address)(struct dsa_switch *ds, int port,
				    const unsigned char *addr);

	/*
	 * Compatibility between device trees defining multiple CPU ports and
	 * drivers which are not OK to use by default the numerically smallest
	 * CPU port of a switch for its local ports. This can return NULL,
	 * meaning "don't know/don't care".
	 */
	struct dsa_port *(*preferred_default_local_cpu_port)(struct dsa_switch *ds);

	/*
	 * Port's MAC EEE settings
	 */
	bool (*support_eee)(struct dsa_switch *ds, int port);
	int (*set_mac_eee)(struct dsa_switch *ds, int port,
			   struct ethtool_keee *e);

	/* EEPROM access */
	int (*get_eeprom_len)(struct dsa_switch *ds);
	int (*get_eeprom)(struct dsa_switch *ds,
			  struct ethtool_eeprom *eeprom, u8 *data);
	int (*set_eeprom)(struct dsa_switch *ds,
			  struct ethtool_eeprom *eeprom, u8 *data);

	/*
	 * Register access.
	 */
	int (*get_regs_len)(struct dsa_switch *ds, int port);
	void (*get_regs)(struct dsa_switch *ds, int port,
			 struct ethtool_regs *regs, void *p);

	/*
	 * Upper device tracking.
	 */
	int (*port_prechangeupper)(struct dsa_switch *ds, int port,
				   struct netdev_notifier_changeupper_info *info);

	/*
	 * Bridge integration
	 */
	int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
	int (*port_bridge_join)(struct dsa_switch *ds, int port,
				struct dsa_bridge bridge,
				bool *tx_fwd_offload,
				struct netlink_ext_ack *extack);
	void (*port_bridge_leave)(struct dsa_switch *ds, int port,
				  struct dsa_bridge bridge);
	void (*port_stp_state_set)(struct dsa_switch *ds, int port,
				   u8 state);
	int (*port_mst_state_set)(struct dsa_switch *ds, int port,
				  const struct switchdev_mst_state *state);
	void (*port_fast_age)(struct dsa_switch *ds, int port);
	int (*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid);
	int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
				     struct switchdev_brport_flags flags,
				     struct netlink_ext_ack *extack);
	int (*port_bridge_flags)(struct dsa_switch *ds, int port,
				 struct switchdev_brport_flags flags,
				 struct netlink_ext_ack *extack);
	void (*port_set_host_flood)(struct dsa_switch *ds, int port,
				    bool uc, bool mc);

	/*
	 * VLAN support
	 */
	int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
				   bool vlan_filtering,
				   struct netlink_ext_ack *extack);
	int (*port_vlan_add)(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan,
			     struct netlink_ext_ack *extack);
	int (*port_vlan_del)(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan);
	int (*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge,
			     const struct switchdev_vlan_msti *msti);

	/*
	 * Forwarding database
	 */
	int (*port_fdb_add)(struct dsa_switch *ds, int port,
			    const unsigned char *addr, u16 vid,
			    struct dsa_db db);
	int (*port_fdb_del)(struct dsa_switch *ds, int port,
			    const unsigned char *addr, u16 vid,
			    struct dsa_db db);
	int (*port_fdb_dump)(struct dsa_switch *ds, int port,
			     dsa_fdb_dump_cb_t *cb, void *data);
	int (*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag,
			   const unsigned char *addr, u16 vid,
			   struct dsa_db db);
	int (*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag,
			   const unsigned char *addr, u16 vid,
			   struct dsa_db db);

	/*
	 * Multicast database
	 */
	int (*port_mdb_add)(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb,
			    struct dsa_db db);
	int (*port_mdb_del)(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb,
			    struct dsa_db db);
	/*
	 * RXNFC
	 */
	int (*get_rxnfc)(struct dsa_switch *ds, int port,
			 struct ethtool_rxnfc *nfc, u32 *rule_locs);
	int (*set_rxnfc)(struct dsa_switch *ds, int port,
			 struct ethtool_rxnfc *nfc);

	/*
	 * TC integration
	 */
	int (*cls_flower_add)(struct dsa_switch *ds, int port,
			      struct flow_cls_offload *cls, bool ingress);
	int (*cls_flower_del)(struct dsa_switch *ds, int port,
			      struct flow_cls_offload *cls, bool ingress);
	int (*cls_flower_stats)(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress);
	int (*port_mirror_add)(struct dsa_switch *ds, int port,
			       struct dsa_mall_mirror_tc_entry *mirror,
			       bool ingress, struct netlink_ext_ack *extack);
	void (*port_mirror_del)(struct dsa_switch *ds, int port,
				struct dsa_mall_mirror_tc_entry *mirror);
	int (*port_policer_add)(struct dsa_switch *ds, int port,
				struct dsa_mall_policer_tc_entry *policer);
	void (*port_policer_del)(struct dsa_switch *ds, int port);
	int (*port_setup_tc)(struct dsa_switch *ds, int port,
			     enum tc_setup_type type, void *type_data);

	/*
	 * Cross-chip operations
	 */
	int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
				     int sw_index, int port,
				     struct dsa_bridge bridge,
				     struct netlink_ext_ack *extack);
	void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
				       int sw_index, int port,
				       struct dsa_bridge bridge);
	int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
				    int port);
	int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
				  int port, struct dsa_lag lag,
				  struct netdev_lag_upper_info *info,
				  struct netlink_ext_ack *extack);
	int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
				   int port, struct dsa_lag lag);

	/*
	 * PTP functionality
	 */
	int (*port_hwtstamp_get)(struct dsa_switch *ds, int port,
				 struct kernel_hwtstamp_config *config);
	int (*port_hwtstamp_set)(struct dsa_switch *ds, int port,
				 struct kernel_hwtstamp_config *config,
				 struct netlink_ext_ack *extack);
	void (*port_txtstamp)(struct dsa_switch *ds, int port,
			      struct sk_buff *skb);
	bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
			      struct sk_buff *skb, unsigned int type);

	/* Devlink parameters, etc */
	int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
				 struct devlink_param_gset_ctx *ctx);
	int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
				 struct devlink_param_gset_ctx *ctx);
	int (*devlink_info_get)(struct dsa_switch *ds,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack);
	int (*devlink_sb_pool_get)(struct dsa_switch *ds,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info);
	int (*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack);
	int (*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold);
	int (*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack);
	int (*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
					   unsigned int sb_index, u16 tc_index,
					   enum devlink_sb_pool_type pool_type,
					   u16 *p_pool_index, u32 *p_threshold);
	int (*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
					   unsigned int sb_index, u16 tc_index,
					   enum devlink_sb_pool_type pool_type,
					   u16 pool_index, u32 threshold,
					   struct netlink_ext_ack *extack);
	int (*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
				       unsigned int sb_index);
	int (*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
					unsigned int sb_index);
	int (*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
					    unsigned int sb_index, u16 pool_index,
					    u32 *p_cur, u32 *p_max);
	int (*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
					       unsigned int sb_index, u16 tc_index,
					       enum devlink_sb_pool_type pool_type,
					       u32 *p_cur, u32 *p_max);

	/*
	 * MTU change functionality. Switches can also adjust their MRU through
	 * this method. By MTU, one understands the SDU (L2 payload) length.
	 * If the switch needs to account for the DSA tag on the CPU port, this
	 * method needs to do so privately.
	 */
	int (*port_change_mtu)(struct dsa_switch *ds, int port,
			       int new_mtu);
	int (*port_max_mtu)(struct dsa_switch *ds, int port);

	/*
	 * LAG integration
	 */
	int (*port_lag_change)(struct dsa_switch *ds, int port);
	int (*port_lag_join)(struct dsa_switch *ds, int port,
			     struct dsa_lag lag,
			     struct netdev_lag_upper_info *info,
			     struct netlink_ext_ack *extack);
	int (*port_lag_leave)(struct dsa_switch *ds, int port,
			      struct dsa_lag lag);

	/*
	 * HSR integration
	 */
	int (*port_hsr_join)(struct dsa_switch *ds, int port,
			     struct net_device *hsr,
			     struct netlink_ext_ack *extack);
	int (*port_hsr_leave)(struct dsa_switch *ds, int port,
			      struct net_device *hsr);

	/*
	 * MRP integration
	 */
	int (*port_mrp_add)(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_mrp *mrp);
	int (*port_mrp_del)(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_mrp *mrp);
	int (*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
				      const struct switchdev_obj_ring_role_mrp *mrp);
	int (*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
				      const struct switchdev_obj_ring_role_mrp *mrp);

	/*
	 * tag_8021q operations
	 */
	int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
				  u16 flags);
	int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);

	/*
	 * DSA conduit tracking operations
	 */
	void (*conduit_state_change)(struct dsa_switch *ds,
				     const struct net_device *conduit,
				     bool operational);
};
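
/* Editorial illustration (hypothetical driver, not defined here): per the
 * comments above, only @get_tag_protocol is strictly mandatory; a minimal
 * ops table could look like:
 *
 *	static const struct dsa_switch_ops example_switch_ops = {
 *		.get_tag_protocol	= example_get_tag_protocol,
 *		.setup			= example_setup,
 *		.port_enable		= example_port_enable,
 *		.port_disable		= example_port_disable,
 *	};
 */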

#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes)		\
	DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes,		\
			     dsa_devlink_param_get, dsa_devlink_param_set, NULL)
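
/* Editorial illustration (hypothetical parameter, not defined here): drivers
 * wrap their devlink parameters with the macro above and register them from
 * .setup() via dsa_devlink_params_register():
 *
 *	static const struct devlink_param example_params[] = {
 *		DSA_DEVLINK_PARAM_DRIVER(EXAMPLE_PARAM_ID_FOO, "foo",
 *					 DEVLINK_PARAM_TYPE_U8,
 *					 BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
 *	};
 */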

int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx,
			  struct netlink_ext_ack *extack);
int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count);
void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count);
int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params);

void dsa_devlink_resources_unregister(struct dsa_switch *ds);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id);
struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
			  const struct devlink_region_ops *ops,
			  u32 region_max_snapshots, u64 region_size);
struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
			       int port,
			       const struct devlink_port_region_ops *ops,
			       u32 region_max_snapshots, u64 region_size);
void dsa_devlink_region_destroy(struct devlink_region *region);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);

struct dsa_devlink_priv {
	struct dsa_switch *ds;
};

static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
{
	struct dsa_devlink_priv *dl_priv = devlink_priv(dl);

	return dl_priv->ds;
}

static inline
struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
{
	struct devlink *dl = port->devlink;
	struct dsa_devlink_priv *dl_priv = devlink_priv(dl);

	return dl_priv->ds;
}

static inline int dsa_devlink_port_to_port(struct devlink_port *port)
{
	return port->index;
}

struct dsa_switch_driver {
	struct list_head list;
	const struct dsa_switch_ops *ops;
};

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db);
bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db);

/* Keep inline for faster access in hot path */
static inline bool netdev_uses_dsa(const struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	return dev->dsa_ptr && dev->dsa_ptr->rcv;
#endif
	return false;
}

/* All DSA tags that push the EtherType to the right (basically all except tail
 * tags, which don't break dissection) can be treated the same from the
 * perspective of the flow dissector.
 *
 * We need to return:
 *  - offset: the (B - A) difference between:
 *    A. the position of the real EtherType and
 *    B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
 *       after the normal EtherType was supposed to be)
 *    The offset in bytes is exactly equal to the tagger overhead (and half of
 *    that, in __be16 shorts).
 *
 *  - proto: the value of the real EtherType.
 */
static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
						__be16 *proto, int *offset)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
	int tag_len = ops->needed_headroom;

	*offset = tag_len;
	*proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
#endif
}
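
/* Editorial illustration (hypothetical tagger, not defined here): a tagger
 * with a 4-byte header inserted before the EtherType can reuse the generic
 * helper directly, so the dissector skips exactly those 4 bytes (2 __be16
 * words) to find the real EtherType:
 *
 *	static const struct dsa_device_ops example_header_ops = {
 *		...
 *		.needed_headroom	= 4,
 *		.flow_dissect		= dsa_tag_generic_flow_dissect,
 *	};
 */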

void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
void dsa_flush_workqueue(void);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
#else
static inline int dsa_switch_suspend(struct dsa_switch *ds)
{
	return 0;
}
static inline int dsa_switch_resume(struct dsa_switch *ds)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#if IS_ENABLED(CONFIG_NET_DSA)
bool dsa_user_dev_check(const struct net_device *dev);
#else
static inline bool dsa_user_dev_check(const struct net_device *dev)
{
	return false;
}
#endif

netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
bool dsa_supports_eee(struct dsa_switch *ds, int port);

#endif