GitHub Repository: torvalds/linux
Path: net/hsr/hsr_netlink.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2011-2014 Autronica Fire and Security AS
 *
 * Author(s):
 *	2011-2014 Arvid Brodin, [email protected]
 *
 * Routines for handling Netlink messages for HSR and PRP.
 */

#include "hsr_netlink.h"
#include <linux/kernel.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include "hsr_main.h"
#include "hsr_device.h"
#include "hsr_framereg.h"

static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = {
	[IFLA_HSR_SLAVE1] = { .type = NLA_U32 },
	[IFLA_HSR_SLAVE2] = { .type = NLA_U32 },
	[IFLA_HSR_MULTICAST_SPEC] = { .type = NLA_U8 },
	[IFLA_HSR_VERSION] = { .type = NLA_U8 },
	[IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN },
	[IFLA_HSR_SEQ_NR] = { .type = NLA_U16 },
	[IFLA_HSR_PROTOCOL] = { .type = NLA_U8 },
	[IFLA_HSR_INTERLINK] = { .type = NLA_U32 },
};
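
/* Example (illustrative only; the attributes above map to the iproute2 "hsr"
 * link type, and the exact option spelling depends on the iproute2 version):
 *
 *   ip link add name hsr0 type hsr slave1 eth1 slave2 eth2 supervision 45 version 1
 *
 * Here slave1/slave2 supply IFLA_HSR_SLAVE1/IFLA_HSR_SLAVE2, "supervision"
 * supplies IFLA_HSR_MULTICAST_SPEC (the last byte of the supervision
 * multicast address) and "version" supplies IFLA_HSR_VERSION.
 */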

/* Here, it seems a netdevice has already been allocated for us, and the
 * hsr_dev_setup routine has been executed. Nice!
 */
static int hsr_newlink(struct net_device *dev,
		       struct rtnl_newlink_params *params,
		       struct netlink_ext_ack *extack)
{
	struct net *link_net = rtnl_newlink_link_net(params);
	struct net_device *link[2], *interlink = NULL;
	struct nlattr **data = params->data;
	enum hsr_version proto_version;
	unsigned char multicast_spec;
	u8 proto = HSR_PROTOCOL_HSR;

	if (!net_eq(link_net, dev_net(dev))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "HSR slaves/interlink must be on the same net namespace than HSR link");
		return -EINVAL;
	}

	if (!data) {
		NL_SET_ERR_MSG_MOD(extack, "No slave devices specified");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified");
		return -EINVAL;
	}
	link[0] = __dev_get_by_index(link_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE1]));
	if (!link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist");
		return -EINVAL;
	}
	if (!data[IFLA_HSR_SLAVE2]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified");
		return -EINVAL;
	}
	link[1] = __dev_get_by_index(link_net,
				     nla_get_u32(data[IFLA_HSR_SLAVE2]));
	if (!link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist");
		return -EINVAL;
	}

	if (link[0] == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Slave1 and Slave2 are same");
		return -EINVAL;
	}

	if (data[IFLA_HSR_INTERLINK])
		interlink = __dev_get_by_index(link_net,
					       nla_get_u32(data[IFLA_HSR_INTERLINK]));

	if (interlink && interlink == link[0]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave1 are the same");
		return -EINVAL;
	}

	if (interlink && interlink == link[1]) {
		NL_SET_ERR_MSG_MOD(extack, "Interlink and Slave2 are the same");
		return -EINVAL;
	}

	multicast_spec = nla_get_u8_default(data[IFLA_HSR_MULTICAST_SPEC], 0);

	if (data[IFLA_HSR_PROTOCOL])
		proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]);

	if (proto >= HSR_PROTOCOL_MAX) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol");
		return -EINVAL;
	}

	if (!data[IFLA_HSR_VERSION]) {
		proto_version = HSR_V0;
	} else {
		if (proto == HSR_PROTOCOL_PRP) {
			NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported");
			return -EINVAL;
		}

		proto_version = nla_get_u8(data[IFLA_HSR_VERSION]);
		if (proto_version > HSR_V1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only HSR version 0/1 supported");
			return -EINVAL;
		}
	}

	if (proto == HSR_PROTOCOL_PRP) {
		proto_version = PRP_V1;
		if (interlink) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Interlink only works with HSR");
			return -EINVAL;
		}
	}

	return hsr_dev_finalize(dev, link, interlink, multicast_spec,
				proto_version, extack);
}
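
/* Illustrative only: a PRP link is requested by setting IFLA_HSR_PROTOCOL to
 * HSR_PROTOCOL_PRP, e.g. with a recent iproute2 (option spelling may differ
 * by version):
 *
 *   ip link add name prp0 type hsr slave1 eth1 slave2 eth2 proto 1
 *
 * As enforced in hsr_newlink() above, a PRP link rejects IFLA_HSR_VERSION and
 * IFLA_HSR_INTERLINK and always uses PRP_V1 internally.
 */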

static void hsr_dellink(struct net_device *dev, struct list_head *head)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	timer_delete_sync(&hsr->prune_timer);
	timer_delete_sync(&hsr->prune_proxy_timer);
	timer_delete_sync(&hsr->announce_timer);
	timer_delete_sync(&hsr->announce_proxy_timer);

	hsr_debugfs_term(hsr);
	hsr_del_ports(hsr);

	hsr_del_self_node(hsr);
	hsr_del_nodes(&hsr->node_db);
	hsr_del_nodes(&hsr->proxy_node_db);

	unregister_netdevice_queue(dev, head);
}

static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	u8 proto = HSR_PROTOCOL_HSR;
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex))
			goto nla_put_failure;
	}

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex))
			goto nla_put_failure;
	}

	port = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK);
	if (port) {
		if (nla_put_u32(skb, IFLA_HSR_INTERLINK, port->dev->ifindex))
			goto nla_put_failure;
	}

	if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
		    hsr->sup_multicast_addr) ||
	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
		goto nla_put_failure;
	if (hsr->prot_version == PRP_V1)
		proto = HSR_PROTOCOL_PRP;
	else if (nla_put_u8(skb, IFLA_HSR_VERSION, hsr->prot_version))
		goto nla_put_failure;
	if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
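
/* The attributes filled in above are returned as IFLA_INFO_DATA in
 * RTM_GETLINK replies for the HSR/PRP master device; with iproute2 they
 * typically appear in the detailed link output, e.g. (illustrative):
 *
 *   ip -d link show dev hsr0
 */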

static struct rtnl_link_ops hsr_link_ops __read_mostly = {
	.kind		= "hsr",
	.maxtype	= IFLA_HSR_MAX,
	.policy		= hsr_policy,
	.priv_size	= sizeof(struct hsr_priv),
	.setup		= hsr_dev_setup,
	.newlink	= hsr_newlink,
	.dellink	= hsr_dellink,
	.fill_info	= hsr_fill_info,
};

/* attribute policy */
static const struct nla_policy hsr_genl_policy[HSR_A_MAX + 1] = {
	[HSR_A_NODE_ADDR] = { .len = ETH_ALEN },
	[HSR_A_NODE_ADDR_B] = { .len = ETH_ALEN },
	[HSR_A_IFINDEX] = { .type = NLA_U32 },
	[HSR_A_IF1_AGE] = { .type = NLA_U32 },
	[HSR_A_IF2_AGE] = { .type = NLA_U32 },
	[HSR_A_IF1_SEQ] = { .type = NLA_U16 },
	[HSR_A_IF2_SEQ] = { .type = NLA_U16 },
};

static struct genl_family hsr_genl_family;

static const struct genl_multicast_group hsr_mcgrps[] = {
	{ .name = "hsr-network", },
};
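
/* hsr_nl_ringerror() and hsr_nl_nodedown() below multicast their
 * notifications to this "hsr-network" group. A userspace monitor would
 * resolve the generic netlink family "HSR", look up the group id and join it
 * (for instance with libnl's genl_ctrl_resolve_grp() and
 * nl_socket_add_membership()) to receive HSR_C_RING_ERROR and HSR_C_NODE_DOWN
 * events; this is a sketch of the intended usage, not something defined in
 * this file.
 */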

/* This is called if for some node with MAC address addr, we only get frames
 * over one of the slave interfaces. This would indicate an open network ring
 * (i.e. a link has failed somewhere).
 */
void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
		      struct hsr_port *port)
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0,
			       HSR_C_RING_ERROR);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR ring error message\n");
	rcu_read_unlock();
}

/* This is called when we haven't heard from the node with MAC address addr for
 * some time (just before the node is removed from the node table/list).
 */
void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
{
	struct sk_buff *skb;
	void *msg_head;
	struct hsr_port *master;
	int res;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb)
		goto fail;

	msg_head = genlmsg_put(skb, 0, 0, &hsr_genl_family, 0, HSR_C_NODE_DOWN);
	if (!msg_head)
		goto nla_put_failure;

	res = nla_put(skb, HSR_A_NODE_ADDR, ETH_ALEN, addr);
	if (res < 0)
		goto nla_put_failure;

	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&hsr_genl_family, skb, 0, 0, GFP_ATOMIC);

	return;

nla_put_failure:
	kfree_skb(skb);

fail:
	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	netdev_warn(master->dev, "Could not send HSR node down\n");
	rcu_read_unlock();
}

/* HSR_C_GET_NODE_STATUS lets userspace query the internal HSR node table
 * about the status of a specific node in the network, defined by its MAC
 * address.
 *
 * Input: hsr ifindex, node mac address
 * Output: hsr ifindex, node mac address (copied from request),
 *	   age of latest frame from node over slave 1, slave 2 [ms]
 */
static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
{
	/* For receiving */
	struct nlattr *na;
	struct net_device *hsr_dev;

	/* For sending */
	struct sk_buff *skb_out;
	void *msg_head;
	struct hsr_priv *hsr;
	struct hsr_port *port;
	unsigned char hsr_node_addr_b[ETH_ALEN];
	int hsr_node_if1_age;
	u16 hsr_node_if1_seq;
	int hsr_node_if2_age;
	u16 hsr_node_if2_seq;
	int addr_b_ifindex;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;
	na = info->attrs[HSR_A_NODE_ADDR];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

	/* Send reply */
	skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_STATUS);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	hsr = netdev_priv(hsr_dev);
	res = hsr_get_node_data(hsr,
				(unsigned char *)
				nla_data(info->attrs[HSR_A_NODE_ADDR]),
				hsr_node_addr_b,
				&addr_b_ifindex,
				&hsr_node_if1_age,
				&hsr_node_if1_seq,
				&hsr_node_if2_age,
				&hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN,
		      nla_data(info->attrs[HSR_A_NODE_ADDR]));
	if (res < 0)
		goto nla_put_failure;

	if (addr_b_ifindex > -1) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR_B, ETH_ALEN,
			      hsr_node_addr_b);
		if (res < 0)
			goto nla_put_failure;

		res = nla_put_u32(skb_out, HSR_A_ADDR_B_IFINDEX,
				  addr_b_ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	res = nla_put_u32(skb_out, HSR_A_IF1_AGE, hsr_node_if1_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	res = nla_put_u32(skb_out, HSR_A_IF2_AGE, hsr_node_if2_age);
	if (res < 0)
		goto nla_put_failure;
	res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
	if (res < 0)
		goto nla_put_failure;
	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
				  port->dev->ifindex);
	if (res < 0)
		goto nla_put_failure;

	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	kfree_skb(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
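
/* Request/reply layout, as implemented above: the request carries
 * HSR_A_IFINDEX (the hsr master) and HSR_A_NODE_ADDR (MacAddressA of the node
 * of interest); the HSR_C_SET_NODE_STATUS reply echoes both and adds
 * HSR_A_IF1_AGE/HSR_A_IF1_SEQ and HSR_A_IF2_AGE/HSR_A_IF2_SEQ, plus
 * HSR_A_NODE_ADDR_B, HSR_A_ADDR_B_IFINDEX and the slave ifindexes when they
 * are known.
 */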

/* Get a list of MacAddressA of all nodes known to this node (including self).
 */
static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
{
	unsigned char addr[ETH_ALEN];
	struct net_device *hsr_dev;
	struct sk_buff *skb_out;
	struct hsr_priv *hsr;
	bool restart = false;
	struct nlattr *na;
	void *pos = NULL;
	void *msg_head;
	int res;

	if (!info)
		goto invalid;

	na = info->attrs[HSR_A_IFINDEX];
	if (!na)
		goto invalid;

	rcu_read_lock();
	hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
				       nla_get_u32(info->attrs[HSR_A_IFINDEX]));
	if (!hsr_dev)
		goto rcu_unlock;
	if (!is_hsr_master(hsr_dev))
		goto rcu_unlock;

restart:
	/* Send reply */
	skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb_out) {
		res = -ENOMEM;
		goto fail;
	}

	msg_head = genlmsg_put(skb_out, NETLINK_CB(skb_in).portid,
			       info->snd_seq, &hsr_genl_family, 0,
			       HSR_C_SET_NODE_LIST);
	if (!msg_head) {
		res = -ENOMEM;
		goto nla_put_failure;
	}

	if (!restart) {
		res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
		if (res < 0)
			goto nla_put_failure;
	}

	hsr = netdev_priv(hsr_dev);

	if (!pos)
		pos = hsr_get_next_node(hsr, NULL, addr);
	while (pos) {
		res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
		if (res < 0) {
			if (res == -EMSGSIZE) {
				genlmsg_end(skb_out, msg_head);
				genlmsg_unicast(genl_info_net(info), skb_out,
						info->snd_portid);
				restart = true;
				goto restart;
			}
			goto nla_put_failure;
		}
		pos = hsr_get_next_node(hsr, pos, addr);
	}
	rcu_read_unlock();

	genlmsg_end(skb_out, msg_head);
	genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);

	return 0;

rcu_unlock:
	rcu_read_unlock();
invalid:
	netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
	return 0;

nla_put_failure:
	nlmsg_free(skb_out);
	/* Fall through */

fail:
	rcu_read_unlock();
	return res;
}
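
/* Note on the restart handling above: if the node addresses do not all fit
 * into a single GENLMSG_DEFAULT_SIZE message, the partially filled message is
 * sent and a fresh one is started, so the reply may arrive as several
 * HSR_C_SET_NODE_LIST messages; only the first of them carries HSR_A_IFINDEX.
 */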

static const struct genl_small_ops hsr_ops[] = {
	{
		.cmd = HSR_C_GET_NODE_STATUS,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_status,
		.dumpit = NULL,
	},
	{
		.cmd = HSR_C_GET_NODE_LIST,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,
		.doit = hsr_get_node_list,
		.dumpit = NULL,
	},
};

static struct genl_family hsr_genl_family __ro_after_init = {
	.hdrsize = 0,
	.name = "HSR",
	.version = 1,
	.maxattr = HSR_A_MAX,
	.policy = hsr_genl_policy,
	.netnsok = true,
	.module = THIS_MODULE,
	.small_ops = hsr_ops,
	.n_small_ops = ARRAY_SIZE(hsr_ops),
	.resv_start_op = HSR_C_SET_NODE_LIST + 1,
	.mcgrps = hsr_mcgrps,
	.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};

int __init hsr_netlink_init(void)
{
	int rc;

	rc = rtnl_link_register(&hsr_link_ops);
	if (rc)
		goto fail_rtnl_link_register;

	rc = genl_register_family(&hsr_genl_family);
	if (rc)
		goto fail_genl_register_family;

	hsr_debugfs_create_root();
	return 0;

fail_genl_register_family:
	rtnl_link_unregister(&hsr_link_ops);
fail_rtnl_link_register:

	return rc;
}

void __exit hsr_netlink_exit(void)
{
	genl_unregister_family(&hsr_genl_family);
	rtnl_link_unregister(&hsr_link_ops);
}

MODULE_ALIAS_RTNL_LINK("hsr");