Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/net/hsr/hsr_framereg.c
26278 views
1
// SPDX-License-Identifier: GPL-2.0
2
/* Copyright 2011-2014 Autronica Fire and Security AS
3
*
4
* Author(s):
5
* 2011-2014 Arvid Brodin, [email protected]
6
*
7
* The HSR spec says never to forward the same frame twice on the same
8
* interface. A frame is identified by its source MAC address and its HSR
9
* sequence number. This code keeps track of senders and their sequence numbers
10
* to allow filtering of duplicate frames, and to detect HSR ring errors.
11
* Same code handles filtering of duplicates for PRP as well.
12
*/
13
14
#include <linux/if_ether.h>
15
#include <linux/etherdevice.h>
16
#include <linux/slab.h>
17
#include <linux/rculist.h>
18
#include "hsr_main.h"
19
#include "hsr_framereg.h"
20
#include "hsr_netlink.h"
21
22
/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
23
* false otherwise.
24
*/
25
static bool seq_nr_after(u16 a, u16 b)
26
{
27
/* Remove inconsistency where
28
* seq_nr_after(a, b) == seq_nr_before(a, b)
29
*/
30
if ((int)b - a == 32768)
31
return false;
32
33
return (((s16)(b - a)) < 0);
34
}
35
36
/* seq_nr_before(a, b) - true if a is before (lower in sequence than) b */
#define seq_nr_before(a, b)		seq_nr_after((b), (a))
/* seq_nr_before_or_eq(a, b) - true unless a is strictly after b */
#define seq_nr_before_or_eq(a, b)	(!seq_nr_after((a), (b)))

/* Maximum span, in sequence numbers, of a PRP duplicate-drop window
 * (half the 16-bit sequence space; see prp_register_frame_out()).
 */
#define PRP_DROP_WINDOW_LEN 32768
39
40
bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr)
41
{
42
if (!hsr->redbox || !is_valid_ether_addr(hsr->macaddress_redbox))
43
return false;
44
45
return ether_addr_equal(addr, hsr->macaddress_redbox);
46
}
47
48
/* Return true if 'addr' is one of this device's own slave MAC addresses
 * (macaddress_A or macaddress_B of the self_node entry).
 *
 * Used to recognize frames sent by ourselves that have been looped back
 * over the ring. Returns false, with a one-time warning, if the self_node
 * entry has not been set up yet (see hsr_create_self_node()).
 */
bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
{
	struct hsr_self_node *sn;
	bool ret = false;

	rcu_read_lock();
	sn = rcu_dereference(hsr->self_node);
	if (!sn) {
		WARN_ONCE(1, "HSR: No self node\n");
		goto out;
	}

	/* A match on either slave interface address counts as "self" */
	if (ether_addr_equal(addr, sn->macaddress_A) ||
	    ether_addr_equal(addr, sn->macaddress_B))
		ret = true;
out:
	rcu_read_unlock();
	return ret;
}
67
68
/* Search for mac entry. Caller must hold rcu read lock.
 *
 * Only macaddress_A is matched; entries first seen via a node's B address
 * get merged into their macaddress_A entry by hsr_handle_sup_frame().
 * Returns NULL if no entry for 'addr' exists in 'node_db'.
 */
static struct hsr_node *find_node_by_addr_A(struct list_head *node_db,
					    const unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, addr))
			return node;
	}

	return NULL;
}
82
83
/* Check if node for a given MAC address is already present in data base
84
*/
85
bool hsr_is_node_in_db(struct list_head *node_db,
86
const unsigned char addr[ETH_ALEN])
87
{
88
return !!find_node_by_addr_A(node_db, addr);
89
}
90
91
/* Helper for device init; the self_node is used in hsr_rcv() to recognize
 * frames from self that's been looped over the HSR ring.
 *
 * Allocates a new self_node entry holding both slave addresses and
 * publishes it under hsr->list_lock. Returns 0 on success, -ENOMEM on
 * allocation failure.
 */
int hsr_create_self_node(struct hsr_priv *hsr,
			 const unsigned char addr_a[ETH_ALEN],
			 const unsigned char addr_b[ETH_ALEN])
{
	struct hsr_self_node *sn, *old;

	sn = kmalloc(sizeof(*sn), GFP_KERNEL);
	if (!sn)
		return -ENOMEM;

	ether_addr_copy(sn->macaddress_A, addr_a);
	ether_addr_copy(sn->macaddress_B, addr_b);

	/* list_lock serializes writers; readers (hsr_addr_is_self()) use RCU */
	spin_lock_bh(&hsr->list_lock);
	old = rcu_replace_pointer(hsr->self_node, sn,
				  lockdep_is_held(&hsr->list_lock));
	spin_unlock_bh(&hsr->list_lock);

	/* Free the replaced entry only after all RCU readers are done with it */
	if (old)
		kfree_rcu(old, rcu_head);
	return 0;
}
116
117
/* Unpublish and free the self_node entry (counterpart of
 * hsr_create_self_node()). Safe to call when no entry exists.
 */
void hsr_del_self_node(struct hsr_priv *hsr)
{
	struct hsr_self_node *old;

	spin_lock_bh(&hsr->list_lock);
	old = rcu_replace_pointer(hsr->self_node, NULL,
				  lockdep_is_held(&hsr->list_lock));
	spin_unlock_bh(&hsr->list_lock);
	/* Defer the free until concurrent RCU readers have finished */
	if (old)
		kfree_rcu(old, rcu_head);
}
128
129
/* Free every node entry in 'node_db'.
 *
 * NOTE(review): entries are freed directly (no list_del_rcu/kfree_rcu),
 * so the list must no longer be reachable by any RCU reader when this
 * runs — confirm callers only use it at teardown.
 */
void hsr_del_nodes(struct list_head *node_db)
{
	struct hsr_node *node;
	struct hsr_node *tmp;

	list_for_each_entry_safe(node, tmp, node_db, mac_list)
		kfree(node);
}
137
138
/* Record on which LAN a frame from a single attached node (SAN) was
 * received; frames arriving on other port types are ignored.
 */
void prp_handle_san_frame(bool san, enum hsr_port_type port,
			  struct hsr_node *node)
{
	/* Mark if the SAN node is over LAN_A or LAN_B */
	switch (port) {
	case HSR_PT_SLAVE_A:
		node->san_a = true;
		break;
	case HSR_PT_SLAVE_B:
		node->san_b = true;
		break;
	default:
		break;
	}
}
150
151
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A;
 * seq_out is used to initialize filtering of outgoing duplicate frames
 * originating from the newly added node.
 *
 * Uses GFP_ATOMIC since it can run in packet-receive context. If another
 * CPU inserted an entry for the same address while we were setting up,
 * that existing entry is returned and the new allocation is freed.
 * Returns NULL only on allocation failure.
 */
static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
				     struct list_head *node_db,
				     unsigned char addr[],
				     u16 seq_out, bool san,
				     enum hsr_port_type rx_port)
{
	struct hsr_node *new_node, *node;
	unsigned long now;
	int i;

	new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
	if (!new_node)
		return NULL;

	ether_addr_copy(new_node->macaddress_A, addr);
	spin_lock_init(&new_node->seq_out_lock);

	/* We are only interested in time diffs here, so use current jiffies
	 * as initialization. (0 could trigger an spurious ring error warning).
	 */
	now = jiffies;
	for (i = 0; i < HSR_PT_PORTS; i++) {
		new_node->time_in[i] = now;
		new_node->time_out[i] = now;
	}
	for (i = 0; i < HSR_PT_PORTS; i++) {
		new_node->seq_out[i] = seq_out;
		/* Start with an empty PRP drop window: start == expected
		 * (see prp_register_frame_out()).
		 */
		new_node->seq_expected[i] = seq_out + 1;
		new_node->seq_start[i] = seq_out + 1;
	}

	if (san && hsr->proto_ops->handle_san_frame)
		hsr->proto_ops->handle_san_frame(san, rx_port, new_node);

	/* Re-check for an existing entry under the lock before inserting,
	 * to avoid duplicate entries from concurrent adders.
	 */
	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_rcu(node, node_db, mac_list,
				lockdep_is_held(&hsr->list_lock)) {
		if (ether_addr_equal(node->macaddress_A, addr))
			goto out;
		if (ether_addr_equal(node->macaddress_B, addr))
			goto out;
	}
	list_add_tail_rcu(&new_node->mac_list, node_db);
	spin_unlock_bh(&hsr->list_lock);
	return new_node;
out:
	spin_unlock_bh(&hsr->list_lock);
	/* A concurrent writer beat us to it; hand back the existing entry */
	kfree(new_node);
	return node;
}
205
206
/* Clear a node's per-LAN SAN markings when a supervision frame is seen
 * from it; non-supervision frames leave the markings untouched.
 */
void prp_update_san_info(struct hsr_node *node, bool is_sup)
{
	if (is_sup) {
		node->san_a = false;
		node->san_b = false;
	}
}
214
215
/* Get the hsr_node from which 'skb' was sent.
 *
 * Looks the frame's source MAC up in 'node_db' (matching either address A
 * or B), then in the proxy node table; if the sender is unknown, a new
 * entry is created via hsr_add_node(). Caller must hold the RCU read
 * lock. Returns NULL if the skb has no mac header, carries a truncated
 * HSR/PRP header, or node allocation fails.
 */
struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
			      struct sk_buff *skb, bool is_sup,
			      enum hsr_port_type rx_port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_node *node;
	struct ethhdr *ethhdr;
	struct prp_rct *rct;
	bool san = false;
	u16 seq_out;

	if (!skb_mac_header_was_set(skb))
		return NULL;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	list_for_each_entry_rcu(node, node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
		if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
	}

	/* Check if required node is not in proxy nodes table */
	list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) {
		if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) {
			if (hsr->proto_ops->update_san_info)
				hsr->proto_ops->update_san_info(node, is_sup);
			return node;
		}
	}

	/* Everyone may create a node entry, connected node to a HSR/PRP
	 * device.
	 */
	if (ethhdr->h_proto == htons(ETH_P_PRP) ||
	    ethhdr->h_proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return NULL;

		/* Use the existing sequence_nr from the tag as starting point
		 * for filtering duplicate frames.
		 */
		seq_out = hsr_get_skb_sequence_nr(skb) - 1;
	} else {
		rct = skb_get_PRP_rct(skb);
		if (rct && prp_check_lsdu_size(skb, rct, is_sup)) {
			seq_out = prp_get_skb_sequence_nr(rct);
		} else {
			/* No usable tag: an untagged frame from a slave port
			 * means the sender is a single attached node (SAN).
			 */
			if (rx_port != HSR_PT_MASTER)
				san = true;
			seq_out = HSR_SEQNR_START;
		}
	}

	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out,
			    san, rx_port);
}
282
283
/* Use the Supervision frame's info about an eventual macaddress_B for merging
 * nodes that has previously had their macaddress_B registered as a separate
 * node.
 *
 * The skb data pointer is advanced header by header (tracking the total in
 * total_pull_size) to reach the supervision payload, and restored with a
 * single skb_push() before returning — on every exit path via 'done'.
 */
void hsr_handle_sup_frame(struct hsr_frame_info *frame)
{
	struct hsr_node *node_curr = frame->node_src;
	struct hsr_port *port_rcv = frame->port_rcv;
	struct hsr_priv *hsr = port_rcv->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tlv *hsr_sup_tlv;
	struct hsr_node *node_real;
	struct sk_buff *skb = NULL;
	struct list_head *node_db;
	struct ethhdr *ethhdr;
	int i;
	unsigned int pull_size = 0;
	unsigned int total_pull_size = 0;

	/* Here either frame->skb_hsr or frame->skb_prp should be
	 * valid as supervision frame always will have protocol
	 * header info.
	 */
	if (frame->skb_hsr)
		skb = frame->skb_hsr;
	else if (frame->skb_prp)
		skb = frame->skb_prp;
	else if (frame->skb_std)
		skb = frame->skb_std;
	if (!skb)
		return;

	/* Leave the ethernet header. */
	pull_size = sizeof(struct ethhdr);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* And leave the HSR tag. */
	if (ethhdr->h_proto == htons(ETH_P_HSR)) {
		pull_size = sizeof(struct hsr_tag);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;
	}

	/* And leave the HSR sup tag. */
	pull_size = sizeof(struct hsr_sup_tag);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* get HSR sup payload */
	hsr_sp = (struct hsr_sup_payload *)skb->data;

	/* Merge node_curr (registered on macaddress_B) into node_real */
	node_db = &port_rcv->hsr->node_db;
	node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A);
	if (!node_real)
		/* No frame received from AddrA of this node yet */
		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
					 HSR_SEQNR_START - 1, true,
					 port_rcv->type);
	if (!node_real)
		goto done; /* No mem */
	if (node_real == node_curr)
		/* Node has already been merged */
		goto done;

	/* Leave the first HSR sup payload. */
	pull_size = sizeof(struct hsr_sup_payload);
	skb_pull(skb, pull_size);
	total_pull_size += pull_size;

	/* Get second supervision tlv */
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	/* And check if it is a redbox mac TLV */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* We could stop here after pushing hsr_sup_payload,
		 * or proceed and allow macaddress_B and for redboxes.
		 */
		/* Sanity check length */
		if (hsr_sup_tlv->HSR_TLV_length != 6)
			goto done;

		/* Leave the second HSR sup tlv. */
		pull_size = sizeof(struct hsr_sup_tlv);
		skb_pull(skb, pull_size);
		total_pull_size += pull_size;

		/* Get redbox mac address. */
		hsr_sp = (struct hsr_sup_payload *)skb->data;

		/* Check if redbox mac and node mac are equal. */
		if (!ether_addr_equal(node_real->macaddress_A, hsr_sp->macaddress_A)) {
			/* This is a redbox supervision frame for a VDAN! */
			goto done;
		}
	}

	ether_addr_copy(node_real->macaddress_B, ethhdr->h_source);
	/* Carry over the freshest timestamps and the most recent sequence
	 * numbers from the macaddress_B entry into the merged node.
	 */
	spin_lock_bh(&node_real->seq_out_lock);
	for (i = 0; i < HSR_PT_PORTS; i++) {
		if (!node_curr->time_in_stale[i] &&
		    time_after(node_curr->time_in[i], node_real->time_in[i])) {
			node_real->time_in[i] = node_curr->time_in[i];
			node_real->time_in_stale[i] =
						node_curr->time_in_stale[i];
		}
		if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
			node_real->seq_out[i] = node_curr->seq_out[i];
	}
	spin_unlock_bh(&node_real->seq_out_lock);
	node_real->addr_B_port = port_rcv->type;

	/* Drop the now-redundant macaddress_B entry; 'removed' guards
	 * against deleting it twice.
	 */
	spin_lock_bh(&hsr->list_lock);
	if (!node_curr->removed) {
		list_del_rcu(&node_curr->mac_list);
		node_curr->removed = true;
		kfree_rcu(node_curr, rcu_head);
	}
	spin_unlock_bh(&hsr->list_lock);

done:
	/* Push back here */
	skb_push(skb, total_pull_size);
}
409
410
/* 'skb' is a frame meant for this host, that is to be passed to upper layers.
 *
 * If the frame was sent by a node's B interface, replace the source
 * address with that node's "official" address (macaddress_A) so that upper
 * layers recognize where it came from.
 */
void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
{
	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	/* Unconditional write: a no-op when the frame already carries
	 * macaddress_A as its source.
	 */
	memcpy(&eth_hdr(skb)->h_source, node->macaddress_A, ETH_ALEN);
}
425
426
/* 'skb' is a frame meant for another host.
 * 'port' is the outgoing interface
 *
 * Substitute the target (dest) MAC address if necessary, so the it matches the
 * recipient interface MAC address, regardless of whether that is the
 * recipient's A or B interface.
 * This is needed to keep the packets flowing through switches that learn on
 * which "side" the different interfaces are.
 */
void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
			 struct hsr_port *port)
{
	struct hsr_node *node_dst;

	if (!skb_mac_header_was_set(skb)) {
		WARN_ONCE(1, "%s: Mac header not set\n", __func__);
		return;
	}

	/* Multicast/broadcast destinations need no substitution */
	if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
		return;

	node_dst = find_node_by_addr_A(&port->hsr->node_db,
				       eth_hdr(skb)->h_dest);
	/* In RedBox mode the destination may be a proxied node instead */
	if (!node_dst && port->hsr->redbox)
		node_dst = find_node_by_addr_A(&port->hsr->proxy_node_db,
					       eth_hdr(skb)->h_dest);

	if (!node_dst) {
		/* Unknown unicast destinations are normal for PRP_V1;
		 * otherwise complain (rate-limited).
		 */
		if (port->hsr->prot_version != PRP_V1 && net_ratelimit())
			netdev_err(skb->dev, "%s: Unknown node\n", __func__);
		return;
	}
	/* Only substitute when sending towards the node's B interface */
	if (port->type != node_dst->addr_B_port)
		return;

	if (is_valid_ether_addr(node_dst->macaddress_B))
		ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
}
465
466
/* Record the arrival time of a frame from 'node' on 'port'. The
 * timestamps feed the pruning timer and ring-error detection
 * (see hsr_prune_nodes()).
 */
void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
			   u16 sequence_nr)
{
	/* Don't register incoming frames without a valid sequence number. This
	 * ensures entries of restarted nodes gets pruned so that they can
	 * re-register and resume communications.
	 */
	if (!(port->dev->features & NETIF_F_HW_HSR_TAG_RM) &&
	    seq_nr_before(sequence_nr, node->seq_out[port->type]))
		return;

	node->time_in[port->type] = jiffies;
	node->time_in_stale[port->type] = false;
}
480
481
/* 'skb' is a HSR Ethernet frame (with a HSR tag inserted), with a valid
 * ethhdr->h_source address and skb->mac_header set.
 *
 * Return:
 *	 1 if frame can be shown to have been sent recently on this interface,
 *	 0 otherwise, or
 *	 negative error code on error
 */
int hsr_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame)
{
	struct hsr_node *node = frame->node_src;
	u16 sequence_nr = frame->sequence_nr;

	spin_lock_bh(&node->seq_out_lock);
	/* Duplicate: an equal-or-newer sequence number was already sent on
	 * this port recently. The HSR_ENTRY_FORGET_TIME bound lets a node
	 * that restarted with a lower sequence number resume after a pause.
	 */
	if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) &&
	    time_is_after_jiffies(node->time_out[port->type] +
	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME))) {
		spin_unlock_bh(&node->seq_out_lock);
		return 1;
	}

	/* Accept the frame and remember when/what we last sent */
	node->time_out[port->type] = jiffies;
	node->seq_out[port->type] = sequence_nr;
	spin_unlock_bh(&node->seq_out_lock);
	return 0;
}
507
508
/* Adaptation of the PRP duplicate discard algorithm described in wireshark
 * wiki (https://wiki.wireshark.org/PRP)
 *
 * A drop window is maintained for both LANs with start sequence set to the
 * first sequence accepted on the LAN that has not been seen on the other LAN,
 * and expected sequence set to the latest received sequence number plus one.
 *
 * When a frame is received on either LAN it is compared against the received
 * frames on the other LAN. If it is outside the drop window of the other LAN
 * the frame is accepted and the drop window is updated.
 * The drop window for the other LAN is reset.
 *
 * 'port' is the outgoing interface
 * 'frame' is the frame to be sent
 *
 * Return:
 *	 1 if frame can be shown to have been sent recently on this interface,
 *	 0 otherwise
 */
int prp_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame)
{
	enum hsr_port_type other_port;
	enum hsr_port_type rcv_port;
	struct hsr_node *node;
	u16 sequence_diff;
	u16 sequence_exp;
	u16 sequence_nr;

	/* out-going frames are always in order
	 * and can be checked the same way as for HSR
	 */
	if (frame->port_rcv->type == HSR_PT_MASTER)
		return hsr_register_frame_out(port, frame);

	/* for PRP we should only forward frames from the slave ports
	 * to the master port
	 */
	if (port->type != HSR_PT_MASTER)
		return 1;

	node = frame->node_src;
	sequence_nr = frame->sequence_nr;
	sequence_exp = sequence_nr + 1;
	rcv_port = frame->port_rcv->type;
	other_port = rcv_port == HSR_PT_SLAVE_A ? HSR_PT_SLAVE_B :
				 HSR_PT_SLAVE_A;

	spin_lock_bh(&node->seq_out_lock);
	/* An empty drop window is encoded as seq_start == seq_expected */
	if (time_is_before_jiffies(node->time_out[port->type] +
	    msecs_to_jiffies(HSR_ENTRY_FORGET_TIME)) ||
	    (node->seq_start[rcv_port] == node->seq_expected[rcv_port] &&
	     node->seq_start[other_port] == node->seq_expected[other_port])) {
		/* the node hasn't been sending for a while
		 * or both drop windows are empty, forward the frame
		 */
		node->seq_start[rcv_port] = sequence_nr;
	} else if (seq_nr_before(sequence_nr, node->seq_expected[other_port]) &&
		   seq_nr_before_or_eq(node->seq_start[other_port], sequence_nr)) {
		/* drop the frame, update the drop window for the other port
		 * and reset our drop window
		 */
		node->seq_start[other_port] = sequence_exp;
		node->seq_expected[rcv_port] = sequence_exp;
		node->seq_start[rcv_port] = node->seq_expected[rcv_port];
		spin_unlock_bh(&node->seq_out_lock);
		return 1;
	}

	/* update the drop window for the port where this frame was received
	 * and clear the drop window for the other port
	 */
	node->seq_start[other_port] = node->seq_expected[other_port];
	node->seq_expected[rcv_port] = sequence_exp;
	sequence_diff = sequence_exp - node->seq_start[rcv_port];
	/* Cap the window so it can never span more than half the 16-bit
	 * sequence space.
	 */
	if (sequence_diff > PRP_DROP_WINDOW_LEN)
		node->seq_start[rcv_port] = sequence_exp - PRP_DROP_WINDOW_LEN;

	node->time_out[port->type] = jiffies;
	node->seq_out[port->type] = sequence_nr;
	spin_unlock_bh(&node->seq_out_lock);
	return 0;
}
590
591
#if IS_MODULE(CONFIG_PRP_DUP_DISCARD_KUNIT_TEST)
592
EXPORT_SYMBOL(prp_register_frame_out);
593
#endif
594
595
/* Return the slave port on which frames from 'node' are arriving late:
 * either the slave whose timestamp has gone stale, or the one whose last
 * frame trails the other slave's by more than MAX_SLAVE_DIFF. Returns
 * NULL when neither slave is lagging (no ring error to report).
 */
static struct hsr_port *get_late_port(struct hsr_priv *hsr,
				      struct hsr_node *node)
{
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	if (time_after(node->time_in[HSR_PT_SLAVE_B],
		       node->time_in[HSR_PT_SLAVE_A] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (time_after(node->time_in[HSR_PT_SLAVE_A],
		       node->time_in[HSR_PT_SLAVE_B] +
					msecs_to_jiffies(MAX_SLAVE_DIFF)))
		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);

	return NULL;
}
614
615
/* Remove stale sequence_nr records. Called by timer every
 * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
 *
 * Also reports ring errors (a lagging slave port) and node-down events
 * via netlink, then re-arms itself with PRUNE_PERIOD.
 */
void hsr_prune_nodes(struct timer_list *t)
{
	struct hsr_priv *hsr = timer_container_of(hsr, t, prune_timer);
	struct hsr_node *node;
	struct hsr_node *tmp;
	struct hsr_port *port;
	unsigned long timestamp;
	unsigned long time_a, time_b;

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
		/* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A]
		 * nor time_in[HSR_PT_SLAVE_B], will ever be updated for
		 * the master port. Thus the master node will be repeatedly
		 * pruned leading to packet loss.
		 */
		if (hsr_addr_is_self(hsr, node->macaddress_A))
			continue;

		/* Shorthand */
		time_a = node->time_in[HSR_PT_SLAVE_A];
		time_b = node->time_in[HSR_PT_SLAVE_B];

		/* Check for timestamps old enough to risk wrap-around */
		if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_A] = true;
		if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2))
			node->time_in_stale[HSR_PT_SLAVE_B] = true;

		/* Get age of newest frame from node.
		 * At least one time_in is OK here; nodes get pruned long
		 * before both time_ins can get stale
		 */
		timestamp = time_a;
		if (node->time_in_stale[HSR_PT_SLAVE_A] ||
		    (!node->time_in_stale[HSR_PT_SLAVE_B] &&
		    time_after(time_b, time_a)))
			timestamp = time_b;

		/* Warn of ring error only as long as we get frames at all */
		if (time_is_after_jiffies(timestamp +
				msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) {
			rcu_read_lock();
			port = get_late_port(hsr, node);
			if (port)
				hsr_nl_ringerror(hsr, node->macaddress_A, port);
			rcu_read_unlock();
		}

		/* Prune old entries */
		if (time_is_before_jiffies(timestamp +
				msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
			hsr_nl_nodedown(hsr, node->macaddress_A);
			/* 'removed' guards against a double list_del_rcu */
			if (!node->removed) {
				list_del_rcu(&node->mac_list);
				node->removed = true;
				/* Note that we need to free this entry later: */
				kfree_rcu(node, rcu_head);
			}
		}
	}
	spin_unlock_bh(&hsr->list_lock);

	/* Restart timer */
	mod_timer(&hsr->prune_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PERIOD));
}
685
686
/* Timer callback: drop proxy-node entries (RedBox mode) that have not
 * been heard from on the interlink for HSR_PROXY_NODE_FORGET_TIME, then
 * re-arm with PRUNE_PROXY_PERIOD. The RedBox's own entry is never pruned.
 */
void hsr_prune_proxy_nodes(struct timer_list *t)
{
	struct hsr_priv *hsr = timer_container_of(hsr, t, prune_proxy_timer);
	unsigned long timestamp;
	struct hsr_node *node;
	struct hsr_node *tmp;

	spin_lock_bh(&hsr->list_lock);
	list_for_each_entry_safe(node, tmp, &hsr->proxy_node_db, mac_list) {
		/* Don't prune RedBox node. */
		if (hsr_addr_is_redbox(hsr, node->macaddress_A))
			continue;

		timestamp = node->time_in[HSR_PT_INTERLINK];

		/* Prune old entries */
		if (time_is_before_jiffies(timestamp +
				msecs_to_jiffies(HSR_PROXY_NODE_FORGET_TIME))) {
			hsr_nl_nodedown(hsr, node->macaddress_A);
			/* 'removed' guards against a double list_del_rcu */
			if (!node->removed) {
				list_del_rcu(&node->mac_list);
				node->removed = true;
				/* Note that we need to free this entry later: */
				kfree_rcu(node, rcu_head);
			}
		}
	}

	spin_unlock_bh(&hsr->list_lock);

	/* Restart timer */
	mod_timer(&hsr->prune_proxy_timer,
		  jiffies + msecs_to_jiffies(PRUNE_PROXY_PERIOD));
}
720
721
/* Node-table iterator: with _pos == NULL return the first node in
 * hsr->node_db, otherwise the node following _pos; NULL at the end.
 * Copies the returned node's macaddress_A into 'addr'. Caller must hold
 * the RCU read lock.
 */
void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
			unsigned char addr[ETH_ALEN])
{
	struct hsr_node *node;

	if (!_pos) {
		node = list_first_or_null_rcu(&hsr->node_db,
					      struct hsr_node, mac_list);
		if (node)
			ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	node = _pos;
	/* The "loop" only fetches the entry after _pos, if there is one */
	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
		ether_addr_copy(addr, node->macaddress_A);
		return node;
	}

	return NULL;
}
742
743
/* Fill in status data for the node registered under macaddress_A 'addr':
 * its B address, the ifindex the B address was seen on (-1 if none), the
 * age in ms of the newest frame per slave (clamped to INT_MAX), and the
 * last sequence numbers, presented as if incoming on each interface.
 * Returns 0 on success or -ENOENT if the node is unknown. Caller must
 * hold the RCU read lock (see find_node_by_addr_A()).
 */
int hsr_get_node_data(struct hsr_priv *hsr,
		      const unsigned char *addr,
		      unsigned char addr_b[ETH_ALEN],
		      unsigned int *addr_b_ifindex,
		      int *if1_age,
		      u16 *if1_seq,
		      int *if2_age,
		      u16 *if2_seq)
{
	struct hsr_node *node;
	struct hsr_port *port;
	unsigned long tdiff;

	node = find_node_by_addr_A(&hsr->node_db, addr);
	if (!node)
		return -ENOENT;

	ether_addr_copy(addr_b, node->macaddress_B);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
	if (node->time_in_stale[HSR_PT_SLAVE_A])
		*if1_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if1_age = INT_MAX;
#endif
	else
		*if1_age = jiffies_to_msecs(tdiff);

	tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
	if (node->time_in_stale[HSR_PT_SLAVE_B])
		*if2_age = INT_MAX;
#if HZ <= MSEC_PER_SEC
	else if (tdiff > msecs_to_jiffies(INT_MAX))
		*if2_age = INT_MAX;
#endif
	else
		*if2_age = jiffies_to_msecs(tdiff);

	/* Present sequence numbers as if they were incoming on interface */
	*if1_seq = node->seq_out[HSR_PT_SLAVE_B];
	*if2_seq = node->seq_out[HSR_PT_SLAVE_A];

	if (node->addr_B_port != HSR_PT_NONE) {
		port = hsr_port_get_hsr(hsr, node->addr_B_port);
		/* NOTE(review): assumes the addr_B port still exists;
		 * hsr_port_get_hsr() returning NULL would oops here —
		 * verify callers' locking makes that impossible.
		 */
		*addr_b_ifindex = port->dev->ifindex;
	} else {
		*addr_b_ifindex = -1;
	}

	return 0;
}
795
796