GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/batman-adv/translation-table.c
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "hash.h"
#include "originator.h"

static void tt_local_purge(struct work_struct *work);
static void _tt_global_del_orig(struct bat_priv *bat_priv,
                                struct tt_global_entry *tt_global_entry,
                                char *message);

/* returns 1 if they are the same mac addr */
static int compare_ltt(struct hlist_node *node, void *data2)
{
        void *data1 = container_of(node, struct tt_local_entry, hash_entry);

        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

/* returns 1 if they are the same mac addr */
static int compare_gtt(struct hlist_node *node, void *data2)
{
        void *data1 = container_of(node, struct tt_global_entry, hash_entry);

        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

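/* (re)arm the delayed work that purges stale local tt entries;
 * tt_local_purge() runs on bat_event_workqueue after 10 seconds */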
static void tt_local_start_timer(struct bat_priv *bat_priv)
{
        INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge);
        queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ);
}

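/* look up an ethernet address in the local translation table hash;
 * returns the matching tt_local_entry or NULL if none is found */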
static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
                                                 void *data)
{
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
        int index;

        if (!hash)
                return NULL;

        index = choose_orig(data, hash->size);
        head = &hash->table[index];

        rcu_read_lock();
        hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
                if (!compare_eth(tt_local_entry, data))
                        continue;

                tt_local_entry_tmp = tt_local_entry;
                break;
        }
        rcu_read_unlock();

        return tt_local_entry_tmp;
}

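/* look up an ethernet address in the global translation table hash;
 * returns the matching tt_global_entry or NULL if none is found */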
static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
                                                   void *data)
{
        struct hashtable_t *hash = bat_priv->tt_global_hash;
        struct hlist_head *head;
        struct hlist_node *node;
        struct tt_global_entry *tt_global_entry;
        struct tt_global_entry *tt_global_entry_tmp = NULL;
        int index;

        if (!hash)
                return NULL;

        index = choose_orig(data, hash->size);
        head = &hash->table[index];

        rcu_read_lock();
        hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
                if (!compare_eth(tt_global_entry, data))
                        continue;

                tt_global_entry_tmp = tt_global_entry;
                break;
        }
        rcu_read_unlock();

        return tt_global_entry_tmp;
}

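/* allocate the local translation table hash and start the purge timer;
 * returns 1 on success (or if already initialised), 0 on failure */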
int tt_local_init(struct bat_priv *bat_priv)
{
        if (bat_priv->tt_local_hash)
                return 1;

        bat_priv->tt_local_hash = hash_new(1024);

        if (!bat_priv->tt_local_hash)
                return 0;

        atomic_set(&bat_priv->tt_local_changed, 0);
        tt_local_start_timer(bat_priv);

        return 1;
}

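/* add a client mac address seen on the soft interface to the local
 * translation table (or refresh its last_seen time), remove it from the
 * global table if present, and refuse new entries once the announcement
 * would no longer fit into a single batman packet */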
void tt_local_add(struct net_device *soft_iface, uint8_t *addr)
{
        struct bat_priv *bat_priv = netdev_priv(soft_iface);
        struct tt_local_entry *tt_local_entry;
        struct tt_global_entry *tt_global_entry;
        int required_bytes;

        spin_lock_bh(&bat_priv->tt_lhash_lock);
        tt_local_entry = tt_local_hash_find(bat_priv, addr);
        spin_unlock_bh(&bat_priv->tt_lhash_lock);

        if (tt_local_entry) {
                tt_local_entry->last_seen = jiffies;
                return;
        }

        /* only announce as many hosts as fit into the batman packet and
         * into batman_packet->num_tt; that should also limit MAC flooding */
        required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
        required_bytes += BAT_PACKET_LEN;

        if ((required_bytes > ETH_DATA_LEN) ||
            (atomic_read(&bat_priv->aggregated_ogms) &&
             required_bytes > MAX_AGGREGATION_BYTES) ||
            (bat_priv->num_local_tt + 1 > 255)) {
                bat_dbg(DBG_ROUTES, bat_priv,
                        "Can't add new local tt entry (%pM): "
                        "number of local tt entries exceeds packet size\n",
                        addr);
                return;
        }

        bat_dbg(DBG_ROUTES, bat_priv,
                "Creating new local tt entry: %pM\n", addr);

        tt_local_entry = kmalloc(sizeof(struct tt_local_entry), GFP_ATOMIC);
        if (!tt_local_entry)
                return;

        memcpy(tt_local_entry->addr, addr, ETH_ALEN);
        tt_local_entry->last_seen = jiffies;

        /* the batman interface mac address should never be purged */
        if (compare_eth(addr, soft_iface->dev_addr))
                tt_local_entry->never_purge = 1;
        else
                tt_local_entry->never_purge = 0;

        spin_lock_bh(&bat_priv->tt_lhash_lock);

        hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
                 tt_local_entry, &tt_local_entry->hash_entry);
        bat_priv->num_local_tt++;
        atomic_set(&bat_priv->tt_local_changed, 1);

        spin_unlock_bh(&bat_priv->tt_lhash_lock);

        /* remove address from global hash if present */
        spin_lock_bh(&bat_priv->tt_ghash_lock);

        tt_global_entry = tt_global_hash_find(bat_priv, addr);

        if (tt_global_entry)
                _tt_global_del_orig(bat_priv, tt_global_entry,
                                    "local tt received");

        spin_unlock_bh(&bat_priv->tt_ghash_lock);
}

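/* copy as many local tt addresses as fit into buff (at most buff_len bytes);
 * returns the number of entries copied and clears tt_local_changed when the
 * whole table fitted */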
int tt_local_fill_buffer(struct bat_priv *bat_priv,
                         unsigned char *buff, int buff_len)
{
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct tt_local_entry *tt_local_entry;
        struct hlist_node *node;
        struct hlist_head *head;
        int i, count = 0;

        spin_lock_bh(&bat_priv->tt_lhash_lock);

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_local_entry, node,
                                         head, hash_entry) {
                        if (buff_len < (count + 1) * ETH_ALEN)
                                break;

                        memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
                               ETH_ALEN);

                        count++;
                }
                rcu_read_unlock();
        }

        /* if we did not get all new local tts see you next time ;-) */
        if (count == bat_priv->num_local_tt)
                atomic_set(&bat_priv->tt_local_changed, 0);

        spin_unlock_bh(&bat_priv->tt_lhash_lock);
        return count;
}

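/* seq_file handler: print all locally announced tt entries for the given
 * soft interface */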
int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct tt_local_entry *tt_local_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
        size_t buf_size, pos;
        char *buff;
        int i, ret = 0;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
                ret = seq_printf(seq, "BATMAN mesh %s disabled - "
                                 "please specify interfaces to enable it\n",
                                 net_dev->name);
                goto out;
        }

        if (primary_if->if_status != IF_ACTIVE) {
                ret = seq_printf(seq, "BATMAN mesh %s disabled - "
                                 "primary interface not active\n",
                                 net_dev->name);
                goto out;
        }

        seq_printf(seq, "Locally retrieved addresses (from %s) "
                   "announced via TT:\n",
                   net_dev->name);

        spin_lock_bh(&bat_priv->tt_lhash_lock);

        buf_size = 1;
        /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                __hlist_for_each_rcu(node, head)
                        buf_size += 21;
                rcu_read_unlock();
        }

        buff = kmalloc(buf_size, GFP_ATOMIC);
        if (!buff) {
                spin_unlock_bh(&bat_priv->tt_lhash_lock);
                ret = -ENOMEM;
                goto out;
        }

        buff[0] = '\0';
        pos = 0;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_local_entry, node,
                                         head, hash_entry) {
                        pos += snprintf(buff + pos, 22, " * %pM\n",
                                        tt_local_entry->addr);
                }
                rcu_read_unlock();
        }

        spin_unlock_bh(&bat_priv->tt_lhash_lock);

        seq_printf(seq, "%s", buff);
        kfree(buff);
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return ret;
}

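/* free a local tt entry and update the bookkeeping; used both as the
 * hash_delete() callback and by tt_local_del() after hash_remove() */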
static void _tt_local_del(struct hlist_node *node, void *arg)
{
        struct bat_priv *bat_priv = (struct bat_priv *)arg;
        void *data = container_of(node, struct tt_local_entry, hash_entry);

        kfree(data);
        bat_priv->num_local_tt--;
        atomic_set(&bat_priv->tt_local_changed, 1);
}

static void tt_local_del(struct bat_priv *bat_priv,
                         struct tt_local_entry *tt_local_entry,
                         char *message)
{
        bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n",
                tt_local_entry->addr, message);

        hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
                    tt_local_entry->addr);
        _tt_local_del(&tt_local_entry->hash_entry, bat_priv);
}

void tt_local_remove(struct bat_priv *bat_priv,
                     uint8_t *addr, char *message)
{
        struct tt_local_entry *tt_local_entry;

        spin_lock_bh(&bat_priv->tt_lhash_lock);

        tt_local_entry = tt_local_hash_find(bat_priv, addr);

        if (tt_local_entry)
                tt_local_del(bat_priv, tt_local_entry, message);

        spin_unlock_bh(&bat_priv->tt_lhash_lock);
}

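/* delayed work callback: drop local entries that have not been seen within
 * TT_LOCAL_TIMEOUT seconds (never_purge entries are kept) and re-arm the
 * timer afterwards */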
static void tt_local_purge(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct bat_priv *bat_priv =
                container_of(delayed_work, struct bat_priv, tt_work);
        struct hashtable_t *hash = bat_priv->tt_local_hash;
        struct tt_local_entry *tt_local_entry;
        struct hlist_node *node, *node_tmp;
        struct hlist_head *head;
        unsigned long timeout;
        int i;

        spin_lock_bh(&bat_priv->tt_lhash_lock);

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
                                          head, hash_entry) {
                        if (tt_local_entry->never_purge)
                                continue;

                        timeout = tt_local_entry->last_seen;
                        timeout += TT_LOCAL_TIMEOUT * HZ;

                        if (time_before(jiffies, timeout))
                                continue;

                        tt_local_del(bat_priv, tt_local_entry,
                                     "address timed out");
                }
        }

        spin_unlock_bh(&bat_priv->tt_lhash_lock);
        tt_local_start_timer(bat_priv);
}

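/* cancel the purge work and release the local translation table hash */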
void tt_local_free(struct bat_priv *bat_priv)
{
        if (!bat_priv->tt_local_hash)
                return;

        cancel_delayed_work_sync(&bat_priv->tt_work);
        hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv);
        bat_priv->tt_local_hash = NULL;
}

int tt_global_init(struct bat_priv *bat_priv)
{
        if (bat_priv->tt_global_hash)
                return 1;

        bat_priv->tt_global_hash = hash_new(1024);

        if (!bat_priv->tt_global_hash)
                return 0;

        return 1;
}

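/* parse the tt buffer announced by orig_node: create or update one global
 * entry per mac address, drop matching local entries, and keep a copy of the
 * buffer in orig_node->tt_buff so the entries can be removed again later */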
void tt_global_add_orig(struct bat_priv *bat_priv,
                        struct orig_node *orig_node,
                        unsigned char *tt_buff, int tt_buff_len)
{
        struct tt_global_entry *tt_global_entry;
        struct tt_local_entry *tt_local_entry;
        int tt_buff_count = 0;
        unsigned char *tt_ptr;

        while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
                spin_lock_bh(&bat_priv->tt_ghash_lock);

                tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
                tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);

                if (!tt_global_entry) {
                        spin_unlock_bh(&bat_priv->tt_ghash_lock);

                        tt_global_entry =
                                kmalloc(sizeof(struct tt_global_entry),
                                        GFP_ATOMIC);

                        if (!tt_global_entry)
                                break;

                        memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);

                        bat_dbg(DBG_ROUTES, bat_priv,
                                "Creating new global tt entry: "
                                "%pM (via %pM)\n",
                                tt_global_entry->addr, orig_node->orig);

                        spin_lock_bh(&bat_priv->tt_ghash_lock);
                        hash_add(bat_priv->tt_global_hash, compare_gtt,
                                 choose_orig, tt_global_entry,
                                 &tt_global_entry->hash_entry);

                }

                tt_global_entry->orig_node = orig_node;
                spin_unlock_bh(&bat_priv->tt_ghash_lock);

                /* remove address from local hash if present */
                spin_lock_bh(&bat_priv->tt_lhash_lock);

                tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
                tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr);

                if (tt_local_entry)
                        tt_local_del(bat_priv, tt_local_entry,
                                     "global tt received");

                spin_unlock_bh(&bat_priv->tt_lhash_lock);

                tt_buff_count++;
        }

        /* initialize, and overwrite if malloc succeeds */
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;

        if (tt_buff_len > 0) {
                orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
                if (orig_node->tt_buff) {
                        memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
                        orig_node->tt_buff_len = tt_buff_len;
                }
        }
}

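/* seq_file handler: print all global tt entries together with the
 * originator that announced them */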
int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->tt_global_hash;
        struct tt_global_entry *tt_global_entry;
        struct hard_iface *primary_if;
        struct hlist_node *node;
        struct hlist_head *head;
        size_t buf_size, pos;
        char *buff;
        int i, ret = 0;

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if) {
                ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
                                 "specify interfaces to enable it\n",
                                 net_dev->name);
                goto out;
        }

        if (primary_if->if_status != IF_ACTIVE) {
                ret = seq_printf(seq, "BATMAN mesh %s disabled - "
                                 "primary interface not active\n",
                                 net_dev->name);
                goto out;
        }

        seq_printf(seq,
                   "Globally announced TT entries received via the mesh %s\n",
                   net_dev->name);

        spin_lock_bh(&bat_priv->tt_ghash_lock);

        buf_size = 1;
        /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                __hlist_for_each_rcu(node, head)
                        buf_size += 43;
                rcu_read_unlock();
        }

        buff = kmalloc(buf_size, GFP_ATOMIC);
        if (!buff) {
                spin_unlock_bh(&bat_priv->tt_ghash_lock);
                ret = -ENOMEM;
                goto out;
        }
        buff[0] = '\0';
        pos = 0;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_global_entry, node,
                                         head, hash_entry) {
                        pos += snprintf(buff + pos, 44,
                                        " * %pM via %pM\n",
                                        tt_global_entry->addr,
                                        tt_global_entry->orig_node->orig);
                }
                rcu_read_unlock();
        }

        spin_unlock_bh(&bat_priv->tt_ghash_lock);

        seq_printf(seq, "%s", buff);
        kfree(buff);
out:
        if (primary_if)
                hardif_free_ref(primary_if);
        return ret;
}

static void _tt_global_del_orig(struct bat_priv *bat_priv,
                                struct tt_global_entry *tt_global_entry,
                                char *message)
{
        bat_dbg(DBG_ROUTES, bat_priv,
                "Deleting global tt entry %pM (via %pM): %s\n",
                tt_global_entry->addr, tt_global_entry->orig_node->orig,
                message);

        hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
                    tt_global_entry->addr);
        kfree(tt_global_entry);
}

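/* remove all global entries that orig_node announced (as recorded in its
 * tt buffer) and free the stored buffer */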
void tt_global_del_orig(struct bat_priv *bat_priv,
                        struct orig_node *orig_node, char *message)
{
        struct tt_global_entry *tt_global_entry;
        int tt_buff_count = 0;
        unsigned char *tt_ptr;

        if (orig_node->tt_buff_len == 0)
                return;

        spin_lock_bh(&bat_priv->tt_ghash_lock);

        while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) {
                tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
                tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);

                if ((tt_global_entry) &&
                    (tt_global_entry->orig_node == orig_node))
                        _tt_global_del_orig(bat_priv, tt_global_entry,
                                            message);

                tt_buff_count++;
        }

        spin_unlock_bh(&bat_priv->tt_ghash_lock);

        orig_node->tt_buff_len = 0;
        kfree(orig_node->tt_buff);
        orig_node->tt_buff = NULL;
}

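/* hash_delete() callback used by tt_global_free(): free a global entry */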
static void tt_global_del(struct hlist_node *node, void *arg)
{
        void *data = container_of(node, struct tt_global_entry, hash_entry);

        kfree(data);
}

void tt_global_free(struct bat_priv *bat_priv)
{
        if (!bat_priv->tt_global_hash)
                return;

        hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL);
        bat_priv->tt_global_hash = NULL;
}

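/* map a client mac address to the originator that announced it; returns the
 * orig_node with an increased refcount (the caller has to release it) or
 * NULL if the address is unknown */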
struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
{
        struct tt_global_entry *tt_global_entry;
        struct orig_node *orig_node = NULL;

        spin_lock_bh(&bat_priv->tt_ghash_lock);
        tt_global_entry = tt_global_hash_find(bat_priv, addr);

        if (!tt_global_entry)
                goto out;

        if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
                goto out;

        orig_node = tt_global_entry->orig_node;

out:
        spin_unlock_bh(&bat_priv->tt_ghash_lock);
        return orig_node;
}