GitHub Repository: torvalds/linux
Path: blob/master/net/mac80211/mesh_pathtbl.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Copyright (C) 2023 Intel Corporation
 * Author: Luis Carlos Cobo <[email protected]>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
#include <linux/rhashtable.h>

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(get_unaligned((u32 *)((u8 *)addr + 2)), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static const struct rhashtable_params fast_tx_rht_params = {
	.nelem_hint = 10,
	.automatic_shrinking = true,
	.key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key),
	.key_offset = offsetof(struct ieee80211_mesh_fast_tx, key),
	.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
	.hashfn = mesh_table_hash,
};

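/* rhashtable_free_and_destroy() callback: free a fast xmit cache entry once
 * the current RCU grace period has passed.
 */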
static void __mesh_fast_tx_entry_free(void *ptr, void *tblptr)
{
	struct ieee80211_mesh_fast_tx *entry = ptr;

	kfree_rcu(entry, fast_tx.rcu_head);
}

static void mesh_fast_tx_deinit(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_free_and_destroy(&cache->rht,
				    __mesh_fast_tx_entry_free, NULL);
}

static void mesh_fast_tx_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_init(&cache->rht, &fast_tx_rht_params);
	INIT_HLIST_HEAD(&cache->walk_head);
	spin_lock_init(&cache->walk_lock);
}

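/* A path counts as expired once it is marked active, its expiry time has
 * passed, and it has not been pinned by userspace (MESH_PATH_FIXED).
 */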
static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static void mesh_table_init(struct mesh_table *tbl)
{
	INIT_HLIST_HEAD(&tbl->known_gates);
	INIT_HLIST_HEAD(&tbl->walk_head);
	atomic_set(&tbl->entries, 0);
	spin_lock_init(&tbl->gates_lock);
	spin_lock_init(&tbl->walk_lock);

	/* rhashtable_init() may fail only in case of wrong
	 * mesh_rht_params
	 */
	WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

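/* Rewrite a queued frame so it can be delivered via a mesh gate: if the frame
 * has no Address Extension yet, add one and preserve the original mesh DA/SA
 * in eaddr1/eaddr2, then point addr1 at the gate's next hop, addr2 at our
 * interface address and addr3 at @dst_addr (the gate, when called from
 * mesh_path_move_to_queue()).
 */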
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


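/* Common lookup helper for the mesh path and proxy path tables; if the entry
 * found has expired, its MESH_PATH_ACTIVE flag is cleared before returning it.
 */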
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}

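/*
 * Illustrative use of mesh_path_lookup()/mpp_path_lookup() (not code from
 * this file; 'sdata', 'dst' and 'next_hop' are assumed to be declared by the
 * caller).  Any returned mpath is only valid inside the RCU read-side
 * section:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */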
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 *
 * Returns: 0 on success, or -EEXIST if the path is already flagged as a gate
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = &mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 *
 * Returns: The number of gates
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

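/* Allocate and initialise a new struct mesh_path for @dst.  The entry is not
 * yet inserted into any table; callers do that under the table's walk_lock.
 */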
static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

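/* Unlink a fast xmit entry from the walk list and the hash table and free it
 * after an RCU grace period.  Callers must hold cache->walk_lock.
 */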
static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
				    struct ieee80211_mesh_fast_tx *entry)
{
	hlist_del_rcu(&entry->walk_list);
	rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params);
	kfree_rcu(entry, fast_tx.rcu_head);
}

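/* Look up a cached fast xmit entry for @key.  Entries whose underlying mesh
 * path has gone inactive or expired are dropped here; on a hit, the path is
 * refreshed and the entry's timestamp updated.  Must be called under
 * rcu_read_lock().
 */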
struct ieee80211_mesh_fast_tx *
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
		 struct ieee80211_mesh_fast_tx_key *key)
{
	struct ieee80211_mesh_fast_tx *entry;
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
	if (!entry)
		return NULL;

	if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
	    mpath_expired(entry->mpath)) {
		spin_lock_bh(&cache->walk_lock);
		entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
		spin_unlock_bh(&cache->walk_lock);
		return NULL;
	}

	mesh_path_refresh(sdata, entry->mpath, NULL);
	if (entry->mppath)
		entry->mppath->exp_time = jiffies;
	entry->timestamp = jiffies;

	return entry;
}

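/* Build and cache a fast xmit entry (a precomputed 802.11 + mesh header) for
 * a resolved path, so follow-up frames to the same destination can skip full
 * header construction.  Entries are only created for QoS data frames, with
 * either no key or a CCMP/GCMP key, and only while the cache is below
 * MESH_FAST_TX_CACHE_MAX_SIZE.
 */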
void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
			struct sk_buff *skb, struct mesh_path *mpath)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_mesh_fast_tx *entry, *prev;
	struct ieee80211_mesh_fast_tx build = {};
	struct ieee80211s_hdr *meshhdr;
	struct mesh_tx_cache *cache;
	struct ieee80211_key *key;
	struct mesh_path *mppath;
	struct sta_info *sta;
	u8 *qc;

	if (sdata->noack_map ||
	    !ieee80211_is_data_qos(hdr->frame_control))
		return;

	build.fast_tx.hdr_len = ieee80211_hdrlen(hdr->frame_control);
	meshhdr = (struct ieee80211s_hdr *)(skb->data + build.fast_tx.hdr_len);
	build.hdrlen = ieee80211_get_mesh_hdrlen(meshhdr);

	cache = &sdata->u.mesh.tx_cache;
	if (atomic_read(&cache->rht.nelems) >= MESH_FAST_TX_CACHE_MAX_SIZE)
		return;

	sta = rcu_dereference(mpath->next_hop);
	if (!sta)
		return;

	build.key.type = MESH_FAST_TX_TYPE_LOCAL;
	if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
		/* This is required to keep the mppath alive */
		mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
		if (!mppath)
			return;
		build.mppath = mppath;
		if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr))
			build.key.type = MESH_FAST_TX_TYPE_PROXIED;
	} else if (ieee80211_has_a4(hdr->frame_control)) {
		mppath = mpath;
	} else {
		return;
	}

	if (!ether_addr_equal(hdr->addr4, sdata->vif.addr))
		build.key.type = MESH_FAST_TX_TYPE_FORWARDED;

	/* rate limit, in case fast xmit can't be enabled */
	if (mppath->fast_tx_check == jiffies)
		return;

	mppath->fast_tx_check = jiffies;

	/*
	 * Same use of the sta lock as in ieee80211_check_fast_xmit, in order
	 * to protect against concurrent sta key updates.
	 */
	spin_lock_bh(&sta->lock);
	key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
	if (!key)
		key = rcu_access_pointer(sdata->default_unicast_key);
	build.fast_tx.key = key;

	if (key) {
		bool gen_iv, iv_spc;

		gen_iv = key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
		iv_spc = key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;

		if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
		    (key->flags & KEY_FLAG_TAINTED))
			goto unlock_sta;

		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_CCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_GCMP_HDR_LEN;
			break;
		default:
			goto unlock_sta;
		}
	}

	memcpy(build.key.addr, mppath->dst, ETH_ALEN);
	build.timestamp = jiffies;
	build.fast_tx.band = info->band;
	build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
	build.fast_tx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
	build.mpath = mpath;
	memcpy(build.hdr, meshhdr, build.hdrlen);
	memcpy(build.hdr + build.hdrlen, rfc1042_header, sizeof(rfc1042_header));
	build.hdrlen += sizeof(rfc1042_header);
	memcpy(build.fast_tx.hdr, hdr, build.fast_tx.hdr_len);

	hdr = (struct ieee80211_hdr *)build.fast_tx.hdr;
	if (build.fast_tx.key)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	qc = ieee80211_get_qos_ctl(hdr);
	qc[1] |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8;

	entry = kmemdup(&build, sizeof(build), GFP_ATOMIC);
	if (!entry)
		goto unlock_sta;

	spin_lock(&cache->walk_lock);
	prev = rhashtable_lookup_get_insert_fast(&cache->rht,
						 &entry->rhash,
						 fast_tx_rht_params);
	if (IS_ERR(prev)) {
		kfree(entry);
		goto unlock_cache;
	}

	/*
	 * replace any previous entry in the hash table, in case we're
	 * replacing it with a different type (e.g. mpath -> mpp)
	 */
	if (unlikely(prev)) {
		rhashtable_replace_fast(&cache->rht, &prev->rhash,
					&entry->rhash, fast_tx_rht_params);
		hlist_del_rcu(&prev->walk_list);
		kfree_rcu(prev, fast_tx.rcu_head);
	}

	hlist_add_head(&entry->walk_list, &cache->walk_head);

unlock_cache:
	spin_unlock(&cache->walk_lock);
unlock_sta:
	spin_unlock_bh(&sta->lock);
}

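/* Garbage-collect the fast xmit cache: once it has grown past
 * MESH_FAST_TX_CACHE_THRESHOLD_SIZE, drop every entry older than
 * MESH_FAST_TX_CACHE_TIMEOUT.
 */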
void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
	unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
		return;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (!time_is_after_jiffies(entry->timestamp + timeout))
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

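/* The following helpers invalidate cached fast xmit entries when the state
 * they were built from changes: by mesh path, by next-hop station, or by
 * destination address (one lookup per fast tx key type).
 */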
void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (entry->mpath == mpath)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
			    struct sta_info *sta)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (rcu_access_pointer(entry->mpath->next_hop) == sta)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
			     const u8 *addr)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx_key key = {};
	struct ieee80211_mesh_fast_tx *entry;
	int i;

	ether_addr_copy(key.addr, addr);
	spin_lock_bh(&cache->walk_lock);
	for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) {
		key.type = i;
		entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
	}
	spin_unlock_bh(&cache->walk_lock);
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new or already existing mesh path on success, or an ERR_PTR()
 * on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-EOPNOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-EOPNOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = &sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

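/* Add a proxy path entry: @dst is a destination outside the mesh that is
 * reachable through the proxy mesh node @mpp.  Returns 0 on success or a
 * negative errno.
 */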
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -EOPNOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -EOPNOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);

	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = &sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);
	else
		mesh_fast_tx_flush_addr(sdata, dst);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}


/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
					   sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, mpath->sn,
					   WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

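/* Tear down a path entry: mark it deleted, drop it from the gate list, stop
 * its timer, flush any queued frames, and free it after an RCU grace period.
 */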
static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	timer_shutdown_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

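/* Unlink a path from the walk list and hash table, invalidate any fast xmit
 * state derived from it, and free it.  Callers must hold tbl->walk_lock.
 */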
static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	if (tbl == &mpath->sdata->u.mesh.mpp_paths)
		mesh_fast_tx_flush_addr(mpath->sdata, mpath->dst);
	else
		mesh_fast_tx_flush_mpath(mpath);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

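/* Remove every proxy path entry whose proxy node matches @proxy, e.g. when
 * the mesh path towards that proxy is deleted.
 */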
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * @sdata: interface data to match
 *
 * This function deletes both mesh paths and mesh portal (proxy) paths.
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(&sdata->u.mesh.mesh_paths);
	table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful, or -ENXIO if no entry exists for @addr
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}


/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, or -EHOSTUNREACH if no active gate was found
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = &sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	ieee80211_free_txskb(&sdata->local->hw, skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq, *tmp;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
		if (ether_addr_equal(mpath->dst, preq->dst)) {
			list_del(&preq->list);
			kfree(preq);
			--ifmsh->preq_queue_len;
		}
	}
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must not be
 * called with that lock already held.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	mesh_fast_tx_flush_mpath(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_init(&sdata->u.mesh.mesh_paths);
	mesh_table_init(&sdata->u.mesh.mpp_paths);
	mesh_fast_tx_init(sdata);
}

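/* Walk one table and delete every path that is neither being resolved nor
 * fixed and that expired more than MESH_PATH_EXPIRE ago.
 */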
static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_fast_tx_deinit(sdata);
	mesh_table_free(&sdata->u.mesh.mesh_paths);
	mesh_table_free(&sdata->u.mesh.mpp_paths);
}