GitHub Repository: torvalds/linux
Path: blob/master/net/mac80211/mesh_hwmp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Copyright (C) 2019, 2021-2023 Intel Corporation
 * Author: Luis Carlos Cobo <[email protected]>
 */

#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/unaligned.h>
#include "wme.h"
#include "mesh.h"

#define TEST_FRAME_LEN	8192
#define MAX_METRIC	0xffffffff
#define ARITH_SHIFT	8
#define LINK_FAIL_THRESH 95

#define MAX_PREQ_QUEUE_LEN	64

static void mesh_queue_preq(struct mesh_path *, u8);

static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le32(preq_elem + offset);
}

static inline u16 u16_field_get(const u8 *preq_elem, int offset, bool ae)
{
	if (ae)
		offset += 6;
	return get_unaligned_le16(preq_elem + offset);
}

/* HWMP IE processing macros */
#define AE_F			(1<<6)
#define AE_F_SET(x)		(*x & AE_F)
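/*
 * The AE (address extension) bit in the element's flags field indicates that
 * an external (proxied) address is carried in the element.  When it is set,
 * every field that follows the extension address is shifted by ETH_ALEN (6)
 * octets, which is why u32_field_get()/u16_field_get() and the offset macros
 * below take the AE bit into account.
 */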
#define PREQ_IE_FLAGS(x)	(*(x))
#define PREQ_IE_HOPCOUNT(x)	(*(x + 1))
#define PREQ_IE_TTL(x)		(*(x + 2))
#define PREQ_IE_PREQ_ID(x)	u32_field_get(x, 3, 0)
#define PREQ_IE_ORIG_ADDR(x)	(x + 7)
#define PREQ_IE_ORIG_SN(x)	u32_field_get(x, 13, 0)
#define PREQ_IE_LIFETIME(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREQ_IE_METRIC(x)	u32_field_get(x, 21, AE_F_SET(x))
#define PREQ_IE_TARGET_F(x)	(*(AE_F_SET(x) ? x + 32 : x + 26))
#define PREQ_IE_TARGET_ADDR(x)	(AE_F_SET(x) ? x + 33 : x + 27)
#define PREQ_IE_TARGET_SN(x)	u32_field_get(x, 33, AE_F_SET(x))


#define PREP_IE_FLAGS(x)	PREQ_IE_FLAGS(x)
#define PREP_IE_HOPCOUNT(x)	PREQ_IE_HOPCOUNT(x)
#define PREP_IE_TTL(x)		PREQ_IE_TTL(x)
#define PREP_IE_ORIG_ADDR(x)	(AE_F_SET(x) ? x + 27 : x + 21)
#define PREP_IE_ORIG_SN(x)	u32_field_get(x, 27, AE_F_SET(x))
#define PREP_IE_LIFETIME(x)	u32_field_get(x, 13, AE_F_SET(x))
#define PREP_IE_METRIC(x)	u32_field_get(x, 17, AE_F_SET(x))
#define PREP_IE_TARGET_ADDR(x)	(x + 3)
#define PREP_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)

#define PERR_IE_TTL(x)		(*(x))
#define PERR_IE_TARGET_FLAGS(x)	(*(x + 2))
#define PERR_IE_TARGET_ADDR(x)	(x + 3)
#define PERR_IE_TARGET_SN(x)	u32_field_get(x, 9, 0)
#define PERR_IE_TARGET_RCODE(x)	u16_field_get(x, 13, 0)

#define MSEC_TO_TU(x) (x*1000/1024)
#define SN_GT(x, y) ((s32)(y - x) < 0)
#define SN_LT(x, y) ((s32)(x - y) < 0)
#define MAX_SANE_SN_DELTA 32

static inline u32 SN_DELTA(u32 x, u32 y)
{
	return x >= y ? x - y : y - x;
}
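/*
 * HWMP sequence numbers are compared with SN_GT()/SN_LT() using signed 32-bit
 * wraparound arithmetic, so comparisons stay correct when the counter wraps
 * around 2^32.  SN_DELTA() together with MAX_SANE_SN_DELTA catches the other
 * kind of discontinuity: a jump far larger than normal, which is treated as
 * the peer having rebooted or restarted.
 */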

#define net_traversal_jiffies(s) \
	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
#define default_lifetime(s) \
	MSEC_TO_TU(s->u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout)
#define min_preq_int_jiff(s) \
	(msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval))
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
#define root_path_confirmation_jiffies(s) \
	msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)

enum mpath_frame_type {
	MPATH_PREQ = 0,
	MPATH_PREP,
	MPATH_PERR,
	MPATH_RANN
};
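/*
 * The four values above correspond to the HWMP elements carried in mesh path
 * selection action frames: Path Request, Path Reply, Path Error and Root
 * Announcement.
 */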

static const u8 broadcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
				  const u8 *orig_addr, u32 orig_sn,
				  u8 target_flags, const u8 *target,
				  u32 target_sn, const u8 *da,
				  u8 hop_count, u8 ttl,
				  u32 lifetime, u32 metric, u32 preq_id,
				  struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetofend(struct ieee80211_mgmt,
				  u.action.u.mesh_action);

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 37); /* max HWMP IE */
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = skb_put_zero(skb, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;

	switch (action) {
	case MPATH_PREQ:
		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	case MPATH_RANN:
		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
		ie_len = sizeof(struct ieee80211_rann_ie);
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_RANN;
		break;
	default:
		kfree_skb(skb);
		return -EOPNOTSUPP;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREP) {
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else {
		if (action == MPATH_PREQ) {
			put_unaligned_le32(preq_id, pos);
			pos += 4;
		}
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}
	put_unaligned_le32(lifetime, pos); /* interval for RANN */
	pos += 4;
	put_unaligned_le32(metric, pos);
	pos += 4;
	if (action == MPATH_PREQ) {
		*pos++ = 1; /* destination count */
		*pos++ = target_flags;
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else if (action == MPATH_PREP) {
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}

	ieee80211_tx_skb(sdata, skb);
	return 0;
}
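/*
 * Element layout produced above when no address extension is used:
 *
 *   PREQ (37 octets): flags | hopcount | ttl | preq_id | orig_addr |
 *                     orig_sn | lifetime | metric | target count (1) |
 *                     target_flags | target_addr | target_sn
 *   PREP (31 octets): flags | hopcount | ttl | target_addr | target_sn |
 *                     lifetime | metric | orig_addr | orig_sn
 *
 * which matches the fixed element lengths checked in
 * mesh_rx_path_sel_frame().
 */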


/* Headroom is not adjusted. Caller should ensure that skb has sufficient
 * headroom in case the frame is encrypted. */
static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
					  struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
	skb->priority = 7;

	info->control.vif = &sdata->vif;
	info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
	ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
}

/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @sdata: local mesh subif
 * @ttl: allowed remaining hops
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 *
 * Note: This function may be called with driver locks taken that the driver
 * also acquires in the TX path. To avoid a deadlock we don't transmit the
 * frame directly but add it to the pending queue instead.
 *
 * Returns: 0 on success
 */
int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
		       u8 ttl, const u8 *target, u32 target_sn,
		       u16 target_rcode, const u8 *ra)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetofend(struct ieee80211_mgmt,
				  u.action.u.mesh_action);

	if (time_before(jiffies, ifmsh->next_perr))
		return -EAGAIN;

	skb = dev_alloc_skb(local->tx_headroom +
			    IEEE80211_ENCRYPT_HEADROOM +
			    IEEE80211_ENCRYPT_TAILROOM +
			    hdr_len +
			    2 + 15 /* PERR IE */);
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom + IEEE80211_ENCRYPT_HEADROOM);
	mgmt = skb_put_zero(skb, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
	ie_len = 15;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* ttl */
	*pos++ = ttl;
	/* number of destinations */
	*pos++ = 1;
	/* Flags field has AE bit only as defined in
	 * sec 8.4.2.117 IEEE802.11-2012
	 */
	*pos = 0;
	pos++;
	memcpy(pos, target, ETH_ALEN);
	pos += ETH_ALEN;
	put_unaligned_le32(target_sn, pos);
	pos += 4;
	put_unaligned_le16(target_rcode, pos);

	/* see note in function header */
	prepare_frame_for_deferred_tx(sdata, skb);
	ifmsh->next_perr = TU_TO_EXP_TIME(
				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
	ieee80211_add_pending_skb(local, skb);
	return 0;
}
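/*
 * PERR transmission is rate limited: a new PERR is refused with -EAGAIN
 * until ifmsh->next_perr, which is pushed dot11MeshHWMPperrMinInterval TUs
 * into the future each time one is queued.
 */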

void ieee80211s_update_metric(struct ieee80211_local *local,
			      struct sta_info *sta,
			      struct ieee80211_tx_status *st)
{
	struct ieee80211_tx_info *txinfo = st->info;
	int failed;
	struct rate_info rinfo;

	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);

	/* moving average, scaled to 100.
	 * feed failure as 100 and success as 0
	 */
	ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, failed * 100);
	if (ewma_mesh_fail_avg_read(&sta->mesh->fail_avg) >
			LINK_FAIL_THRESH)
		mesh_plink_broken(sta);

	/* use rate info set by the driver directly if present */
	if (st->n_rates)
		rinfo = sta->deflink.tx_stats.last_rate_info;
	else
		sta_set_rate_info_tx(sta, &sta->deflink.tx_stats.last_rate, &rinfo);

	ewma_mesh_tx_rate_avg_add(&sta->mesh->tx_rate_avg,
				  cfg80211_calculate_bitrate(&rinfo));
}
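/*
 * The per-station failure EWMA and tx rate EWMA maintained above are the
 * inputs to airtime_link_metric_get() below; once the failure average
 * exceeds LINK_FAIL_THRESH (95%), the peer link is declared broken.
 */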

u32 airtime_link_metric_get(struct ieee80211_local *local,
			    struct sta_info *sta)
{
	/* This should be adjusted for each device */
	int device_constant = 1 << ARITH_SHIFT;
	int test_frame_len = TEST_FRAME_LEN << ARITH_SHIFT;
	int s_unit = 1 << ARITH_SHIFT;
	int rate, err;
	u32 tx_time, estimated_retx;
	u64 result;
	unsigned long fail_avg =
		ewma_mesh_fail_avg_read(&sta->mesh->fail_avg);

	if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
		return MAX_METRIC;

	/* Try to get rate based on HW/SW RC algorithm.
	 * Rate is returned in units of Kbps, correct this
	 * to comply with airtime calculation units
	 * Round up in case we get rate < 100Kbps
	 */
	rate = DIV_ROUND_UP(sta_get_expected_throughput(sta), 100);

	if (rate) {
		err = 0;
	} else {
		if (fail_avg > LINK_FAIL_THRESH)
			return MAX_METRIC;

		rate = ewma_mesh_tx_rate_avg_read(&sta->mesh->tx_rate_avg);
		if (WARN_ON(!rate))
			return MAX_METRIC;

		err = (fail_avg << ARITH_SHIFT) / 100;
	}

	/* bitrate is in units of 100 Kbps, while we need rate in units of
	 * 1Mbps. This will be corrected on tx_time computation.
	 */
	tx_time = (device_constant + 10 * test_frame_len / rate);
	estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
	result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
	return (u32)result;
}
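/*
 * Worked example of the computation above, using only the constants in this
 * file: with an expected throughput of 54 Mbps, rate is 540 (units of
 * 100 Kbps) and err is 0, so
 *
 *   tx_time        = (1 << 8) + 10 * (8192 << 8) / 540  = 39092
 *   estimated_retx = (1 << 16) / ((1 << 8) - 0)         = 256
 *   result         = (39092 * 256) >> 16                = 152
 *
 * i.e. roughly the airtime needed for the TEST_FRAME_LEN test frame at that
 * rate, inflated by the expected number of retransmissions when err > 0.
 */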

/* Check that the first metric is at least 10% better than the second one */
static bool is_metric_better(u32 x, u32 y)
{
	return (x < y) && (x < (y - x / 10));
}
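/*
 * Example: is_metric_better(90, 100) is true (90 < 100 - 9), whereas
 * is_metric_better(95, 100) is false (better, but not by ~10%).  This
 * hysteresis avoids flapping between next hops with nearly equal metrics.
 */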

/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 * @action: type of hwmp ie
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
			       struct ieee80211_mgmt *mgmt,
			       const u8 *hwmp_ie, enum mpath_frame_type action)
{
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	const u8 *orig_addr, *ta;
	u32 orig_sn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool flush_mpath = false;
	bool process = true;
	u8 hopcount;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		hopcount = PREQ_IE_HOPCOUNT(hwmp_ie) + 1;
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the target in the
		 * Path Request. We divert from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_TARGET_ADDR(hwmp_ie);
		orig_sn = PREP_IE_TARGET_SN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		hopcount = PREP_IE_HOPCOUNT(hwmp_ie) + 1;
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (ether_addr_equal(orig_addr, sdata->vif.addr)) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(sdata, orig_addr);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
				 (mpath->flags & MESH_PATH_SN_VALID)) {
				if (SN_GT(mpath->sn, orig_sn) ||
				    (mpath->sn == orig_sn &&
				     (rcu_access_pointer(mpath->next_hop) !=
						      sta ?
					      !is_metric_better(new_metric, mpath->metric) :
					      new_metric >= mpath->metric))) {
					process = false;
					fresh_info = false;
				}
			} else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
				bool have_sn, newer_sn, bounced;

				have_sn = mpath->flags & MESH_PATH_SN_VALID;
				newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
				bounced = have_sn &&
					  (SN_DELTA(orig_sn, mpath->sn) >
							MAX_SANE_SN_DELTA);

				if (!have_sn || newer_sn) {
					/* if SN is newer than what we had
					 * then we can take it */;
				} else if (bounced) {
					/* if SN is way different than what
					 * we had then assume the other side
					 * rebooted or restarted */;
				} else {
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mpath = mesh_path_add(sdata, orig_addr);
			if (IS_ERR(mpath)) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			if (rcu_access_pointer(mpath->next_hop) != sta) {
				mpath->path_change_count++;
				flush_mpath = true;
			}
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_SN_VALID;
			mpath->metric = new_metric;
			mpath->sn = orig_sn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mpath->hop_count = hopcount;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			if (flush_mpath)
				mesh_fast_tx_flush_mpath(mpath);
			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
			/* init it at a low value - 0 start is tricky */
			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
			mesh_path_tx_pending(mpath);
			/* draft says preq_id should be saved to, but there does
			 * not seem to be any use for it, skipping by now
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (ether_addr_equal(orig_addr, ta))
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(sdata, ta);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
			    ((mpath->flags & MESH_PATH_ACTIVE) &&
			     ((rcu_access_pointer(mpath->next_hop) != sta ?
				       !is_metric_better(last_hop_metric, mpath->metric) :
				       last_hop_metric > mpath->metric))))
				fresh_info = false;
		} else {
			mpath = mesh_path_add(sdata, ta);
			if (IS_ERR(mpath)) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			if (rcu_access_pointer(mpath->next_hop) != sta) {
				mpath->path_change_count++;
				flush_mpath = true;
			}
			mesh_path_assign_nexthop(mpath, sta);
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ? mpath->exp_time : exp_time;
			mpath->hop_count = 1;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			if (flush_mpath)
				mesh_fast_tx_flush_mpath(mpath);
			ewma_mesh_fail_avg_init(&sta->mesh->fail_avg);
			/* init it at a low value - 0 start is tricky */
			ewma_mesh_fail_avg_add(&sta->mesh->fail_avg, 1);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}

static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    const u8 *preq_elem, u32 orig_metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath = NULL;
	const u8 *target_addr, *orig_addr;
	const u8 *da;
	u8 target_flags, ttl, flags;
	u32 orig_sn, target_sn, lifetime, target_metric = 0;
	bool reply = false;
	bool forward = true;
	bool root_is_gate;

	/* Update target SN, if present */
	target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
	orig_addr = PREQ_IE_ORIG_ADDR(preq_elem);
	target_sn = PREQ_IE_TARGET_SN(preq_elem);
	orig_sn = PREQ_IE_ORIG_SN(preq_elem);
	target_flags = PREQ_IE_TARGET_F(preq_elem);
	/* Proactive PREQ gate announcements */
	flags = PREQ_IE_FLAGS(preq_elem);
	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);

	mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);

	if (ether_addr_equal(target_addr, sdata->vif.addr)) {
		mhwmp_dbg(sdata, "PREQ is for us\n");
		forward = false;
		reply = true;
		target_metric = 0;

		if (SN_GT(target_sn, ifmsh->sn))
			ifmsh->sn = target_sn;

		if (time_after(jiffies, ifmsh->last_sn_update +
					net_traversal_jiffies(sdata)) ||
		    time_before(jiffies, ifmsh->last_sn_update)) {
			++ifmsh->sn;
			ifmsh->last_sn_update = jiffies;
		}
		target_sn = ifmsh->sn;
	} else if (is_broadcast_ether_addr(target_addr) &&
		   (target_flags & IEEE80211_PREQ_TO_FLAG)) {
		rcu_read_lock();
		mpath = mesh_path_lookup(sdata, orig_addr);
		if (mpath) {
			if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
				reply = true;
				target_addr = sdata->vif.addr;
				target_sn = ++ifmsh->sn;
				target_metric = 0;
				ifmsh->last_sn_update = jiffies;
			}
			if (root_is_gate)
				mesh_path_add_gate(mpath);
		}
		rcu_read_unlock();
	} else if (ifmsh->mshcfg.dot11MeshForwarding) {
		rcu_read_lock();
		mpath = mesh_path_lookup(sdata, target_addr);
		if (mpath) {
			if ((!(mpath->flags & MESH_PATH_SN_VALID)) ||
			    SN_LT(mpath->sn, target_sn)) {
				mpath->sn = target_sn;
				mpath->flags |= MESH_PATH_SN_VALID;
			} else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
				   (mpath->flags & MESH_PATH_ACTIVE)) {
				reply = true;
				target_metric = mpath->metric;
				target_sn = mpath->sn;
				/* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/
				target_flags |= IEEE80211_PREQ_TO_FLAG;
			}
		}
		rcu_read_unlock();
	} else {
		forward = false;
	}

	if (reply) {
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		ttl = ifmsh->mshcfg.element_ttl;
		if (ttl != 0) {
			mhwmp_dbg(sdata, "replying to the PREQ\n");
			mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
					       orig_sn, 0, target_addr,
					       target_sn, mgmt->sa, 0, ttl,
					       lifetime, target_metric, 0,
					       sdata);
		} else {
			ifmsh->mshstats.dropped_frames_ttl++;
		}
	}

	if (forward) {
		u32 preq_id;
		u8 hopcount;

		ttl = PREQ_IE_TTL(preq_elem);
		lifetime = PREQ_IE_LIFETIME(preq_elem);
		if (ttl <= 1) {
			ifmsh->mshstats.dropped_frames_ttl++;
			return;
		}
		mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
		--ttl;
		preq_id = PREQ_IE_PREQ_ID(preq_elem);
		hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
		da = (mpath && mpath->is_root) ?
			mpath->rann_snd_addr : broadcast_addr;

		if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
			target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
			target_sn = PREQ_IE_TARGET_SN(preq_elem);
		}

		mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
				       orig_sn, target_flags, target_addr,
				       target_sn, da, hopcount, ttl, lifetime,
				       orig_metric, preq_id, sdata);
		if (!is_multicast_ether_addr(da))
			ifmsh->mshstats.fwded_unicast++;
		else
			ifmsh->mshstats.fwded_mcast++;
		ifmsh->mshstats.fwded_frames++;
	}
}


static inline struct sta_info *
next_hop_deref_protected(struct mesh_path *mpath)
{
	return rcu_dereference_protected(mpath->next_hop,
					 lockdep_is_held(&mpath->state_lock));
}


static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    const u8 *prep_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	const u8 *target_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 target_sn, orig_sn, lifetime;

	mhwmp_dbg(sdata, "received PREP from %pM\n",
		  PREP_IE_TARGET_ADDR(prep_elem));

	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	if (ether_addr_equal(orig_addr, sdata->vif.addr))
		/* destination, no forwarding required */
		return;

	if (!ifmsh->mshcfg.dot11MeshForwarding)
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, orig_addr);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
	target_sn = PREP_IE_TARGET_SN(prep_elem);
	orig_sn = PREP_IE_ORIG_SN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
			       target_addr, target_sn, next_hop, hopcount,
			       ttl, lifetime, metric, 0, sdata);
	rcu_read_unlock();

	sdata->u.mesh.mshstats.fwded_unicast++;
	sdata->u.mesh.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    const u8 *perr_elem)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	u8 ttl;
	const u8 *ta, *target_addr;
	u32 target_sn;
	u16 target_rcode;

	ta = mgmt->sa;
	ttl = PERR_IE_TTL(perr_elem);
	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		return;
	}
	ttl--;
	target_addr = PERR_IE_TARGET_ADDR(perr_elem);
	target_sn = PERR_IE_TARGET_SN(perr_elem);
	target_rcode = PERR_IE_TARGET_RCODE(perr_elem);

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, target_addr);
	if (mpath) {
		struct sta_info *sta;

		spin_lock_bh(&mpath->state_lock);
		sta = next_hop_deref_protected(mpath);
		if (mpath->flags & MESH_PATH_ACTIVE &&
		    ether_addr_equal(ta, sta->sta.addr) &&
		    !(mpath->flags & MESH_PATH_FIXED) &&
		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
		     SN_GT(target_sn, mpath->sn) || target_sn == 0)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			if (target_sn != 0)
				mpath->sn = target_sn;
			else
				mpath->sn += 1;
			spin_unlock_bh(&mpath->state_lock);
			if (!ifmsh->mshcfg.dot11MeshForwarding)
				goto endperr;
			mesh_path_error_tx(sdata, ttl, target_addr,
					   target_sn, target_rcode,
					   broadcast_addr);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
endperr:
	rcu_read_unlock();
}

static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    const struct ieee80211_rann_ie *rann)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct mesh_path *mpath;
	u8 ttl, flags, hopcount;
	const u8 *orig_addr;
	u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
	bool root_is_gate;

	ttl = rann->rann_ttl;
	flags = rann->rann_flags;
	root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
	orig_addr = rann->rann_addr;
	orig_sn = le32_to_cpu(rann->rann_seq);
	interval = le32_to_cpu(rann->rann_interval);
	hopcount = rann->rann_hopcount;
	hopcount++;
	orig_metric = le32_to_cpu(rann->rann_metric);

	/* Ignore our own RANNs */
	if (ether_addr_equal(orig_addr, sdata->vif.addr))
		return;

	mhwmp_dbg(sdata,
		  "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
		  orig_addr, mgmt->sa, root_is_gate);

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;

	mpath = mesh_path_lookup(sdata, orig_addr);
	if (!mpath) {
		mpath = mesh_path_add(sdata, orig_addr);
		if (IS_ERR(mpath)) {
			rcu_read_unlock();
			sdata->u.mesh.mshstats.dropped_frames_no_route++;
			return;
		}
	}

	if (!(SN_LT(mpath->sn, orig_sn)) &&
	    !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
		rcu_read_unlock();
		return;
	}

	if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
	     (time_after(jiffies, mpath->last_preq_to_root +
				  root_path_confirmation_jiffies(sdata)) ||
	      time_before(jiffies, mpath->last_preq_to_root))) &&
	     !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
		mhwmp_dbg(sdata,
			  "time to refresh root mpath %pM\n",
			  orig_addr);
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
		mpath->last_preq_to_root = jiffies;
	}

	mpath->sn = orig_sn;
	mpath->rann_metric = new_metric;
	mpath->is_root = true;
	/* Recording RANNs sender address to send individually
	 * addressed PREQs destined for root mesh STA */
	memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);

	if (root_is_gate)
		mesh_path_add_gate(mpath);

	if (ttl <= 1) {
		ifmsh->mshstats.dropped_frames_ttl++;
		rcu_read_unlock();
		return;
	}
	ttl--;

	if (ifmsh->mshcfg.dot11MeshForwarding) {
		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
				       orig_sn, 0, NULL, 0, broadcast_addr,
				       hopcount, ttl, interval,
				       new_metric, 0, sdata);
	}

	rcu_read_unlock();
}


void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt, size_t len)
{
	struct ieee802_11_elems *elems;
	size_t baselen;
	u32 path_metric;
	struct sta_info *sta;

	/* need action_code */
	if (len < IEEE80211_MIN_ACTION_SIZE + 1)
		return;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	baselen = (u8 *) mgmt->u.action.u.mesh_action.variable - (u8 *) mgmt;
	elems = ieee802_11_parse_elems(mgmt->u.action.u.mesh_action.variable,
				       len - baselen, false, NULL);
	if (!elems)
		return;

	if (elems->preq) {
		if (elems->preq_len != 37)
			/* Right now we support just 1 destination and no AE */
			goto free;
		path_metric = hwmp_route_info_get(sdata, mgmt, elems->preq,
						  MPATH_PREQ);
		if (path_metric)
			hwmp_preq_frame_process(sdata, mgmt, elems->preq,
						path_metric);
	}
	if (elems->prep) {
		if (elems->prep_len != 31)
			/* Right now we support no AE */
			goto free;
		path_metric = hwmp_route_info_get(sdata, mgmt, elems->prep,
						  MPATH_PREP);
		if (path_metric)
			hwmp_prep_frame_process(sdata, mgmt, elems->prep,
						path_metric);
	}
	if (elems->perr) {
		if (elems->perr_len != 15)
			/* Right now we support only one destination per PERR */
			goto free;
		hwmp_perr_frame_process(sdata, mgmt, elems->perr);
	}
	if (elems->rann)
		hwmp_rann_frame_process(sdata, mgmt, elems->rann);
free:
	kfree(elems);
}
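/*
 * The fixed length checks above (37 for PREQ, 31 for PREP, 15 for PERR)
 * correspond to elements carrying a single target and no address extension,
 * i.e. exactly what mesh_path_sel_frame_tx() and mesh_path_error_tx()
 * generate.
 */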

/**
 * mesh_queue_preq - queue a PREQ to a given destination
 *
 * @mpath: mesh path to discover
 * @flags: special attributes of the PREQ to be sent
 *
 * Locking: the function must be called from within a rcu read lock block.
 *
 */
static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;

	preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
	if (!preq_node) {
		mhwmp_dbg(sdata, "could not allocate PREQ node\n");
		return;
	}

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (ifmsh->preq_queue_len == MAX_PREQ_QUEUE_LEN) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		if (printk_ratelimit())
			mhwmp_dbg(sdata, "PREQ node queue full\n");
		return;
	}

	spin_lock(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
		spin_unlock(&mpath->state_lock);
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		kfree(preq_node);
		return;
	}

	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
	preq_node->flags = flags;

	mpath->flags |= MESH_PATH_REQ_QUEUED;
	spin_unlock(&mpath->state_lock);

	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
	++ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
		wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);

	else if (time_before(jiffies, ifmsh->last_preq)) {
		/* avoid long wait if did not send preqs for a long time
		 * and jiffies wrapped around
		 */
		ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
		wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
	} else
		mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
						    min_preq_int_jiff(sdata));
}

/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, target_flags = 0;
	const u8 *da;
	u32 lifetime;

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (!ifmsh->preq_queue_len ||
		time_before(jiffies, ifmsh->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifmsh->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, preq_node->dst);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->flags & (MESH_PATH_DELETED | MESH_PATH_FIXED)) {
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}
	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifmsh->last_preq = jiffies;

	if (time_after(jiffies, ifmsh->last_sn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifmsh->last_sn_update)) {
		++ifmsh->sn;
		sdata->u.mesh.last_sn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.mesh.mshcfg.element_ttl;
	if (ttl == 0) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		target_flags |= IEEE80211_PREQ_TO_FLAG;
	else
		target_flags &= ~IEEE80211_PREQ_TO_FLAG;

	spin_unlock_bh(&mpath->state_lock);
	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
			       target_flags, mpath->dst, mpath->sn, da, 0,
			       ttl, lifetime, 0, ifmsh->preq_id++, sdata);

	spin_lock_bh(&mpath->state_lock);
	if (!(mpath->flags & MESH_PATH_DELETED))
		mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
	spin_unlock_bh(&mpath->state_lock);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}

/**
 * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
 *
 * @sdata: network subif the frame will be sent through
 * @skb: 802.11 frame to be sent
 *
 * Lookup next hop for given skb and start path discovery if no
 * forwarding information is found.
 *
 * Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
 * skb is freed here if no mpath could be allocated.
 */
int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
			 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mesh_path *mpath;
	struct sk_buff *skb_to_free = NULL;
	u8 *target_addr = hdr->addr3;

	/* Nulls are only sent to peers for PS and should be pre-addressed */
	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
		return 0;

	/* Allow injected packets to bypass mesh routing */
	if (info->control.flags & IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP)
		return 0;

	if (!mesh_nexthop_lookup(sdata, skb))
		return 0;

	/* no nexthop found, start resolving */
	mpath = mesh_path_lookup(sdata, target_addr);
	if (!mpath) {
		mpath = mesh_path_add(sdata, target_addr);
		if (IS_ERR(mpath)) {
			mesh_path_discard_frame(sdata, skb);
			return PTR_ERR(mpath);
		}
	}

	if (!(mpath->flags & MESH_PATH_RESOLVING) &&
	    mesh_path_sel_is_hwmp(sdata))
		mesh_queue_preq(mpath, PREQ_Q_F_START);

	if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
		skb_to_free = skb_dequeue(&mpath->frame_queue);

	info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
	ieee80211_set_qos_hdr(sdata, skb);
	skb_queue_tail(&mpath->frame_queue, skb);
	if (skb_to_free)
		mesh_path_discard_frame(sdata, skb_to_free);

	return -ENOENT;
}

/**
 * mesh_nexthop_lookup_nolearn - try to set next hop without path discovery
 * @skb: 802.11 frame to be sent
 * @sdata: network subif the frame will be sent through
 *
 * Check if the meshDA (addr3) of a unicast frame is a direct neighbor.
 * And if so, set the RA (addr1) to it to transmit to this node directly,
 * avoiding PREQ/PREP path discovery.
 *
 * Returns: 0 if the next hop was found and -ENOENT otherwise.
 */
static int mesh_nexthop_lookup_nolearn(struct ieee80211_sub_if_data *sdata,
				       struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct sta_info *sta;

	if (is_multicast_ether_addr(hdr->addr1))
		return -ENOENT;

	rcu_read_lock();
	sta = sta_info_get(sdata, hdr->addr3);

	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
		rcu_read_unlock();
		return -ENOENT;
	}
	rcu_read_unlock();

	memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
	memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
	return 0;
}

void mesh_path_refresh(struct ieee80211_sub_if_data *sdata,
		       struct mesh_path *mpath, const u8 *addr)
{
	if (mpath->flags & (MESH_PATH_REQ_QUEUED | MESH_PATH_FIXED |
			    MESH_PATH_RESOLVING))
		return;

	if (time_after(jiffies,
		       mpath->exp_time -
		       msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
	    (!addr || ether_addr_equal(sdata->vif.addr, addr)))
		mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
}

/**
 * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. Calling
 * this function is considered "using" the associated mpath, so preempt a path
 * refresh if this mpath expires soon.
 *
 * @sdata: network subif the frame will be sent through
 * @skb: 802.11 frame to be sent
 *
 * Returns: 0 if the next hop was found. Nonzero otherwise.
 */
int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata,
			struct sk_buff *skb)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	struct sta_info *next_hop;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	u8 *target_addr = hdr->addr3;

	if (ifmsh->mshcfg.dot11MeshNolearn &&
	    !mesh_nexthop_lookup_nolearn(sdata, skb))
		return 0;

	mpath = mesh_path_lookup(sdata, target_addr);
	if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE))
		return -ENOENT;

	mesh_path_refresh(sdata, mpath, hdr->addr4);

	next_hop = rcu_dereference(mpath->next_hop);
	if (next_hop) {
		memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sdata, next_hop, hdr);
		if (ieee80211_hw_check(&sdata->local->hw, SUPPORT_FAST_XMIT))
			mesh_fast_tx_cache(sdata, skb, mpath);
		return 0;
	}

	return -ENOENT;
}

void mesh_path_timer(struct timer_list *t)
{
	struct mesh_path *mpath = timer_container_of(mpath, t, timer);
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	int ret;

	if (sdata->local->quiescing)
		return;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->flags & MESH_PATH_RESOLVED ||
			(!(mpath->flags & MESH_PATH_RESOLVING))) {
		mpath->flags &= ~(MESH_PATH_RESOLVING | MESH_PATH_RESOLVED);
		spin_unlock_bh(&mpath->state_lock);
	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
		++mpath->discovery_retries;
		mpath->discovery_timeout *= 2;
		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
		spin_unlock_bh(&mpath->state_lock);
		mesh_queue_preq(mpath, 0);
	} else {
		mpath->flags &= ~(MESH_PATH_RESOLVING |
				  MESH_PATH_RESOLVED |
				  MESH_PATH_REQ_QUEUED);
		mpath->exp_time = jiffies;
		spin_unlock_bh(&mpath->state_lock);
		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
			ret = mesh_path_send_to_gates(mpath);
			if (ret)
				mhwmp_dbg(sdata, "no gate was reachable\n");
		} else
			mesh_path_flush_pending(mpath);
	}
}
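/*
 * On each timer expiry while still resolving, the discovery timeout is
 * doubled and another PREQ is queued, up to dot11MeshHWMPmaxPREQretries
 * attempts; after that the path gives up and pending frames are either
 * redirected towards a known mesh gate or flushed.
 */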

void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
	u8 flags, target_flags = 0;

	flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
			? RANN_FLAG_IS_GATE : 0;

	switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
	case IEEE80211_PROACTIVE_RANN:
		mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
				       ++ifmsh->sn, 0, NULL, 0, broadcast_addr,
				       0, ifmsh->mshcfg.element_ttl,
				       interval, 0, 0, sdata);
		break;
	case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
		flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
		fallthrough;
	case IEEE80211_PROACTIVE_PREQ_NO_PREP:
		interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
		target_flags |= IEEE80211_PREQ_TO_FLAG |
				IEEE80211_PREQ_USN_FLAG;
		mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
				       ++ifmsh->sn, target_flags,
				       (u8 *) broadcast_addr, 0, broadcast_addr,
				       0, ifmsh->mshcfg.element_ttl, interval,
				       0, ifmsh->preq_id++, sdata);
		break;
	default:
		mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
		return;
	}
}