Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/broadcom/brcm80211/brcmfmac/pno.c
178665 views
1
// SPDX-License-Identifier: ISC
2
/*
3
* Copyright (c) 2016 Broadcom
4
*/
5
#include <linux/netdevice.h>
6
#include <linux/gcd.h>
7
#include <net/cfg80211.h>
8
9
#include "core.h"
10
#include "debug.h"
11
#include "fwil.h"
12
#include "fwil_types.h"
13
#include "cfg80211.h"
14
#include "pno.h"
15
16
#define BRCMF_PNO_VERSION 2
17
#define BRCMF_PNO_REPEAT 4
18
#define BRCMF_PNO_FREQ_EXPO_MAX 3
19
#define BRCMF_PNO_IMMEDIATE_SCAN_BIT 3
20
#define BRCMF_PNO_ENABLE_BD_SCAN_BIT 5
21
#define BRCMF_PNO_ENABLE_ADAPTSCAN_BIT 6
22
#define BRCMF_PNO_REPORT_SEPARATELY_BIT 11
23
#define BRCMF_PNO_SCAN_INCOMPLETE 0
24
#define BRCMF_PNO_WPA_AUTH_ANY 0xFFFFFFFF
25
#define BRCMF_PNO_HIDDEN_BIT 2
26
#define BRCMF_PNO_SCHED_SCAN_PERIOD 30
27
28
#define BRCMF_PNO_MAX_BUCKETS 16
29
#define GSCAN_BATCH_NO_THR_SET 101
30
#define GSCAN_RETRY_THRESHOLD 3
31
32
/* Per-device PNO (preferred network offload) state.
 * Each stored scheduled-scan request maps to one g-scan bucket in firmware.
 */
struct brcmf_pno_info {
	/* number of slots currently used in @reqs */
	int n_reqs;
	/* stored scheduled-scan requests, one per g-scan bucket */
	struct cfg80211_sched_scan_request *reqs[BRCMF_PNO_MAX_BUCKETS];
	/* protects @n_reqs and @reqs */
	struct mutex req_lock;
};
37
38
#define ifp_to_pno(_ifp) ((_ifp)->drvr->config->pno)
39
40
static int brcmf_pno_store_request(struct brcmf_pno_info *pi,
41
struct cfg80211_sched_scan_request *req)
42
{
43
if (WARN(pi->n_reqs == BRCMF_PNO_MAX_BUCKETS,
44
"pno request storage full\n"))
45
return -ENOSPC;
46
47
#if defined(__linux__)
48
brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
49
#elif defined(__FreeBSD__)
50
brcmf_dbg(SCAN, "reqid=%ju\n", (uintmax_t)req->reqid);
51
#endif
52
mutex_lock(&pi->req_lock);
53
pi->reqs[pi->n_reqs++] = req;
54
mutex_unlock(&pi->req_lock);
55
return 0;
56
}
57
58
/* Remove the stored request matching @reqid and compact the array.
 * Returns 0 on success or -ENOENT when the id is not found.
 */
static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
{
	int idx, err = 0;

	mutex_lock(&pi->req_lock);

	/* Nothing to do if we have no requests */
	if (pi->n_reqs == 0)
		goto done;

	/* locate the request carrying the given id */
	for (idx = 0; idx < pi->n_reqs; idx++) {
		if (pi->reqs[idx]->reqid == reqid)
			break;
	}
	if (WARN(idx == pi->n_reqs, "reqid not found\n")) {
		err = -ENOENT;
		goto done;
	}

#if defined(__linux__)
	brcmf_dbg(SCAN, "reqid=%llu\n", reqid);
#elif defined(__FreeBSD__)
	brcmf_dbg(SCAN, "reqid=%ju\n", (uintmax_t)reqid);
#endif
	pi->n_reqs--;

	/* shift the remaining entries down over the removed slot; loop body
	 * runs zero times when the removed entry was the last one
	 */
	for (; idx < pi->n_reqs; idx++)
		pi->reqs[idx] = pi->reqs[idx + 1];

done:
	mutex_unlock(&pi->req_lock);
	return err;
}
100
101
static int brcmf_pno_channel_config(struct brcmf_if *ifp,
102
struct brcmf_pno_config_le *cfg)
103
{
104
cfg->reporttype = 0;
105
cfg->flags = 0;
106
107
return brcmf_fil_iovar_data_set(ifp, "pfn_cfg", cfg, sizeof(*cfg));
108
}
109
110
/* Program base PNO parameters in firmware.
 *
 * @scan_freq: base scan period in seconds.
 * @mscan: number of scan results to batch; 0 disables batching.
 * @bestn: best-n results to keep per scan when batching.
 *
 * Returns 0 on success or a negative firmware/iovar error.
 */
static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
			    u32 mscan, u32 bestn)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_param_le pfn_param;
	u32 pfnmem;
	u16 flags;
	s32 err;

	memset(&pfn_param, 0, sizeof(pfn_param));
	pfn_param.version = cpu_to_le32(BRCMF_PNO_VERSION);

	/* start scanning immediately and adapt the scan interval */
	flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
		BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
	pfn_param.repeat = BRCMF_PNO_REPEAT;
	pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;

	/* base period between pno scans */
	pfn_param.scan_freq = cpu_to_le32(scan_freq);

	if (mscan) {
		/* program requested bestn ... */
		err = brcmf_fil_iovar_int_set(ifp, "pfnmem", bestn);
		if (err < 0) {
			bphy_err(drvr, "failed to set pfnmem\n");
			goto exit;
		}
		/* ... then read back what firmware accepted and clamp mscan */
		err = brcmf_fil_iovar_int_get(ifp, "pfnmem", &pfnmem);
		if (err < 0) {
			bphy_err(drvr, "failed to get pfnmem\n");
			goto exit;
		}
		mscan = min_t(u32, mscan, pfnmem);
		pfn_param.mscan = mscan;
		pfn_param.bestn = bestn;
		flags |= BIT(BRCMF_PNO_ENABLE_BD_SCAN_BIT);
		brcmf_dbg(INFO, "mscan=%d, bestn=%d\n", mscan, bestn);
	}

	pfn_param.flags = cpu_to_le16(flags);
	err = brcmf_fil_iovar_data_set(ifp, "pfn_set", &pfn_param,
				       sizeof(pfn_param));
	if (err)
		bphy_err(drvr, "pfn_set failed, err=%d\n", err);

exit:
	return err;
}
162
163
static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
164
{
165
struct brcmf_pub *drvr = ifp->drvr;
166
struct brcmf_pno_macaddr_le pfn_mac;
167
u8 *mac_addr = NULL;
168
u8 *mac_mask = NULL;
169
int err, i, ri;
170
171
for (ri = 0; ri < pi->n_reqs; ri++)
172
if (pi->reqs[ri]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
173
mac_addr = pi->reqs[ri]->mac_addr;
174
mac_mask = pi->reqs[ri]->mac_addr_mask;
175
break;
176
}
177
178
/* no random mac requested */
179
if (!mac_addr)
180
return 0;
181
182
pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
183
pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
184
185
memcpy(pfn_mac.mac, mac_addr, ETH_ALEN);
186
for (i = 0; i < ETH_ALEN; i++) {
187
pfn_mac.mac[i] &= mac_mask[i];
188
pfn_mac.mac[i] |= get_random_u8() & ~(mac_mask[i]);
189
}
190
/* Clear multi bit */
191
pfn_mac.mac[0] &= 0xFE;
192
/* Set locally administered */
193
pfn_mac.mac[0] |= 0x02;
194
195
#if defined(__linux__)
196
brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
197
pi->reqs[ri]->reqid, pfn_mac.mac);
198
#elif defined(__FreeBSD__)
199
brcmf_dbg(SCAN, "enabling random mac: reqid=%ju mac=%6D\n",
200
(uintmax_t)pi->reqs[ri]->reqid, pfn_mac.mac, ":");
201
#endif
202
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
203
sizeof(pfn_mac));
204
if (err)
205
bphy_err(drvr, "pfn_macaddr failed, err=%d\n", err);
206
207
return err;
208
}
209
210
static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
211
bool active)
212
{
213
struct brcmf_pub *drvr = ifp->drvr;
214
struct brcmf_pno_net_param_le pfn;
215
int err;
216
217
pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
218
pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
219
pfn.wsec = cpu_to_le32(0);
220
pfn.infra = cpu_to_le32(1);
221
pfn.flags = 0;
222
if (active)
223
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
224
pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
225
memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
226
227
brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active);
228
err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
229
if (err < 0)
230
bphy_err(drvr, "adding failed: err=%d\n", err);
231
return err;
232
}
233
234
static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
235
{
236
struct brcmf_pub *drvr = ifp->drvr;
237
struct brcmf_pno_bssid_le bssid_cfg;
238
int err;
239
240
memcpy(bssid_cfg.bssid, bssid, ETH_ALEN);
241
bssid_cfg.flags = 0;
242
243
brcmf_dbg(SCAN, "adding bssid=%pM\n", bssid);
244
err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg,
245
sizeof(bssid_cfg));
246
if (err < 0)
247
bphy_err(drvr, "adding failed: err=%d\n", err);
248
return err;
249
}
250
251
static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
252
struct cfg80211_sched_scan_request *req)
253
{
254
int i;
255
256
if (!ssid || !req->ssids || !req->n_ssids)
257
return false;
258
259
for (i = 0; i < req->n_ssids; i++) {
260
if (ssid->ssid_len == req->ssids[i].ssid_len) {
261
if (!strncmp(ssid->ssid, req->ssids[i].ssid,
262
ssid->ssid_len))
263
return true;
264
}
265
}
266
return false;
267
}
268
269
static int brcmf_pno_clean(struct brcmf_if *ifp)
270
{
271
struct brcmf_pub *drvr = ifp->drvr;
272
int ret;
273
274
/* Disable pfn */
275
ret = brcmf_fil_iovar_int_set(ifp, "pfn", 0);
276
if (ret == 0) {
277
/* clear pfn */
278
ret = brcmf_fil_iovar_data_set(ifp, "pfnclear", NULL, 0);
279
}
280
if (ret < 0)
281
bphy_err(drvr, "failed code %d\n", ret);
282
283
return ret;
284
}
285
286
static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
287
struct brcmf_pno_config_le *pno_cfg)
288
{
289
u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
290
u16 chan;
291
int i, err = 0;
292
293
for (i = 0; i < r->n_channels; i++) {
294
if (n_chan >= BRCMF_NUMCHANNELS) {
295
err = -ENOSPC;
296
goto done;
297
}
298
chan = r->channels[i]->hw_value;
299
brcmf_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan);
300
pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan);
301
}
302
/* return number of channels */
303
err = n_chan;
304
done:
305
pno_cfg->channel_num = cpu_to_le32(n_chan);
306
return err;
307
}
308
309
/* Build the firmware g-scan bucket configuration from all stored requests.
 *
 * @pi: pno state holding the scheduled scan requests.
 * @pno_cfg: filled with the combined channel list of all requests.
 * @buckets: on success, set to a kcalloc'ed bucket array the caller
 *           must kfree; set to NULL on entry.
 * @scan_freq: set to the base scan period (gcd of all request intervals,
 *             clamped to BRCMF_PNO_SCHED_SCAN_MIN_PERIOD).
 *
 * Return: number of buckets (== pi->n_reqs) on success, -ENODATA when no
 * requests are stored, -ENOMEM or the channel-config error otherwise.
 */
static int brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi,
				   struct brcmf_pno_config_le *pno_cfg,
				   struct brcmf_gscan_bucket_config **buckets,
				   u32 *scan_freq)
{
	struct cfg80211_sched_scan_request *sr;
	struct brcmf_gscan_bucket_config *fw_buckets;
	int i, err, chidx;

	brcmf_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs);
	if (WARN_ON(!pi->n_reqs))
		return -ENODATA;

	/*
	 * actual scan period is determined using gcd() for each
	 * scheduled scan period.
	 */
	*scan_freq = pi->reqs[0]->scan_plans[0].interval;
	for (i = 1; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		*scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq);
	}
	if (*scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
		brcmf_dbg(SCAN, "scan period too small, using minimum\n");
		*scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
	}

	*buckets = NULL;
	fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL);
	if (!fw_buckets)
		return -ENOMEM;

	memset(pno_cfg, 0, sizeof(*pno_cfg));
	for (i = 0; i < pi->n_reqs; i++) {
		sr = pi->reqs[i];
		/* chidx is the running total of channels after this
		 * request; the bucket covers up to index chidx - 1
		 */
		chidx = brcmf_pno_get_bucket_channels(sr, pno_cfg);
		if (chidx < 0) {
			err = chidx;
			goto fail;
		}
		fw_buckets[i].bucket_end_index = chidx - 1;
		/* bucket period expressed as a multiple of the base period */
		fw_buckets[i].bucket_freq_multiple =
			sr->scan_plans[0].interval / *scan_freq;
		/* assure period is non-zero */
		if (!fw_buckets[i].bucket_freq_multiple)
			fw_buckets[i].bucket_freq_multiple = 1;
		fw_buckets[i].flag = BRCMF_PNO_REPORT_NO_BATCH;
	}

	/* dump the bucket table when scan debugging is enabled */
	if (BRCMF_SCAN_ON()) {
		brcmf_err("base period=%u\n", *scan_freq);
		for (i = 0; i < pi->n_reqs; i++) {
			brcmf_err("[%d] period %u max %u repeat %u flag %x idx %u\n",
				  i, fw_buckets[i].bucket_freq_multiple,
				  le16_to_cpu(fw_buckets[i].max_freq_multiple),
				  fw_buckets[i].repeat, fw_buckets[i].flag,
				  fw_buckets[i].bucket_end_index);
		}
	}
	*buckets = fw_buckets;
	return pi->n_reqs;

fail:
	kfree(fw_buckets);
	return err;
}
375
376
/* Program every match set of every stored request into firmware.
 *
 * Return: 0 on success or the first negative error from adding an
 * ssid/bssid entry.
 */
static int brcmf_pno_config_networks(struct brcmf_if *ifp,
				     struct brcmf_pno_info *pi)
{
	struct cfg80211_sched_scan_request *r;
	struct cfg80211_match_set *ms;
	bool active;
	int i, j, err = 0;

	for (i = 0; i < pi->n_reqs; i++) {
		r = pi->reqs[i];

		for (j = 0; j < r->n_match_sets; j++) {
			ms = &r->match_sets[j];
			if (ms->ssid.ssid_len) {
				/* probe actively when the ssid also appears
				 * in the request's ssid list (hidden network)
				 */
				active = brcmf_is_ssid_active(&ms->ssid, r);
				err = brcmf_pno_add_ssid(ifp, &ms->ssid,
							 active);
			}
			/* a match set may carry a bssid as well */
			if (!err && is_valid_ether_addr(ms->bssid))
				err = brcmf_pno_add_bssid(ifp, ms->bssid);

			if (err < 0)
				return err;
		}
	}
	return 0;
}
403
404
/* (Re)program the firmware with all currently stored scheduled scan
 * requests: wipe previous pfn state, configure base parameters, channel
 * list, g-scan buckets, optional random mac and the network match sets,
 * then enable pfn. On any failure after the initial clean, pfn state in
 * firmware is cleaned again so it is never left half-configured.
 *
 * Return: 0 on success or a negative error.
 */
static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
{
	struct brcmf_pub *drvr = ifp->drvr;
	struct brcmf_pno_info *pi;
	struct brcmf_gscan_config *gscan_cfg;
	struct brcmf_gscan_bucket_config *buckets;
	struct brcmf_pno_config_le pno_cfg;
	size_t gsz;
	u32 scan_freq;
	int err, n_buckets;

	pi = ifp_to_pno(ifp);
	/* buckets is kcalloc'ed by the callee and freed below */
	n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
					    &scan_freq);
	if (n_buckets < 0)
		return n_buckets;

	/* flexible-array sized allocation: header + n_buckets entries */
	gsz = struct_size(gscan_cfg, bucket, n_buckets);
	gscan_cfg = kzalloc(gsz, GFP_KERNEL);
	if (!gscan_cfg) {
		err = -ENOMEM;
		goto free_buckets;
	}

	/* clean up everything */
	err = brcmf_pno_clean(ifp);
	if (err < 0) {
		bphy_err(drvr, "failed error=%d\n", err);
		goto free_gscan;
	}

	/* configure pno */
	err = brcmf_pno_config(ifp, scan_freq, 0, 0);
	if (err < 0)
		goto free_gscan;

	err = brcmf_pno_channel_config(ifp, &pno_cfg);
	if (err < 0)
		goto clean;

	gscan_cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
	gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD;
	gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
	gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;

	gscan_cfg->count_of_channel_buckets = n_buckets;
	memcpy(gscan_cfg->bucket, buckets,
	       array_size(n_buckets, sizeof(*buckets)));

	err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);

	if (err < 0)
		goto clean;

	/* configure random mac */
	err = brcmf_pno_set_random(ifp, pi);
	if (err < 0)
		goto clean;

	err = brcmf_pno_config_networks(ifp, pi);
	if (err < 0)
		goto clean;

	/* Enable the PNO */
	err = brcmf_fil_iovar_int_set(ifp, "pfn", 1);

clean:
	/* undo partial firmware configuration on any failure above */
	if (err < 0)
		brcmf_pno_clean(ifp);
free_gscan:
	kfree(gscan_cfg);
free_buckets:
	kfree(buckets);
	return err;
}
479
480
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
481
struct cfg80211_sched_scan_request *req)
482
{
483
struct brcmf_pno_info *pi;
484
int ret;
485
486
#if defined(__linux__)
487
brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);
488
#elif defined(__FreeBSD__)
489
brcmf_dbg(TRACE, "reqid=%ju\n", (uintmax_t)req->reqid);
490
#endif
491
492
pi = ifp_to_pno(ifp);
493
ret = brcmf_pno_store_request(pi, req);
494
if (ret < 0)
495
return ret;
496
497
ret = brcmf_pno_config_sched_scans(ifp);
498
if (ret < 0) {
499
brcmf_pno_remove_request(pi, req->reqid);
500
if (pi->n_reqs)
501
(void)brcmf_pno_config_sched_scans(ifp);
502
return ret;
503
}
504
return 0;
505
}
506
507
/* Stop the scheduled scan identified by @reqid; firmware is cleaned and,
 * when other requests remain, reprogrammed with those.
 */
int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
{
	struct brcmf_pno_info *pi;
	int err;

#if defined(__linux__)
	brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
#elif defined(__FreeBSD__)
	brcmf_dbg(TRACE, "reqid=%ju\n", (uintmax_t)reqid);
#endif

	pi = ifp_to_pno(ifp);

	/* No PNO request */
	if (!pi->n_reqs)
		return 0;

	err = brcmf_pno_remove_request(pi, reqid);
	if (err)
		return err;

	brcmf_pno_clean(ifp);

	/* reprogram firmware with whatever requests are still stored */
	if (pi->n_reqs)
		(void)brcmf_pno_config_sched_scans(ifp);

	return 0;
}
535
536
int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg)
537
{
538
struct brcmf_pno_info *pi;
539
540
brcmf_dbg(TRACE, "enter\n");
541
pi = kzalloc(sizeof(*pi), GFP_KERNEL);
542
if (!pi)
543
return -ENOMEM;
544
545
cfg->pno = pi;
546
mutex_init(&pi->req_lock);
547
return 0;
548
}
549
550
void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg)
551
{
552
struct brcmf_pno_info *pi;
553
554
brcmf_dbg(TRACE, "enter\n");
555
pi = cfg->pno;
556
cfg->pno = NULL;
557
558
WARN_ON(pi->n_reqs);
559
mutex_destroy(&pi->req_lock);
560
kfree(pi);
561
}
562
563
/* Advertise scheduled-scan capabilities on the wiphy.
 *
 * @gscan: true when the firmware supports g-scan, allowing multiple
 *         concurrent scheduled scan requests (one per bucket).
 */
void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
{
	/* scheduled scan settings */
	wiphy->max_sched_scan_reqs = gscan ? BRCMF_PNO_MAX_BUCKETS : 1;
	wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
	wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
	wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
	wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
}
572
573
/* Map a firmware bucket index back to the request id stored in that slot.
 * Return: the request id, or 0 when @bucket is out of range.
 */
u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
{
	u64 reqid = 0;

	mutex_lock(&pi->req_lock);
	if (bucket < pi->n_reqs)
		reqid = pi->reqs[bucket]->reqid;
	mutex_unlock(&pi->req_lock);

	return reqid;
}
585
586
/* Build a bitmap of the buckets (stored requests) whose match sets match
 * the network described by @ni, by ssid or by bssid.
 *
 * Return: bitmap with BIT(i) set for each matching request index i.
 */
u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
			     struct brcmf_pno_net_info_le *ni)
{
	struct cfg80211_sched_scan_request *req;
	struct cfg80211_match_set *ms;
	u32 bucket_map = 0;
	int i, j;

	mutex_lock(&pi->req_lock);
	for (i = 0; i < pi->n_reqs; i++) {
		req = pi->reqs[i];

		if (!req->n_match_sets)
			continue;
		for (j = 0; j < req->n_match_sets; j++) {
			ms = &req->match_sets[j];
			/* match on ssid: same length and identical bytes */
			if (ms->ssid.ssid_len == ni->SSID_len &&
			    !memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
				bucket_map |= BIT(i);
				break;
			}
			/* otherwise match on a valid bssid */
			if (is_valid_ether_addr(ms->bssid) &&
			    !memcmp(ms->bssid, ni->bssid, ETH_ALEN)) {
				bucket_map |= BIT(i);
				break;
			}
		}
	}
	mutex_unlock(&pi->req_lock);

	return bucket_map;
}
617
618