freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath12k/dp.c
1
// SPDX-License-Identifier: BSD-3-Clause-Clear
2
/*
3
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
5
*/
6
7
#include <crypto/hash.h>
8
#include "core.h"
9
#include "dp_tx.h"
10
#include "hal_tx.h"
11
#include "hif.h"
12
#include "debug.h"
13
#include "dp_rx.h"
14
#include "peer.h"
15
#include "dp_mon.h"
16
17
static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
18
struct sk_buff *skb)
19
{
20
dev_kfree_skb_any(skb);
21
}
22
23
void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
24
{
25
struct ath12k_base *ab = ar->ab;
26
struct ath12k_peer *peer;
27
28
/* TODO: Any other peer specific DP cleanup */
29
30
spin_lock_bh(&ab->base_lock);
31
peer = ath12k_peer_find(ab, vdev_id, addr);
32
if (!peer) {
33
ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
34
addr, vdev_id);
35
spin_unlock_bh(&ab->base_lock);
36
return;
37
}
38
39
ath12k_dp_rx_peer_tid_cleanup(ar, peer);
40
crypto_free_shash(peer->tfm_mmic);
41
spin_unlock_bh(&ab->base_lock);
42
}
43
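/* Set up per-peer data path state: program the default REO destination
 * ring for the peer via WMI, create an RX TID queue for every TID
 * (0..IEEE80211_NUM_TIDS) and initialize the RX fragment context. If any
 * TID setup fails, the queues created so far are deleted again under
 * base_lock.
 */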
44
int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
45
{
46
struct ath12k_base *ab = ar->ab;
47
struct ath12k_peer *peer;
48
u32 reo_dest;
49
int ret = 0, tid;
50
51
/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
52
reo_dest = ar->dp.mac_id + 1;
53
ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
54
WMI_PEER_SET_DEFAULT_ROUTING,
55
DP_RX_HASH_ENABLE | (reo_dest << 1));
56
57
if (ret) {
58
ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
59
ret, addr, vdev_id);
60
return ret;
61
}
62
63
for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
64
ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
65
HAL_PN_TYPE_NONE);
66
if (ret) {
67
ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
68
tid, ret);
69
goto peer_clean;
70
}
71
}
72
73
ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
74
if (ret) {
75
ath12k_warn(ab, "failed to setup rx defrag context\n");
76
goto peer_clean;
77
}
78
79
/* TODO: Setup other peer specific resource used in data path */
80
81
return 0;
82
83
peer_clean:
84
spin_lock_bh(&ab->base_lock);
85
86
peer = ath12k_peer_find(ab, vdev_id, addr);
87
if (!peer) {
88
ath12k_warn(ab, "failed to find the peer to del rx tid\n");
89
spin_unlock_bh(&ab->base_lock);
90
return -ENOENT;
91
}
92
93
for (; tid >= 0; tid--)
94
ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
95
96
spin_unlock_bh(&ab->base_lock);
97
98
return ret;
99
}
100
101
void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
102
{
103
if (!ring->vaddr_unaligned)
104
return;
105
106
dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
107
ring->paddr_unaligned);
108
109
ring->vaddr_unaligned = NULL;
110
}
111
112
static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
113
{
114
int ext_group_num;
115
u8 mask = 1 << ring_num;
116
117
for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
118
ext_group_num++) {
119
if (mask & grp_mask[ext_group_num])
120
return ext_group_num;
121
}
122
123
return -ENOENT;
124
}
125
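/* Map a ring type/number to the ext interrupt group whose ring mask
 * includes it. Rings that are not serviced through an ext group (TCL
 * command, CE, etc.) return -ENOENT and end up without an MSI.
 */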
126
static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
127
enum hal_ring_type type, int ring_num)
128
{
129
const u8 *grp_mask;
130
131
switch (type) {
132
case HAL_WBM2SW_RELEASE:
133
if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
134
grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
135
ring_num = 0;
136
} else {
137
grp_mask = &ab->hw_params->ring_mask->tx[0];
138
}
139
break;
140
case HAL_REO_EXCEPTION:
141
grp_mask = &ab->hw_params->ring_mask->rx_err[0];
142
break;
143
case HAL_REO_DST:
144
grp_mask = &ab->hw_params->ring_mask->rx[0];
145
break;
146
case HAL_REO_STATUS:
147
grp_mask = &ab->hw_params->ring_mask->reo_status[0];
148
break;
149
case HAL_RXDMA_MONITOR_STATUS:
150
case HAL_RXDMA_MONITOR_DST:
151
grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
152
break;
153
case HAL_TX_MONITOR_DST:
154
grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
155
break;
156
case HAL_RXDMA_BUF:
157
grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
158
break;
159
case HAL_RXDMA_MONITOR_BUF:
160
case HAL_TCL_DATA:
161
case HAL_TCL_CMD:
162
case HAL_REO_CMD:
163
case HAL_SW2WBM_RELEASE:
164
case HAL_WBM_IDLE_LINK:
165
case HAL_TCL_STATUS:
166
case HAL_REO_REINJECT:
167
case HAL_CE_SRC:
168
case HAL_CE_DST:
169
case HAL_CE_DST_STATUS:
170
default:
171
return -ENOENT;
172
}
173
174
return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
175
}
176
177
static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
178
struct hal_srng_params *ring_params,
179
enum hal_ring_type type, int ring_num)
180
{
181
int msi_group_number, msi_data_count;
182
u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
183
int ret;
184
185
ret = ath12k_hif_get_user_msi_vector(ab, "DP",
186
&msi_data_count, &msi_data_start,
187
&msi_irq_start);
188
if (ret)
189
return;
190
191
msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
192
ring_num);
193
if (msi_group_number < 0) {
194
ath12k_dbg(ab, ATH12K_DBG_PCI,
195
"ring not part of an ext_group; ring_type: %d,ring_num %d",
196
type, ring_num);
197
ring_params->msi_addr = 0;
198
ring_params->msi_data = 0;
199
return;
200
}
201
202
if (msi_group_number > msi_data_count) {
203
ath12k_dbg(ab, ATH12K_DBG_PCI,
204
"multiple msi_groups share one msi, msi_group_num %d",
205
msi_group_number);
206
}
207
208
ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
209
210
ring_params->msi_addr = addr_lo;
211
ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
212
ring_params->msi_data = (msi_group_number % msi_data_count)
213
+ msi_data_start;
214
ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
215
}
216
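/* Allocate DMA-coherent memory for an SRNG, align the base to
 * HAL_RING_BASE_ALIGN, fill in MSI and interrupt threshold parameters
 * based on the ring type and register the ring with the HAL. On success
 * the HAL ring id is stored in ring->ring_id.
 */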
217
int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
218
enum hal_ring_type type, int ring_num,
219
int mac_id, int num_entries)
220
{
221
struct hal_srng_params params = { 0 };
222
int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
223
int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
224
int ret;
225
226
if (max_entries < 0 || entry_sz < 0)
227
return -EINVAL;
228
229
if (num_entries > max_entries)
230
num_entries = max_entries;
231
232
ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
233
ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
234
&ring->paddr_unaligned,
235
GFP_KERNEL);
236
if (!ring->vaddr_unaligned)
237
return -ENOMEM;
238
239
ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
240
ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
241
(unsigned long)ring->vaddr_unaligned);
242
243
params.ring_base_vaddr = ring->vaddr;
244
params.ring_base_paddr = ring->paddr;
245
params.num_entries = num_entries;
246
ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
247
248
switch (type) {
249
case HAL_REO_DST:
250
params.intr_batch_cntr_thres_entries =
251
HAL_SRNG_INT_BATCH_THRESHOLD_RX;
252
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
253
break;
254
case HAL_RXDMA_BUF:
255
case HAL_RXDMA_MONITOR_BUF:
256
case HAL_RXDMA_MONITOR_STATUS:
257
params.low_threshold = num_entries >> 3;
258
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
259
params.intr_batch_cntr_thres_entries = 0;
260
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
261
break;
262
case HAL_TX_MONITOR_DST:
263
params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
264
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
265
params.intr_batch_cntr_thres_entries = 0;
266
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
267
break;
268
case HAL_WBM2SW_RELEASE:
269
if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
270
params.intr_batch_cntr_thres_entries =
271
HAL_SRNG_INT_BATCH_THRESHOLD_TX;
272
params.intr_timer_thres_us =
273
HAL_SRNG_INT_TIMER_THRESHOLD_TX;
274
break;
275
}
276
/* fall through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */
277
fallthrough;
278
case HAL_REO_EXCEPTION:
279
case HAL_REO_REINJECT:
280
case HAL_REO_CMD:
281
case HAL_REO_STATUS:
282
case HAL_TCL_DATA:
283
case HAL_TCL_CMD:
284
case HAL_TCL_STATUS:
285
case HAL_WBM_IDLE_LINK:
286
case HAL_SW2WBM_RELEASE:
287
case HAL_RXDMA_DST:
288
case HAL_RXDMA_MONITOR_DST:
289
case HAL_RXDMA_MONITOR_DESC:
290
params.intr_batch_cntr_thres_entries =
291
HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
292
params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
293
break;
294
case HAL_RXDMA_DIR_BUF:
295
break;
296
default:
297
ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
298
return -EINVAL;
299
}
300
301
ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
302
if (ret < 0) {
303
ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
304
ret, ring_num);
305
return ret;
306
}
307
308
ring->ring_id = ret;
309
310
return 0;
311
}
312
313
static
314
u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
315
{
316
u32 bank_config = 0;
317
318
/* Only valid for raw frames with HW crypto enabled.
319
* With SW crypto, mac80211 sets key per packet
320
*/
321
if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
322
test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
323
bank_config |=
324
u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
325
HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
326
327
bank_config |= u32_encode_bits(arvif->tx_encap_type,
328
HAL_TX_BANK_CONFIG_ENCAP_TYPE);
329
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
330
u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
331
u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
332
333
/* only valid if idx_lookup_override is not set in tcl_data_cmd */
334
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
335
336
bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
337
HAL_TX_BANK_CONFIG_ADDRX_EN) |
338
u32_encode_bits(!!(arvif->hal_addr_search_flags &
339
HAL_TX_ADDRY_EN),
340
HAL_TX_BANK_CONFIG_ADDRY_EN);
341
342
bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
343
HAL_TX_BANK_CONFIG_MESH_EN) |
344
u32_encode_bits(arvif->vdev_id_check_en,
345
HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
346
347
bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);
348
349
return bank_config;
350
}
351
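/* Find a TX bank whose configuration matches this vif or claim the
 * first unused slot. The user count is adjusted under tx_bank_lock and
 * the bank register is programmed only when a slot is (re)configured.
 */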
352
static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
353
struct ath12k_dp *dp)
354
{
355
int bank_id = DP_INVALID_BANK_ID;
356
int i;
357
u32 bank_config;
358
bool configure_register = false;
359
360
/* convert vdev params into hal_tx_bank_config */
361
bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);
362
363
spin_lock_bh(&dp->tx_bank_lock);
364
/* TODO: implement using idr kernel framework*/
365
for (i = 0; i < dp->num_bank_profiles; i++) {
366
if (dp->bank_profiles[i].is_configured &&
367
(dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
368
bank_id = i;
369
goto inc_ref_and_return;
370
}
371
if (!dp->bank_profiles[i].is_configured ||
372
!dp->bank_profiles[i].num_users) {
373
bank_id = i;
374
goto configure_and_return;
375
}
376
}
377
378
if (bank_id == DP_INVALID_BANK_ID) {
379
spin_unlock_bh(&dp->tx_bank_lock);
380
ath12k_err(ab, "unable to find TX bank!");
381
return bank_id;
382
}
383
384
configure_and_return:
385
dp->bank_profiles[bank_id].is_configured = true;
386
dp->bank_profiles[bank_id].bank_config = bank_config;
387
configure_register = true;
388
inc_ref_and_return:
389
dp->bank_profiles[bank_id].num_users++;
390
spin_unlock_bh(&dp->tx_bank_lock);
391
392
if (configure_register)
393
ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);
394
395
ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
396
bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
397
dp->bank_profiles[bank_id].num_users);
398
399
return bank_id;
400
}
401
402
void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
403
{
404
spin_lock_bh(&dp->tx_bank_lock);
405
dp->bank_profiles[bank_id].num_users--;
406
spin_unlock_bh(&dp->tx_bank_lock);
407
}
408
409
static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
410
{
411
struct ath12k_dp *dp = &ab->dp;
412
413
kfree(dp->bank_profiles);
414
dp->bank_profiles = NULL;
415
}
416
417
static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
418
{
419
struct ath12k_dp *dp = &ab->dp;
420
u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
421
int i;
422
423
dp->num_bank_profiles = num_tcl_banks;
424
dp->bank_profiles = kmalloc_array(num_tcl_banks,
425
sizeof(struct ath12k_dp_tx_bank_profile),
426
GFP_KERNEL);
427
if (!dp->bank_profiles)
428
return -ENOMEM;
429
430
spin_lock_init(&dp->tx_bank_lock);
431
432
for (i = 0; i < num_tcl_banks; i++) {
433
dp->bank_profiles[i].is_configured = false;
434
dp->bank_profiles[i].num_users = 0;
435
}
436
437
return 0;
438
}
439
440
static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
441
{
442
struct ath12k_dp *dp = &ab->dp;
443
int i;
444
445
ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
446
ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
447
ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
448
ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
449
ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
450
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
451
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
452
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
453
}
454
ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
455
ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
456
ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
457
}
458
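/* Set up the SOC-level rings shared by all radios: SW2WBM release, TCL
 * command/status, per-TCL data and completion rings, the REO
 * reinject/exception/command/status rings and the RX release ring, then
 * program the REO hash routing map to spread RX across SW1-SW4.
 */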
459
static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
460
{
461
struct ath12k_dp *dp = &ab->dp;
462
const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
463
struct hal_srng *srng;
464
int i, ret, tx_comp_ring_num;
465
u32 ring_hash_map;
466
467
ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
468
HAL_SW2WBM_RELEASE, 0, 0,
469
DP_WBM_RELEASE_RING_SIZE);
470
if (ret) {
471
ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
472
ret);
473
goto err;
474
}
475
476
ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
477
DP_TCL_CMD_RING_SIZE);
478
if (ret) {
479
ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
480
goto err;
481
}
482
483
ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
484
0, 0, DP_TCL_STATUS_RING_SIZE);
485
if (ret) {
486
ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
487
goto err;
488
}
489
490
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
491
map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
492
tx_comp_ring_num = map[i].wbm_ring_num;
493
494
ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
495
HAL_TCL_DATA, i, 0,
496
DP_TCL_DATA_RING_SIZE);
497
if (ret) {
498
ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
499
i, ret);
500
goto err;
501
}
502
503
ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
504
HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
505
DP_TX_COMP_RING_SIZE);
506
if (ret) {
507
ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
508
tx_comp_ring_num, ret);
509
goto err;
510
}
511
}
512
513
ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
514
0, 0, DP_REO_REINJECT_RING_SIZE);
515
if (ret) {
516
ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
517
ret);
518
goto err;
519
}
520
521
ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
522
HAL_WBM2SW_REL_ERR_RING_NUM, 0,
523
DP_RX_RELEASE_RING_SIZE);
524
if (ret) {
525
ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
526
goto err;
527
}
528
529
ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
530
0, 0, DP_REO_EXCEPTION_RING_SIZE);
531
if (ret) {
532
ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
533
ret);
534
goto err;
535
}
536
537
ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
538
0, 0, DP_REO_CMD_RING_SIZE);
539
if (ret) {
540
ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
541
goto err;
542
}
543
544
srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
545
ath12k_hal_reo_init_cmd_ring(ab, srng);
546
547
ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
548
0, 0, DP_REO_STATUS_RING_SIZE);
549
if (ret) {
550
ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
551
goto err;
552
}
553
554
/* When hash based routing of rx packet is enabled, 32 entries to map
555
* the hash values to the ring will be configured. Each hash entry uses
556
* four bits to map to a particular ring. The ring mapping will be
557
* 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:SW5
558
* 8:SW6, 9:SW7, 10:SW8, 11:Not used.
559
*/
560
ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
561
HAL_HASH_ROUTING_RING_SW2 << 4 |
562
HAL_HASH_ROUTING_RING_SW3 << 8 |
563
HAL_HASH_ROUTING_RING_SW4 << 12 |
564
HAL_HASH_ROUTING_RING_SW1 << 16 |
565
HAL_HASH_ROUTING_RING_SW2 << 20 |
566
HAL_HASH_ROUTING_RING_SW3 << 24 |
567
HAL_HASH_ROUTING_RING_SW4 << 28;
568
569
ath12k_hal_reo_hw_setup(ab, ring_hash_map);
570
571
return 0;
572
573
err:
574
ath12k_dp_srng_common_cleanup(ab);
575
576
return ret;
577
}
578
579
static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
580
{
581
struct ath12k_dp *dp = &ab->dp;
582
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
583
int i;
584
585
for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
586
if (!slist[i].vaddr)
587
continue;
588
589
dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
590
slist[i].vaddr, slist[i].paddr);
591
slist[i].vaddr = NULL;
592
}
593
}
594
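/* Publish the link descriptor banks to the WBM idle list via scatter
 * buffers: allocate DMA-coherent scatter buffers, write each link
 * descriptor's cookie and physical address into them and hand the list
 * to the HAL together with the offset of the last entry.
 */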
595
static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
596
int size,
597
u32 n_link_desc_bank,
598
u32 n_link_desc,
599
u32 last_bank_sz)
600
{
601
struct ath12k_dp *dp = &ab->dp;
602
struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
603
struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
604
u32 n_entries_per_buf;
605
int num_scatter_buf, scatter_idx;
606
struct hal_wbm_link_desc *scatter_buf;
607
int align_bytes, n_entries;
608
dma_addr_t paddr;
609
int rem_entries;
610
int i;
611
int ret = 0;
612
u32 end_offset, cookie;
613
614
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
615
ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
616
num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
617
618
if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
619
return -EINVAL;
620
621
for (i = 0; i < num_scatter_buf; i++) {
622
slist[i].vaddr = dma_alloc_coherent(ab->dev,
623
HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
624
&slist[i].paddr, GFP_KERNEL);
625
if (!slist[i].vaddr) {
626
ret = -ENOMEM;
627
goto err;
628
}
629
}
630
631
scatter_idx = 0;
632
scatter_buf = slist[scatter_idx].vaddr;
633
rem_entries = n_entries_per_buf;
634
635
for (i = 0; i < n_link_desc_bank; i++) {
636
#if defined(__linux__)
637
align_bytes = link_desc_banks[i].vaddr -
638
link_desc_banks[i].vaddr_unaligned;
639
#elif defined(__FreeBSD__)
640
align_bytes = (uintptr_t)link_desc_banks[i].vaddr -
641
(uintptr_t)link_desc_banks[i].vaddr_unaligned;
642
#endif
643
n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
644
HAL_LINK_DESC_SIZE;
645
paddr = link_desc_banks[i].paddr;
646
while (n_entries) {
647
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
648
ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
649
n_entries--;
650
paddr += HAL_LINK_DESC_SIZE;
651
if (rem_entries) {
652
rem_entries--;
653
scatter_buf++;
654
continue;
655
}
656
657
rem_entries = n_entries_per_buf;
658
scatter_idx++;
659
scatter_buf = slist[scatter_idx].vaddr;
660
}
661
}
662
663
end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
664
sizeof(struct hal_wbm_link_desc);
665
ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
666
n_link_desc, end_offset);
667
668
return 0;
669
670
err:
671
ath12k_dp_scatter_idle_link_desc_cleanup(ab);
672
673
return ret;
674
}
675
676
static void
677
ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
678
struct dp_link_desc_bank *link_desc_banks)
679
{
680
int i;
681
682
for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
683
if (link_desc_banks[i].vaddr_unaligned) {
684
dma_free_coherent(ab->dev,
685
link_desc_banks[i].size,
686
link_desc_banks[i].vaddr_unaligned,
687
link_desc_banks[i].paddr_unaligned);
688
link_desc_banks[i].vaddr_unaligned = NULL;
689
}
690
}
691
}
692
693
static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
694
struct dp_link_desc_bank *desc_bank,
695
int n_link_desc_bank,
696
int last_bank_sz)
697
{
698
struct ath12k_dp *dp = &ab->dp;
699
int i;
700
int ret = 0;
701
int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
702
703
for (i = 0; i < n_link_desc_bank; i++) {
704
if (i == (n_link_desc_bank - 1) && last_bank_sz)
705
desc_sz = last_bank_sz;
706
707
desc_bank[i].vaddr_unaligned =
708
dma_alloc_coherent(ab->dev, desc_sz,
709
&desc_bank[i].paddr_unaligned,
710
GFP_KERNEL);
711
if (!desc_bank[i].vaddr_unaligned) {
712
ret = -ENOMEM;
713
goto err;
714
}
715
716
desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
717
HAL_LINK_DESC_ALIGN);
718
desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
719
((unsigned long)desc_bank[i].vaddr -
720
(unsigned long)desc_bank[i].vaddr_unaligned);
721
desc_bank[i].size = desc_sz;
722
}
723
724
return 0;
725
726
err:
727
ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
728
729
return ret;
730
}
731
732
void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
733
struct dp_link_desc_bank *desc_bank,
734
u32 ring_type, struct dp_srng *ring)
735
{
736
ath12k_dp_link_desc_bank_free(ab, desc_bank);
737
738
if (ring_type != HAL_RXDMA_MONITOR_DESC) {
739
ath12k_dp_srng_cleanup(ab, ring);
740
ath12k_dp_scatter_idle_link_desc_cleanup(ab);
741
}
742
}
743
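/* Estimate how many link descriptors are needed for MPDU queues and
 * TX/RX MSDU links, round the total up to the next power of two and
 * size the WBM idle link ring accordingly.
 */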
744
static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
745
{
746
struct ath12k_dp *dp = &ab->dp;
747
u32 n_mpdu_link_desc, n_mpdu_queue_desc;
748
u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
749
int ret = 0;
750
751
n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
752
HAL_NUM_MPDUS_PER_LINK_DESC;
753
754
n_mpdu_queue_desc = n_mpdu_link_desc /
755
HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
756
757
n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
758
DP_AVG_MSDUS_PER_FLOW) /
759
HAL_NUM_TX_MSDUS_PER_LINK_DESC;
760
761
n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
762
DP_AVG_MSDUS_PER_MPDU) /
763
HAL_NUM_RX_MSDUS_PER_LINK_DESC;
764
765
*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
766
n_tx_msdu_link_desc + n_rx_msdu_link_desc;
767
768
if (*n_link_desc & (*n_link_desc - 1))
769
*n_link_desc = 1 << fls(*n_link_desc);
770
771
ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
772
HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
773
if (ret) {
774
ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
775
return ret;
776
}
777
return ret;
778
}
779
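/* Allocate the link descriptor banks and make them available to the
 * hardware: through the scatter-buffer idle list when the total size
 * exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH (and the ring is not the
 * monitor desc ring), otherwise by writing each descriptor address
 * directly into the WBM idle link ring.
 */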
780
int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
781
struct dp_link_desc_bank *link_desc_banks,
782
u32 ring_type, struct hal_srng *srng,
783
u32 n_link_desc)
784
{
785
u32 tot_mem_sz;
786
u32 n_link_desc_bank, last_bank_sz;
787
u32 entry_sz, align_bytes, n_entries;
788
struct hal_wbm_link_desc *desc;
789
u32 paddr;
790
int i, ret;
791
u32 cookie;
792
793
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
794
tot_mem_sz += HAL_LINK_DESC_ALIGN;
795
796
if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
797
n_link_desc_bank = 1;
798
last_bank_sz = tot_mem_sz;
799
} else {
800
n_link_desc_bank = tot_mem_sz /
801
(DP_LINK_DESC_ALLOC_SIZE_THRESH -
802
HAL_LINK_DESC_ALIGN);
803
last_bank_sz = tot_mem_sz %
804
(DP_LINK_DESC_ALLOC_SIZE_THRESH -
805
HAL_LINK_DESC_ALIGN);
806
807
if (last_bank_sz)
808
n_link_desc_bank += 1;
809
}
810
811
if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
812
return -EINVAL;
813
814
ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
815
n_link_desc_bank, last_bank_sz);
816
if (ret)
817
return ret;
818
819
/* Setup link desc idle list for HW internal usage */
820
entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
821
tot_mem_sz = entry_sz * n_link_desc;
822
823
/* Set up the scatter desc list when the total memory requirement exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH */
824
if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
825
ring_type != HAL_RXDMA_MONITOR_DESC) {
826
ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
827
n_link_desc_bank,
828
n_link_desc,
829
last_bank_sz);
830
if (ret) {
831
ath12k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
832
ret);
833
goto fail_desc_bank_free;
834
}
835
836
return 0;
837
}
838
839
spin_lock_bh(&srng->lock);
840
841
ath12k_hal_srng_access_begin(ab, srng);
842
843
for (i = 0; i < n_link_desc_bank; i++) {
844
#if defined(__linux__)
845
align_bytes = link_desc_banks[i].vaddr -
846
link_desc_banks[i].vaddr_unaligned;
847
#elif defined(__FreeBSD__)
848
align_bytes = (uintptr_t)link_desc_banks[i].vaddr -
849
(uintptr_t)link_desc_banks[i].vaddr_unaligned;
850
#endif
851
n_entries = (link_desc_banks[i].size - align_bytes) /
852
HAL_LINK_DESC_SIZE;
853
paddr = link_desc_banks[i].paddr;
854
while (n_entries &&
855
(desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
856
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
857
ath12k_hal_set_link_desc_addr(desc,
858
cookie, paddr);
859
n_entries--;
860
paddr += HAL_LINK_DESC_SIZE;
861
}
862
}
863
864
ath12k_hal_srng_access_end(ab, srng);
865
866
spin_unlock_bh(&srng->lock);
867
868
return 0;
869
870
fail_desc_bank_free:
871
ath12k_dp_link_desc_bank_free(ab, link_desc_banks);
872
873
return ret;
874
}
875
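/* NAPI poll for one ext interrupt group: handle TX completions, RX
 * error and WBM error rings, the REO destination ring for this group,
 * RX/TX monitor destination rings, REO status and host2rxdma buffer
 * replenishment, all sharing a single budget.
 */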
876
int ath12k_dp_service_srng(struct ath12k_base *ab,
877
struct ath12k_ext_irq_grp *irq_grp,
878
int budget)
879
{
880
struct napi_struct *napi = &irq_grp->napi;
881
int grp_id = irq_grp->grp_id;
882
int work_done = 0;
883
int i = 0, j;
884
int tot_work_done = 0;
885
enum dp_monitor_mode monitor_mode;
886
u8 ring_mask;
887
888
while (i < ab->hw_params->max_tx_ring) {
889
if (ab->hw_params->ring_mask->tx[grp_id] &
890
BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
891
ath12k_dp_tx_completion_handler(ab, i);
892
i++;
893
}
894
895
if (ab->hw_params->ring_mask->rx_err[grp_id]) {
896
work_done = ath12k_dp_rx_process_err(ab, napi, budget);
897
budget -= work_done;
898
tot_work_done += work_done;
899
if (budget <= 0)
900
goto done;
901
}
902
903
if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
904
work_done = ath12k_dp_rx_process_wbm_err(ab,
905
napi,
906
budget);
907
budget -= work_done;
908
tot_work_done += work_done;
909
910
if (budget <= 0)
911
goto done;
912
}
913
914
if (ab->hw_params->ring_mask->rx[grp_id]) {
915
i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
916
work_done = ath12k_dp_rx_process(ab, i, napi,
917
budget);
918
budget -= work_done;
919
tot_work_done += work_done;
920
if (budget <= 0)
921
goto done;
922
}
923
924
if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
925
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
926
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
927
for (i = 0; i < ab->num_radios; i++) {
928
for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
929
int id = i * ab->hw_params->num_rxmda_per_pdev + j;
930
931
if (ring_mask & BIT(id)) {
932
work_done =
933
ath12k_dp_mon_process_ring(ab, id, napi, budget,
934
monitor_mode);
935
budget -= work_done;
936
tot_work_done += work_done;
937
938
if (budget <= 0)
939
goto done;
940
}
941
}
942
}
943
}
944
945
if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
946
monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
947
ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
948
for (i = 0; i < ab->num_radios; i++) {
949
for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
950
int id = i * ab->hw_params->num_rxmda_per_pdev + j;
951
952
if (ring_mask & BIT(id)) {
953
work_done =
954
ath12k_dp_mon_process_ring(ab, id, napi, budget,
955
monitor_mode);
956
budget -= work_done;
957
tot_work_done += work_done;
958
959
if (budget <= 0)
960
goto done;
961
}
962
}
963
}
964
}
965
966
if (ab->hw_params->ring_mask->reo_status[grp_id])
967
ath12k_dp_rx_process_reo_status(ab);
968
969
if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
970
struct ath12k_dp *dp = &ab->dp;
971
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
972
973
ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, 0,
974
ab->hw_params->hal_params->rx_buf_rbm,
975
true);
976
}
977
978
/* TODO: Implement handler for other interrupts */
979
980
done:
981
return tot_work_done;
982
}
983
984
void ath12k_dp_pdev_free(struct ath12k_base *ab)
985
{
986
int i;
987
988
del_timer_sync(&ab->mon_reap_timer);
989
990
for (i = 0; i < ab->num_radios; i++)
991
ath12k_dp_rx_pdev_free(ab, i);
992
}
993
994
void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
995
{
996
struct ath12k *ar;
997
struct ath12k_pdev_dp *dp;
998
int i;
999
1000
for (i = 0; i < ab->num_radios; i++) {
1001
ar = ab->pdevs[i].ar;
1002
dp = &ar->dp;
1003
dp->mac_id = i;
1004
atomic_set(&dp->num_tx_pending, 0);
1005
init_waitqueue_head(&dp->tx_empty_waitq);
1006
1007
/* TODO: Add any RXDMA setup required per pdev */
1008
}
1009
}
1010
1011
static void ath12k_dp_service_mon_ring(struct timer_list *t)
1012
{
1013
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
1014
int i;
1015
1016
for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
1017
ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
1018
ATH12K_DP_RX_MONITOR_MODE);
1019
1020
mod_timer(&ab->mon_reap_timer, jiffies +
1021
msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
1022
}
1023
1024
static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
1025
{
1026
if (ab->hw_params->rxdma1_enable)
1027
return;
1028
1029
timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
1030
}
1031
1032
int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
1033
{
1034
struct ath12k *ar;
1035
int ret;
1036
int i;
1037
1038
ret = ath12k_dp_rx_htt_setup(ab);
1039
if (ret)
1040
goto out;
1041
1042
ath12k_dp_mon_reap_timer_init(ab);
1043
1044
/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
1045
for (i = 0; i < ab->num_radios; i++) {
1046
ar = ab->pdevs[i].ar;
1047
ret = ath12k_dp_rx_pdev_alloc(ab, i);
1048
if (ret) {
1049
ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
1050
i);
1051
goto err;
1052
}
1053
ret = ath12k_dp_rx_pdev_mon_attach(ar);
1054
if (ret) {
1055
ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
1056
goto err;
1057
}
1058
}
1059
1060
return 0;
1061
err:
1062
ath12k_dp_pdev_free(ab);
1063
out:
1064
return ret;
1065
}
1066
1067
int ath12k_dp_htt_connect(struct ath12k_dp *dp)
1068
{
1069
struct ath12k_htc_svc_conn_req conn_req = {0};
1070
struct ath12k_htc_svc_conn_resp conn_resp = {0};
1071
int status;
1072
1073
conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
1074
conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
1075
1076
/* connect to control service */
1077
conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
1078
1079
status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
1080
&conn_resp);
1081
1082
if (status)
1083
return status;
1084
1085
dp->eid = conn_resp.eid;
1086
1087
return 0;
1088
}
1089
1090
static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
1091
{
1092
switch (arvif->vdev_type) {
1093
case WMI_VDEV_TYPE_STA:
1094
/* TODO: Verify the search type and flags since ast hash
1095
* is not part of peer mapv3
1096
*/
1097
arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
1098
arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
1099
break;
1100
case WMI_VDEV_TYPE_AP:
1101
case WMI_VDEV_TYPE_IBSS:
1102
arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
1103
arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
1104
break;
1105
case WMI_VDEV_TYPE_MONITOR:
1106
default:
1107
return;
1108
}
1109
}
1110
1111
void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
1112
{
1113
struct ath12k_base *ab = ar->ab;
1114
1115
arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
1116
u32_encode_bits(arvif->vdev_id,
1117
HTT_TCL_META_DATA_VDEV_ID) |
1118
u32_encode_bits(ar->pdev->pdev_id,
1119
HTT_TCL_META_DATA_PDEV_ID);
1120
1121
/* set HTT extension valid bit to 0 by default */
1122
arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
1123
1124
ath12k_dp_update_vdev_search(arvif);
1125
arvif->vdev_id_check_en = true;
1126
arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);
1127
1128
/* TODO: error path for bank id failure */
1129
if (arvif->bank_id == DP_INVALID_BANK_ID) {
1130
ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
1131
return;
1132
}
1133
}
1134
1135
static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
1136
{
1137
struct ath12k_rx_desc_info *desc_info, *tmp;
1138
struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
1139
struct ath12k_dp *dp = &ab->dp;
1140
struct sk_buff *skb;
1141
int i;
1142
1143
if (!dp->spt_info)
1144
return;
1145
1146
/* RX Descriptor cleanup */
1147
spin_lock_bh(&dp->rx_desc_lock);
1148
1149
list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
1150
list_del(&desc_info->list);
1151
skb = desc_info->skb;
1152
1153
if (!skb)
1154
continue;
1155
1156
dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
1157
skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
1158
dev_kfree_skb_any(skb);
1159
}
1160
1161
spin_unlock_bh(&dp->rx_desc_lock);
1162
1163
/* TX Descriptor cleanup */
1164
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
1165
spin_lock_bh(&dp->tx_desc_lock[i]);
1166
1167
list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
1168
list) {
1169
list_del(&tx_desc_info->list);
1170
skb = tx_desc_info->skb;
1171
1172
if (!skb)
1173
continue;
1174
1175
dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
1176
skb->len, DMA_TO_DEVICE);
1177
dev_kfree_skb_any(skb);
1178
}
1179
1180
spin_unlock_bh(&dp->tx_desc_lock[i]);
1181
}
1182
1183
/* unmap SPT pages */
1184
for (i = 0; i < dp->num_spt_pages; i++) {
1185
if (!dp->spt_info[i].vaddr)
1186
continue;
1187
1188
dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
1189
dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
1190
dp->spt_info[i].vaddr = NULL;
1191
}
1192
1193
kfree(dp->spt_info);
1194
}
1195
1196
static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
1197
{
1198
struct ath12k_dp *dp = &ab->dp;
1199
1200
if (!ab->hw_params->reoq_lut_support)
1201
return;
1202
1203
if (!dp->reoq_lut.vaddr)
1204
return;
1205
1206
dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
1207
dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
1208
dp->reoq_lut.vaddr = NULL;
1209
1210
ath12k_hif_write32(ab,
1211
HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
1212
}
1213
1214
void ath12k_dp_free(struct ath12k_base *ab)
1215
{
1216
struct ath12k_dp *dp = &ab->dp;
1217
int i;
1218
1219
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1220
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1221
1222
ath12k_dp_cc_cleanup(ab);
1223
ath12k_dp_reoq_lut_cleanup(ab);
1224
ath12k_dp_deinit_bank_profiles(ab);
1225
ath12k_dp_srng_common_cleanup(ab);
1226
1227
ath12k_dp_rx_reo_cmd_list_cleanup(ab);
1228
1229
for (i = 0; i < ab->hw_params->max_tx_ring; i++)
1230
kfree(dp->tx_ring[i].tx_status);
1231
1232
ath12k_dp_rx_free(ab);
1233
/* Deinit any SOC level resource */
1234
}
1235
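/* Program HW cookie conversion: write the CMEM base of the PPT and the
 * PPT/SPT bit widths into the REO and WBM cookie config registers,
 * enable the conversion-done indication and turn on conversion for the
 * WBM2SW rings selected by the hw params.
 */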
1236
void ath12k_dp_cc_config(struct ath12k_base *ab)
1237
{
1238
u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
1239
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
1240
u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
1241
u32 val = 0;
1242
1243
ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
1244
1245
val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
1246
HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
1247
u32_encode_bits(ATH12K_CC_PPT_MSB,
1248
HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
1249
u32_encode_bits(ATH12K_CC_SPT_MSB,
1250
HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
1251
u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
1252
u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
1253
u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);
1254
1255
ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);
1256
1257
/* Enable HW CC for WBM */
1258
ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);
1259
1260
val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
1261
HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
1262
u32_encode_bits(ATH12K_CC_PPT_MSB,
1263
HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
1264
u32_encode_bits(ATH12K_CC_SPT_MSB,
1265
HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
1266
u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);
1267
1268
ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);
1269
1270
/* Enable conversion complete indication */
1271
val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
1272
val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
1273
u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
1274
u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);
1275
1276
ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);
1277
1278
/* Enable Cookie conversion for WBM2SW Rings */
1279
val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
1280
val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
1281
ab->hw_params->hal_params->wbm2sw_cc_enable;
1282
1283
ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
1284
}
1285
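/* A cookie carries the primary page table (PPT) index in its upper bits
 * and the secondary page table (SPT) index in its lower bits; the
 * helpers below translate a cookie back into the SPT slot holding the
 * descriptor's virtual address.
 */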
1286
static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
1287
{
1288
return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
1289
}
1290
1291
static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
1292
u16 ppt_idx, u16 spt_idx)
1293
{
1294
struct ath12k_dp *dp = &ab->dp;
1295
1296
return dp->spt_info[ppt_idx].vaddr + spt_idx;
1297
}
1298
1299
struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
1300
u32 cookie)
1301
{
1302
struct ath12k_rx_desc_info **desc_addr_ptr;
1303
u16 ppt_idx, spt_idx;
1304
1305
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
1306
spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
1307
1308
if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES ||
1309
spt_idx > ATH12K_MAX_SPT_ENTRIES)
1310
return NULL;
1311
1312
desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
1313
1314
return *desc_addr_ptr;
1315
}
1316
1317
struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
1318
u32 cookie)
1319
{
1320
struct ath12k_tx_desc_info **desc_addr_ptr;
1321
u16 ppt_idx, spt_idx;
1322
1323
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
1324
spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
1325
1326
if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES ||
1327
ppt_idx > ab->dp.num_spt_pages ||
1328
spt_idx > ATH12K_MAX_SPT_ENTRIES)
1329
return NULL;
1330
1331
desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
1332
1333
return *desc_addr_ptr;
1334
}
1335
1336
static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
1337
{
1338
struct ath12k_dp *dp = &ab->dp;
1339
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
1340
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
1341
u32 i, j, pool_id, tx_spt_page;
1342
u32 ppt_idx;
1343
1344
spin_lock_bh(&dp->rx_desc_lock);
1345
1346
/* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
1347
for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
1348
rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
1349
GFP_ATOMIC);
1350
1351
if (!rx_descs) {
1352
spin_unlock_bh(&dp->rx_desc_lock);
1353
return -ENOMEM;
1354
}
1355
1356
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
1357
rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
1358
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
1359
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
1360
1361
/* Update descriptor VA in SPT */
1362
rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
1363
*rx_desc_addr = &rx_descs[j];
1364
}
1365
}
1366
1367
spin_unlock_bh(&dp->rx_desc_lock);
1368
1369
for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
1370
spin_lock_bh(&dp->tx_desc_lock[pool_id]);
1371
for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
1372
tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
1373
GFP_ATOMIC);
1374
1375
if (!tx_descs) {
1376
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1377
/* Caller takes care of TX pending and RX desc cleanup */
1378
return -ENOMEM;
1379
}
1380
1381
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
1382
tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
1383
ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
1384
tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
1385
tx_descs[j].pool_id = pool_id;
1386
list_add_tail(&tx_descs[j].list,
1387
&dp->tx_desc_free_list[pool_id]);
1388
1389
/* Update descriptor VA in SPT */
1390
tx_desc_addr =
1391
ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
1392
*tx_desc_addr = &tx_descs[j];
1393
}
1394
}
1395
spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1396
}
1397
return 0;
1398
}
1399
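/* Initialize HW cookie conversion: allocate the 4K-aligned SPT pages,
 * write their physical addresses into the PPT in CMEM and populate the
 * RX/TX descriptor free lists that the SPT entries point at.
 */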
1400
static int ath12k_dp_cc_init(struct ath12k_base *ab)
1401
{
1402
struct ath12k_dp *dp = &ab->dp;
1403
int i, ret = 0;
1404
u32 cmem_base;
1405
1406
INIT_LIST_HEAD(&dp->rx_desc_free_list);
1407
INIT_LIST_HEAD(&dp->rx_desc_used_list);
1408
spin_lock_init(&dp->rx_desc_lock);
1409
1410
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
1411
INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
1412
INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
1413
spin_lock_init(&dp->tx_desc_lock[i]);
1414
}
1415
1416
dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
1417
if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
1418
dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
1419
1420
dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
1421
GFP_KERNEL);
1422
1423
if (!dp->spt_info) {
1424
ath12k_warn(ab, "SPT page allocation failure");
1425
return -ENOMEM;
1426
}
1427
1428
cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
1429
1430
for (i = 0; i < dp->num_spt_pages; i++) {
1431
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
1432
ATH12K_PAGE_SIZE,
1433
&dp->spt_info[i].paddr,
1434
GFP_KERNEL);
1435
1436
if (!dp->spt_info[i].vaddr) {
1437
ret = -ENOMEM;
1438
goto free;
1439
}
1440
1441
if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
1442
ath12k_warn(ab, "SPT allocated memory is not 4K aligned");
1443
ret = -EINVAL;
1444
goto free;
1445
}
1446
1447
/* Write to PPT in CMEM */
1448
ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
1449
dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
1450
}
1451
1452
ret = ath12k_dp_cc_desc_init(ab);
1453
if (ret) {
1454
ath12k_warn(ab, "HW CC desc init failed %d", ret);
1455
goto free;
1456
}
1457
1458
return 0;
1459
free:
1460
ath12k_dp_cc_cleanup(ab);
1461
return ret;
1462
}
1463
1464
static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
1465
{
1466
struct ath12k_dp *dp = &ab->dp;
1467
1468
if (!ab->hw_params->reoq_lut_support)
1469
return 0;
1470
1471
dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
1472
DP_REOQ_LUT_SIZE,
1473
&dp->reoq_lut.paddr,
1474
GFP_KERNEL | __GFP_ZERO);
1475
if (!dp->reoq_lut.vaddr) {
1476
ath12k_warn(ab, "failed to allocate memory for reoq table");
1477
return -ENOMEM;
1478
}
1479
1480
ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
1481
dp->reoq_lut.paddr);
1482
return 0;
1483
}
1484
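/* Top-level DP setup: WBM idle ring and link descriptors, cookie
 * conversion, TX bank profiles, the common SOC rings, the REO queue
 * LUT, per-ring TX status FIFOs, DSCP-TID maps and the RX data path.
 * Each failure path unwinds everything set up before it.
 */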
1485
int ath12k_dp_alloc(struct ath12k_base *ab)
1486
{
1487
struct ath12k_dp *dp = &ab->dp;
1488
struct hal_srng *srng = NULL;
1489
size_t size = 0;
1490
u32 n_link_desc = 0;
1491
int ret;
1492
int i;
1493
1494
dp->ab = ab;
1495
1496
INIT_LIST_HEAD(&dp->reo_cmd_list);
1497
INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1498
spin_lock_init(&dp->reo_cmd_lock);
1499
1500
dp->reo_cmd_cache_flush_count = 0;
1501
1502
ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
1503
if (ret) {
1504
ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1505
return ret;
1506
}
1507
1508
srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1509
1510
ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
1511
HAL_WBM_IDLE_LINK, srng, n_link_desc);
1512
if (ret) {
1513
ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
1514
return ret;
1515
}
1516
1517
ret = ath12k_dp_cc_init(ab);
1518
1519
if (ret) {
1520
ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
1521
goto fail_link_desc_cleanup;
1522
}
1523
ret = ath12k_dp_init_bank_profiles(ab);
1524
if (ret) {
1525
ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
1526
goto fail_hw_cc_cleanup;
1527
}
1528
1529
ret = ath12k_dp_srng_common_setup(ab);
1530
if (ret)
1531
goto fail_dp_bank_profiles_cleanup;
1532
1533
size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
1534
1535
ret = ath12k_dp_reoq_lut_setup(ab);
1536
if (ret) {
1537
ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
1538
goto fail_cmn_srng_cleanup;
1539
}
1540
1541
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
1542
dp->tx_ring[i].tcl_data_ring_id = i;
1543
1544
dp->tx_ring[i].tx_status_head = 0;
1545
dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1546
dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1547
if (!dp->tx_ring[i].tx_status) {
1548
ret = -ENOMEM;
1549
/* FIXME: The allocated tx status is not freed
1550
* properly here
1551
*/
1552
goto fail_cmn_reoq_cleanup;
1553
}
1554
}
1555
1556
for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1557
ath12k_hal_tx_set_dscp_tid_map(ab, i);
1558
1559
ret = ath12k_dp_rx_alloc(ab);
1560
if (ret)
1561
goto fail_dp_rx_free;
1562
1563
/* Init any SOC level resource for DP */
1564
1565
return 0;
1566
1567
fail_dp_rx_free:
1568
ath12k_dp_rx_free(ab);
1569
1570
fail_cmn_reoq_cleanup:
1571
ath12k_dp_reoq_lut_cleanup(ab);
1572
1573
fail_cmn_srng_cleanup:
1574
ath12k_dp_srng_common_cleanup(ab);
1575
1576
fail_dp_bank_profiles_cleanup:
1577
ath12k_dp_deinit_bank_profiles(ab);
1578
1579
fail_hw_cc_cleanup:
1580
ath12k_dp_cc_cleanup(ab);
1581
1582
fail_link_desc_cleanup:
1583
ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1584
HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1585
1586
return ret;
1587
}
1588
1589