GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath11k/ce.c
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/export.h>
#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

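/*
 * Host-side Copy Engine (CE) pipe attributes, one table per chip family.
 * Each entry describes a single pipe: src_nentries sizes the host->target
 * ring, dest_nentries the target->host ring, src_sz_max bounds the
 * per-buffer transfer size, and the optional send_cb/recv_cb callbacks
 * run on transfer completion. Pipes flagged CE_ATTR_DIS_INTR are serviced
 * by polling rather than per-transfer interrupts.
 */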
const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath11k_htc_tx_completion_handler,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE3: host->target WMI (mac0) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath11k_htc_tx_completion_handler,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = 2048,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: target->host pktlog */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: host->target WMI (mac1) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath11k_htc_tx_completion_handler,
        },

        /* CE8: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE9: host->target WMI (mac2) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath11k_htc_tx_completion_handler,
        },

        /* CE10: target->host HTT */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE11: Not used */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },
};

const struct ce_attr ath11k_host_ce_config_qca6390[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE3: host->target WMI (mac0) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath11k_htc_tx_completion_handler,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = 2048,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: target->host pktlog */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: host->target WMI (mac1) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath11k_htc_tx_completion_handler,
        },

        /* CE8: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },
};

const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
                .recv_cb = ath11k_htc_rx_completion_handler,
        },

        /* CE3: host->target WMI (mac0) */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
                .send_cb = ath11k_htc_tx_completion_handler,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = 2048,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: target->host pktlog */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 512,
                .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
        },
};

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
        /* only ce4 needs shadow workaround */
        if (ce_id == 4)
                return true;
        return false;
}

void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
        int i;

        if (!ab->hw_params.supports_shadow_regs)
                return;

        for (i = 0; i < ab->hw_params.ce_count; i++)
                if (ath11k_ce_need_shadow_fix(i))
                        ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}

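/* Post one DMA-mapped RX buffer to the pipe's destination ring. The
 * caller must hold ab->ce.ce_lock; the ring's SRNG lock is taken here.
 * Returns -ENOSPC when the destination ring has no free entry.
 */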
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
                                         struct sk_buff *skb, dma_addr_t paddr)
{
        struct ath11k_base *ab = pipe->ab;
        struct ath11k_ce_ring *ring = pipe->dest_ring;
        struct hal_srng *srng;
        unsigned int write_index;
        unsigned int nentries_mask = ring->nentries_mask;
        u32 *desc;
        int ret;

        lockdep_assert_held(&ab->ce.ce_lock);

        write_index = ring->write_index;

        srng = &ab->hal.srng_list[ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
                ret = -ENOSPC;
                goto exit;
        }

        desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
        if (!desc) {
                ret = -ENOSPC;
                goto exit;
        }

        ath11k_hal_ce_dst_set_desc(desc, paddr);

        ring->skb[write_index] = skb;
        write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
        ring->write_index = write_index;

        pipe->rx_buf_needed--;

        ret = 0;
exit:
        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return ret;
}

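/* Replenish the pipe's destination ring: allocate and DMA-map one skb per
 * outstanding rx_buf_needed slot and enqueue it. A buffer that fails to
 * map or enqueue is freed and the error is returned to the caller.
 */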
static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0;

        if (!(pipe->dest_ring || pipe->status_ring))
                return 0;

        spin_lock_bh(&ab->ce.ce_lock);
        while (pipe->rx_buf_needed) {
                skb = dev_alloc_skb(pipe->buf_sz);
                if (!skb) {
                        ret = -ENOMEM;
                        goto exit;
                }

                WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

                paddr = dma_map_single(ab->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(ab->dev, paddr))) {
                        ath11k_warn(ab, "failed to dma map ce rx buf\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto exit;
                }

                ATH11K_SKB_RXCB(skb)->paddr = paddr;

                ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);

                if (ret) {
                        ath11k_dbg(ab, ATH11K_DBG_CE, "failed to enqueue rx buf: %d\n",
                                   ret);
                        dma_unmap_single(ab->dev, paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        goto exit;
                }
        }

exit:
        spin_unlock_bh(&ab->ce.ce_lock);
        return ret;
}

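/* Reap one completed receive: read the transfer length from the next
 * destination-status descriptor and detach the matching skb from the
 * destination ring. Returns -EIO when no completion is pending.
 */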
static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
                                         struct sk_buff **skb, int *nbytes)
{
        struct ath11k_base *ab = pipe->ab;
        struct hal_srng *srng;
        unsigned int sw_index;
        unsigned int nentries_mask;
        u32 *desc;
        int ret = 0;

        spin_lock_bh(&ab->ce.ce_lock);

        sw_index = pipe->dest_ring->sw_index;
        nentries_mask = pipe->dest_ring->nentries_mask;

        srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
        if (!desc) {
                ret = -EIO;
                goto err;
        }

        *nbytes = ath11k_hal_ce_dst_status_get_length(desc);

        *skb = pipe->dest_ring->skb[sw_index];
        pipe->dest_ring->skb[sw_index] = NULL;

        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        pipe->dest_ring->sw_index = sw_index;

        pipe->rx_buf_needed++;
err:
        ath11k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return ret;
}

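/* Drain pending receive completions: unmap each buffer, drop any with an
 * out-of-range length, hand the rest to the pipe's recv_cb, then repost
 * fresh RX buffers (retried from the rx_replenish_retry timer on failure).
 */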
static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct sk_buff *skb;
        struct sk_buff_head list;
        unsigned int nbytes, max_nbytes;
        int ret;

        __skb_queue_head_init(&list);
        while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
                        ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n",
                           pipe->pipe_num, skb->len);
                pipe->recv_cb(ab, skb);
        }

        ret = ath11k_ce_rx_post_pipe(pipe);
        if (ret && ret != -ENOSPC) {
                ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
                            pipe->pipe_num, ret);
                mod_timer(&ab->rx_replenish_retry,
                          jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
        }
}

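/* Reap one completed send: once the hardware has consumed the descriptor
 * at sw_index, detach and return the corresponding skb. Returns
 * ERR_PTR(-EIO) when nothing has completed.
 */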
static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct hal_srng *srng;
        unsigned int sw_index;
        unsigned int nentries_mask;
        struct sk_buff *skb;
        u32 *desc;

        spin_lock_bh(&ab->ce.ce_lock);

        sw_index = pipe->src_ring->sw_index;
        nentries_mask = pipe->src_ring->nentries_mask;

        srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        desc = ath11k_hal_srng_src_reap_next(ab, srng);
        if (!desc) {
                skb = ERR_PTR(-EIO);
                goto err_unlock;
        }

        skb = pipe->src_ring->skb[sw_index];

        pipe->src_ring->skb[sw_index] = NULL;

        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        pipe->src_ring->sw_index = sw_index;

err_unlock:
        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return skb;
}

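/* Drain send completions: unmap each skb, free it immediately when the
 * pipe has no send_cb or the hardware manages credit flow, and otherwise
 * pass it to send_cb for HTC-level completion handling.
 */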
static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct sk_buff *skb;
        struct sk_buff_head list;

        __skb_queue_head_init(&list);
        while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
                if (!skb)
                        continue;

                dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
                                 DMA_TO_DEVICE);

                if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
                        dev_kfree_skb_any(skb);
                        continue;
                }

                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n",
                           pipe->pipe_num, skb->len);
                pipe->send_cb(ab, skb);
        }
}

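/* Fill in the MSI address and per-CE MSI data for a ring so the SRNG can
 * fire MSI interrupts. If no MSI vector is assigned for the CEs, the
 * params are left untouched and HAL_SRNG_FLAGS_MSI_INTR stays clear.
 */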
static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
                                                 struct hal_srng_params *ring_params)
{
        u32 msi_data_start;
        u32 msi_data_count, msi_data_idx;
        u32 msi_irq_start;
        u32 addr_lo;
        u32 addr_hi;
        int ret;

        ret = ath11k_get_user_msi_vector(ab, "CE",
                                         &msi_data_count, &msi_data_start,
                                         &msi_irq_start);

        if (ret)
                return;

        ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
        ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

        ring_params->msi_addr = addr_lo;
        ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
        ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
        ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

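/* Program one CE ring into the HAL: set the ring base and size, attach
 * MSI parameters for interrupt-enabled pipes, and pick interrupt
 * thresholds by ring type (batch count for src, low-threshold plus timer
 * for dst, both for dst-status). Rings that need the CE4 shadow
 * workaround also get their shadow timer initialized.
 */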
static int ath11k_ce_init_ring(struct ath11k_base *ab,
                               struct ath11k_ce_ring *ce_ring,
                               int ce_id, enum hal_ring_type type)
{
        struct hal_srng_params params = {};
        int ret;

        params.ring_base_paddr = ce_ring->base_addr_ce_space;
        params.ring_base_vaddr = ce_ring->base_addr_owner_space;
        params.num_entries = ce_ring->nentries;

        if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
                ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

        switch (type) {
        case HAL_CE_SRC:
                if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
                        params.intr_batch_cntr_thres_entries = 1;
                break;
        case HAL_CE_DST:
                params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
                if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
                        params.intr_timer_thres_us = 1024;
                        params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
                        params.low_threshold = ce_ring->nentries - 3;
                }
                break;
        case HAL_CE_DST_STATUS:
                if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
                        params.intr_batch_cntr_thres_entries = 1;
                        params.intr_timer_thres_us = 0x1000;
                }
                break;
        default:
                ath11k_warn(ab, "Invalid CE ring type %d\n", type);
                return -EINVAL;
        }

        /* TODO: Init other params needed by HAL to init the ring */

        ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
        if (ret < 0) {
                ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
                            ret, ce_id);
                return ret;
        }

        ce_ring->hal_ring_id = ret;

        if (ab->hw_params.supports_shadow_regs &&
            ath11k_ce_need_shadow_fix(ce_id))
                ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
                                            ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
                                            ce_ring->hal_ring_id);

        return 0;
}

static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
        struct ath11k_ce_ring *ce_ring;
        dma_addr_t base_addr;

        ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
        if (ce_ring == NULL)
                return ERR_PTR(-ENOMEM);

        ce_ring->nentries = nentries;
        ce_ring->nentries_mask = nentries - 1;

        /* Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        ce_ring->base_addr_owner_space_unaligned =
                dma_alloc_coherent(ab->dev,
                                   nentries * desc_sz + CE_DESC_RING_ALIGN,
                                   &base_addr, GFP_KERNEL);
        if (!ce_ring->base_addr_owner_space_unaligned) {
                kfree(ce_ring);
                return ERR_PTR(-ENOMEM);
        }

        ce_ring->base_addr_ce_space_unaligned = base_addr;

        ce_ring->base_addr_owner_space = PTR_ALIGN(
                        ce_ring->base_addr_owner_space_unaligned,
                        CE_DESC_RING_ALIGN);
        ce_ring->base_addr_ce_space = ALIGN(
                        ce_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);

        return ce_ring;
}

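/* Allocate the rings a pipe needs per its ce_attr: a source ring when
 * src_nentries is set, and destination plus destination-status rings when
 * dest_nentries is set. Sizes are rounded up to powers of two so index
 * arithmetic can use nentries_mask.
 */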
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
        struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
        const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
        struct ath11k_ce_ring *ring;
        int nentries;
        int desc_sz;

        pipe->attr_flags = attr->flags;

        if (attr->src_nentries) {
                pipe->send_cb = attr->send_cb;
                nentries = roundup_pow_of_two(attr->src_nentries);
                desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
                ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);
                pipe->src_ring = ring;
        }

        if (attr->dest_nentries) {
                pipe->recv_cb = attr->recv_cb;
                nentries = roundup_pow_of_two(attr->dest_nentries);
                desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
                ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);
                pipe->dest_ring = ring;

                desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
                ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);
                pipe->status_ring = ring;
        }

        return 0;
}

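/* Per-CE service entry point: process send completions on pipes with a
 * source ring and receive completions on pipes with a recv_cb.
 */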
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
        struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
        const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];

        if (attr->src_nentries)
                ath11k_ce_tx_process_cb(pipe);

        if (pipe->recv_cb)
                ath11k_ce_recv_process_cb(pipe);
}

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
        struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
        const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];

        if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
                ath11k_ce_tx_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);

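/* Queue one skb for transmission on a CE pipe. Interrupt-disabled pipes
 * are polled first to reclaim entries once usage crosses
 * ATH11K_CE_USAGE_THRESHOLD; sends fail with -ESHUTDOWN during crash
 * flush and with -ENOBUFS when the source ring is full.
 */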
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
                   u16 transfer_id)
{
        struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
        struct hal_srng *srng;
        u32 *desc;
        unsigned int write_index, sw_index;
        unsigned int nentries_mask;
        int ret = 0;
        u8 byte_swap_data = 0;
        int num_used;

        /* Check if some entries could be regained by handling tx completion
         * if the CE has interrupts disabled and the number of used entries
         * exceeds the defined usage threshold.
         */
        if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
                spin_lock_bh(&ab->ce.ce_lock);
                write_index = pipe->src_ring->write_index;

                sw_index = pipe->src_ring->sw_index;

                if (write_index >= sw_index)
                        num_used = write_index - sw_index;
                else
                        num_used = pipe->src_ring->nentries - sw_index +
                                   write_index;

                spin_unlock_bh(&ab->ce.ce_lock);

                if (num_used > ATH11K_CE_USAGE_THRESHOLD)
                        ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
        }

        if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
                return -ESHUTDOWN;

        spin_lock_bh(&ab->ce.ce_lock);

        write_index = pipe->src_ring->write_index;
        nentries_mask = pipe->src_ring->nentries_mask;

        srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath11k_hal_srng_access_begin(ab, srng);

        if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
                ath11k_hal_srng_access_end(ab, srng);
                ret = -ENOBUFS;
                goto err_unlock;
        }

        desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
        if (!desc) {
                ath11k_hal_srng_access_end(ab, srng);
                ret = -ENOBUFS;
                goto err_unlock;
        }

        if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
                byte_swap_data = 1;

        ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
                                   skb->len, transfer_id, byte_swap_data);

        pipe->src_ring->skb[write_index] = skb;
        pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
                                                       write_index);

        ath11k_hal_srng_access_end(ab, srng);

        if (ath11k_ce_need_shadow_fix(pipe_id))
                ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return 0;

err_unlock:
        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return ret;
}

static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
        struct ath11k_base *ab = pipe->ab;
        struct ath11k_ce_ring *ring = pipe->dest_ring;
        struct sk_buff *skb;
        int i;

        if (!(ring && pipe->buf_sz))
                return;

        for (i = 0; i < ring->nentries; i++) {
                skb = ring->skb[i];
                if (!skb)
                        continue;

                ring->skb[i] = NULL;
                dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
        int i;

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                if (ab->hw_params.host_ce_config[i].src_nentries)
                        ath11k_hal_srng_update_shadow_config(ab,
                                                             HAL_CE_SRC, i);

                if (ab->hw_params.host_ce_config[i].dest_nentries) {
                        ath11k_hal_srng_update_shadow_config(ab,
                                                             HAL_CE_DST, i);

                        ath11k_hal_srng_update_shadow_config(ab,
                                                             HAL_CE_DST_STATUS, i);
                }
        }
}

void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
                                 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
        if (!ab->hw_params.supports_shadow_regs)
                return;

        ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

        /* shadow is already configured */
        if (*shadow_cfg_len)
                return;

        /* shadow isn't configured yet; configure now.
         * non-CE srngs are configured first, then
         * all CE srngs.
         */
        ath11k_hal_srng_shadow_config(ab);
        ath11k_ce_shadow_config(ab);

        /* get the shadow configuration */
        ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        int pipe_num;

        ath11k_ce_stop_shadow_timers(ab);

        for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
                pipe = &ab->ce.ce_pipe[pipe_num];
                ath11k_ce_rx_pipe_cleanup(pipe);

                /* Cleanup any src CE's which have interrupts disabled */
                ath11k_ce_poll_send_completed(ab, pipe_num);

                /* NOTE: Should we also clean up tx buffer in all pipes? */
        }
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        int i;
        int ret;

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                pipe = &ab->ce.ce_pipe[i];
                ret = ath11k_ce_rx_post_pipe(pipe);
                if (ret) {
                        if (ret == -ENOSPC)
                                continue;

                        ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
                                    i, ret);
                        mod_timer(&ab->rx_replenish_retry,
                                  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

                        return;
                }
        }
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
        struct ath11k_base *ab = timer_container_of(ab, t, rx_replenish_retry);

        ath11k_ce_rx_post_buf(ab);
}

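/* Set up the HAL SRNG for every allocated ring and reset ring indices.
 * Destination rings start with rx_buf_needed = nentries - 2, leaving two
 * entries of slack between the write and read indices.
 */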
int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        int i;
        int ret;

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                pipe = &ab->ce.ce_pipe[i];

                if (pipe->src_ring) {
                        ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
                                                  HAL_CE_SRC);
                        if (ret) {
                                ath11k_warn(ab, "failed to init src ring: %d\n",
                                            ret);
                                /* Should we clear any partial init */
                                return ret;
                        }

                        pipe->src_ring->write_index = 0;
                        pipe->src_ring->sw_index = 0;
                }

                if (pipe->dest_ring) {
                        ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
                                                  HAL_CE_DST);
                        if (ret) {
                                ath11k_warn(ab, "failed to init dest ring: %d\n",
                                            ret);
                                /* Should we clear any partial init */
                                return ret;
                        }

                        pipe->rx_buf_needed = pipe->dest_ring->nentries ?
                                              pipe->dest_ring->nentries - 2 : 0;

                        pipe->dest_ring->write_index = 0;
                        pipe->dest_ring->sw_index = 0;
                }

                if (pipe->status_ring) {
                        ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
                                                  HAL_CE_DST_STATUS);
                        if (ret) {
                                ath11k_warn(ab, "failed to init dest status ring: %d\n",
                                            ret);
                                /* Should we clear any partial init */
                                return ret;
                        }

                        pipe->status_ring->write_index = 0;
                        pipe->status_ring->sw_index = 0;
                }
        }

        return 0;
}

void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        struct ath11k_ce_ring *ce_ring;
        int desc_sz;
        int i;

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                pipe = &ab->ce.ce_pipe[i];

                if (ath11k_ce_need_shadow_fix(i))
                        ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

                if (pipe->src_ring) {
                        desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
                        ce_ring = pipe->src_ring;
                        dma_free_coherent(ab->dev,
                                          pipe->src_ring->nentries * desc_sz +
                                          CE_DESC_RING_ALIGN,
                                          ce_ring->base_addr_owner_space_unaligned,
                                          ce_ring->base_addr_ce_space_unaligned);
                        kfree(pipe->src_ring);
                        pipe->src_ring = NULL;
                }

                if (pipe->dest_ring) {
                        desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
                        ce_ring = pipe->dest_ring;
                        dma_free_coherent(ab->dev,
                                          pipe->dest_ring->nentries * desc_sz +
                                          CE_DESC_RING_ALIGN,
                                          ce_ring->base_addr_owner_space_unaligned,
                                          ce_ring->base_addr_ce_space_unaligned);
                        kfree(pipe->dest_ring);
                        pipe->dest_ring = NULL;
                }

                if (pipe->status_ring) {
                        desc_sz =
                          ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
                        ce_ring = pipe->status_ring;
                        dma_free_coherent(ab->dev,
                                          pipe->status_ring->nentries * desc_sz +
                                          CE_DESC_RING_ALIGN,
                                          ce_ring->base_addr_owner_space_unaligned,
                                          ce_ring->base_addr_ce_space_unaligned);
                        kfree(pipe->status_ring);
                        pipe->status_ring = NULL;
                }
        }
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
        struct ath11k_ce_pipe *pipe;
        int i;
        int ret;
        const struct ce_attr *attr;

        spin_lock_init(&ab->ce.ce_lock);

        for (i = 0; i < ab->hw_params.ce_count; i++) {
                attr = &ab->hw_params.host_ce_config[i];
                pipe = &ab->ce.ce_pipe[i];
                pipe->pipe_num = i;
                pipe->ab = ab;
                pipe->buf_sz = attr->src_sz_max;

                ret = ath11k_ce_alloc_pipe(ab, i);
                if (ret) {
                        /* Free any partially successful allocations */
                        ath11k_ce_free_pipes(ab);
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* On a big-endian host the Copy Engine byte_swap is enabled. When the
 * Copy Engine swaps the data, the host must swap it again to get/put
 * buffer content in the correct byte order.
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
        int i;
#if defined(__FreeBSD__)
        u32 *m = mem;
#endif

        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
                if (!mem)
                        return;

                for (i = 0; i < (len / 4); i++) {
#if defined(__linux__)
                        *(u32 *)mem = swab32(*(u32 *)mem);
                        mem += 4;
#elif defined(__FreeBSD__)
                        *m = swab32(*m);
                        m++;
#endif
                }
        }
}

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
        if (ce_id >= ab->hw_params.ce_count)
                return -EINVAL;

        return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);