GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/athk/ath11k/ce.c

// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE9: host->target WMI (mac2) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE10: target->host HTT */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE11: Not used */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
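
/*
 * Each ce_attr entry above describes one Copy Engine pipe, as consumed
 * later in this file: src_nentries/dest_nentries size the host->target
 * and target->host rings (zero means that direction is unused),
 * src_sz_max bounds the per-buffer length (it also becomes pipe->buf_sz
 * in ath11k_ce_alloc_pipes()), and send_cb/recv_cb are the completion
 * handlers invoked from ath11k_ce_tx_process_cb() and
 * ath11k_ce_recv_process_cb(). CE_ATTR_DIS_INTR marks a pipe as polled
 * rather than interrupt driven.
 */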

const struct ce_attr ath11k_host_ce_config_qca6390[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: host->target WMI (mac1) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE8: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
		.recv_cb = ath11k_htc_rx_completion_handler,
	},

	/* CE3: host->target WMI (mac0) */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
		.send_cb = ath11k_htc_tx_completion_handler,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = 2048,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
		.recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
	},
};
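
/*
 * The three host CE maps above differ mainly in pipe count: IPQ8074
 * exposes twelve CEs including WMI pipes for mac1/mac2, QCA6390 nine,
 * and QCN9074 six. QCA6390's CE0 also omits a send_cb; as
 * ath11k_ce_tx_process_cb() below shows, completed buffers on pipes
 * without a send_cb (or when credit flow is enabled) are simply freed.
 */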

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
	/* only ce4 needs shadow workaround */
	if (ce_id == 4)
		return true;
	return false;
}
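
/*
 * In all three host CE maps above, CE4 is the deep (2048-entry),
 * interrupt-disabled host->target HTT pipe. On parts with shadow
 * registers a head-pointer timer is attached to it (see
 * ath11k_dp_shadow_init_timer() in ath11k_ce_init_ring() and
 * ath11k_dp_shadow_start_timer() in ath11k_ce_send()), presumably so
 * shadowed head-pointer updates still reach hardware on a pipe that
 * raises no interrupts.
 */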

void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.ce_count; i++)
		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}

static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
					 struct sk_buff *skb, dma_addr_t paddr)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct hal_srng *srng;
	unsigned int write_index;
	unsigned int nentries_mask = ring->nentries_mask;
	u32 *desc;
	int ret;

	lockdep_assert_held(&ab->ce.ce_lock);

	write_index = ring->write_index;

	srng = &ab->hal.srng_list[ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ret = -ENOSPC;
		goto exit;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOSPC;
		goto exit;
	}

	ath11k_hal_ce_dst_set_desc(desc, paddr);

	ring->skb[write_index] = skb;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ring->write_index = write_index;

	pipe->rx_buf_needed--;

	ret = 0;
exit:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}
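
/*
 * Ring indices here wrap with a mask rather than a modulo:
 * CE_RING_IDX_INCR(mask, idx) is effectively (idx + 1) & mask, which is
 * valid because ath11k_ce_alloc_pipe() rounds every ring size up to a
 * power of two and ath11k_ce_alloc_ring() sets
 * nentries_mask = nentries - 1. For example, with 512 entries
 * (mask 0x1ff), index 511 + 1 wraps to 0.
 */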

static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret = 0;

	if (!(pipe->dest_ring || pipe->status_ring))
		return 0;

	spin_lock_bh(&ab->ce.ce_lock);
	while (pipe->rx_buf_needed) {
		skb = dev_alloc_skb(pipe->buf_sz);
		if (!skb) {
			ret = -ENOMEM;
			goto exit;
		}

		WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(ab->dev, paddr))) {
			ath11k_warn(ab, "failed to dma map ce rx buf\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto exit;
		}

		ATH11K_SKB_RXCB(skb)->paddr = paddr;

		ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
		if (ret) {
			ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
			dma_unmap_single(ab->dev, paddr,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			goto exit;
		}
	}

exit:
	spin_unlock_bh(&ab->ce.ce_lock);
	return ret;
}

static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
					 struct sk_buff **skb, int *nbytes)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	u32 *desc;
	int ret = 0;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->dest_ring->sw_index;
	nentries_mask = pipe->dest_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
	if (!desc) {
		ret = -EIO;
		goto err;
	}

	*nbytes = ath11k_hal_ce_dst_status_get_length(desc);
	if (*nbytes == 0) {
		ret = -EIO;
		goto err;
	}

	*skb = pipe->dest_ring->skb[sw_index];
	pipe->dest_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->dest_ring->sw_index = sw_index;

	pipe->rx_buf_needed++;
err:
	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
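
/*
 * Note the two-ring completion scheme above: the completion entry (and
 * the received length) is consumed from the status ring, while sw_index
 * walks the dest ring in lockstep to recover the matching skb posted by
 * ath11k_ce_rx_buf_enqueue_pipe(). rx_buf_needed is bumped so the slot
 * is refilled on the next ath11k_ce_rx_post_pipe() pass.
 */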

static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;
	unsigned int nbytes, max_nbytes;
	int ret;

	__skb_queue_head_init(&list);
	while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->recv_cb(ab, skb);
	}

	ret = ath11k_ce_rx_post_pipe(pipe);
	if (ret && ret != -ENOSPC) {
		ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
			    pipe->pipe_num, ret);
		mod_timer(&ab->rx_replenish_retry,
			  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
	}
}

static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct hal_srng *srng;
	unsigned int sw_index;
	unsigned int nentries_mask;
	struct sk_buff *skb;
	u32 *desc;

	spin_lock_bh(&ab->ce.ce_lock);

	sw_index = pipe->src_ring->sw_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	desc = ath11k_hal_srng_src_reap_next(ab, srng);
	if (!desc) {
		skb = ERR_PTR(-EIO);
		goto err_unlock;
	}

	skb = pipe->src_ring->skb[sw_index];

	pipe->src_ring->skb[sw_index] = NULL;

	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	pipe->src_ring->sw_index = sw_index;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return skb;
}
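
/*
 * Return protocol: ERR_PTR(-EIO) means the hardware has no further
 * completed source descriptors to reap, so callers stop iterating; a
 * NULL return is a reaped slot with no skb attached, which
 * ath11k_ce_tx_process_cb() below simply skips.
 */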

static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct sk_buff *skb;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
		if (!skb)
			continue;

		dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
				 DMA_TO_DEVICE);

		if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
			dev_kfree_skb_any(skb);
			continue;
		}

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n",
			   pipe->pipe_num, skb->len);
		pipe->send_cb(ab, skb);
	}
}

static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
						 struct hal_srng_params *ring_params)
{
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	u32 addr_lo;
	u32 addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "CE",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
	ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}
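
/*
 * The 64-bit MSI target address is assembled from the two 32-bit
 * halves, and the per-CE payload wraps onto the vectors reserved for
 * "CE": with, say, msi_data_count = 5 and msi_data_start = 1, CE ids
 * map onto payloads 1..5 and then repeat. If the vector lookup fails,
 * the ring is simply left without HAL_SRNG_FLAGS_MSI_INTR.
 */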

static int ath11k_ce_init_ring(struct ath11k_base *ab,
			       struct ath11k_ce_ring *ce_ring,
			       int ce_id, enum hal_ring_type type)
{
	struct hal_srng_params params = { 0 };
	int ret;

	params.ring_base_paddr = ce_ring->base_addr_ce_space;
	params.ring_base_vaddr = ce_ring->base_addr_owner_space;
	params.num_entries = ce_ring->nentries;

	if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
		ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

	switch (type) {
	case HAL_CE_SRC:
		if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
			params.intr_batch_cntr_thres_entries = 1;
		break;
	case HAL_CE_DST:
		params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_timer_thres_us = 1024;
			params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
			params.low_threshold = ce_ring->nentries - 3;
		}
		break;
	case HAL_CE_DST_STATUS:
		if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
			params.intr_batch_cntr_thres_entries = 1;
			params.intr_timer_thres_us = 0x1000;
		}
		break;
	default:
		ath11k_warn(ab, "Invalid CE ring type %d\n", type);
		return -EINVAL;
	}

	/* TODO: Init other params needed by HAL to init the ring */

	ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ce_id);
		return ret;
	}

	ce_ring->hal_ring_id = ret;

	if (ab->hw_params.supports_shadow_regs &&
	    ath11k_ce_need_shadow_fix(ce_id))
		ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
					    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
					    ce_ring->hal_ring_id);

	return 0;
}
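
/*
 * Interrupt moderation above differs by ring role: source and
 * dest-status rings raise an interrupt per completed entry (batch
 * threshold 1, with a 0x1000 us backstop timer on the status ring),
 * while dest rings use a 1024 us timer plus a low-threshold interrupt
 * set at nentries - 3. Pipes flagged CE_ATTR_DIS_INTR get neither MSI
 * setup nor thresholds and are serviced purely by polling.
 */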

static struct ath11k_ce_ring *
ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
{
	struct ath11k_ce_ring *ce_ring;
	dma_addr_t base_addr;

	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
	if (ce_ring == NULL)
		return ERR_PTR(-ENOMEM);

	ce_ring->nentries = nentries;
	ce_ring->nentries_mask = nentries - 1;

	/* Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	ce_ring->base_addr_owner_space_unaligned =
		dma_alloc_coherent(ab->dev,
				   nentries * desc_sz + CE_DESC_RING_ALIGN,
				   &base_addr, GFP_KERNEL);
	if (!ce_ring->base_addr_owner_space_unaligned) {
		kfree(ce_ring);
		return ERR_PTR(-ENOMEM);
	}

	ce_ring->base_addr_ce_space_unaligned = base_addr;

	ce_ring->base_addr_owner_space = PTR_ALIGN(
			ce_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	ce_ring->base_addr_ce_space = ALIGN(
			ce_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	return ce_ring;
}
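
/*
 * The descriptor area is over-allocated by CE_DESC_RING_ALIGN so that
 * both the CPU virtual address (PTR_ALIGN) and the device address
 * (ALIGN) can be rounded up to the alignment the CE hardware expects.
 * Both unaligned originals are kept in the ring struct because they,
 * not the aligned copies, must be handed back to dma_free_coherent()
 * in ath11k_ce_free_pipes().
 */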

static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
	struct ath11k_ce_ring *ring;
	int nentries;
	int desc_sz;

	pipe->attr_flags = attr->flags;

	if (attr->src_nentries) {
		pipe->send_cb = attr->send_cb;
		nentries = roundup_pow_of_two(attr->src_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->src_ring = ring;
	}

	if (attr->dest_nentries) {
		pipe->recv_cb = attr->recv_cb;
		nentries = roundup_pow_of_two(attr->dest_nentries);
		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->dest_ring = ring;

		desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
		ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
		if (IS_ERR(ring))
			return PTR_ERR(ring);
		pipe->status_ring = ring;
	}

	return 0;
}

void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];

	if (attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);

	if (pipe->recv_cb)
		ath11k_ce_recv_process_cb(pipe);
}

void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];

	if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
		ath11k_ce_tx_process_cb(pipe);
}
EXPORT_SYMBOL(ath11k_ce_per_engine_service);
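
/*
 * ath11k_ce_per_engine_service() is the interrupt-driven service path;
 * ath11k_ce_poll_send_completed() covers the CE_ATTR_DIS_INTR source
 * pipes, which are reaped opportunistically from ath11k_ce_send()
 * (once past a usage threshold) and during ath11k_ce_cleanup_pipes().
 */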

int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
		   u16 transfer_id)
{
	struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
	struct hal_srng *srng;
	u32 *desc;
	unsigned int write_index, sw_index;
	unsigned int nentries_mask;
	int ret = 0;
	u8 byte_swap_data = 0;
	int num_used;

	/* Check if some entries could be regained by handling tx completion if
	 * the CE has interrupts disabled and the used entries is more than the
	 * defined usage threshold.
	 */
	if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
		spin_lock_bh(&ab->ce.ce_lock);
		write_index = pipe->src_ring->write_index;

		sw_index = pipe->src_ring->sw_index;

		if (write_index >= sw_index)
			num_used = write_index - sw_index;
		else
			num_used = pipe->src_ring->nentries - sw_index +
				   write_index;

		spin_unlock_bh(&ab->ce.ce_lock);

		if (num_used > ATH11K_CE_USAGE_THRESHOLD)
			ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
	}

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	spin_lock_bh(&ab->ce.ce_lock);

	write_index = pipe->src_ring->write_index;
	nentries_mask = pipe->src_ring->nentries_mask;

	srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
	if (!desc) {
		ath11k_hal_srng_access_end(ab, srng);
		ret = -ENOBUFS;
		goto err_unlock;
	}

	if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
		byte_swap_data = 1;

	ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
				   skb->len, transfer_id, byte_swap_data);

	pipe->src_ring->skb[write_index] = skb;
	pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
						       write_index);

	ath11k_hal_srng_access_end(ab, srng);

	if (ath11k_ce_need_shadow_fix(pipe_id))
		ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return 0;

err_unlock:
	spin_unlock_bh(&srng->lock);

	spin_unlock_bh(&ab->ce.ce_lock);

	return ret;
}
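
/*
 * The occupancy check above is the usual circular-buffer count: while
 * write_index >= sw_index the pipe holds write_index - sw_index
 * entries; once the producer has wrapped, the live span is
 * nentries - sw_index + write_index. Only when that exceeds
 * ATH11K_CE_USAGE_THRESHOLD does the polled pipe pay for a completion
 * sweep before queueing more.
 */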

static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
{
	struct ath11k_base *ab = pipe->ab;
	struct ath11k_ce_ring *ring = pipe->dest_ring;
	struct sk_buff *skb;
	int i;

	if (!(ring && pipe->buf_sz))
		return;

	for (i = 0; i < ring->nentries; i++) {
		skb = ring->skb[i];
		if (!skb)
			continue;

		ring->skb[i] = NULL;
		dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ab->hw_params.host_ce_config[i].src_nentries)
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_SRC, i);

		if (ab->hw_params.host_ce_config[i].dest_nentries) {
			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST, i);

			ath11k_hal_srng_update_shadow_config(ab,
							     HAL_CE_DST_STATUS, i);
		}
	}
}

void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
				 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

	/* shadow is already configured */
	if (*shadow_cfg_len)
		return;

	/* shadow isn't configured yet, configure now.
	 * non-CE srngs are configured first, then
	 * all CE srngs.
	 */
	ath11k_hal_srng_shadow_config(ab);
	ath11k_ce_shadow_config(ab);

	/* get the shadow configuration */
	ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int pipe_num;

	ath11k_ce_stop_shadow_timers(ab);

	for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
		pipe = &ab->ce.ce_pipe[pipe_num];
		ath11k_ce_rx_pipe_cleanup(pipe);

		/* Cleanup any src CE's which have interrupts disabled */
		ath11k_ce_poll_send_completed(ab, pipe_num);

		/* NOTE: Should we also clean up tx buffer in all pipes? */
	}
}
EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);

void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];
		ret = ath11k_ce_rx_post_pipe(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				continue;

			ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
				    i, ret);
			mod_timer(&ab->rx_replenish_retry,
				  jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);

			return;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_rx_post_buf);

void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
	struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);

	ath11k_ce_rx_post_buf(ab);
}

int ath11k_ce_init_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (pipe->src_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
						  HAL_CE_SRC);
			if (ret) {
				ath11k_warn(ab, "failed to init src ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->src_ring->write_index = 0;
			pipe->src_ring->sw_index = 0;
		}

		if (pipe->dest_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
						  HAL_CE_DST);
			if (ret) {
				ath11k_warn(ab, "failed to init dest ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->rx_buf_needed = pipe->dest_ring->nentries ?
					      pipe->dest_ring->nentries - 2 : 0;

			pipe->dest_ring->write_index = 0;
			pipe->dest_ring->sw_index = 0;
		}

		if (pipe->status_ring) {
			ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
						  HAL_CE_DST_STATUS);
			if (ret) {
				ath11k_warn(ab, "failed to init dest status ring: %d\n",
					    ret);
				/* Should we clear any partial init */
				return ret;
			}

			pipe->status_ring->write_index = 0;
			pipe->status_ring->sw_index = 0;
		}
	}

	return 0;
}
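
/*
 * rx_buf_needed starts at nentries - 2 rather than nentries: the ring
 * is never filled completely, which (at least) keeps a full ring
 * distinguishable from an empty one once write_index wraps back around
 * to sw_index. The count is decremented per posted buffer and
 * incremented per completion, so it always reflects how many rx
 * buffers the pipe still wants.
 */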

void ath11k_ce_free_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	struct ath11k_ce_ring *ce_ring;
	int desc_sz;
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_need_shadow_fix(i))
			ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

		if (pipe->src_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
			ce_ring = pipe->src_ring;
			dma_free_coherent(ab->dev,
					  pipe->src_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->src_ring);
			pipe->src_ring = NULL;
		}

		if (pipe->dest_ring) {
			desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
			ce_ring = pipe->dest_ring;
			dma_free_coherent(ab->dev,
					  pipe->dest_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->dest_ring);
			pipe->dest_ring = NULL;
		}

		if (pipe->status_ring) {
			desc_sz =
			  ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
			ce_ring = pipe->status_ring;
			dma_free_coherent(ab->dev,
					  pipe->status_ring->nentries * desc_sz +
					  CE_DESC_RING_ALIGN,
					  ce_ring->base_addr_owner_space_unaligned,
					  ce_ring->base_addr_ce_space_unaligned);
			kfree(pipe->status_ring);
			pipe->status_ring = NULL;
		}
	}
}
EXPORT_SYMBOL(ath11k_ce_free_pipes);

int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *pipe;
	int i;
	int ret;
	const struct ce_attr *attr;

	spin_lock_init(&ab->ce.ce_lock);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		attr = &ab->hw_params.host_ce_config[i];
		pipe = &ab->ce.ce_pipe[i];
		pipe->pipe_num = i;
		pipe->ab = ab;
		pipe->buf_sz = attr->src_sz_max;

		ret = ath11k_ce_alloc_pipe(ab, i);
		if (ret) {
			/* Free any partial successful allocation */
			ath11k_ce_free_pipes(ab);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ath11k_ce_alloc_pipes);

/* On a big-endian host the Copy Engine byte swap is enabled.
 * When the Copy Engine does the byte swap, the host needs to byte swap
 * again to get/put the buffer content in the correct byte order.
 */
void ath11k_ce_byte_swap(void *mem, u32 len)
{
	int i;
#if defined(__FreeBSD__)
	u32 *m = mem;
#endif

	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
		if (!mem)
			return;

		for (i = 0; i < (len / 4); i++) {
#if defined(__linux__)
			*(u32 *)mem = swab32(*(u32 *)mem);
			mem += 4;
#elif defined(__FreeBSD__)
			*m = swab32(*m);
			m++;
#endif
		}
	}
}
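
/*
 * The whole body compiles away on little-endian kernels, since
 * IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) is a compile-time constant. The
 * FreeBSD variant walks a u32 pointer because, unlike the Linux
 * kernel's GNU C dialect, standard C does not allow arithmetic on
 * void pointers.
 */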

int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
	if (ce_id >= ab->hw_params.ce_count)
		return -EINVAL;

	return ab->hw_params.host_ce_config[ce_id].flags;
}
EXPORT_SYMBOL(ath11k_ce_get_attr_flags);