GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/dev/mediatek/mt76/dma.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <[email protected]>
 */

#include <linux/dma-mapping.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#include <net/page_pool/helpers.h>
#endif
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

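/* Queue register accessors: when a queue is owned by the WED (Wireless
 * Ethernet Dispatch) block, its ring registers are reached through the WED
 * register space instead of being read or written directly.
 */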
#define Q_READ(_q, _field) ({ \
	u32 _offset = offsetof(struct mt76_queue_regs, _field); \
	u32 _val; \
	if ((_q)->flags & MT_QFLAG_WED) \
		_val = mtk_wed_device_reg_read((_q)->wed, \
					       ((_q)->wed_regs + \
						_offset)); \
	else \
		_val = readl(&(_q)->regs->_field); \
	_val; \
})

#define Q_WRITE(_q, _field, _val) do { \
	u32 _offset = offsetof(struct mt76_queue_regs, _field); \
	if ((_q)->flags & MT_QFLAG_WED) \
		mtk_wed_device_reg_write((_q)->wed, \
					 ((_q)->wed_regs + _offset), \
					 _val); \
	else \
		writel(_val, &(_q)->regs->_field); \
} while (0)

#else

#define Q_READ(_q, _field) readl(&(_q)->regs->_field)
#define Q_WRITE(_q, _field, _val) writel(_val, &(_q)->regs->_field)

#endif

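/* Allocate a TX descriptor (txwi) buffer followed by its cache entry and
 * DMA-map the descriptor portion towards the device.
 */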
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

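/* txwi/rxwi cache: idle descriptor wrappers are kept on per-device lists so
 * they can be reused instead of reallocated for every frame. The __get
 * helpers pop an entry, the get helpers fall back to allocation, and the put
 * helpers return entries to the list.
 */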
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

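/* Drain the txwi/rxwi caches on teardown, unmapping or releasing any buffer
 * still attached to a cached entry.
 */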
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			mt76_put_page_pool_buf(t->ptr, false);
		kfree(t);
	}
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);

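/* Program the descriptor ring base and size into the hardware and resync the
 * software head/tail indices with the current hardware DMA index.
 */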
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(q, desc_base, q->desc_dma);
	if (q->flags & MT_QFLAG_WED_RRO_EN)
		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
	else
		Q_WRITE(q, ring_size, q->ndesc);
	q->head = Q_READ(q, dma_idx);
	q->tail = q->head;
}

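/* Reset a queue: mark every descriptor as done, optionally zero the CPU and
 * DMA indices, then resync the ring state with the hardware.
 */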
void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
			    bool reset_idx)
{
	if (!q || !q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		/* clear descriptors */
		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	if (reset_idx) {
		Q_WRITE(q, cpu_idx, 0);
		Q_WRITE(q, dma_idx, 0);
	}
	mt76_dma_sync_idx(dev, q);
}

void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	__mt76_dma_queue_reset(dev, q, true);
}

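/* Attach a single RX buffer to the descriptor at the queue head. For WED RX
 * queues the buffer is registered with the RX token table so it can be looked
 * up again when the hardware hands it back.
 */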
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	struct mt76_desc *desc;
	int idx = q->head;
	u32 buf1 = 0, ctrl;
	int rx_token;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		data = &rro_desc[q->head];
		goto done;
	}

	desc = &q->desc[q->head];
	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
#endif

	if (mt76_queue_is_wed_rx(q)) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

done:
	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

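/* Write a TX scatter list into the ring, packing up to two buffers per
 * descriptor; the last descriptor of the frame records the skb and txwi so
 * completion can release them.
 */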
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
#endif
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
					   buf[1].addr >> 32);
#endif
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

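/* Unmap the buffers of a completed TX entry and hand a copy of it back to the
 * caller before clearing the slot.
 */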
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(q, cpu_idx, q->head);
}

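/* Reclaim completed TX entries up to the hardware DMA index, or everything
 * when flushing, returning txwi entries to the cache and waking any waiter
 * once the queue drains.
 */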
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

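/* Extract the buffer of a completed RX descriptor together with its length
 * and info words. For WED RX queues the descriptor carries a token that is
 * resolved back to the rxwi entry holding the buffer.
 */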
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	u32 ctrl, desc_info, buf1;
	void *buf = e->buf;

	if (mt76_queue_is_wed_rro_ind(q))
		goto done;

	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
	if (len) {
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	desc_info = le32_to_cpu(desc->info);
	if (info)
		*info = desc_info;

	buf1 = le32_to_cpu(desc->buf1);
	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);

	if (mt76_queue_is_wed_rx(q)) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);
		if (drop)
			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
	} else {
		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
					SKB_WITH_OVERHEAD(q->buf_size),
					page_pool_get_dma_dir(q->page_pool));
	}

done:
	e->buf = NULL;
	return buf;
}

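/* Pop the descriptor at the queue tail once the hardware has marked it done
 * (or unconditionally when flushing) and return its buffer.
 */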
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (mt76_queue_is_wed_rro_data(q))
		return NULL;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		if (flush)
			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
			return NULL;
	}

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

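/* Map an skb as a single buffer and queue it without a txwi; the skb is
 * dropped if the queue is full or the MCU is resetting.
 */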
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
		goto error;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

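/* Main TX path: grab a txwi, DMA-map the skb head and all fragments, let the
 * driver fill the hardware descriptor via tx_prepare_skb(), then add the
 * buffers to the ring. On failure the mappings are undone and a TX status is
 * reported for the dropped frame.
 */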
static int
mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	if (test_bit(MT76_RESET, &phy->state))
		goto free_skb;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

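/* Refill an RX ring with page-pool buffers until it is full, then kick the
 * hardware. mt76_dma_rx_fill() is the locked wrapper around this helper.
 */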
static int
mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0;

	if (!q->ndesc)
		return 0;

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf = {};
		void *buf = NULL;
		int offset;

		if (mt76_queue_is_wed_rro_ind(q))
			goto done;

		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
		if (!buf)
			break;

		qbuf.addr = page_pool_get_dma_addr(virt_to_head_page(buf)) +
			    offset + q->buf_offset;
done:
		qbuf.len = len - q->buf_offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			mt76_put_page_pool_buf(buf, allow_direct);
			break;
		}
		frames++;
	}

	if (frames || mt76_queue_is_wed_rx(q))
		mt76_dma_kick_queue(dev, q);

	return frames;
}

int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
		     bool allow_direct)
{
	int frames;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);
	frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
	spin_unlock_bh(&q->lock);

	return frames;
}

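/* Allocate and initialize one DMA queue: map its registers, allocate the
 * descriptor ring and entry array, create the page pool, perform WED setup
 * and finally reset the ring (unless WED owns it).
 */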
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

#if defined(__linux__)
	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
#elif defined(__FreeBSD__)
	q->regs = (void *)((u8 *)dev->mmio.regs + ring_base + idx * MT_RING_SIZE);
#endif
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
					    : sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	if (mt76_queue_is_wed_rro_ind(q)) {
		struct mt76_wed_rro_desc *rro_desc;
		int i;

		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
		for (i = 0; i < q->ndesc; i++) {
			struct mt76_wed_rro_ind *cmd;

			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
			cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
		}
	}

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_create_page_pool(dev, q);
	if (ret)
		return ret;

	ret = mt76_wed_dma_setup(dev, q, false);
	if (ret)
		return ret;

	if (mtk_wed_device_active(&dev->mmio.wed)) {
		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
		    mt76_queue_is_wed_tx_free(q))
			return 0;
	}

	mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	do {
		spin_lock_bh(&q->lock);
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		spin_unlock_bh(&q->lock);

		if (!buf)
			break;

		if (!mt76_queue_is_wed_rro(q))
			mt76_put_page_pool_buf(buf, false);
	} while (1);

	spin_lock_bh(&q->lock);
	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);
}

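/* Reset one RX queue: mark descriptors done, drop all pending buffers, redo
 * the WED setup and, unless WED owns the ring, resync and refill it.
 */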
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	if (!q->ndesc)
		return;

	if (!mt76_queue_is_wed_rro_ind(q)) {
		int i;

		for (i = 0; i < q->ndesc; i++)
			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	}

	mt76_dma_rx_cleanup(dev, q);

	/* reset WED rx queues */
	mt76_wed_dma_setup(dev, q, true);

	if (mt76_queue_is_wed_tx_free(q))
		return;

	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    mt76_queue_is_wed_rro(q))
		return;

	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill_buf(dev, q, false);
}

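/* Append one more RX buffer to the frame being reassembled in q->rx_head and,
 * when the last fragment arrives, hand the completed skb to the driver.
 */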
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info, bool allow_direct)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
#if defined(__linux__)
		int offset = data - page_address(page) + q->buf_offset;
#elif defined(__FreeBSD__)
		int offset = (u8 *)data - (u8 *)page_address(page) + q->buf_offset;
#endif

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		mt76_put_page_pool_buf(data, allow_direct);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

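/* RX processing loop driven by NAPI: dequeue completed buffers up to the
 * budget, build skbs (or attach fragments to a pending skb), pass them to the
 * driver and refill the ring afterwards.
 */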
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool allow_direct = !mt76_queue_is_wed_rx(q);
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    mt76_queue_is_wed_tx_free(q)) {
		dma_idx = Q_READ(q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info,
					  allow_direct);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = napi_build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);
		skb_mark_for_recycle(skb);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		mt76_put_page_pool_buf(data, allow_direct);
	}

	mt76_dma_rx_fill(dev, q, true);
	return done;
}

int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = mt76_priv(napi->dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

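/* Allocate the dummy netdevs used as NAPI anchors, register one NAPI context
 * per RX queue and prefill the RX rings.
 */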
static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	struct mt76_dev **priv;
	int i;

	dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
	if (!dev->napi_dev)
		return -ENOMEM;

	/* napi_dev private data points to mt76_dev parent, so, mt76_dev
	 * can be retrieved given napi_dev
	 */
	priv = netdev_priv(dev->napi_dev);
	*priv = dev;

	dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
	if (!dev->tx_napi_dev) {
		free_netdev(dev->napi_dev);
		return -ENOMEM;
	}
	priv = netdev_priv(dev->tx_napi_dev);
	*priv = dev;

	snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev->threaded = 1;
	init_completion(&dev->mmio.wed_reset);
	init_completion(&dev->mmio.wed_reset_complete);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill_buf(dev, &dev->q_rx[i], false);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

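/* Tear everything down: flush all TX and MCU queues, remove the NAPI
 * contexts, release RX buffers and page pools, detach WED and free the
 * pending txwi/rxwi caches and dummy netdevs.
 */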
void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	napi_disable(&dev->tx_napi);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];

		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    mt76_queue_is_wed_rro(q))
			continue;

		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, q);

		page_pool_destroy(q->page_pool);
	}

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);

	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
		mtk_wed_device_detach(&dev->mmio.wed_hif2);

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);
	free_netdev(dev->napi_dev);
	free_netdev(dev->tx_napi_dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);