GitHub Repository: torvalds/linux
Path: blob/master/drivers/net/wireless/realtek/rtw88/pci.c
1
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2
/* Copyright(c) 2018-2019 Realtek Corporation
3
*/
4
5
#include <linux/module.h>
6
#include <linux/pci.h>
7
#include "main.h"
8
#include "pci.h"
9
#include "reg.h"
10
#include "tx.h"
11
#include "rx.h"
12
#include "fw.h"
13
#include "ps.h"
14
#include "debug.h"
15
#include "mac.h"
16
17
static bool rtw_disable_msi;
18
static bool rtw_pci_disable_aspm;
19
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
20
module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
21
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
22
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
23
24
static const u32 rtw_pci_tx_queue_idx_addr[] = {
25
[RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ,
26
[RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ,
27
[RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ,
28
[RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ,
29
[RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ,
30
[RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q,
31
[RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ,
32
};
33
34
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
35
enum rtw_tx_queue_type queue)
36
{
37
switch (queue) {
38
case RTW_TX_QUEUE_BCN:
39
return TX_DESC_QSEL_BEACON;
40
case RTW_TX_QUEUE_H2C:
41
return TX_DESC_QSEL_H2C;
42
case RTW_TX_QUEUE_MGMT:
43
return TX_DESC_QSEL_MGMT;
44
case RTW_TX_QUEUE_HI0:
45
return TX_DESC_QSEL_HIGH;
46
default:
47
return skb->priority;
48
}
49
}
50
51
static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
52
{
53
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
54
55
return readb(rtwpci->mmap + addr);
56
}
57
58
static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
59
{
60
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
61
62
return readw(rtwpci->mmap + addr);
63
}
64
65
static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
66
{
67
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
68
69
return readl(rtwpci->mmap + addr);
70
}
71
72
static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
73
{
74
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
75
76
writeb(val, rtwpci->mmap + addr);
77
}
78
79
static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
80
{
81
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
82
83
writew(val, rtwpci->mmap + addr);
84
}
85
86
static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
87
{
88
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
89
90
writel(val, rtwpci->mmap + addr);
91
}
92
93
static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
94
struct rtw_pci_tx_ring *tx_ring)
95
{
96
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
97
struct rtw_pci_tx_data *tx_data;
98
struct sk_buff *skb, *tmp;
99
dma_addr_t dma;
100
101
/* free every skb remaining in the tx list */
102
skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
103
__skb_unlink(skb, &tx_ring->queue);
104
tx_data = rtw_pci_get_tx_data(skb);
105
dma = tx_data->dma;
106
107
dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
108
dev_kfree_skb_any(skb);
109
}
110
}
111
112
static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
113
struct rtw_pci_tx_ring *tx_ring)
114
{
115
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
116
u8 *head = tx_ring->r.head;
117
u32 len = tx_ring->r.len;
118
int ring_sz = len * tx_ring->r.desc_size;
119
120
rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
121
122
/* free the ring itself */
123
dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
124
tx_ring->r.head = NULL;
125
}
126
127
static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
128
struct rtw_pci_rx_ring *rx_ring)
129
{
130
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
131
struct sk_buff *skb;
132
int buf_sz = RTK_PCI_RX_BUF_SIZE;
133
dma_addr_t dma;
134
int i;
135
136
for (i = 0; i < rx_ring->r.len; i++) {
137
skb = rx_ring->buf[i];
138
if (!skb)
139
continue;
140
141
dma = *((dma_addr_t *)skb->cb);
142
dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
143
dev_kfree_skb(skb);
144
rx_ring->buf[i] = NULL;
145
}
146
}
147
148
static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
149
struct rtw_pci_rx_ring *rx_ring)
150
{
151
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
152
u8 *head = rx_ring->r.head;
153
int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
154
155
rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
156
157
dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
158
}
159
160
static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
161
{
162
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
163
struct rtw_pci_tx_ring *tx_ring;
164
struct rtw_pci_rx_ring *rx_ring;
165
int i;
166
167
for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
168
tx_ring = &rtwpci->tx_rings[i];
169
rtw_pci_free_tx_ring(rtwdev, tx_ring);
170
}
171
172
for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
173
rx_ring = &rtwpci->rx_rings[i];
174
rtw_pci_free_rx_ring(rtwdev, rx_ring);
175
}
176
}
177
178
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
179
struct rtw_pci_tx_ring *tx_ring,
180
u8 desc_size, u32 len)
181
{
182
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
183
int ring_sz = desc_size * len;
184
dma_addr_t dma;
185
u8 *head;
186
187
if (len > TRX_BD_IDX_MASK) {
188
rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
189
return -EINVAL;
190
}
191
192
head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
193
if (!head) {
194
rtw_err(rtwdev, "failed to allocate tx ring\n");
195
return -ENOMEM;
196
}
197
198
skb_queue_head_init(&tx_ring->queue);
199
tx_ring->r.head = head;
200
tx_ring->r.dma = dma;
201
tx_ring->r.len = len;
202
tx_ring->r.desc_size = desc_size;
203
tx_ring->r.wp = 0;
204
tx_ring->r.rp = 0;
205
206
return 0;
207
}
208
209
static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
210
struct rtw_pci_rx_ring *rx_ring,
211
u32 idx, u32 desc_sz)
212
{
213
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
214
struct rtw_pci_rx_buffer_desc *buf_desc;
215
int buf_sz = RTK_PCI_RX_BUF_SIZE;
216
dma_addr_t dma;
217
218
if (!skb)
219
return -EINVAL;
220
221
dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
222
if (dma_mapping_error(&pdev->dev, dma))
223
return -EBUSY;
224
225
*((dma_addr_t *)skb->cb) = dma;
226
buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
227
idx * desc_sz);
228
memset(buf_desc, 0, sizeof(*buf_desc));
229
buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
230
buf_desc->dma = cpu_to_le32(dma);
231
232
return 0;
233
}
234
235
static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
236
struct rtw_pci_rx_ring *rx_ring,
237
u32 idx, u32 desc_sz)
238
{
239
struct device *dev = rtwdev->dev;
240
struct rtw_pci_rx_buffer_desc *buf_desc;
241
int buf_sz = RTK_PCI_RX_BUF_SIZE;
242
243
dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);
244
245
buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
246
idx * desc_sz);
247
memset(buf_desc, 0, sizeof(*buf_desc));
248
buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
249
buf_desc->dma = cpu_to_le32(dma);
250
}
251
252
static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
253
struct rtw_pci_rx_ring *rx_ring,
254
u8 desc_size, u32 len)
255
{
256
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
257
struct sk_buff *skb = NULL;
258
dma_addr_t dma;
259
u8 *head;
260
int ring_sz = desc_size * len;
261
int buf_sz = RTK_PCI_RX_BUF_SIZE;
262
int i, allocated;
263
int ret = 0;
264
265
head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
266
if (!head) {
267
rtw_err(rtwdev, "failed to allocate rx ring\n");
268
return -ENOMEM;
269
}
270
rx_ring->r.head = head;
271
272
for (i = 0; i < len; i++) {
273
skb = dev_alloc_skb(buf_sz);
274
if (!skb) {
275
allocated = i;
276
ret = -ENOMEM;
277
goto err_out;
278
}
279
280
memset(skb->data, 0, buf_sz);
281
rx_ring->buf[i] = skb;
282
ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
283
if (ret) {
284
allocated = i;
285
dev_kfree_skb_any(skb);
286
goto err_out;
287
}
288
}
289
290
rx_ring->r.dma = dma;
291
rx_ring->r.len = len;
292
rx_ring->r.desc_size = desc_size;
293
rx_ring->r.wp = 0;
294
rx_ring->r.rp = 0;
295
296
return 0;
297
298
err_out:
299
for (i = 0; i < allocated; i++) {
300
skb = rx_ring->buf[i];
301
if (!skb)
302
continue;
303
dma = *((dma_addr_t *)skb->cb);
304
dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
305
dev_kfree_skb_any(skb);
306
rx_ring->buf[i] = NULL;
307
}
308
dma_free_coherent(&pdev->dev, ring_sz, head, dma);
309
310
rtw_err(rtwdev, "failed to init rx buffer\n");
311
312
return ret;
313
}
314
315
static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
316
{
317
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
318
struct rtw_pci_tx_ring *tx_ring;
319
struct rtw_pci_rx_ring *rx_ring;
320
const struct rtw_chip_info *chip = rtwdev->chip;
321
int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
322
int tx_desc_size, rx_desc_size;
323
u32 len;
324
int ret;
325
326
tx_desc_size = chip->tx_buf_desc_sz;
327
328
for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
329
tx_ring = &rtwpci->tx_rings[i];
330
len = max_num_of_tx_queue(i);
331
ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
332
if (ret)
333
goto out;
334
}
335
336
rx_desc_size = chip->rx_buf_desc_sz;
337
338
for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
339
rx_ring = &rtwpci->rx_rings[j];
340
ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
341
RTK_MAX_RX_DESC_NUM);
342
if (ret)
343
goto out;
344
}
345
346
return 0;
347
348
out:
349
tx_alloced = i;
350
for (i = 0; i < tx_alloced; i++) {
351
tx_ring = &rtwpci->tx_rings[i];
352
rtw_pci_free_tx_ring(rtwdev, tx_ring);
353
}
354
355
rx_alloced = j;
356
for (j = 0; j < rx_alloced; j++) {
357
rx_ring = &rtwpci->rx_rings[j];
358
rtw_pci_free_rx_ring(rtwdev, rx_ring);
359
}
360
361
return ret;
362
}
363
364
static void rtw_pci_deinit(struct rtw_dev *rtwdev)
365
{
366
rtw_pci_free_trx_ring(rtwdev);
367
}
368
369
static int rtw_pci_init(struct rtw_dev *rtwdev)
370
{
371
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
372
int ret = 0;
373
374
rtwpci->irq_mask[0] = IMR_HIGHDOK |
375
IMR_MGNTDOK |
376
IMR_BKDOK |
377
IMR_BEDOK |
378
IMR_VIDOK |
379
IMR_VODOK |
380
IMR_ROK |
381
IMR_BCNDMAINT_E |
382
IMR_C2HCMD |
383
0;
384
rtwpci->irq_mask[1] = IMR_TXFOVW |
385
0;
386
rtwpci->irq_mask[3] = IMR_H2CDOK |
387
0;
388
spin_lock_init(&rtwpci->irq_lock);
389
spin_lock_init(&rtwpci->hwirq_lock);
390
ret = rtw_pci_init_trx_ring(rtwdev);
391
392
return ret;
393
}
394
395
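/* Program each ring's base address and length into the hardware and
 * reset the software read/write pointers.
 */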
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
396
{
397
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
398
u32 len;
399
u8 tmp;
400
dma_addr_t dma;
401
402
tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
403
rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
404
405
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
406
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
407
408
if (!rtw_chip_wcpu_8051(rtwdev)) {
409
len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
410
dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
411
rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
412
rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
413
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
414
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
415
}
416
417
len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
418
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
419
rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
420
rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
421
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
422
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
423
424
len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
425
dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
426
rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
427
rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
428
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
429
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
430
431
len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
432
dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
433
rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
434
rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
435
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
436
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
437
438
len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
439
dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
440
rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
441
rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
442
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
443
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
444
445
len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
446
dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
447
rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
448
rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
449
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
450
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
451
452
len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
453
dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
454
rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
455
rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
456
rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
457
rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
458
459
len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
460
dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
461
rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
462
rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
463
rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
464
rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
465
466
/* reset read/write pointers */
467
rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
468
469
/* reset H2C Queue index in a single write */
470
if (rtw_chip_wcpu_3081(rtwdev))
471
rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
472
BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
473
}
474
475
static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
476
{
477
rtw_pci_reset_buf_desc(rtwdev);
478
}
479
480
static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
481
struct rtw_pci *rtwpci, bool exclude_rx)
482
{
483
unsigned long flags;
484
u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;
485
486
spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
487
488
rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
489
rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
490
if (rtw_chip_wcpu_3081(rtwdev))
491
rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
492
493
rtwpci->irq_enabled = true;
494
495
spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
496
}
497
498
static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
499
struct rtw_pci *rtwpci)
500
{
501
unsigned long flags;
502
503
spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
504
505
if (!rtwpci->irq_enabled)
506
goto out;
507
508
rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
509
rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
510
if (rtw_chip_wcpu_3081(rtwdev))
511
rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
512
513
rtwpci->irq_enabled = false;
514
515
out:
516
spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
517
}
518
519
static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
520
{
521
/* reset dma and rx tag */
522
rtw_write32_set(rtwdev, RTK_PCI_CTRL,
523
BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
524
rtwpci->rx_tag = 0;
525
}
526
527
static int rtw_pci_setup(struct rtw_dev *rtwdev)
528
{
529
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
530
531
rtw_pci_reset_trx_ring(rtwdev);
532
rtw_pci_dma_reset(rtwdev, rtwpci);
533
534
return 0;
535
}
536
537
static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
538
{
539
struct rtw_pci_tx_ring *tx_ring;
540
enum rtw_tx_queue_type queue;
541
542
rtw_pci_reset_trx_ring(rtwdev);
543
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
544
tx_ring = &rtwpci->tx_rings[queue];
545
rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
546
}
547
}
548
549
static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
550
{
551
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
552
553
if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
554
return;
555
556
napi_enable(&rtwpci->napi);
557
}
558
559
static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
560
{
561
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
562
563
if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
564
return;
565
566
napi_synchronize(&rtwpci->napi);
567
napi_disable(&rtwpci->napi);
568
}
569
570
static int rtw_pci_start(struct rtw_dev *rtwdev)
571
{
572
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
573
574
rtw_pci_napi_start(rtwdev);
575
576
spin_lock_bh(&rtwpci->irq_lock);
577
rtwpci->running = true;
578
rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
579
spin_unlock_bh(&rtwpci->irq_lock);
580
581
return 0;
582
}
583
584
static void rtw_pci_stop(struct rtw_dev *rtwdev)
585
{
586
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
587
struct pci_dev *pdev = rtwpci->pdev;
588
589
spin_lock_bh(&rtwpci->irq_lock);
590
rtwpci->running = false;
591
rtw_pci_disable_interrupt(rtwdev, rtwpci);
592
spin_unlock_bh(&rtwpci->irq_lock);
593
594
synchronize_irq(pdev->irq);
595
rtw_pci_napi_stop(rtwdev);
596
597
spin_lock_bh(&rtwpci->irq_lock);
598
rtw_pci_dma_release(rtwdev, rtwpci);
599
spin_unlock_bh(&rtwpci->irq_lock);
600
}
601
602
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
603
{
604
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
605
struct rtw_pci_tx_ring *tx_ring;
606
enum rtw_tx_queue_type queue;
607
bool tx_empty = true;
608
609
if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
610
goto enter_deep_ps;
611
612
lockdep_assert_held(&rtwpci->irq_lock);
613
614
/* TX DMA is not allowed while in the deep PS state */
615
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
616
/* The BCN queue carries reserved pages and has no DMA interrupt;
617
 * the H2C queue is managed by the firmware.
618
*/
619
if (queue == RTW_TX_QUEUE_BCN ||
620
queue == RTW_TX_QUEUE_H2C)
621
continue;
622
623
tx_ring = &rtwpci->tx_rings[queue];
624
625
/* check if any skb is still being DMA'd */
626
if (skb_queue_len(&tx_ring->queue)) {
627
tx_empty = false;
628
break;
629
}
630
}
631
632
if (!tx_empty) {
633
rtw_dbg(rtwdev, RTW_DBG_PS,
634
"TX path not empty, cannot enter deep power save state\n");
635
return;
636
}
637
enter_deep_ps:
638
set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
639
rtw_power_mode_change(rtwdev, true);
640
}
641
642
static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
643
{
644
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
645
646
lockdep_assert_held(&rtwpci->irq_lock);
647
648
if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
649
rtw_power_mode_change(rtwdev, false);
650
}
651
652
static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
653
{
654
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
655
656
spin_lock_bh(&rtwpci->irq_lock);
657
658
if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
659
rtw_pci_deep_ps_enter(rtwdev);
660
661
if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
662
rtw_pci_deep_ps_leave(rtwdev);
663
664
spin_unlock_bh(&rtwpci->irq_lock);
665
}
666
667
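/* Unmap and free the previously queued reserved-page skb (if any) so the
 * beacon queue only holds the latest one.
 */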
static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
668
struct rtw_pci_tx_ring *ring)
669
{
670
struct sk_buff *prev = skb_dequeue(&ring->queue);
671
struct rtw_pci_tx_data *tx_data;
672
dma_addr_t dma;
673
674
if (!prev)
675
return;
676
677
tx_data = rtw_pci_get_tx_data(prev);
678
dma = tx_data->dma;
679
dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
680
dev_kfree_skb_any(prev);
681
}
682
683
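/* The driver keeps an RX tag that increments for every received buffer
 * and checks it against the descriptor contents; a mismatch suggests a
 * stalled or misbehaving DMA engine.
 */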
static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
684
struct rtw_pci_rx_ring *rx_ring,
685
u32 idx)
686
{
687
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
688
const struct rtw_chip_info *chip = rtwdev->chip;
689
struct rtw_pci_rx_buffer_desc *buf_desc;
690
u32 desc_sz = chip->rx_buf_desc_sz;
691
u16 total_pkt_size;
692
693
buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
694
idx * desc_sz);
695
total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
696
697
/* rx tag mismatch, throw a warning */
698
if (total_pkt_size != rtwpci->rx_tag)
699
rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
700
701
rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
702
}
703
704
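/* Read the hardware read pointer of a TX ring; it lives in the upper
 * 16 bits of the ring's BD index register.
 */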
static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
705
{
706
u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
707
u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);
708
709
return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
710
}
711
712
static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
713
{
714
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
715
struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
716
u32 cur_rp;
717
u8 i;
718
719
/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is a
720
 * bit variable, it's hard to define a reasonable fixed total timeout to
721
 * use the read_poll_timeout* helpers. Instead, bound the number of
722
 * polling iterations, so we just use a for loop with udelay here.
723
*/
724
for (i = 0; i < 30; i++) {
725
cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
726
if (cur_rp == ring->r.wp)
727
return;
728
729
udelay(1);
730
}
731
732
if (!drop)
733
rtw_dbg(rtwdev, RTW_DBG_UNEXP,
734
"timed out to flush pci tx ring[%d]\n", pci_q);
735
}
736
737
static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
738
bool drop)
739
{
740
u8 q;
741
742
for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
743
/* Unnecessary to flush BCN, H2C and HI tx queues. */
744
if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C ||
745
q == RTW_TX_QUEUE_HI0)
746
continue;
747
748
if (pci_queues & BIT(q))
749
__pci_flush_queue(rtwdev, q, drop);
750
}
751
}
752
753
static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
754
{
755
u32 pci_queues = 0;
756
u8 i;
757
758
/* If all of the hardware queues are requested to flush,
759
* flush all of the pci queues.
760
*/
761
if (queues == BIT(rtwdev->hw->queues) - 1) {
762
pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
763
} else {
764
for (i = 0; i < rtwdev->hw->queues; i++)
765
if (queues & BIT(i))
766
pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
767
}
768
769
__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
770
}
771
772
static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
773
enum rtw_tx_queue_type queue)
774
{
775
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
776
struct rtw_pci_tx_ring *ring;
777
u32 bd_idx;
778
779
ring = &rtwpci->tx_rings[queue];
780
bd_idx = rtw_pci_tx_queue_idx_addr[queue];
781
782
spin_lock_bh(&rtwpci->irq_lock);
783
if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
784
rtw_pci_deep_ps_leave(rtwdev);
785
rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
786
spin_unlock_bh(&rtwpci->irq_lock);
787
}
788
789
static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
790
{
791
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
792
enum rtw_tx_queue_type queue;
793
794
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
795
if (test_and_clear_bit(queue, rtwpci->tx_queued))
796
rtw_pci_tx_kick_off_queue(rtwdev, queue);
797
}
798
799
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
800
struct rtw_tx_pkt_info *pkt_info,
801
struct sk_buff *skb,
802
enum rtw_tx_queue_type queue)
803
{
804
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
805
const struct rtw_chip_info *chip = rtwdev->chip;
806
struct rtw_pci_tx_ring *ring;
807
struct rtw_pci_tx_data *tx_data;
808
dma_addr_t dma;
809
u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
810
u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
811
u32 size;
812
u32 psb_len;
813
u8 *pkt_desc;
814
struct rtw_pci_tx_buffer_desc *buf_desc;
815
816
ring = &rtwpci->tx_rings[queue];
817
818
size = skb->len;
819
820
if (queue == RTW_TX_QUEUE_BCN)
821
rtw_pci_release_rsvd_page(rtwpci, ring);
822
else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
823
return -ENOSPC;
824
825
pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
826
memset(pkt_desc, 0, tx_pkt_desc_sz);
827
pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
828
rtw_tx_fill_tx_desc(rtwdev, pkt_info, skb);
829
dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
830
DMA_TO_DEVICE);
831
if (dma_mapping_error(&rtwpci->pdev->dev, dma))
832
return -EBUSY;
833
834
/* after this point the DMA is mapped, there is no way back */
835
buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
836
memset(buf_desc, 0, tx_buf_desc_sz);
837
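/* psb_len holds the total buffer length in 128-byte units, rounded up;
 * for the beacon queue the descriptor OWN bit is also set in this field.
 */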
psb_len = (skb->len - 1) / 128 + 1;
838
if (queue == RTW_TX_QUEUE_BCN)
839
psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
840
841
buf_desc[0].psb_len = cpu_to_le16(psb_len);
842
buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
843
buf_desc[0].dma = cpu_to_le32(dma);
844
buf_desc[1].buf_size = cpu_to_le16(size);
845
buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
846
847
tx_data = rtw_pci_get_tx_data(skb);
848
tx_data->dma = dma;
849
tx_data->sn = pkt_info->sn;
850
851
spin_lock_bh(&rtwpci->irq_lock);
852
853
skb_queue_tail(&ring->queue, skb);
854
855
if (queue == RTW_TX_QUEUE_BCN)
856
goto out_unlock;
857
858
/* update write-index, and kick it off later */
859
set_bit(queue, rtwpci->tx_queued);
860
if (++ring->r.wp >= ring->r.len)
861
ring->r.wp = 0;
862
863
out_unlock:
864
spin_unlock_bh(&rtwpci->irq_lock);
865
866
return 0;
867
}
868
869
static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
870
u32 size)
871
{
872
struct sk_buff *skb;
873
struct rtw_tx_pkt_info pkt_info = {0};
874
u8 reg_bcn_work;
875
int ret;
876
877
skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
878
if (!skb)
879
return -ENOMEM;
880
881
ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
882
if (ret) {
883
rtw_err(rtwdev, "failed to write rsvd page data\n");
884
return ret;
885
}
886
887
/* reserved pages go through beacon queue */
888
reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
889
reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
890
rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
891
892
return 0;
893
}
894
895
static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
896
{
897
struct sk_buff *skb;
898
struct rtw_tx_pkt_info pkt_info = {0};
899
int ret;
900
901
skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
902
if (!skb)
903
return -ENOMEM;
904
905
ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
906
if (ret) {
907
rtw_err(rtwdev, "failed to write h2c data\n");
908
return ret;
909
}
910
911
rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);
912
913
return 0;
914
}
915
916
static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
917
struct rtw_tx_pkt_info *pkt_info,
918
struct sk_buff *skb)
919
{
920
enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
921
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
922
struct rtw_pci_tx_ring *ring;
923
int ret;
924
925
ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
926
if (ret)
927
return ret;
928
929
ring = &rtwpci->tx_rings[queue];
930
spin_lock_bh(&rtwpci->irq_lock);
931
if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
932
ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
933
ring->queue_stopped = true;
934
}
935
spin_unlock_bh(&rtwpci->irq_lock);
936
937
return 0;
938
}
939
940
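/* Reclaim TX buffers the hardware has finished with (up to its current
 * read pointer): unmap the DMA, wake a stopped queue if space frees up,
 * and report TX status back to mac80211.
 */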
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
941
u8 hw_queue)
942
{
943
struct ieee80211_hw *hw = rtwdev->hw;
944
struct ieee80211_tx_info *info;
945
struct rtw_pci_tx_ring *ring;
946
struct rtw_pci_tx_data *tx_data;
947
struct sk_buff *skb;
948
u32 count;
949
u32 bd_idx_addr;
950
u32 bd_idx, cur_rp, rp_idx;
951
u16 q_map;
952
953
ring = &rtwpci->tx_rings[hw_queue];
954
955
bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
956
bd_idx = rtw_read32(rtwdev, bd_idx_addr);
957
cur_rp = bd_idx >> 16;
958
cur_rp &= TRX_BD_IDX_MASK;
959
rp_idx = ring->r.rp;
960
if (cur_rp >= ring->r.rp)
961
count = cur_rp - ring->r.rp;
962
else
963
count = ring->r.len - (ring->r.rp - cur_rp);
964
965
while (count--) {
966
skb = skb_dequeue(&ring->queue);
967
if (!skb) {
968
rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
969
count, hw_queue, bd_idx, ring->r.rp, cur_rp);
970
break;
971
}
972
tx_data = rtw_pci_get_tx_data(skb);
973
dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
974
DMA_TO_DEVICE);
975
976
/* just free command packets from host to card */
977
if (hw_queue == RTW_TX_QUEUE_H2C) {
978
dev_kfree_skb_irq(skb);
979
continue;
980
}
981
982
if (ring->queue_stopped &&
983
avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
984
q_map = skb_get_queue_mapping(skb);
985
ieee80211_wake_queue(hw, q_map);
986
ring->queue_stopped = false;
987
}
988
989
if (++rp_idx >= ring->r.len)
990
rp_idx = 0;
991
992
skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
993
994
info = IEEE80211_SKB_CB(skb);
995
996
/* enqueue to wait for tx report */
997
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
998
rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
999
continue;
1000
}
1001
1002
/* always report ACK for the others so they won't be marked as dropped */
1003
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
1004
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
1005
else
1006
info->flags |= IEEE80211_TX_STAT_ACK;
1007
1008
ieee80211_tx_info_clear_status(info);
1009
ieee80211_tx_status_irqsafe(hw, skb);
1010
}
1011
1012
ring->r.rp = cur_rp;
1013
}
1014
1015
static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
1016
{
1017
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1018
struct napi_struct *napi = &rtwpci->napi;
1019
1020
napi_schedule(napi);
1021
}
1022
1023
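/* Number of RX buffers the hardware has filled since the driver last
 * advanced its write pointer, taking ring wrap-around into account.
 */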
static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
1024
struct rtw_pci *rtwpci)
1025
{
1026
struct rtw_pci_rx_ring *ring;
1027
int count = 0;
1028
u32 tmp, cur_wp;
1029
1030
ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1031
tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
1032
cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
1033
if (cur_wp >= ring->r.wp)
1034
count = cur_wp - ring->r.wp;
1035
else
1036
count = ring->r.len - (ring->r.wp - cur_wp);
1037
1038
return count;
1039
}
1040
1041
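/* Process up to @limit frames from the MPDU RX ring: copy each frame into
 * a freshly allocated skb, pass it to mac80211 (or the C2H handler), then
 * hand the original buffer back to the hardware. Returns the number of
 * frames delivered to mac80211.
 */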
static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
1042
u8 hw_queue, u32 limit)
1043
{
1044
const struct rtw_chip_info *chip = rtwdev->chip;
1045
struct napi_struct *napi = &rtwpci->napi;
1046
struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
1047
struct rtw_rx_pkt_stat pkt_stat;
1048
struct ieee80211_rx_status rx_status;
1049
struct sk_buff *skb, *new;
1050
u32 cur_rp = ring->r.rp;
1051
u32 count, rx_done = 0;
1052
u32 pkt_offset;
1053
u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
1054
u32 buf_desc_sz = chip->rx_buf_desc_sz;
1055
u32 new_len;
1056
u8 *rx_desc;
1057
dma_addr_t dma;
1058
1059
count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
1060
count = min(count, limit);
1061
1062
while (count--) {
1063
rtw_pci_dma_check(rtwdev, ring, cur_rp);
1064
skb = ring->buf[cur_rp];
1065
dma = *((dma_addr_t *)skb->cb);
1066
dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
1067
DMA_FROM_DEVICE);
1068
rx_desc = skb->data;
1069
rtw_rx_query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
1070
1071
/* offset from rx_desc to payload */
1072
pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
1073
pkt_stat.shift;
1074
1075
/* allocate a new skb for this frame,
1076
* discard the frame if none available
1077
*/
1078
new_len = pkt_stat.pkt_len + pkt_offset;
1079
new = dev_alloc_skb(new_len);
1080
if (WARN_ONCE(!new, "rx routine starvation\n"))
1081
goto next_rp;
1082
1083
/* put the DMA data including rx_desc from phy to new skb */
1084
skb_put_data(new, skb->data, new_len);
1085
1086
if (pkt_stat.is_c2h) {
1087
rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
1088
} else {
1089
/* remove rx_desc */
1090
skb_pull(new, pkt_offset);
1091
1092
rtw_update_rx_freq_for_invalid(rtwdev, new, &rx_status, &pkt_stat);
1093
rtw_rx_stats(rtwdev, pkt_stat.vif, new);
1094
memcpy(new->cb, &rx_status, sizeof(rx_status));
1095
ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
1096
rx_done++;
1097
}
1098
1099
next_rp:
1100
/* new skb delivered to mac80211, re-enable original skb DMA */
1101
rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
1102
buf_desc_sz);
1103
1104
/* host read next element in ring */
1105
if (++cur_rp >= ring->r.len)
1106
cur_rp = 0;
1107
}
1108
1109
ring->r.rp = cur_rp;
1110
/* 'rp', the last position we have read, is treated as the previous position
1111
 * of 'wp' and is used to calculate 'count' next time.
1112
*/
1113
ring->r.wp = cur_rp;
1114
rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
1115
1116
return rx_done;
1117
}
1118
1119
static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
1120
struct rtw_pci *rtwpci, u32 *irq_status)
1121
{
1122
unsigned long flags;
1123
1124
spin_lock_irqsave(&rtwpci->hwirq_lock, flags);
1125
1126
irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
1127
irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
1128
if (rtw_chip_wcpu_3081(rtwdev))
1129
irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
1130
else
1131
irq_status[3] = 0;
1132
irq_status[0] &= rtwpci->irq_mask[0];
1133
irq_status[1] &= rtwpci->irq_mask[1];
1134
irq_status[3] &= rtwpci->irq_mask[3];
1135
rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
1136
rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
1137
if (rtw_chip_wcpu_3081(rtwdev))
1138
rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
1139
1140
spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
1141
}
1142
1143
static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
1144
{
1145
struct rtw_dev *rtwdev = dev;
1146
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1147
1148
/* disable RTW PCI interrupt to avoid more interrupts before the end of
1149
* thread function
1150
*
1151
* disable HIMR here to also avoid new HISR flag being raised before
1152
* the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
1153
* are cleared, the edge-triggered interrupt will not be generated when
1154
* a new HISR flag is set.
1155
*/
1156
rtw_pci_disable_interrupt(rtwdev, rtwpci);
1157
1158
return IRQ_WAKE_THREAD;
1159
}
1160
1161
static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
1162
{
1163
struct rtw_dev *rtwdev = dev;
1164
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1165
u32 irq_status[4];
1166
bool rx = false;
1167
1168
spin_lock_bh(&rtwpci->irq_lock);
1169
rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
1170
1171
if (irq_status[0] & IMR_MGNTDOK)
1172
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
1173
if (irq_status[0] & IMR_HIGHDOK)
1174
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
1175
if (irq_status[0] & IMR_BEDOK)
1176
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
1177
if (irq_status[0] & IMR_BKDOK)
1178
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
1179
if (irq_status[0] & IMR_VODOK)
1180
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
1181
if (irq_status[0] & IMR_VIDOK)
1182
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
1183
if (irq_status[3] & IMR_H2CDOK)
1184
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
1185
if (irq_status[0] & IMR_ROK) {
1186
rtw_pci_rx_isr(rtwdev);
1187
rx = true;
1188
}
1189
if (unlikely(irq_status[0] & IMR_C2HCMD))
1190
rtw_fw_c2h_cmd_isr(rtwdev);
1191
1192
/* all of the jobs for this interrupt have been done */
1193
if (rtwpci->running)
1194
rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
1195
spin_unlock_bh(&rtwpci->irq_lock);
1196
1197
return IRQ_HANDLED;
1198
}
1199
1200
static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
1201
struct pci_dev *pdev)
1202
{
1203
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1204
unsigned long len;
1205
u8 bar_id = 2;
1206
int ret;
1207
1208
ret = pci_request_regions(pdev, KBUILD_MODNAME);
1209
if (ret) {
1210
rtw_err(rtwdev, "failed to request pci regions\n");
1211
return ret;
1212
}
1213
1214
len = pci_resource_len(pdev, bar_id);
1215
rtwpci->mmap = pci_iomap(pdev, bar_id, len);
1216
if (!rtwpci->mmap) {
1217
pci_release_regions(pdev);
1218
rtw_err(rtwdev, "failed to map pci memory\n");
1219
return -ENOMEM;
1220
}
1221
1222
return 0;
1223
}
1224
1225
static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
1226
struct pci_dev *pdev)
1227
{
1228
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1229
1230
if (rtwpci->mmap) {
1231
pci_iounmap(pdev, rtwpci->mmap);
1232
pci_release_regions(pdev);
1233
}
1234
}
1235
1236
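/* Indirect byte write through the DBI interface: program the data and
 * address, raise the write flag, then poll until the hardware clears it.
 */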
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
1237
{
1238
u16 write_addr;
1239
u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
1240
u8 flag;
1241
u8 cnt;
1242
1243
write_addr = addr & BITS_DBI_ADDR_MASK;
1244
write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
1245
rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
1246
rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
1247
rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);
1248
1249
for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1250
flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1251
if (flag == 0)
1252
return;
1253
1254
udelay(10);
1255
}
1256
1257
WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
1258
}
1259
1260
static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
1261
{
1262
u16 read_addr = addr & BITS_DBI_ADDR_MASK;
1263
u8 flag;
1264
u8 cnt;
1265
1266
rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
1267
rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);
1268
1269
for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1270
flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
1271
if (flag == 0) {
1272
read_addr = REG_DBI_RDATA_V1 + (addr & 3);
1273
*value = rtw_read8(rtwdev, read_addr);
1274
return 0;
1275
}
1276
1277
udelay(10);
1278
}
1279
1280
WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
1281
return -EIO;
1282
}
1283
1284
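/* Indirect write through the MDIO interface: program the data, select the
 * register page (which also encodes gen1 vs gen2), set the address, then
 * poll the write flag until the hardware clears it.
 */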
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
1285
{
1286
u8 page;
1287
u8 wflag;
1288
u8 cnt;
1289
1290
rtw_write16(rtwdev, REG_MDIO_V1, data);
1291
1292
page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
1293
page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
1294
rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
1295
rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
1296
rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
1297
1298
for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
1299
wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
1300
BIT_MDIO_WFLAG_V1);
1301
if (wflag == 0)
1302
return;
1303
1304
udelay(10);
1305
}
1306
1307
WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
1308
}
1309
1310
static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
1311
{
1312
u8 value;
1313
int ret;
1314
1315
if (rtw_pci_disable_aspm)
1316
return;
1317
1318
ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1319
if (ret) {
1320
rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1321
return;
1322
}
1323
1324
if (enable)
1325
value |= BIT_CLKREQ_SW_EN;
1326
else
1327
value &= ~BIT_CLKREQ_SW_EN;
1328
1329
rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1330
}
1331
1332
static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
1333
{
1334
u8 value;
1335
int ret;
1336
1337
ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1338
if (ret) {
1339
rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
1340
return;
1341
}
1342
1343
if (enable)
1344
value &= ~BIT_CLKREQ_N_PAD;
1345
else
1346
value |= BIT_CLKREQ_N_PAD;
1347
1348
rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1349
}
1350
1351
static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
1352
{
1353
u8 value;
1354
int ret;
1355
1356
if (rtw_pci_disable_aspm)
1357
return;
1358
1359
ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
1360
if (ret) {
1361
rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
1362
return;
1363
}
1364
1365
if (enable)
1366
value |= BIT_L1_SW_EN;
1367
else
1368
value &= ~BIT_L1_SW_EN;
1369
1370
rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
1371
}
1372
1373
static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
1374
{
1375
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1376
1377
/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
1378
 * only be enabled when the host supports it.
1379
*
1380
 * The ASPM mechanism should only be enabled when the driver/firmware
1381
 * enters power save mode and there is no heavy traffic, because we have
1382
 * seen interoperability issues where the link tends to enter the L1
1383
 * state on the fly even while the driver is pushing high throughput.
1384
 * This is probably because ASPM behavior varies slightly between
1385
 * different SoCs.
1386
*/
1387
if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
1388
return;
1389
1390
if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
1391
(!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
1392
rtw_pci_aspm_set(rtwdev, enter);
1393
}
1394
1395
static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
1396
{
1397
const struct rtw_chip_info *chip = rtwdev->chip;
1398
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1399
struct pci_dev *pdev = rtwpci->pdev;
1400
u16 link_ctrl;
1401
int ret;
1402
1403
/* RTL8822CE has REFCLK auto calibration enabled, so it does not need
1404
 * added clock delay to cover the REFCLK timing gap.
1405
*/
1406
if (chip->id == RTW_CHIP_TYPE_8822C)
1407
rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);
1408
1409
/* Although there is a standard PCIE configuration space to set the
1410
 * link control register, by Realtek's design the driver should
1411
 * check whether the host supports CLKREQ/ASPM before enabling the HW module.
1412
*
1413
 * These functions are implemented by two associated HW modules:
1414
 * one is responsible for accessing the PCIE configuration space to
1415
 * follow the host settings, and the other carries out the
1416
 * CLKREQ/ASPM mechanisms and is disabled by default, because sometimes
1417
 * the host does not support it, and wrong settings
1418
 * (ex. CLKREQ# not bi-directional) could lead to losing the device
1419
 * if the HW misbehaves on the link.
1420
*
1421
 * Hence the driver is designed to first check that the PCIE
1422
 * configuration space is synced and enabled, and only then turn
1423
 * on the other module that actually implements the mechanism.
1424
*/
1425
ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
1426
if (ret) {
1427
rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
1428
return;
1429
}
1430
1431
if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
1432
rtw_pci_clkreq_set(rtwdev, true);
1433
1434
rtwpci->link_ctrl = link_ctrl;
1435
}
1436
1437
static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
1438
{
1439
const struct rtw_chip_info *chip = rtwdev->chip;
1440
1441
switch (chip->id) {
1442
case RTW_CHIP_TYPE_8822C:
1443
if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
1444
rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
1445
BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
1446
break;
1447
default:
1448
break;
1449
}
1450
}
1451
1452
static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
1453
{
1454
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1455
const struct rtw_chip_info *chip = rtwdev->chip;
1456
struct rtw_efuse *efuse = &rtwdev->efuse;
1457
struct pci_dev *pdev = rtwpci->pdev;
1458
const struct rtw_intf_phy_para *para;
1459
u16 cut;
1460
u16 value;
1461
u16 offset;
1462
int i;
1463
int ret;
1464
1465
cut = BIT(0) << rtwdev->hal.cut_version;
1466
1467
for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
1468
para = &chip->intf_table->gen1_para[i];
1469
if (!(para->cut_mask & cut))
1470
continue;
1471
if (para->offset == 0xffff)
1472
break;
1473
offset = para->offset;
1474
value = para->value;
1475
if (para->ip_sel == RTW_IP_SEL_PHY)
1476
rtw_mdio_write(rtwdev, offset, value, true);
1477
else
1478
rtw_dbi_write8(rtwdev, offset, value);
1479
}
1480
1481
for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
1482
para = &chip->intf_table->gen2_para[i];
1483
if (!(para->cut_mask & cut))
1484
continue;
1485
if (para->offset == 0xffff)
1486
break;
1487
offset = para->offset;
1488
value = para->value;
1489
if (para->ip_sel == RTW_IP_SEL_PHY)
1490
rtw_mdio_write(rtwdev, offset, value, false);
1491
else
1492
rtw_dbi_write8(rtwdev, offset, value);
1493
}
1494
1495
rtw_pci_link_cfg(rtwdev);
1496
1497
/* Disable 8821ce completion timeout by default */
1498
if (chip->id == RTW_CHIP_TYPE_8821C) {
1499
ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
1500
PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
1501
if (ret)
1502
rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
1503
ret);
1504
}
1505
1506
if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 5)
1507
rtw_write32_mask(rtwdev, REG_ANAPARSW_MAC_0, BIT_CF_L_V2, 0x1);
1508
}
1509
1510
static int __maybe_unused rtw_pci_suspend(struct device *dev)
1511
{
1512
struct ieee80211_hw *hw = dev_get_drvdata(dev);
1513
struct rtw_dev *rtwdev = hw->priv;
1514
const struct rtw_chip_info *chip = rtwdev->chip;
1515
struct rtw_efuse *efuse = &rtwdev->efuse;
1516
1517
if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1518
rtw_pci_clkreq_pad_low(rtwdev, true);
1519
return 0;
1520
}
1521
1522
static int __maybe_unused rtw_pci_resume(struct device *dev)
1523
{
1524
struct ieee80211_hw *hw = dev_get_drvdata(dev);
1525
struct rtw_dev *rtwdev = hw->priv;
1526
const struct rtw_chip_info *chip = rtwdev->chip;
1527
struct rtw_efuse *efuse = &rtwdev->efuse;
1528
1529
if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
1530
rtw_pci_clkreq_pad_low(rtwdev, false);
1531
return 0;
1532
}
1533
1534
SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
1535
EXPORT_SYMBOL(rtw_pm_ops);
1536
1537
static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1538
{
1539
int ret;
1540
1541
ret = pci_enable_device(pdev);
1542
if (ret) {
1543
rtw_err(rtwdev, "failed to enable pci device\n");
1544
return ret;
1545
}
1546
1547
pci_set_master(pdev);
1548
pci_set_drvdata(pdev, rtwdev->hw);
1549
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
1550
1551
return 0;
1552
}
1553
1554
static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1555
{
1556
pci_disable_device(pdev);
1557
}
1558
1559
static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1560
{
1561
struct rtw_pci *rtwpci;
1562
int ret;
1563
1564
rtwpci = (struct rtw_pci *)rtwdev->priv;
1565
rtwpci->pdev = pdev;
1566
1567
/* after this the driver can access the hw registers */
1568
ret = rtw_pci_io_mapping(rtwdev, pdev);
1569
if (ret) {
1570
rtw_err(rtwdev, "failed to request pci io region\n");
1571
goto err_out;
1572
}
1573
1574
ret = rtw_pci_init(rtwdev);
1575
if (ret) {
1576
rtw_err(rtwdev, "failed to allocate pci resources\n");
1577
goto err_io_unmap;
1578
}
1579
1580
return 0;
1581
1582
err_io_unmap:
1583
rtw_pci_io_unmapping(rtwdev, pdev);
1584
1585
err_out:
1586
return ret;
1587
}
1588
1589
static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1590
{
1591
rtw_pci_deinit(rtwdev);
1592
rtw_pci_io_unmapping(rtwdev, pdev);
1593
}
1594
1595
static const struct rtw_hci_ops rtw_pci_ops = {
1596
.tx_write = rtw_pci_tx_write,
1597
.tx_kick_off = rtw_pci_tx_kick_off,
1598
.flush_queues = rtw_pci_flush_queues,
1599
.setup = rtw_pci_setup,
1600
.start = rtw_pci_start,
1601
.stop = rtw_pci_stop,
1602
.deep_ps = rtw_pci_deep_ps,
1603
.link_ps = rtw_pci_link_ps,
1604
.interface_cfg = rtw_pci_interface_cfg,
1605
.dynamic_rx_agg = NULL,
1606
.write_firmware_page = rtw_write_firmware_page,
1607
1608
.read8 = rtw_pci_read8,
1609
.read16 = rtw_pci_read16,
1610
.read32 = rtw_pci_read32,
1611
.write8 = rtw_pci_write8,
1612
.write16 = rtw_pci_write16,
1613
.write32 = rtw_pci_write32,
1614
.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
1615
.write_data_h2c = rtw_pci_write_data_h2c,
1616
};
1617
1618
static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1619
{
1620
unsigned int flags = PCI_IRQ_INTX;
1621
int ret;
1622
1623
if (!rtw_disable_msi)
1624
flags |= PCI_IRQ_MSI;
1625
1626
ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
1627
if (ret < 0) {
1628
rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
1629
return ret;
1630
}
1631
1632
ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
1633
rtw_pci_interrupt_handler,
1634
rtw_pci_interrupt_threadfn,
1635
IRQF_SHARED, KBUILD_MODNAME, rtwdev);
1636
if (ret) {
1637
rtw_err(rtwdev, "failed to request irq %d\n", ret);
1638
pci_free_irq_vectors(pdev);
1639
}
1640
1641
return ret;
1642
}
1643
1644
static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
1645
{
1646
devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
1647
pci_free_irq_vectors(pdev);
1648
}
1649
1650
static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
1651
{
1652
struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
1653
struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
1654
priv);
1655
int work_done = 0;
1656
1657
if (rtwpci->rx_no_aspm)
1658
rtw_pci_link_ps(rtwdev, false);
1659
1660
while (work_done < budget) {
1661
u32 work_done_once;
1662
1663
work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
1664
budget - work_done);
1665
if (work_done_once == 0)
1666
break;
1667
work_done += work_done_once;
1668
}
1669
if (work_done < budget) {
1670
napi_complete_done(napi, work_done);
1671
spin_lock_bh(&rtwpci->irq_lock);
1672
if (rtwpci->running)
1673
rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
1674
spin_unlock_bh(&rtwpci->irq_lock);
1675
/* If the ISR fires during polling, before napi_complete, and no
1676
 * further data is received, data on the dma_ring will not be
1677
 * processed immediately. Check whether the dma ring is empty and
1678
 * perform napi_schedule accordingly.
1679
*/
1680
if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
1681
napi_schedule(napi);
1682
}
1683
if (rtwpci->rx_no_aspm)
1684
rtw_pci_link_ps(rtwdev, true);
1685
1686
return work_done;
1687
}
1688
1689
static int rtw_pci_napi_init(struct rtw_dev *rtwdev)
1690
{
1691
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1692
1693
rtwpci->netdev = alloc_netdev_dummy(0);
1694
if (!rtwpci->netdev)
1695
return -ENOMEM;
1696
1697
netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
1698
return 0;
1699
}
1700
1701
static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
1702
{
1703
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
1704
1705
rtw_pci_napi_stop(rtwdev);
1706
netif_napi_del(&rtwpci->napi);
1707
free_netdev(rtwpci->netdev);
1708
}
1709
1710
static pci_ers_result_t rtw_pci_io_err_detected(struct pci_dev *pdev,
1711
pci_channel_state_t state)
1712
{
1713
struct net_device *netdev = pci_get_drvdata(pdev);
1714
1715
netif_device_detach(netdev);
1716
1717
return PCI_ERS_RESULT_NEED_RESET;
1718
}
1719
1720
static pci_ers_result_t rtw_pci_io_slot_reset(struct pci_dev *pdev)
1721
{
1722
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1723
struct rtw_dev *rtwdev = hw->priv;
1724
1725
rtw_fw_recovery(rtwdev);
1726
1727
return PCI_ERS_RESULT_RECOVERED;
1728
}
1729
1730
static void rtw_pci_io_resume(struct pci_dev *pdev)
1731
{
1732
struct net_device *netdev = pci_get_drvdata(pdev);
1733
1734
/* ack any pending wake events, disable PME */
1735
pci_enable_wake(pdev, PCI_D0, 0);
1736
1737
netif_device_attach(netdev);
1738
}
1739
1740
const struct pci_error_handlers rtw_pci_err_handler = {
1741
.error_detected = rtw_pci_io_err_detected,
1742
.slot_reset = rtw_pci_io_slot_reset,
1743
.resume = rtw_pci_io_resume,
1744
};
1745
EXPORT_SYMBOL(rtw_pci_err_handler);
1746
1747
int rtw_pci_probe(struct pci_dev *pdev,
1748
const struct pci_device_id *id)
1749
{
1750
struct pci_dev *bridge = pci_upstream_bridge(pdev);
1751
struct ieee80211_hw *hw;
1752
struct rtw_dev *rtwdev;
1753
struct rtw_pci *rtwpci;
1754
int drv_data_size;
1755
int ret;
1756
1757
drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
1758
hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
1759
if (!hw) {
1760
dev_err(&pdev->dev, "failed to allocate hw\n");
1761
return -ENOMEM;
1762
}
1763
1764
rtwdev = hw->priv;
1765
rtwdev->hw = hw;
1766
rtwdev->dev = &pdev->dev;
1767
rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
1768
rtwdev->hci.ops = &rtw_pci_ops;
1769
rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
1770
1771
rtwpci = (struct rtw_pci *)rtwdev->priv;
1772
atomic_set(&rtwpci->link_usage, 1);
1773
1774
ret = rtw_core_init(rtwdev);
1775
if (ret)
1776
goto err_release_hw;
1777
1778
rtw_dbg(rtwdev, RTW_DBG_PCI,
1779
"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
1780
pdev->vendor, pdev->device, pdev->revision);
1781
1782
ret = rtw_pci_claim(rtwdev, pdev);
1783
if (ret) {
1784
rtw_err(rtwdev, "failed to claim pci device\n");
1785
goto err_deinit_core;
1786
}
1787
1788
ret = rtw_pci_setup_resource(rtwdev, pdev);
1789
if (ret) {
1790
rtw_err(rtwdev, "failed to setup pci resources\n");
1791
goto err_pci_declaim;
1792
}
1793
1794
ret = rtw_pci_napi_init(rtwdev);
1795
if (ret) {
1796
rtw_err(rtwdev, "failed to setup NAPI\n");
1797
goto err_pci_declaim;
1798
}
1799
1800
ret = rtw_chip_info_setup(rtwdev);
1801
if (ret) {
1802
rtw_err(rtwdev, "failed to setup chip information\n");
1803
goto err_destroy_pci;
1804
}
1805
1806
/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
1807
if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
1808
rtwpci->rx_no_aspm = true;
1809
1810
rtw_pci_phy_cfg(rtwdev);
1811
1812
ret = rtw_register_hw(rtwdev, hw);
1813
if (ret) {
1814
rtw_err(rtwdev, "failed to register hw\n");
1815
goto err_destroy_pci;
1816
}
1817
1818
ret = rtw_pci_request_irq(rtwdev, pdev);
1819
if (ret) {
1820
ieee80211_unregister_hw(hw);
1821
goto err_destroy_pci;
1822
}
1823
1824
return 0;
1825
1826
err_destroy_pci:
1827
rtw_pci_napi_deinit(rtwdev);
1828
rtw_pci_destroy(rtwdev, pdev);
1829
1830
err_pci_declaim:
1831
rtw_pci_declaim(rtwdev, pdev);
1832
1833
err_deinit_core:
1834
rtw_core_deinit(rtwdev);
1835
1836
err_release_hw:
1837
ieee80211_free_hw(hw);
1838
1839
return ret;
1840
}
1841
EXPORT_SYMBOL(rtw_pci_probe);
1842
1843
void rtw_pci_remove(struct pci_dev *pdev)
1844
{
1845
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1846
struct rtw_dev *rtwdev;
1847
struct rtw_pci *rtwpci;
1848
1849
if (!hw)
1850
return;
1851
1852
rtwdev = hw->priv;
1853
rtwpci = (struct rtw_pci *)rtwdev->priv;
1854
1855
rtw_unregister_hw(rtwdev, hw);
1856
rtw_pci_disable_interrupt(rtwdev, rtwpci);
1857
rtw_pci_napi_deinit(rtwdev);
1858
rtw_pci_destroy(rtwdev, pdev);
1859
rtw_pci_declaim(rtwdev, pdev);
1860
rtw_pci_free_irq(rtwdev, pdev);
1861
rtw_core_deinit(rtwdev);
1862
ieee80211_free_hw(hw);
1863
}
1864
EXPORT_SYMBOL(rtw_pci_remove);
1865
1866
void rtw_pci_shutdown(struct pci_dev *pdev)
1867
{
1868
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1869
struct rtw_dev *rtwdev;
1870
const struct rtw_chip_info *chip;
1871
1872
if (!hw)
1873
return;
1874
1875
rtwdev = hw->priv;
1876
chip = rtwdev->chip;
1877
1878
if (chip->ops->shutdown)
1879
chip->ops->shutdown(rtwdev);
1880
1881
pci_set_power_state(pdev, PCI_D3hot);
1882
}
1883
EXPORT_SYMBOL(rtw_pci_shutdown);
1884
1885
MODULE_AUTHOR("Realtek Corporation");
1886
MODULE_DESCRIPTION("Realtek PCI 802.11ac wireless driver");
1887
MODULE_LICENSE("Dual BSD/GPL");
1888
1889