// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/dma/fsl-edma-main.c
 *
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 * Copyright 2024 NXP
 *
 * Driver for the Freescale eDMA engine with flexible channel multiplexing
 * capability for DMA request sources. The eDMA block can be found on some
 * Vybrid, Layerscape and S32G SoCs.
 */

#include <dt-bindings/dma/fsl-edma.h>
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>

#include "fsl-edma-common.h"

static void fsl_edma_synchronize(struct dma_chan *chan)
{
	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

	vchan_synchronize(&fsl_chan->vchan);
}

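/*
 * Shared transfer-complete handler for controllers with a global INT
 * register: scan the per-channel interrupt bits, clear each pending one
 * and let the common code complete the corresponding channel.
 */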
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int intr, ch;
	struct edma_regs *regs = &fsl_edma->regs;

	intr = edma_readl(fsl_edma, regs->intl);
	if (!intr)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (intr & (0x1 << ch)) {
			edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
			fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
		}
	}
	return IRQ_HANDLED;
}

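/*
 * eDMA v3 error handling: read and clear the channel error status under
 * the channel lock, stop further hardware requests (clear ERQ), then log
 * the reported error causes and mark the channel as failed.
 */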
static void fsl_edma3_err_check(struct fsl_edma_chan *fsl_chan)
{
	unsigned int ch_err;
	u32 val;

	scoped_guard(spinlock, &fsl_chan->vchan.lock) {
		ch_err = edma_readl_chreg(fsl_chan, ch_es);
		if (!(ch_err & EDMA_V3_CH_ERR))
			return;

		edma_writel_chreg(fsl_chan, EDMA_V3_CH_ERR, ch_es);
		val = edma_readl_chreg(fsl_chan, ch_csr);
		val &= ~EDMA_V3_CH_CSR_ERQ;
		edma_writel_chreg(fsl_chan, val, ch_csr);
	}

	/* Ignore this interrupt since channel has been disabled already */
	if (!fsl_chan->edesc)
		return;

	if (ch_err & EDMA_V3_CH_ERR_DBE)
		dev_err(&fsl_chan->pdev->dev, "Destination Bus Error interrupt.\n");

	if (ch_err & EDMA_V3_CH_ERR_SBE)
		dev_err(&fsl_chan->pdev->dev, "Source Bus Error interrupt.\n");

	if (ch_err & EDMA_V3_CH_ERR_SGE)
		dev_err(&fsl_chan->pdev->dev, "Scatter/Gather Configuration Error interrupt.\n");

	if (ch_err & EDMA_V3_CH_ERR_NCE)
		dev_err(&fsl_chan->pdev->dev, "NBYTES/CITER Configuration Error interrupt.\n");

	if (ch_err & EDMA_V3_CH_ERR_DOE)
		dev_err(&fsl_chan->pdev->dev, "Destination Offset Error interrupt.\n");

	if (ch_err & EDMA_V3_CH_ERR_DAE)
		dev_err(&fsl_chan->pdev->dev, "Destination Address Error interrupt.\n");

	if (ch_err & EDMA_V3_CH_ERR_SOE)
		dev_err(&fsl_chan->pdev->dev, "Source Offset Error interrupt.\n");

	if (ch_err & EDMA_V3_CH_ERR_SAE)
		dev_err(&fsl_chan->pdev->dev, "Source Address Error interrupt.\n");

	if (ch_err & EDMA_V3_CH_ERR_ECX)
		dev_err(&fsl_chan->pdev->dev, "Transfer Canceled interrupt.\n");

	if (ch_err & EDMA_V3_CH_ERR_UCE)
		dev_err(&fsl_chan->pdev->dev, "Uncorrectable TCD error during channel execution interrupt.\n");

	fsl_chan->status = DMA_ERROR;
}

static irqreturn_t fsl_edma3_err_handler_per_chan(int irq, void *dev_id)
{
	struct fsl_edma_chan *fsl_chan = dev_id;

	fsl_edma3_err_check(fsl_chan);

	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma3_err_handler_shared(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int ch;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (fsl_edma->chan_masked & BIT(ch))
			continue;

		fsl_edma3_err_check(&fsl_edma->chans[ch]);
	}

	return IRQ_HANDLED;
}

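/* Per-channel transfer-complete handler for eDMA v3/v4 style controllers. */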
static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_chan *fsl_chan = dev_id;
	unsigned int intr;

	intr = edma_readl_chreg(fsl_chan, ch_int);
	if (!intr)
		return IRQ_NONE;

	edma_writel_chreg(fsl_chan, 1, ch_int);

	fsl_edma_tx_chan_handler(fsl_chan);

	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma2_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_chan *fsl_chan = dev_id;

	return fsl_edma_tx_handler(irq, fsl_chan->edma);
}

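/*
 * Helpers for controllers whose channel completion interrupts are ORed
 * together in groups: channels 0-15 and 16-31 each share one IRQ line.
 */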
static irqreturn_t fsl_edma3_or_tx_handler(int irq, void *dev_id,
					   u8 start, u8 end)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	struct fsl_edma_chan *chan;
	int i;

	end = min(end, fsl_edma->n_chans);

	for (i = start; i < end; i++) {
		chan = &fsl_edma->chans[i];

		fsl_edma3_tx_handler(irq, chan);
	}

	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma3_tx_0_15_handler(int irq, void *dev_id)
{
	return fsl_edma3_or_tx_handler(irq, dev_id, 0, 16);
}

static irqreturn_t fsl_edma3_tx_16_31_handler(int irq, void *dev_id)
{
	return fsl_edma3_or_tx_handler(irq, dev_id, 16, 32);
}

static irqreturn_t fsl_edma3_or_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	struct edma_regs *regs = &fsl_edma->regs;
	unsigned int err, ch, ch_es;
	struct fsl_edma_chan *chan;

	err = edma_readl(fsl_edma, regs->es);
	if (!(err & EDMA_V3_MP_ES_VLD))
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		chan = &fsl_edma->chans[ch];

		ch_es = edma_readl_chreg(chan, ch_es);
		if (!(ch_es & EDMA_V3_CH_ES_ERR))
			continue;

		edma_writel_chreg(chan, EDMA_V3_CH_ES_ERR, ch_es);
		fsl_edma_disable_request(chan);
		fsl_edma->chans[ch].status = DMA_ERROR;
	}

	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int err, ch;
	struct edma_regs *regs = &fsl_edma->regs;

	err = edma_readl(fsl_edma, regs->errl);
	if (!err)
		return IRQ_NONE;

	for (ch = 0; ch < fsl_edma->n_chans; ch++) {
		if (err & (0x1 << ch)) {
			fsl_edma_disable_request(&fsl_edma->chans[ch]);
			edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
			fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
{
	if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
		return IRQ_HANDLED;

	return fsl_edma_err_handler(irq, dev_id);
}

static bool fsl_edma_srcid_in_use(struct fsl_edma_engine *fsl_edma, u32 srcid)
{
	struct fsl_edma_chan *fsl_chan;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];

		if (fsl_chan->srcid && srcid == fsl_chan->srcid) {
			dev_err(&fsl_chan->pdev->dev, "The requested srcid is already in use.\n");
			return true;
		}
	}
	return false;
}

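/*
 * of_dma translation for DMAMUX-based controllers: the two DT cells are
 * the mux instance and the request source ID. Pick a free channel behind
 * the requested mux and program the mux for that source.
 */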
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
	unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;

	if (dma_spec->args_count != 2)
		return NULL;

	guard(mutex)(&fsl_edma->fsl_edma_mutex);

	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
		if (chan->client_count)
			continue;

		if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[1]))
			return NULL;

		if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			if (chan) {
				chan->device->privatecnt++;
				fsl_chan = to_fsl_edma_chan(chan);
				fsl_chan->srcid = dma_spec->args[1];

				if (!fsl_chan->srcid) {
					dev_err(&fsl_chan->pdev->dev, "Invalid srcid %d\n",
						fsl_chan->srcid);
					return NULL;
				}

				fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid,
						  true);
				return chan;
			}
		}
	}
	return NULL;
}

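/*
 * of_dma translation for eDMA v3/v4 controllers: the three DT cells are
 * the channel (or request source when a channel mux is present), the
 * priority, and a flags word (FSL_EDMA_RX/REMOTE/MULTI_FIFO/EVEN_CH/ODD_CH).
 */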
static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
	struct dma_chan *chan, *_chan;
	struct fsl_edma_chan *fsl_chan;
	bool b_chmux;
	int i;

	if (dma_spec->args_count != 3)
		return NULL;

	b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);

	guard(mutex)(&fsl_edma->fsl_edma_mutex);
	list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
				 device_node) {

		if (chan->client_count)
			continue;

		fsl_chan = to_fsl_edma_chan(chan);
		if (fsl_edma_srcid_in_use(fsl_edma, dma_spec->args[0]))
			return NULL;
		i = fsl_chan - fsl_edma->chans;

		fsl_chan->priority = dma_spec->args[1];
		fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
		fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
		fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;

		if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
			continue;

		if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
			continue;

		if (!b_chmux && i == dma_spec->args[0]) {
			chan = dma_get_slave_channel(chan);
			chan->device->privatecnt++;
			return chan;
		} else if (b_chmux && !fsl_chan->srcid) {
			/* If the controller has a channel mux, pick any free channel */
			chan = dma_get_slave_channel(chan);
			chan->device->privatecnt++;
			fsl_chan->srcid = dma_spec->args[0];
			return chan;
		}
	}
	return NULL;
}

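/*
 * IRQ setup for legacy controllers: "edma-tx" and "edma-err" may map to
 * the same interrupt line, in which case one combined handler is used.
 */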
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	int ret;

	edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);

	fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
	if (fsl_edma->txirq < 0)
		return fsl_edma->txirq;

	fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
	if (fsl_edma->errirq < 0)
		return fsl_edma->errirq;

	if (fsl_edma->txirq == fsl_edma->errirq) {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				       fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
			return ret;
		}
	} else {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
				       fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
			return ret;
		}

		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
				       fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
		if (ret) {
			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
			return ret;
		}
	}

	return 0;
}

static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	char *errirq_name;
	int i, ret;

	for (i = 0; i < fsl_edma->n_chans; i++) {

		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];

		if (fsl_edma->chan_masked & BIT(i))
			continue;

		/* request channel irq */
		fsl_chan->txirq = platform_get_irq(pdev, i);
		if (fsl_chan->txirq < 0)
			return -EINVAL;

		fsl_chan->irq_handler = fsl_edma3_tx_handler;

		if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE)) {
			fsl_chan->errirq = fsl_chan->txirq;
			fsl_chan->errirq_handler = fsl_edma3_err_handler_per_chan;
		}
	}

	/* All channels share one error IRQ */
	if (fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE) {
		/* The last interrupt is the error IRQ */
		fsl_edma->errirq = platform_get_irq_optional(pdev, fsl_edma->n_chans);
		if (fsl_edma->errirq < 0)
			return 0; /* DT provides no error IRQ; treat it as the no-error-IRQ case */

		errirq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-err",
					     dev_name(&pdev->dev));

		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, fsl_edma3_err_handler_shared,
				       0, errirq_name, fsl_edma);
		if (ret)
			return dev_err_probe(&pdev->dev, ret, "Can't register eDMA err IRQ.\n");
	}

	return 0;
}

static int fsl_edma3_or_irq_init(struct platform_device *pdev,
				 struct fsl_edma_engine *fsl_edma)
{
	int ret;

	fsl_edma->txirq = platform_get_irq_byname(pdev, "tx-0-15");
	if (fsl_edma->txirq < 0)
		return fsl_edma->txirq;

	fsl_edma->txirq_16_31 = platform_get_irq_byname(pdev, "tx-16-31");
	if (fsl_edma->txirq_16_31 < 0)
		return fsl_edma->txirq_16_31;

	fsl_edma->errirq = platform_get_irq_byname(pdev, "err");
	if (fsl_edma->errirq < 0)
		return fsl_edma->errirq;

	ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
			       fsl_edma3_tx_0_15_handler, 0, "eDMA tx0_15",
			       fsl_edma);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "Can't register eDMA tx0_15 IRQ.\n");

	if (fsl_edma->n_chans > 16) {
		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq_16_31,
				       fsl_edma3_tx_16_31_handler, 0,
				       "eDMA tx16_31", fsl_edma);
		if (ret)
			return dev_err_probe(&pdev->dev, ret,
					     "Can't register eDMA tx16_31 IRQ.\n");
	}

	ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
			       fsl_edma3_or_err_handler, 0, "eDMA err",
			       fsl_edma);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "Can't register eDMA err IRQ.\n");

	return 0;
}

static int
fsl_edma2_irq_init(struct platform_device *pdev,
		   struct fsl_edma_engine *fsl_edma)
{
	int i, ret, irq;
	int count;

	edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);

	count = platform_irq_count(pdev);
	dev_dbg(&pdev->dev, "%s Found %d interrupts\n", __func__, count);
	if (count <= 2) {
		dev_err(&pdev->dev, "Interrupts in DTS not correct.\n");
		return -EINVAL;
	}
	/*
	 * On i.MX7ULP there are 16 independent channel interrupts plus one
	 * error interrupt. Two channels share one interrupt line, for example
	 * ch0/ch16, ch1/ch17, and so on. For now, simply request the IRQs
	 * without the IRQF_SHARED flag, since 16 channels are enough on
	 * i.MX7ULP, whose M4 domain owns some of the peripherals.
	 */
	for (i = 0; i < count; i++) {
		irq = platform_get_irq(pdev, i);
		ret = 0;
		if (irq < 0)
			return -ENXIO;

		/* The last IRQ is for eDMA err */
		if (i == count - 1) {
			fsl_edma->errirq = irq;
			ret = devm_request_irq(&pdev->dev, irq,
					       fsl_edma_err_handler,
					       0, "eDMA2-ERR", fsl_edma);
		} else {
			fsl_edma->chans[i].txirq = irq;
			fsl_edma->chans[i].irq_handler = fsl_edma2_tx_handler;
		}

		if (ret)
			return ret;
	}

	return 0;
}

static void fsl_edma_irq_exit(
		struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	if (fsl_edma->txirq == fsl_edma->errirq) {
		if (fsl_edma->txirq >= 0)
			devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
	} else {
		if (fsl_edma->txirq >= 0)
			devm_free_irq(&pdev->dev, fsl_edma->txirq, fsl_edma);
		if (fsl_edma->errirq >= 0)
			devm_free_irq(&pdev->dev, fsl_edma->errirq, fsl_edma);
	}
}

static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
{
	int i;

	for (i = 0; i < nr_clocks; i++)
		clk_disable_unprepare(fsl_edma->muxclk[i]);
}

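/* Per-SoC configuration: register layout, channel mux and IRQ wiring. */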
static struct fsl_edma_drvdata vf610_data = {
	.dmamuxs = DMAMUX_NR,
	.flags = FSL_EDMA_DRV_WRAP_IO,
	.chreg_off = EDMA_TCD,
	.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
	.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata ls1028a_data = {
	.dmamuxs = DMAMUX_NR,
	.flags = FSL_EDMA_DRV_MUX_SWAP | FSL_EDMA_DRV_WRAP_IO,
	.chreg_off = EDMA_TCD,
	.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
	.setup_irq = fsl_edma_irq_init,
};

static struct fsl_edma_drvdata imx7ulp_data = {
	.dmamuxs = 1,
	.chreg_off = EDMA_TCD,
	.chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
	.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_CONFIG32,
	.setup_irq = fsl_edma2_irq_init,
};

static struct fsl_edma_drvdata imx8qm_data = {
	.flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE
		 | FSL_EDMA_DRV_ERRIRQ_SHARE,
	.chreg_space_sz = 0x10000,
	.chreg_off = 0x10000,
	.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx8ulp_data = {
	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK |
		 FSL_EDMA_DRV_EDMA3,
	.chreg_space_sz = 0x10000,
	.chreg_off = 0x10000,
	.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
	.mux_skip = 0x10000,
	.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx93_data3 = {
	.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_ERRIRQ_SHARE,
	.chreg_space_sz = 0x10000,
	.chreg_off = 0x10000,
	.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx93_data4 = {
	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4
		 | FSL_EDMA_DRV_ERRIRQ_SHARE,
	.chreg_space_sz = 0x8000,
	.chreg_off = 0x10000,
	.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
	.mux_skip = 0x8000,
	.setup_irq = fsl_edma3_irq_init,
};

static struct fsl_edma_drvdata imx95_data5 = {
	.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
		 FSL_EDMA_DRV_TCD64 | FSL_EDMA_DRV_ERRIRQ_SHARE,
	.chreg_space_sz = 0x8000,
	.chreg_off = 0x10000,
	.mux_off = 0x200,
	.mux_skip = sizeof(u32),
	.setup_irq = fsl_edma3_irq_init,
};

static const struct fsl_edma_drvdata s32g2_data = {
	.dmamuxs = DMAMUX_NR,
	.chreg_space_sz = EDMA_TCD,
	.chreg_off = 0x4000,
	.flags = FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MUX_SWAP,
	.setup_irq = fsl_edma3_or_irq_init,
};

static const struct of_device_id fsl_edma_dt_ids[] = {
	{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
	{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
	{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
	{ .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
	{ .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data},
	{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
	{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
	{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
	{ .compatible = "nxp,s32g2-edma", .data = &s32g2_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);

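/*
 * On SoCs with FSL_EDMA_DRV_HAS_PD each channel sits in its own power
 * domain; attach every unmasked channel to its domain and keep it
 * runtime-PM managed through a device link.
 */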
static void fsl_edma3_detach_pd(struct fsl_edma_engine *fsl_edma)
{
	struct fsl_edma_chan *fsl_chan;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		if (fsl_edma->chan_masked & BIT(i))
			continue;
		fsl_chan = &fsl_edma->chans[i];
		if (fsl_chan->pd_dev_link)
			device_link_del(fsl_chan->pd_dev_link);
		if (fsl_chan->pd_dev) {
			dev_pm_domain_detach(fsl_chan->pd_dev, false);
			pm_runtime_dont_use_autosuspend(fsl_chan->pd_dev);
			pm_runtime_set_suspended(fsl_chan->pd_dev);
		}
	}
}

static void devm_fsl_edma3_detach_pd(void *data)
{
	fsl_edma3_detach_pd(data);
}

static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
	struct fsl_edma_chan *fsl_chan;
	struct device *pd_chan;
	struct device *dev;
	int i;

	dev = &pdev->dev;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		if (fsl_edma->chan_masked & BIT(i))
			continue;

		fsl_chan = &fsl_edma->chans[i];

		pd_chan = dev_pm_domain_attach_by_id(dev, i);
		if (IS_ERR_OR_NULL(pd_chan)) {
			dev_err(dev, "Failed to attach pd %d\n", i);
			goto detach;
		}

		fsl_chan->pd_dev_link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
							DL_FLAG_PM_RUNTIME |
							DL_FLAG_RPM_ACTIVE);
		if (!fsl_chan->pd_dev_link) {
			dev_err(dev, "Failed to add device_link to %d\n", i);
			dev_pm_domain_detach(pd_chan, false);
			goto detach;
		}

		fsl_chan->pd_dev = pd_chan;

		pm_runtime_use_autosuspend(fsl_chan->pd_dev);
		pm_runtime_set_autosuspend_delay(fsl_chan->pd_dev, 200);
		pm_runtime_set_active(fsl_chan->pd_dev);
	}

	return 0;

detach:
	fsl_edma3_detach_pd(fsl_edma);
	return -EINVAL;
}

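/*
 * Probe: map registers, set up clocks, channels and IRQs, then register
 * the dmaengine device and the OF DMA controller.
 */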
static int fsl_edma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma;
	const struct fsl_edma_drvdata *drvdata = NULL;
	u32 chan_mask[2] = {0, 0};
	char clk_name[36];
	struct edma_regs *regs;
	int chans;
	int ret, i;

	drvdata = device_get_match_data(&pdev->dev);
	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
				GFP_KERNEL);
	if (!fsl_edma)
		return -ENOMEM;

	fsl_edma->errirq = -EINVAL;
	fsl_edma->txirq = -EINVAL;
	fsl_edma->drvdata = drvdata;
	fsl_edma->n_chans = chans;
	mutex_init(&fsl_edma->fsl_edma_mutex);

	fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fsl_edma->membase))
		return PTR_ERR(fsl_edma->membase);

	if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) {
		fsl_edma_setup_regs(fsl_edma);
		regs = &fsl_edma->regs;
	}

	if (drvdata->flags & FSL_EDMA_DRV_HAS_DMACLK) {
		fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
		if (IS_ERR(fsl_edma->dmaclk)) {
			dev_err(&pdev->dev, "Missing DMA block clock.\n");
			return PTR_ERR(fsl_edma->dmaclk);
		}
	}

	ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);

	if (ret > 0) {
		fsl_edma->chan_masked = chan_mask[1];
		fsl_edma->chan_masked <<= 32;
		fsl_edma->chan_masked |= chan_mask[0];
	}

	for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
		char clkname[32];

		fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
								      1 + i);
		if (IS_ERR(fsl_edma->muxbase[i])) {
			/* on error: disable all previously enabled clks */
			fsl_disable_clocks(fsl_edma, i);
			return PTR_ERR(fsl_edma->muxbase[i]);
		}

		sprintf(clkname, "dmamux%d", i);
		fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
		if (IS_ERR(fsl_edma->muxclk[i])) {
			dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
			/* on error: disable all previously enabled clks */
			return PTR_ERR(fsl_edma->muxclk[i]);
		}
	}

	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");

	if (drvdata->flags & FSL_EDMA_DRV_HAS_PD) {
		ret = fsl_edma3_attach_pd(pdev, fsl_edma);
		if (ret)
			return ret;
		ret = devm_add_action_or_reset(&pdev->dev, devm_fsl_edma3_detach_pd, fsl_edma);
		if (ret)
			return ret;
	}

	if (drvdata->flags & FSL_EDMA_DRV_TCD64)
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

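	/*
	 * Initialize every unmasked channel: per-channel names, TCD and mux
	 * addresses, optional channel clock, and its virtual DMA channel.
	 */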
	INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
	for (i = 0; i < fsl_edma->n_chans; i++) {
		struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
		int len;

		if (fsl_edma->chan_masked & BIT(i))
			continue;

		snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
			 dev_name(&pdev->dev), i);

		snprintf(fsl_chan->errirq_name, sizeof(fsl_chan->errirq_name),
			 "%s-CH%02d-err", dev_name(&pdev->dev), i);

		fsl_chan->edma = fsl_edma;
		fsl_chan->pm_state = RUNNING;
		fsl_chan->srcid = 0;
		fsl_chan->dma_dir = DMA_NONE;
		fsl_chan->vchan.desc_free = fsl_edma_free_desc;

		len = (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) ?
				offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
		fsl_chan->tcd = fsl_edma->membase
				+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
		fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;

		if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
			snprintf(clk_name, sizeof(clk_name), "ch%02d", i);
			fsl_chan->clk = devm_clk_get_enabled(&pdev->dev,
							     (const char *)clk_name);

			if (IS_ERR(fsl_chan->clk))
				return PTR_ERR(fsl_chan->clk);
		}
		fsl_chan->pdev = pdev;
		vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);

		edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
		fsl_edma_chan_mux(fsl_chan, 0, false);
		if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK)
			clk_disable_unprepare(fsl_chan->clk);
	}

	ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
	if (ret)
		return ret;

	dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
	dma_cap_set(DMA_MEMCPY, fsl_edma->dma_dev.cap_mask);

	fsl_edma->dma_dev.dev = &pdev->dev;
	fsl_edma->dma_dev.device_alloc_chan_resources
		= fsl_edma_alloc_chan_resources;
	fsl_edma->dma_dev.device_free_chan_resources
		= fsl_edma_free_chan_resources;
	fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
	fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
	fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
	fsl_edma->dma_dev.device_prep_dma_memcpy = fsl_edma_prep_memcpy;
	fsl_edma->dma_dev.device_config = fsl_edma_slave_config;
	fsl_edma->dma_dev.device_pause = fsl_edma_pause;
	fsl_edma->dma_dev.device_resume = fsl_edma_resume;
	fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
	fsl_edma->dma_dev.device_synchronize = fsl_edma_synchronize;
	fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;

	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;

	if (drvdata->flags & FSL_EDMA_DRV_BUS_8BYTE) {
		fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
		fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	}

	fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	if (drvdata->flags & FSL_EDMA_DRV_DEV_TO_DEV)
		fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);

	fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
					DMAENGINE_ALIGN_64_BYTES :
					DMAENGINE_ALIGN_32_BYTES;

	/* For the worst case of 'nbytes' = 1, take CITER as the max segment size */
	dma_set_max_seg_size(fsl_edma->dma_dev.dev,
			     FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));

	fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	platform_set_drvdata(pdev, fsl_edma);

	ret = dma_async_device_register(&fsl_edma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA engine. (%d)\n", ret);
		return ret;
	}

	ret = of_dma_controller_register(np,
			drvdata->dmamuxs ? fsl_edma_xlate : fsl_edma3_xlate,
			fsl_edma);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register Freescale eDMA of_dma. (%d)\n", ret);
		dma_async_device_unregister(&fsl_edma->dma_dev);
		return ret;
	}

	/* enable round robin arbitration */
	if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
		edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

static void fsl_edma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);

	fsl_edma_irq_exit(pdev, fsl_edma);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_edma->dma_dev);
	fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
	fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
}

static int fsl_edma_suspend_late(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	unsigned long flags;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		if (fsl_edma->chan_masked & BIT(i))
			continue;
		spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
		/* Make sure the channel is idle, otherwise force-disable it. */
		if (unlikely(fsl_chan->status == DMA_IN_PROGRESS)) {
			dev_warn(dev, "WARN: There is a non-idle channel.\n");
			fsl_edma_disable_request(fsl_chan);
			fsl_edma_chan_mux(fsl_chan, 0, false);
		}

		fsl_chan->pm_state = SUSPENDED;
		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	}

	return 0;
}

static int fsl_edma_resume_early(struct device *dev)
{
	struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
	struct fsl_edma_chan *fsl_chan;
	struct edma_regs *regs = &fsl_edma->regs;
	int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		fsl_chan = &fsl_edma->chans[i];
		if (fsl_edma->chan_masked & BIT(i))
			continue;
		fsl_chan->pm_state = RUNNING;
		edma_write_tcdreg(fsl_chan, 0, csr);
		if (fsl_chan->srcid != 0)
			fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true);
	}

	if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
		edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);

	return 0;
}

/*
 * eDMA provides services to other devices, so it should suspend late and
 * resume early. While eDMA is suspended, all of its clients should have
 * stopped their DMA transfers and left their channels idle.
 */
static const struct dev_pm_ops fsl_edma_pm_ops = {
	.suspend_late = fsl_edma_suspend_late,
	.resume_early = fsl_edma_resume_early,
};

static struct platform_driver fsl_edma_driver = {
	.driver = {
		.name = "fsl-edma",
		.of_match_table = fsl_edma_dt_ids,
		.pm = &fsl_edma_pm_ops,
	},
	.probe = fsl_edma_probe,
	.remove = fsl_edma_remove,
};

static int __init fsl_edma_init(void)
{
	return platform_driver_register(&fsl_edma_driver);
}
subsys_initcall(fsl_edma_init);

static void __exit fsl_edma_exit(void)
{
	platform_driver_unregister(&fsl_edma_driver);
}
module_exit(fsl_edma_exit);

MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");