GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/imx-dma.c
// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-dma.c
//
// This file contains a driver for the Freescale i.MX DMA engine
// found on i.MX1/21/27
//
// Copyright 2010 Sascha Hauer, Pengutronix <[email protected]>
// Copyright 2012 Javier Martin, Vista Silicon <[email protected]>

#include <linux/err.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/dma/imx-dma.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS 16
#define IMX_DMA_CHANNELS 16

#define IMX_DMA_2D_SLOTS 2
#define IMX_DMA_2D_SLOT_A 0
#define IMX_DMA_2D_SLOT_B 1

#define IMX_DMA_LENGTH_LOOP ((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32 (0 << 4)
#define IMX_DMA_MEMSIZE_8 (1 << 4)
#define IMX_DMA_MEMSIZE_16 (2 << 4)
#define IMX_DMA_TYPE_LINEAR (0 << 10)
#define IMX_DMA_TYPE_2D (1 << 10)
#define IMX_DMA_TYPE_FIFO (2 << 10)

#define IMX_DMA_ERR_BURST (1 << 0)
#define IMX_DMA_ERR_REQUEST (1 << 1)
#define IMX_DMA_ERR_TRANSFER (1 << 2)
#define IMX_DMA_ERR_BUFFER (1 << 3)
#define IMX_DMA_ERR_TIMEOUT (1 << 4)

#define DMA_DCR 0x00 /* Control Register */
#define DMA_DISR 0x04 /* Interrupt status Register */
#define DMA_DIMR 0x08 /* Interrupt mask Register */
#define DMA_DBTOSR 0x0c /* Burst timeout status Register */
#define DMA_DRTOSR 0x10 /* Request timeout Register */
#define DMA_DSESR 0x14 /* Transfer Error Status Register */
#define DMA_DBOSR 0x18 /* Buffer overflow status Register */
#define DMA_DBTOCR 0x1c /* Burst timeout control Register */
#define DMA_WSRA 0x40 /* W-Size Register A */
#define DMA_XSRA 0x44 /* X-Size Register A */
#define DMA_YSRA 0x48 /* Y-Size Register A */
#define DMA_WSRB 0x4c /* W-Size Register B */
#define DMA_XSRB 0x50 /* X-Size Register B */
#define DMA_YSRB 0x54 /* Y-Size Register B */
#define DMA_SAR(x) (0x80 + ((x) << 6)) /* Source Address Registers */
#define DMA_DAR(x) (0x84 + ((x) << 6)) /* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6)) /* Count Registers */
#define DMA_CCR(x) (0x8c + ((x) << 6)) /* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6)) /* Request source select Registers */
#define DMA_BLR(x) (0x94 + ((x) << 6)) /* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6)) /* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6)) /* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6)) /* Channel counter Registers */

#define DCR_DRST (1<<1)
#define DCR_DEN (1<<0)
#define DBTOCR_EN (1<<15)
#define DBTOCR_CNT(x) ((x) & 0x7fff)
#define CNTR_CNT(x) ((x) & 0xffffff)
#define CCR_ACRPT (1<<14)
#define CCR_DMOD_LINEAR (0x0 << 12)
#define CCR_DMOD_2D (0x1 << 12)
#define CCR_DMOD_FIFO (0x2 << 12)
#define CCR_DMOD_EOBFIFO (0x3 << 12)
#define CCR_SMOD_LINEAR (0x0 << 10)
#define CCR_SMOD_2D (0x1 << 10)
#define CCR_SMOD_FIFO (0x2 << 10)
#define CCR_SMOD_EOBFIFO (0x3 << 10)
#define CCR_MDIR_DEC (1<<9)
#define CCR_MSEL_B (1<<8)
#define CCR_DSIZ_32 (0x0 << 6)
#define CCR_DSIZ_8 (0x1 << 6)
#define CCR_DSIZ_16 (0x2 << 6)
#define CCR_SSIZ_32 (0x0 << 4)
#define CCR_SSIZ_8 (0x1 << 4)
#define CCR_SSIZ_16 (0x2 << 4)
#define CCR_REN (1<<3)
#define CCR_RPT (1<<2)
#define CCR_FRC (1<<1)
#define CCR_CEN (1<<0)
#define RTOR_EN (1<<15)
#define RTOR_CLK (1<<14)
#define RTOR_PSC (1<<13)

enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

struct imx_dma_2d_config {
	u16 xsr;
	u16 ysr;
	u16 wsr;
	int count;
};

struct imxdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	enum dma_transfer_direction direction;
	enum imxdma_prep_type type;
	/* For memcpy and interleaved */
	unsigned int config_port;
	unsigned int config_mem;
	/* For interleaved transfers */
	unsigned int x;
	unsigned int y;
	unsigned int w;
	/* For slave sg and cyclic */
	struct scatterlist *sg;
	unsigned int sgcount;
};

struct imxdma_channel {
	int hw_chaining;
	struct timer_list watchdog;
	struct imxdma_engine *imxdma;
	unsigned int channel;

	struct tasklet_struct dma_tasklet;
	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;
	int descs_allocated;
	enum dma_slave_buswidth word_size;
	dma_addr_t per_address;
	u32 watermark_level;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	int dma_request;
	struct scatterlist *sg_list;
	u32 ccr_from_device;
	u32 ccr_to_device;
	bool enabled_2d;
	int slot_2d;
	unsigned int irq;
	struct dma_slave_config config;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device *dev;
	struct dma_device dma_device;
	void __iomem *base;
	struct clk *dma_ahb;
	struct clk *dma_ipg;
	spinlock_t lock;
	struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel channel[IMX_DMA_CHANNELS];
	enum imx_dma_type devtype;
	unsigned int irq;
	unsigned int irq_err;
};

struct imxdma_filter_data {
	struct imxdma_engine *imxdma;
	int request;
};

static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
	}, {
		.compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}

static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

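/*
 * imxdma_hw_chain - report whether hardware descriptor chaining may be used
 * on this channel; only the i.MX27-class controller supports it, so i.MX1
 * always gets 0.
 */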
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	now = min_t(size_t, d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}

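/*
 * imxdma_enable_hw - acknowledge and unmask the channel interrupt, then set
 * CCR_CEN (and CCR_ACRPT) to start the transfer. When hardware chaining is
 * available and the descriptor has further sg entries, the next chunk is
 * pre-programmed and CCR_RPT | CCR_ACRPT are set so the controller can
 * continue on its own.
 */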
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
	    d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

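/*
 * imxdma_disable_hw - stop the channel: mask its interrupt, clear CCR_CEN
 * and acknowledge any pending status. The chaining watchdog, if armed, is
 * cancelled first.
 */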
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		timer_delete(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

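/*
 * imxdma_watchdog - timer callback for hardware-chained transfers that did
 * not complete in time; it stops the channel and leaves error handling to
 * the channel tasklet.
 */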
static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = timer_container_of(imxdmac, t,
							    watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

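/*
 * imxdma_err_handler - error interrupt handler: gather the burst timeout,
 * request timeout, transfer error and buffer overflow status bits, clear
 * them per channel, warn about the error and kick the affected channel
 * tasklets.
 */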
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR) |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s\n", i,
			 errcode & IMX_DMA_ERR_BURST ? " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ? " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ? " buffer" : "");
	}
	return IRQ_HANDLED;
}

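/*
 * dma_irq_handle_channel - completion handling for one channel: advance to
 * the next scatterlist entry if there is one (reprogramming or re-arming the
 * hardware chain), otherwise stop the channel and let the tasklet finish the
 * descriptor.
 */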
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					  jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			timer_delete(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

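/*
 * imxdma_xfer_desc - program the hardware for one descriptor: claim and set
 * up a 2D slot for interleaved transfers, write the source/destination and
 * control registers according to the descriptor type and finally enable the
 * channel.
 */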
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			    ((imxdma->slots_2d[i].xsr != d->x) ||
			     (imxdma->slots_2d[i].ysr != d->y) ||
			     (imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
		fallthrough;
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
				 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu\n",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev)\n",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

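/*
 * imxdma_tasklet - bottom half run after a channel finishes: complete the
 * descriptor (unless it is cyclic), release its 2D slot if one was held,
 * start the next queued descriptor and invoke the client callback.
 */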
static void imxdma_tasklet(struct tasklet_struct *t)
{
	struct imxdma_channel *imxdmac = from_tasklet(imxdmac, t, dma_tasklet);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and dont mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}

static int imxdma_terminate_all(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;

	imxdma_disable_hw(imxdmac);

	spin_lock_irqsave(&imxdma->lock, flags);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
	spin_unlock_irqrestore(&imxdma->lock, flags);
	return 0;
}

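/*
 * imxdma_config_write - apply the cached slave configuration for the given
 * direction: remember the peripheral address, watermark level and bus width,
 * derive the CCR values used for dev<->mem transfers and program the request
 * source and burst length registers.
 */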
static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}

static int imxdma_config(struct dma_chan *chan,
			 struct dma_slave_config *dmaengine_cfg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	return 0;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

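/*
 * imxdma_tx_submit - move the prepared descriptor from ld_free to ld_queue
 * and assign its cookie; the hardware is actually started later, from
 * issue_pending or from the completion tasklet.
 */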
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	imxdma_config_write(chan, &imxdmac->config, direction);

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

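/*
 * imxdma_prep_dma_cyclic - build a cyclic transfer by splitting the buffer
 * into one scatterlist entry per period and chaining the list back onto
 * itself; the slave_sg machinery then runs it forever with
 * len = IMX_DMA_LENGTH_LOOP.
 */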
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
		__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}

static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu\n",
		__func__, imxdmac->channel, (unsigned long long)src,
		(unsigned long long)dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

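/*
 * imxdma_prep_dma_interleaved - set up a 2D memory-to-memory transfer; only
 * single-frame templates (frame_size == 1, dir == DMA_MEM_TO_MEM) are
 * supported, with x, y and w taken from the template.
 */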
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx\n"
		" src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu\n", __func__,
		imxdmac->channel, (unsigned long long)xt->src_start,
		(unsigned long long) xt->dst_start,
		str_true_false(xt->src_sgl), str_true_false(xt->dst_sgl),
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

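/*
 * imxdma_issue_pending - if the channel is idle, take the first descriptor
 * off ld_queue, program the hardware and move it to ld_active.
 */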
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

static bool imxdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct imxdma_filter_data *fdata = param;
	struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan);

	if (chan->device->dev != fdata->imxdma->dev)
		return false;

	imxdma_chan->dma_request = fdata->request;
	chan->private = NULL;

	return true;
}

static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct imxdma_engine *imxdma = ofdma->of_dma_data;
	struct imxdma_filter_data fdata = {
		.imxdma = imxdma,
	};

	if (count != 1)
		return NULL;

	fdata.request = dma_spec->args[0];

	return dma_request_channel(imxdma->dma_device.cap_mask,
				   imxdma_filter_fn, &fdata);
}

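/*
 * imxdma_probe - map the registers, enable the ipg/ahb clocks, reset and
 * enable the DMA block, request the shared or per-channel IRQs depending on
 * the SoC, initialize all channels and register the dmaengine device (plus
 * the OF translation hook when booted from a device tree).
 */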
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->dev = &pdev->dev;
	imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);

	imxdma->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(imxdma->base))
		return PTR_ERR(imxdma->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	ret = clk_prepare_enable(imxdma->dma_ipg);
	if (ret)
		return ret;
	ret = clk_prepare_enable(imxdma->dma_ahb);
	if (ret)
		goto disable_dma_ipg_clk;

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq = irq;

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto disable_dma_ahb_clk;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto disable_dma_ahb_clk;
		}
		imxdma->irq_err = irq_err;
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					       dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto disable_dma_ahb_clk;
			}

			imxdmac->irq = irq + i;
			timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0);
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_setup(&imxdmac->dma_tasklet, imxdma_tasklet);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_config = imxdma_config;
	imxdma->dma_device.device_terminate_all = imxdma_terminate_all;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto disable_dma_ahb_clk;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
				imxdma_xlate, imxdma);
		if (ret) {
			dev_err(&pdev->dev, "unable to register of_dma_controller\n");
			goto err_of_dma_controller;
		}
	}

	return 0;

err_of_dma_controller:
	dma_async_device_unregister(&imxdma->dma_device);
disable_dma_ahb_clk:
	clk_disable_unprepare(imxdma->dma_ahb);
disable_dma_ipg_clk:
	clk_disable_unprepare(imxdma->dma_ipg);
	return ret;
}

static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma)
{
	int i;

	if (is_imx1_dma(imxdma)) {
		disable_irq(imxdma->irq);
		disable_irq(imxdma->irq_err);
	}

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma))
			disable_irq(imxdmac->irq);

		tasklet_kill(&imxdmac->dma_tasklet);
	}
}

static void imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	imxdma_free_irq(pdev, imxdma);

	dma_async_device_unregister(&imxdma->dma_device);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
}

static struct platform_driver imxdma_driver = {
	.driver = {
		.name = "imx-dma",
		.of_match_table = imx_dma_of_dev_id,
	},
	.remove = imxdma_remove,
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <[email protected]>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");