GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/dma/fsldma.c
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <[email protected]>, Jul 2007
 *   Ebony Zhu <[email protected]>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also suitable for the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...) \
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...) \
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	return DMA_TO_CPU(chan, desc->hw.count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static dma_addr_t get_desc_src(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
			       struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	DMA_OUT(chan, &chan->regs->mr, 0, 32);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
				| FSL_DMA_MR_PRC_RM, 32);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = DMA_IN(chan, &chan->regs->mr, 32);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. When the DMA transfers
 * data from the source address (SA) and the loop size is 4, the DMA will
 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
 * SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. When the DMA
 * transfers data to the destination address (TA) and the loop size is 4,
 * the DMA will write data to TA, TA + 1, TA + 2, TA + 3, then loop back
 * to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count specifies how many bytes may be transferred before
 * pausing the channel, after which a new assertion of DREQ# resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	mode |= (__ilog2(size) << 24) & 0x0f000000;

	DMA_OUT(chan, &chan->regs->mr, mode, 32);
}

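/*
 * Worked example (illustration only, not part of the driver): the request
 * count is stored as a power of two in bits 24-27 of the mode register by
 * the expression above. A size of 64 gives __ilog2(64) = 6, so the field is
 * written as 0x06000000; the maximum size of 1024 gives __ilog2(1024) = 10
 * (0xa), i.e. 0x0a000000. Sizes that are not powers of two are rounded down
 * by __ilog2().
 */
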
/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel can be started by an
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately. Instead, the DMA channel waits until the control pin
 * is asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->desc_lock, flags);

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	cookie = chan->common.cookie;
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie++;
		if (cookie < DMA_MIN_COOKIE)
			cookie = DMA_MIN_COOKIE;

		child->async_tx.cookie = cookie;
	}

	chan->common.cookie = cookie;

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p allocated\n", desc);
#endif

	return desc;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * We need the descriptor to be aligned to 32 bytes
	 * to meet the FSL DMA specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
		list_del(&desc->node);
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p free\n", desc);
#endif
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_irqsave(&chan->desc_lock, flags);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *new;

	if (!dchan)
		return NULL;

	chan = to_fsl_chan(dchan);

	new = fsl_dma_alloc_descriptor(chan);
	if (!new) {
		chan_err(chan, "%s\n", msg_ld_oom);
		return NULL;
	}

	new->async_tx.cookie = -EBUSY;
	new->async_tx.flags = flags;

	/* Insert the link descriptor to the LD ring */
	list_add_tail(&new->node, &new->tx_list);

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &new->async_tx;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
	struct scatterlist *dst_sg, unsigned int dst_nents,
	struct scatterlist *src_sg, unsigned int src_nents,
	unsigned long flags)
{
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	size_t dst_avail, src_avail;
	dma_addr_t dst, src;
	size_t len;

	/* basic sanity checks */
	if (dst_nents == 0 || src_nents == 0)
		return NULL;

	if (dst_sg == NULL || src_sg == NULL)
		return NULL;

	/*
	 * TODO: should we check that both scatterlists have the same
	 * TODO: number of bytes in total? Is that really an error?
	 */

	/* get prepared for the loop */
	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* run until we are out of scatterlist entries */
	while (true) {

		/* create the largest transaction possible */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
		if (len == 0)
			goto fetch;

		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;

		/* allocate and populate the descriptor */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		set_desc_cnt(chan, &new->hw, len);
		set_desc_src(chan, &new->hw, src);
		set_desc_dst(chan, &new->hw, dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);
		prev = new;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

		/* update metadata */
		dst_avail -= len;
		src_avail -= len;

fetch:
		/* fetch the next dst scatterlist entry */
		if (dst_avail == 0) {

			/* no more entries: we're done */
			if (dst_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;

			dst_nents--;
			dst_avail = sg_dma_len(dst_sg);
		}

		/* fetch the next src scatterlist entry */
		if (src_avail == 0) {

			/* no more entries: we're done */
			if (src_nents == 0)
				break;

			/* fetch the next entry: if there are no more: done */
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;

			src_nents--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

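/*
 * Illustrative sketch (not part of this driver): how a client might use the
 * DMA_SG capability implemented by fsl_dma_prep_sg() above. The buffers and
 * lengths are hypothetical and error handling is omitted; the scatterlists
 * must be DMA-mapped before the prep call, exactly as assumed by the code
 * above.
 *
 *	#include <linux/dmaengine.h>
 *	#include <linux/scatterlist.h>
 *
 *	struct scatterlist src_sg[2], dst_sg[2];
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *	int src_cnt, dst_cnt;
 *
 *	sg_init_table(src_sg, 2);
 *	sg_set_buf(&src_sg[0], src_buf0, len0);
 *	sg_set_buf(&src_sg[1], src_buf1, len1);
 *	sg_init_table(dst_sg, 2);
 *	sg_set_buf(&dst_sg[0], dst_buf0, len0);
 *	sg_set_buf(&dst_sg[1], dst_buf1, len1);
 *
 *	src_cnt = dma_map_sg(chan->device->dev, src_sg, 2, DMA_TO_DEVICE);
 *	dst_cnt = dma_map_sg(chan->device->dev, dst_sg, 2, DMA_FROM_DEVICE);
 *
 *	tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_cnt,
 *					      src_sg, src_cnt, DMA_PREP_INTERRUPT);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */
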
/**
 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: DMAEngine flags
 *
 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
 * DMA_SLAVE API, this gets the device-specific information from the
 * chan->private variable.
 */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_data_direction direction, unsigned long flags)
{
	/*
	 * This operation is not supported on the Freescale DMA controller
	 *
	 * However, we need to provide the function pointer to allow the
	 * device_control() method to work.
	 */
	return NULL;
}

static int fsl_dma_device_control(struct dma_chan *dchan,
				  enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_slave_config *config;
	struct fsldma_chan *chan;
	unsigned long flags;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&chan->desc_lock, flags);

		/* Halt the DMA engine */
		dma_halt(chan);

		/* Remove and free all of the descriptors in the LD queue */
		fsldma_free_desc_list(chan, &chan->ld_pending);
		fsldma_free_desc_list(chan, &chan->ld_running);
		chan->idle = true;

		spin_unlock_irqrestore(&chan->desc_lock, flags);
		return 0;

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;

		/* make sure the channel supports setting burst size */
		if (!chan->set_request_count)
			return -ENXIO;

		/* we set the controller burst size depending on direction */
		if (config->direction == DMA_TO_DEVICE)
			size = config->dst_addr_width * config->dst_maxburst;
		else
			size = config->src_addr_width * config->src_maxburst;

		chan->set_request_count(chan, size);
		return 0;

	case FSLDMA_EXTERNAL_START:

		/* make sure the channel supports external start */
		if (!chan->toggle_ext_start)
			return -ENXIO;

		chan->toggle_ext_start(chan, arg);
		return 0;

	default:
		return -ENXIO;
	}

	return 0;
}

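/*
 * Illustrative sketch (not part of this driver): how a slave client might
 * drive the device_control() hook above. The bus width and burst length are
 * hypothetical; per the DMA_SLAVE_CONFIG handling above, the request count
 * programmed into the controller ends up being addr_width * maxburst.
 *
 *	#include <linux/dmaengine.h>
 *
 *	struct dma_chan *chan;	// obtained earlier with dma_request_channel()
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_TO_DEVICE,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	// program a 64-byte request count (4 bytes * 16 beats)
 *	chan->device->device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)&cfg);
 *
 *	// let the transfer be started by the external DMA start pin
 *	chan->device->device_control(chan, FSLDMA_EXTERNAL_START, 1);
 */
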
/**
 * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, and then
 * free the descriptor.
 */
static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
				      struct fsl_desc_sw *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	struct device *dev = chan->common.device->dev;
	dma_addr_t src = get_desc_src(chan, desc);
	dma_addr_t dst = get_desc_dst(chan, desc);
	u32 len = get_desc_cnt(chan, desc);

	/* Run the link descriptor callback function */
	if (txd->callback) {
#ifdef FSL_DMA_LD_DEBUG
		chan_dbg(chan, "LD %p callback\n", desc);
#endif
		txd->callback(txd->callback_param);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	/* Unmap the dst buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE);
	}

	/* Unmap the src buffer, if requested */
	if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, src, len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, src, len, DMA_TO_DEVICE);
	}

#ifdef FSL_DMA_LD_DEBUG
	chan_dbg(chan, "LD %p free\n", desc);
#endif
	dma_pool_free(chan->desc_pool, desc, txd->phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = DMA_IN(chan, &chan->regs->mr, 32);
		mode &= ~FSL_DMA_MR_CS;
		DMA_OUT(chan, &chan->regs->mr, mode, 32);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	dma_cookie_t last_complete;
	dma_cookie_t last_used;
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);

	last_complete = chan->completed_cookie;
	last_used = dchan->cookie;

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

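/*
 * Illustrative sketch (not part of this driver): a typical asynchronous
 * memcpy through the generic dmaengine API that this driver implements.
 * Buffer names and length are hypothetical and error handling is omitted.
 * The SKIP_*_UNMAP flags are passed so that fsldma_cleanup_descriptor()
 * above leaves the unmapping to the client.
 *
 *	#include <linux/dmaengine.h>
 *	#include <linux/dma-mapping.h>
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_addr_t src, dst;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	src = dma_map_single(chan->device->dev, src_buf, len, DMA_TO_DEVICE);
 *	dst = dma_map_single(chan->device->dev, dst_buf, len, DMA_FROM_DEVICE);
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP |
 *			DMA_COMPL_SKIP_DEST_UNMAP);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_IN_PROGRESS)
 *		cpu_relax();
 *
 *	dma_unmap_single(chan->device->dev, src, len, DMA_TO_DEVICE);
 *	dma_unmap_single(chan->device->dev, dst, len, DMA_FROM_DEVICE);
 *	dma_release_channel(chan);
 */
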
/*----------------------------------------------------------------------------*/
/* Interrupt Handling */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to prepare
	 * for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;
	struct fsl_desc_sw *desc, *_desc;
	LIST_HEAD(ld_cleanup);
	unsigned long flags;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->ld_running)) {
		dma_cookie_t cookie;

		desc = to_fsl_desc(chan->ld_running.prev);
		cookie = desc->async_tx.cookie;

		chan->completed_cookie = cookie;
		chan_dbg(chan, "completed_cookie=%d\n", cookie);
	}

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->ld_running, &ld_cleanup);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {

		/* Remove from the list of transactions */
		list_del(&desc->node);

		/* Run all cleanup for this descriptor */
		fsldma_cleanup_descriptor(chan, desc);
	}

	chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq != NO_IRQ) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq != NO_IRQ) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	for (/* none */; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (chan->irq == NO_IRQ)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem */
/*----------------------------------------------------------------------------*/

static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		dev_err(fdev->dev, "no free memory for DMA channels!\n");
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different than the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = ((res.start - 0x100) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also get the 83xx hooks below */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	chan->idle = true;

	chan->common.device = &fdev->common;

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);
	fdev->common.chancnt++;

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int __devinit fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		dev_err(&op->dev, "not enough memory for 'priv'\n");
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_fdev;
	}

	/* map the channel IRQ if it exists, but don't hookup the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
	dma_cap_set(DMA_SG, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
	fdev->common.device_control = fsl_dma_device_control;
	fdev->common.dev = &op->dev;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	dev_set_drvdata(&op->dev, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hookup the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	kfree(fdev);
out_return:
	return err;
}

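/*
 * Illustrative device tree fragment (a sketch, not taken from any particular
 * board file) showing the layout the probe routine above expects: an
 * "fsl,eloplus-dma" controller node with one "fsl,eloplus-dma-channel" child
 * per channel. The channel id is derived from the channel's translated 'reg'
 * offset in fsl_dma_chan_probe(); the register offsets and interrupt numbers
 * here are examples only.
 *
 *	dma@21300 {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		compatible = "fsl,eloplus-dma";
 *		reg = <0x21300 0x4>;
 *		ranges = <0x0 0x21100 0x200>;
 *
 *		dma-channel@0 {
 *			compatible = "fsl,eloplus-dma-channel";
 *			reg = <0x0 0x80>;
 *			interrupt-parent = <&mpic>;
 *			interrupts = <20 2>;
 *		};
 *		dma-channel@80 {
 *			compatible = "fsl,eloplus-dma-channel";
 *			reg = <0x80 0x80>;
 *			interrupt-parent = <&mpic>;
 *			interrupts = <21 2>;
 *		};
 *	};
 */
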
static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = dev_get_drvdata(&op->dev);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	dev_set_drvdata(&op->dev, NULL);
	kfree(fdev);

	return 0;
}

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.owner = THIS_MODULE,
		.of_match_table = fsldma_of_ids,
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo / Elo Plus DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");