GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/dma/at_hdmac.c
1
/*
2
* Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
3
*
4
* Copyright (C) 2008 Atmel Corporation
5
*
6
* This program is free software; you can redistribute it and/or modify
7
* it under the terms of the GNU General Public License as published by
8
* the Free Software Foundation; either version 2 of the License, or
9
* (at your option) any later version.
10
*
11
*
12
* This supports the Atmel AHB DMA Controller.
13
*
14
* The driver has currently been tested with the Atmel AT91SAM9RL
15
* and AT91SAM9G45 series.
16
*/
17
18
#include <linux/clk.h>
19
#include <linux/dmaengine.h>
20
#include <linux/dma-mapping.h>
21
#include <linux/dmapool.h>
22
#include <linux/interrupt.h>
23
#include <linux/module.h>
24
#include <linux/platform_device.h>
25
#include <linux/slab.h>
26
27
#include "at_hdmac_regs.h"
28
29
/*
30
* Glossary
31
* --------
32
*
33
* at_hdmac : Name of the Atmel AHB DMA Controller
34
* at_dma_ / atdma : Atmel DMA controller entity related
35
* atc_ / atchan : Atmel DMA Channel entity related
36
*/
37
38
#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
39
#define ATC_DEFAULT_CTRLA (0)
40
#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
41
|ATC_DIF(AT_DMA_MEM_IF))
42
43
/*
44
* Initial number of descriptors to allocate for each channel. This could
45
* be increased during dma usage.
46
*/
47
static unsigned int init_nr_desc_per_channel = 64;
48
module_param(init_nr_desc_per_channel, uint, 0644);
49
MODULE_PARM_DESC(init_nr_desc_per_channel,
50
"initial descriptors per channel (default: 64)");
51
52
53
/* prototypes */
54
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
55
56
57
/*----------------------------------------------------------------------*/
58
59
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
60
{
61
return list_first_entry(&atchan->active_list,
62
struct at_desc, desc_node);
63
}
64
65
static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
66
{
67
return list_first_entry(&atchan->queue,
68
struct at_desc, desc_node);
69
}
70
71
/**
72
* atc_alloc_descriptor - allocate and return an initialized descriptor
73
* @chan: the channel to allocate descriptors for
74
* @gfp_flags: GFP allocation flags
75
*
76
* Note: The ack-bit is positioned in the descriptor flag at creation time
77
* to make initial allocation more convenient. This bit will be cleared
78
* and control will be given to the client at usage time (during
79
* preparation functions).
80
*/
81
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
82
gfp_t gfp_flags)
83
{
84
struct at_desc *desc = NULL;
85
struct at_dma *atdma = to_at_dma(chan->device);
86
dma_addr_t phys;
87
88
desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
89
if (desc) {
90
memset(desc, 0, sizeof(struct at_desc));
91
INIT_LIST_HEAD(&desc->tx_list);
92
dma_async_tx_descriptor_init(&desc->txd, chan);
93
/* txd.flags will be overwritten in prep functions */
94
desc->txd.flags = DMA_CTRL_ACK;
95
desc->txd.tx_submit = atc_tx_submit;
96
desc->txd.phys = phys;
97
}
98
99
return desc;
100
}
101
102
/**
103
* atc_desc_get - get an unused descriptor from free_list
104
* @atchan: channel we want a new descriptor for
105
*/
106
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
107
{
108
struct at_desc *desc, *_desc;
109
struct at_desc *ret = NULL;
110
unsigned int i = 0;
111
LIST_HEAD(tmp_list);
112
113
spin_lock_bh(&atchan->lock);
114
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
115
i++;
116
if (async_tx_test_ack(&desc->txd)) {
117
list_del(&desc->desc_node);
118
ret = desc;
119
break;
120
}
121
dev_dbg(chan2dev(&atchan->chan_common),
122
"desc %p not ACKed\n", desc);
123
}
124
spin_unlock_bh(&atchan->lock);
125
dev_vdbg(chan2dev(&atchan->chan_common),
126
"scanned %u descriptors on freelist\n", i);
127
128
/* no more descriptor available in initial pool: create one more */
129
if (!ret) {
130
ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
131
if (ret) {
132
spin_lock_bh(&atchan->lock);
133
atchan->descs_allocated++;
134
spin_unlock_bh(&atchan->lock);
135
} else {
136
dev_err(chan2dev(&atchan->chan_common),
137
"not enough descriptors available\n");
138
}
139
}
140
141
return ret;
142
}
143
144
/**
145
* atc_desc_put - move a descriptor, including any children, to the free list
146
* @atchan: channel we work on
147
* @desc: descriptor, at the head of a chain, to move to free list
148
*/
149
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
150
{
151
if (desc) {
152
struct at_desc *child;
153
154
spin_lock_bh(&atchan->lock);
155
list_for_each_entry(child, &desc->tx_list, desc_node)
156
dev_vdbg(chan2dev(&atchan->chan_common),
157
"moving child desc %p to freelist\n",
158
child);
159
list_splice_init(&desc->tx_list, &atchan->free_list);
160
dev_vdbg(chan2dev(&atchan->chan_common),
161
"moving desc %p to freelist\n", desc);
162
list_add(&desc->desc_node, &atchan->free_list);
163
spin_unlock_bh(&atchan->lock);
164
}
165
}
166
167
/**
168
* atc_desc_chain - build chain adding a descriptor
169
* @first: address of the first descriptor of the chain
170
* @prev: address of the previous descriptor of the chain
171
* @desc: descriptor to queue
172
*
173
* Called from prep_* functions
174
*/
175
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
176
struct at_desc *desc)
177
{
178
if (!(*first)) {
179
*first = desc;
180
} else {
181
/* inform the HW lli about chaining */
182
(*prev)->lli.dscr = desc->txd.phys;
183
/* insert the link descriptor to the LD ring */
184
list_add_tail(&desc->desc_node,
185
&(*first)->tx_list);
186
}
187
*prev = desc;
188
}
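/*
 * Usage sketch (illustrative, mirroring the prep_* functions below):
 * descriptors are fetched one at a time and linked into the hardware
 * LLI chain through atc_desc_chain(); the last one is then terminated
 * with set_desc_eol().
 *
 *	struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *	while (more data to describe) {
 *		desc = atc_desc_get(atchan);
 *		if (!desc)
 *			goto err_desc_get;
 *		desc->lli.saddr = ...;
 *		desc->lli.daddr = ...;
 *		desc->lli.ctrla = ...;
 *		desc->lli.ctrlb = ...;
 *		atc_desc_chain(&first, &prev, desc);
 *	}
 *	set_desc_eol(prev);
 */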
189
190
/**
191
* atc_assign_cookie - compute and assign new cookie
192
* @atchan: channel we work on
193
* @desc: descriptor to assign cookie for
194
*
195
* Called with atchan->lock held and bh disabled
196
*/
197
static dma_cookie_t
198
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
199
{
200
dma_cookie_t cookie = atchan->chan_common.cookie;
201
202
if (++cookie < 0)
203
cookie = 1;
204
205
atchan->chan_common.cookie = cookie;
206
desc->txd.cookie = cookie;
207
208
return cookie;
209
}
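/*
 * Worked example (informational): cookies stay strictly positive. Once
 * the per-channel counter reaches the largest positive dma_cookie_t
 * value, the increment above goes negative and the cookie is reset:
 *
 *	cookie == 0x7fffffff  ->  ++cookie < 0  ->  cookie = 1
 *
 * so zero and negative values remain free for special meanings such as
 * the -EBUSY placed on a freshly prepared, not yet submitted, chain.
 */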
210
211
/**
212
* atc_dostart - starts the DMA engine for real
213
* @atchan: the channel we want to start
214
* @first: first descriptor in the list we want to begin with
215
*
216
* Called with atchan->lock held and bh disabled
217
*/
218
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
219
{
220
struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
221
222
/* ASSERT: channel is idle */
223
if (atc_chan_is_enabled(atchan)) {
224
dev_err(chan2dev(&atchan->chan_common),
225
"BUG: Attempted to start non-idle channel\n");
226
dev_err(chan2dev(&atchan->chan_common),
227
" channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
228
channel_readl(atchan, SADDR),
229
channel_readl(atchan, DADDR),
230
channel_readl(atchan, CTRLA),
231
channel_readl(atchan, CTRLB),
232
channel_readl(atchan, DSCR));
233
234
/* The tasklet will hopefully advance the queue... */
235
return;
236
}
237
238
vdbg_dump_regs(atchan);
239
240
/* clear any pending interrupt */
241
while (dma_readl(atdma, EBCISR))
242
cpu_relax();
243
244
channel_writel(atchan, SADDR, 0);
245
channel_writel(atchan, DADDR, 0);
246
channel_writel(atchan, CTRLA, 0);
247
channel_writel(atchan, CTRLB, 0);
248
channel_writel(atchan, DSCR, first->txd.phys);
249
dma_writel(atdma, CHER, atchan->mask);
250
251
vdbg_dump_regs(atchan);
252
}
253
254
/**
255
* atc_chain_complete - finish work for one transaction chain
256
* @atchan: channel we work on
257
* @desc: descriptor at the head of the chain we want to complete
258
*
259
* Called with atchan->lock held and bh disabled */
260
static void
261
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
262
{
263
struct dma_async_tx_descriptor *txd = &desc->txd;
264
265
dev_vdbg(chan2dev(&atchan->chan_common),
266
"descriptor %u complete\n", txd->cookie);
267
268
atchan->completed_cookie = txd->cookie;
269
270
/* move children to free_list */
271
list_splice_init(&desc->tx_list, &atchan->free_list);
272
/* move myself to free_list */
273
list_move(&desc->desc_node, &atchan->free_list);
274
275
/* unmap dma addresses (not on slave channels) */
276
if (!atchan->chan_common.private) {
277
struct device *parent = chan2parent(&atchan->chan_common);
278
if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
279
if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
280
dma_unmap_single(parent,
281
desc->lli.daddr,
282
desc->len, DMA_FROM_DEVICE);
283
else
284
dma_unmap_page(parent,
285
desc->lli.daddr,
286
desc->len, DMA_FROM_DEVICE);
287
}
288
if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
289
if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
290
dma_unmap_single(parent,
291
desc->lli.saddr,
292
desc->len, DMA_TO_DEVICE);
293
else
294
dma_unmap_page(parent,
295
desc->lli.saddr,
296
desc->len, DMA_TO_DEVICE);
297
}
298
}
299
300
/* for cyclic transfers,
301
* no need to replay callback function while stopping */
302
if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
303
dma_async_tx_callback callback = txd->callback;
304
void *param = txd->callback_param;
305
306
/*
307
* The API requires that no submissions are done from a
308
* callback, so we don't need to drop the lock here
309
*/
310
if (callback)
311
callback(param);
312
}
313
314
dma_run_dependencies(txd);
315
}
316
317
/**
318
* atc_complete_all - finish work for all transactions
319
* @atchan: channel to complete transactions for
320
*
321
* Also submit queued descriptors, if any
322
*
323
* Assume channel is idle while calling this function
324
* Called with atchan->lock held and bh disabled
325
*/
326
static void atc_complete_all(struct at_dma_chan *atchan)
327
{
328
struct at_desc *desc, *_desc;
329
LIST_HEAD(list);
330
331
dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
332
333
BUG_ON(atc_chan_is_enabled(atchan));
334
335
/*
336
* Submit queued descriptors ASAP, i.e. before we go through
337
* the completed ones.
338
*/
339
if (!list_empty(&atchan->queue))
340
atc_dostart(atchan, atc_first_queued(atchan));
341
/* empty active_list now that it is completed */
342
list_splice_init(&atchan->active_list, &list);
343
/* empty queue list by moving descriptors (if any) to active_list */
344
list_splice_init(&atchan->queue, &atchan->active_list);
345
346
list_for_each_entry_safe(desc, _desc, &list, desc_node)
347
atc_chain_complete(atchan, desc);
348
}
349
350
/**
351
* atc_cleanup_descriptors - clean up finished descriptors in active_list
352
* @atchan: channel to be cleaned up
353
*
354
* Called with atchan->lock held and bh disabled
355
*/
356
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
357
{
358
struct at_desc *desc, *_desc;
359
struct at_desc *child;
360
361
dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
362
363
list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
364
if (!(desc->lli.ctrla & ATC_DONE))
365
/* This one is currently in progress */
366
return;
367
368
list_for_each_entry(child, &desc->tx_list, desc_node)
369
if (!(child->lli.ctrla & ATC_DONE))
370
/* Currently in progress */
371
return;
372
373
/*
374
* No descriptors so far seem to be in progress, i.e.
375
* this chain must be done.
376
*/
377
atc_chain_complete(atchan, desc);
378
}
379
}
380
381
/**
382
* atc_advance_work - at the end of a transaction, move forward
383
* @atchan: channel where the transaction ended
384
*
385
* Called with atchan->lock held and bh disabled
386
*/
387
static void atc_advance_work(struct at_dma_chan *atchan)
388
{
389
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
390
391
if (list_empty(&atchan->active_list) ||
392
list_is_singular(&atchan->active_list)) {
393
atc_complete_all(atchan);
394
} else {
395
atc_chain_complete(atchan, atc_first_active(atchan));
396
/* advance work */
397
atc_dostart(atchan, atc_first_active(atchan));
398
}
399
}
400
401
402
/**
403
* atc_handle_error - handle errors reported by DMA controller
404
* @atchan: channel where error occurs
405
*
406
* Called with atchan->lock held and bh disabled
407
*/
408
static void atc_handle_error(struct at_dma_chan *atchan)
409
{
410
struct at_desc *bad_desc;
411
struct at_desc *child;
412
413
/*
414
* The descriptor currently at the head of the active list is
415
* broken. Since we don't have any way to report errors, we'll
416
* just have to scream loudly and try to carry on.
417
*/
418
bad_desc = atc_first_active(atchan);
419
list_del_init(&bad_desc->desc_node);
420
421
/* As we are stopped, take the opportunity to push queued descriptors
422
* in active_list */
423
list_splice_init(&atchan->queue, atchan->active_list.prev);
424
425
/* Try to restart the controller */
426
if (!list_empty(&atchan->active_list))
427
atc_dostart(atchan, atc_first_active(atchan));
428
429
/*
430
* KERN_CRITICAL may seem harsh, but since this only happens
431
* when someone submits a bad physical address in a
432
* descriptor, we should consider ourselves lucky that the
433
* controller flagged an error instead of scribbling over
434
* random memory locations.
435
*/
436
dev_crit(chan2dev(&atchan->chan_common),
437
"Bad descriptor submitted for DMA!\n");
438
dev_crit(chan2dev(&atchan->chan_common),
439
" cookie: %d\n", bad_desc->txd.cookie);
440
atc_dump_lli(atchan, &bad_desc->lli);
441
list_for_each_entry(child, &bad_desc->tx_list, desc_node)
442
atc_dump_lli(atchan, &child->lli);
443
444
/* Pretend the descriptor completed successfully */
445
atc_chain_complete(atchan, bad_desc);
446
}
447
448
/**
449
* atc_handle_cyclic - at the end of a period, run callback function
450
* @atchan: channel used for cyclic operations
451
*
452
* Called with atchan->lock held and bh disabled
453
*/
454
static void atc_handle_cyclic(struct at_dma_chan *atchan)
455
{
456
struct at_desc *first = atc_first_active(atchan);
457
struct dma_async_tx_descriptor *txd = &first->txd;
458
dma_async_tx_callback callback = txd->callback;
459
void *param = txd->callback_param;
460
461
dev_vdbg(chan2dev(&atchan->chan_common),
462
"new cyclic period llp 0x%08x\n",
463
channel_readl(atchan, DSCR));
464
465
if (callback)
466
callback(param);
467
}
468
469
/*-- IRQ & Tasklet ---------------------------------------------------*/
470
471
static void atc_tasklet(unsigned long data)
472
{
473
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
474
475
spin_lock(&atchan->lock);
476
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
477
atc_handle_error(atchan);
478
else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
479
atc_handle_cyclic(atchan);
480
else
481
atc_advance_work(atchan);
482
483
spin_unlock(&atchan->lock);
484
}
485
486
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
487
{
488
struct at_dma *atdma = (struct at_dma *)dev_id;
489
struct at_dma_chan *atchan;
490
int i;
491
u32 status, pending, imr;
492
int ret = IRQ_NONE;
493
494
do {
495
imr = dma_readl(atdma, EBCIMR);
496
status = dma_readl(atdma, EBCISR);
497
pending = status & imr;
498
499
if (!pending)
500
break;
501
502
dev_vdbg(atdma->dma_common.dev,
503
"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
504
status, imr, pending);
505
506
for (i = 0; i < atdma->dma_common.chancnt; i++) {
507
atchan = &atdma->chan[i];
508
if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
509
if (pending & AT_DMA_ERR(i)) {
510
/* Disable channel on AHB error */
511
dma_writel(atdma, CHDR,
512
AT_DMA_RES(i) | atchan->mask);
513
/* Give information to tasklet */
514
set_bit(ATC_IS_ERROR, &atchan->status);
515
}
516
tasklet_schedule(&atchan->tasklet);
517
ret = IRQ_HANDLED;
518
}
519
}
520
521
} while (pending);
522
523
return ret;
524
}
525
526
527
/*-- DMA Engine API --------------------------------------------------*/
528
529
/**
530
* atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
531
* @desc: descriptor at the head of the transaction chain
532
*
533
* Queue chain if DMA engine is working already
534
*
535
* Cookie increment and adding to active_list or queue must be atomic
536
*/
537
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
538
{
539
struct at_desc *desc = txd_to_at_desc(tx);
540
struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
541
dma_cookie_t cookie;
542
543
spin_lock_bh(&atchan->lock);
544
cookie = atc_assign_cookie(atchan, desc);
545
546
if (list_empty(&atchan->active_list)) {
547
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
548
desc->txd.cookie);
549
atc_dostart(atchan, desc);
550
list_add_tail(&desc->desc_node, &atchan->active_list);
551
} else {
552
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
553
desc->txd.cookie);
554
list_add_tail(&desc->desc_node, &atchan->queue);
555
}
556
557
spin_unlock_bh(&atchan->lock);
558
559
return cookie;
560
}
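/*
 * Client-side note (illustrative, not from the original source):
 * tx_submit only starts the engine when the active list is empty;
 * otherwise the chain waits on atchan->queue. A typical caller
 * therefore follows the submit with an issue_pending:
 *
 *	cookie = txd->tx_submit(txd);
 *	dma_async_issue_pending(chan);
 */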
561
562
/**
563
* atc_prep_dma_memcpy - prepare a memcpy operation
564
* @chan: the channel to prepare operation on
565
* @dest: operation virtual destination address
566
* @src: operation virtual source address
567
* @len: operation length
568
* @flags: tx descriptor status flags
569
*/
570
static struct dma_async_tx_descriptor *
571
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
572
size_t len, unsigned long flags)
573
{
574
struct at_dma_chan *atchan = to_at_dma_chan(chan);
575
struct at_desc *desc = NULL;
576
struct at_desc *first = NULL;
577
struct at_desc *prev = NULL;
578
size_t xfer_count;
579
size_t offset;
580
unsigned int src_width;
581
unsigned int dst_width;
582
u32 ctrla;
583
u32 ctrlb;
584
585
dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
586
dest, src, len, flags);
587
588
if (unlikely(!len)) {
589
dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
590
return NULL;
591
}
592
593
ctrla = ATC_DEFAULT_CTRLA;
594
ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
595
| ATC_SRC_ADDR_MODE_INCR
596
| ATC_DST_ADDR_MODE_INCR
597
| ATC_FC_MEM2MEM;
598
599
/*
600
* We can be a lot more clever here, but this should take care
601
* of the most common optimization.
602
*/
603
if (!((src | dest | len) & 3)) {
604
ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
605
src_width = dst_width = 2;
606
} else if (!((src | dest | len) & 1)) {
607
ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
608
src_width = dst_width = 1;
609
} else {
610
ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
611
src_width = dst_width = 0;
612
}
613
614
for (offset = 0; offset < len; offset += xfer_count << src_width) {
615
xfer_count = min_t(size_t, (len - offset) >> src_width,
616
ATC_BTSIZE_MAX);
617
618
desc = atc_desc_get(atchan);
619
if (!desc)
620
goto err_desc_get;
621
622
desc->lli.saddr = src + offset;
623
desc->lli.daddr = dest + offset;
624
desc->lli.ctrla = ctrla | xfer_count;
625
desc->lli.ctrlb = ctrlb;
626
627
desc->txd.cookie = 0;
628
629
atc_desc_chain(&first, &prev, desc);
630
}
631
632
/* First descriptor of the chain embeds additional information */
633
first->txd.cookie = -EBUSY;
634
first->len = len;
635
636
/* set end-of-link to the last link descriptor of the list */
637
set_desc_eol(desc);
638
639
first->txd.flags = flags; /* client is in control of this ack */
640
641
return &first->txd;
642
643
err_desc_get:
644
atc_desc_put(atchan, first);
645
return NULL;
646
}
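/*
 * Illustrative memcpy client sketch (assumptions: "chan" was granted
 * with the DMA_MEMCPY capability and "dst"/"src" are bus addresses
 * already mapped by the caller; my_done/my_ctx are hypothetical):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_done;
 *	tx->callback_param = my_ctx;
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */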
647
648
649
/**
650
* atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
651
* @chan: DMA channel
652
* @sgl: scatterlist to transfer to/from
653
* @sg_len: number of entries in @sgl
654
* @direction: DMA direction
655
* @flags: tx descriptor status flags
656
*/
657
static struct dma_async_tx_descriptor *
658
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
659
unsigned int sg_len, enum dma_data_direction direction,
660
unsigned long flags)
661
{
662
struct at_dma_chan *atchan = to_at_dma_chan(chan);
663
struct at_dma_slave *atslave = chan->private;
664
struct at_desc *first = NULL;
665
struct at_desc *prev = NULL;
666
u32 ctrla;
667
u32 ctrlb;
668
dma_addr_t reg;
669
unsigned int reg_width;
670
unsigned int mem_width;
671
unsigned int i;
672
struct scatterlist *sg;
673
size_t total_len = 0;
674
675
dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
676
sg_len,
677
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
678
flags);
679
680
if (unlikely(!atslave || !sg_len)) {
681
dev_dbg(chan2dev(chan), "prep_slave_sg: length is zero!\n");
682
return NULL;
683
}
684
685
reg_width = atslave->reg_width;
686
687
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
688
ctrlb = ATC_IEN;
689
690
switch (direction) {
691
case DMA_TO_DEVICE:
692
ctrla |= ATC_DST_WIDTH(reg_width);
693
ctrlb |= ATC_DST_ADDR_MODE_FIXED
694
| ATC_SRC_ADDR_MODE_INCR
695
| ATC_FC_MEM2PER
696
| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
697
reg = atslave->tx_reg;
698
for_each_sg(sgl, sg, sg_len, i) {
699
struct at_desc *desc;
700
u32 len;
701
u32 mem;
702
703
desc = atc_desc_get(atchan);
704
if (!desc)
705
goto err_desc_get;
706
707
mem = sg_dma_address(sg);
708
len = sg_dma_len(sg);
709
mem_width = 2;
710
if (unlikely(mem & 3 || len & 3))
711
mem_width = 0;
712
713
desc->lli.saddr = mem;
714
desc->lli.daddr = reg;
715
desc->lli.ctrla = ctrla
716
| ATC_SRC_WIDTH(mem_width)
717
| len >> mem_width;
718
desc->lli.ctrlb = ctrlb;
719
720
atc_desc_chain(&first, &prev, desc);
721
total_len += len;
722
}
723
break;
724
case DMA_FROM_DEVICE:
725
ctrla |= ATC_SRC_WIDTH(reg_width);
726
ctrlb |= ATC_DST_ADDR_MODE_INCR
727
| ATC_SRC_ADDR_MODE_FIXED
728
| ATC_FC_PER2MEM
729
| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
730
731
reg = atslave->rx_reg;
732
for_each_sg(sgl, sg, sg_len, i) {
733
struct at_desc *desc;
734
u32 len;
735
u32 mem;
736
737
desc = atc_desc_get(atchan);
738
if (!desc)
739
goto err_desc_get;
740
741
mem = sg_dma_address(sg);
742
len = sg_dma_len(sg);
743
mem_width = 2;
744
if (unlikely(mem & 3 || len & 3))
745
mem_width = 0;
746
747
desc->lli.saddr = reg;
748
desc->lli.daddr = mem;
749
desc->lli.ctrla = ctrla
750
| ATC_DST_WIDTH(mem_width)
751
| len >> reg_width;
752
desc->lli.ctrlb = ctrlb;
753
754
atc_desc_chain(&first, &prev, desc);
755
total_len += len;
756
}
757
break;
758
default:
759
return NULL;
760
}
761
762
/* set end-of-link to the last link descriptor of the list */
763
set_desc_eol(prev);
764
765
/* First descriptor of the chain embeds additional information */
766
first->txd.cookie = -EBUSY;
767
first->len = total_len;
768
769
/* first link descriptor of the list is responsible for the flags */
770
first->txd.flags = flags; /* client is in control of this ack */
771
772
return &first->txd;
773
774
err_desc_get:
775
dev_err(chan2dev(chan), "not enough descriptors available\n");
776
atc_desc_put(atchan, first);
777
return NULL;
778
}
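/*
 * Illustrative slave client sketch (assumptions: board code hung a
 * filled-in struct at_dma_slave off chan->private, and "sgl"/"sg_len"
 * come from a dma_map_sg() done by the caller for DMA_TO_DEVICE):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *			DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */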
779
780
/**
781
* atc_dma_cyclic_check_values
782
* Check for too big/unaligned periods and unaligned DMA buffer
783
*/
784
static int
785
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
786
size_t period_len, enum dma_data_direction direction)
787
{
788
if (period_len > (ATC_BTSIZE_MAX << reg_width))
789
goto err_out;
790
if (unlikely(period_len & ((1 << reg_width) - 1)))
791
goto err_out;
792
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
793
goto err_out;
794
if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
795
goto err_out;
796
797
return 0;
798
799
err_out:
800
return -EINVAL;
801
}
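/*
 * Worked example (informational): with reg_width == 2 (32-bit register
 * accesses) a period may span at most ATC_BTSIZE_MAX << 2 bytes, and
 * both period_len and buf_addr must be multiples of 4; anything else,
 * or a direction other than DMA_TO_DEVICE/DMA_FROM_DEVICE, makes the
 * helper above return -EINVAL.
 */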
802
803
/**
804
* atc_dma_cyclic_fill_desc - Fill one period descriptor
805
*/
806
static int
807
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
808
unsigned int period_index, dma_addr_t buf_addr,
809
size_t period_len, enum dma_data_direction direction)
810
{
811
u32 ctrla;
812
unsigned int reg_width = atslave->reg_width;
813
814
/* prepare common CTRLA value */
815
ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
816
| ATC_DST_WIDTH(reg_width)
817
| ATC_SRC_WIDTH(reg_width)
818
| period_len >> reg_width;
819
820
switch (direction) {
821
case DMA_TO_DEVICE:
822
desc->lli.saddr = buf_addr + (period_len * period_index);
823
desc->lli.daddr = atslave->tx_reg;
824
desc->lli.ctrla = ctrla;
825
desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
826
| ATC_SRC_ADDR_MODE_INCR
827
| ATC_FC_MEM2PER
828
| ATC_SIF(AT_DMA_MEM_IF)
829
| ATC_DIF(AT_DMA_PER_IF);
830
break;
831
832
case DMA_FROM_DEVICE:
833
desc->lli.saddr = atslave->rx_reg;
834
desc->lli.daddr = buf_addr + (period_len * period_index);
835
desc->lli.ctrla = ctrla;
836
desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
837
| ATC_SRC_ADDR_MODE_FIXED
838
| ATC_FC_PER2MEM
839
| ATC_SIF(AT_DMA_PER_IF)
840
| ATC_DIF(AT_DMA_MEM_IF);
841
break;
842
843
default:
844
return -EINVAL;
845
}
846
847
return 0;
848
}
849
850
/**
851
* atc_prep_dma_cyclic - prepare the cyclic DMA transfer
852
* @chan: the DMA channel to prepare
853
* @buf_addr: physical DMA address where the buffer starts
854
* @buf_len: total number of bytes for the entire buffer
855
* @period_len: number of bytes for each period
856
* @direction: transfer direction, to or from device
857
*/
858
static struct dma_async_tx_descriptor *
859
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
860
size_t period_len, enum dma_data_direction direction)
861
{
862
struct at_dma_chan *atchan = to_at_dma_chan(chan);
863
struct at_dma_slave *atslave = chan->private;
864
struct at_desc *first = NULL;
865
struct at_desc *prev = NULL;
866
unsigned long was_cyclic;
867
unsigned int periods = buf_len / period_len;
868
unsigned int i;
869
870
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
871
direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
872
buf_addr,
873
periods, buf_len, period_len);
874
875
if (unlikely(!atslave || !buf_len || !period_len)) {
876
dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
877
return NULL;
878
}
879
880
was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
881
if (was_cyclic) {
882
dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
883
return NULL;
884
}
885
886
/* Check for too big/unaligned periods and unaligned DMA buffer */
887
if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
888
period_len, direction))
889
goto err_out;
890
891
/* build cyclic linked list */
892
for (i = 0; i < periods; i++) {
893
struct at_desc *desc;
894
895
desc = atc_desc_get(atchan);
896
if (!desc)
897
goto err_desc_get;
898
899
if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
900
period_len, direction))
901
goto err_desc_get;
902
903
atc_desc_chain(&first, &prev, desc);
904
}
905
906
/* let's make a cyclic list */
907
prev->lli.dscr = first->txd.phys;
908
909
/* First descriptor of the chain embeds additional information */
910
first->txd.cookie = -EBUSY;
911
first->len = buf_len;
912
913
return &first->txd;
914
915
err_desc_get:
916
dev_err(chan2dev(chan), "not enough descriptors available\n");
917
atc_desc_put(atchan, first);
918
err_out:
919
clear_bit(ATC_IS_CYCLIC, &atchan->status);
920
return NULL;
921
}
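/*
 * Illustrative cyclic client sketch (assumptions: "buf" is the bus
 * address of a ring buffer of buf_len bytes split into equal periods;
 * my_period_elapsed/my_ctx are hypothetical):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
 *			period_len, DMA_TO_DEVICE);
 *	if (!tx)
 *		return -ENOMEM;
 *	tx->callback = my_period_elapsed;
 *	tx->callback_param = my_ctx;
 *	tx->tx_submit(tx);
 *
 * Note that atc_issue_pending() below ignores cyclic channels, so the
 * ring starts as soon as it is submitted on an idle channel.
 */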
922
923
924
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
925
unsigned long arg)
926
{
927
struct at_dma_chan *atchan = to_at_dma_chan(chan);
928
struct at_dma *atdma = to_at_dma(chan->device);
929
int chan_id = atchan->chan_common.chan_id;
930
931
LIST_HEAD(list);
932
933
dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
934
935
if (cmd == DMA_PAUSE) {
936
spin_lock_bh(&atchan->lock);
937
938
dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
939
set_bit(ATC_IS_PAUSED, &atchan->status);
940
941
spin_unlock_bh(&atchan->lock);
942
} else if (cmd == DMA_RESUME) {
943
if (!test_bit(ATC_IS_PAUSED, &atchan->status))
944
return 0;
945
946
spin_lock_bh(&atchan->lock);
947
948
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
949
clear_bit(ATC_IS_PAUSED, &atchan->status);
950
951
spin_unlock_bh(&atchan->lock);
952
} else if (cmd == DMA_TERMINATE_ALL) {
953
struct at_desc *desc, *_desc;
954
/*
955
* This is only called when something went wrong elsewhere, so
956
* we don't really care about the data. Just disable the
957
* channel. We still have to poll the channel enable bit due
958
* to AHB/HSB limitations.
959
*/
960
spin_lock_bh(&atchan->lock);
961
962
/* disabling channel: must also remove suspend state */
963
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
964
965
/* confirm that this channel is disabled */
966
while (dma_readl(atdma, CHSR) & atchan->mask)
967
cpu_relax();
968
969
/* active_list entries will end up before queued entries */
970
list_splice_init(&atchan->queue, &list);
971
list_splice_init(&atchan->active_list, &list);
972
973
/* Flush all pending and queued descriptors */
974
list_for_each_entry_safe(desc, _desc, &list, desc_node)
975
atc_chain_complete(atchan, desc);
976
977
clear_bit(ATC_IS_PAUSED, &atchan->status);
978
/* if channel dedicated to cyclic operations, free it */
979
clear_bit(ATC_IS_CYCLIC, &atchan->status);
980
981
spin_unlock_bh(&atchan->lock);
982
} else {
983
return -ENXIO;
984
}
985
986
return 0;
987
}
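/*
 * Illustrative control sketch (not part of this file): a client reaches
 * the handler above through the generic device_control hook that
 * at_dma_probe() installs for slave/cyclic capable controllers:
 *
 *	chan->device->device_control(chan, DMA_PAUSE, 0);
 *	chan->device->device_control(chan, DMA_RESUME, 0);
 *	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
 */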
988
989
/**
990
* atc_tx_status - poll for transaction completion
991
* @chan: DMA channel
992
* @cookie: transaction identifier to check status of
993
* @txstate: if not %NULL updated with transaction state
994
*
995
* If @txstate is passed in, upon return it reflects the driver
996
* internal state and can be used with dma_async_is_complete() to check
997
* the status of multiple cookies without re-checking hardware state.
998
*/
999
static enum dma_status
1000
atc_tx_status(struct dma_chan *chan,
1001
dma_cookie_t cookie,
1002
struct dma_tx_state *txstate)
1003
{
1004
struct at_dma_chan *atchan = to_at_dma_chan(chan);
1005
dma_cookie_t last_used;
1006
dma_cookie_t last_complete;
1007
enum dma_status ret;
1008
1009
spin_lock_bh(&atchan->lock);
1010
1011
last_complete = atchan->completed_cookie;
1012
last_used = chan->cookie;
1013
1014
ret = dma_async_is_complete(cookie, last_complete, last_used);
1015
if (ret != DMA_SUCCESS) {
1016
atc_cleanup_descriptors(atchan);
1017
1018
last_complete = atchan->completed_cookie;
1019
last_used = chan->cookie;
1020
1021
ret = dma_async_is_complete(cookie, last_complete, last_used);
1022
}
1023
1024
spin_unlock_bh(&atchan->lock);
1025
1026
if (ret != DMA_SUCCESS)
1027
dma_set_tx_state(txstate, last_complete, last_used,
1028
atc_first_active(atchan)->len);
1029
else
1030
dma_set_tx_state(txstate, last_complete, last_used, 0);
1031
1032
if (test_bit(ATC_IS_PAUSED, &atchan->status))
1033
ret = DMA_PAUSED;
1034
1035
dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
1036
ret, cookie, last_complete ? last_complete : 0,
1037
last_used ? last_used : 0);
1038
1039
return ret;
1040
}
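/*
 * Illustrative polling sketch (assuming "cookie" came from an earlier
 * tx_submit on this channel):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = chan->device->device_tx_status(chan, cookie, &state);
 *	if (status == DMA_SUCCESS)
 *		;	// this transfer and all earlier ones are done
 *	else if (status == DMA_PAUSED)
 *		;	// channel is suspended via DMA_PAUSE
 *	else
 *		;	// still in flight; state.residue is the driver's
 *			// (coarse) estimate of the bytes left
 */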
1041
1042
/**
1043
* atc_issue_pending - try to finish work
1044
* @chan: target DMA channel
1045
*/
1046
static void atc_issue_pending(struct dma_chan *chan)
1047
{
1048
struct at_dma_chan *atchan = to_at_dma_chan(chan);
1049
1050
dev_vdbg(chan2dev(chan), "issue_pending\n");
1051
1052
/* Not needed for cyclic transfers */
1053
if (test_bit(ATC_IS_CYCLIC, &atchan->status))
1054
return;
1055
1056
spin_lock_bh(&atchan->lock);
1057
if (!atc_chan_is_enabled(atchan)) {
1058
atc_advance_work(atchan);
1059
}
1060
spin_unlock_bh(&atchan->lock);
1061
}
1062
1063
/**
1064
* atc_alloc_chan_resources - allocate resources for DMA channel
1065
* @chan: allocate descriptor resources for this channel
1066
1067
*
1068
* return - the number of allocated descriptors
1069
*/
1070
static int atc_alloc_chan_resources(struct dma_chan *chan)
1071
{
1072
struct at_dma_chan *atchan = to_at_dma_chan(chan);
1073
struct at_dma *atdma = to_at_dma(chan->device);
1074
struct at_desc *desc;
1075
struct at_dma_slave *atslave;
1076
int i;
1077
u32 cfg;
1078
LIST_HEAD(tmp_list);
1079
1080
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1081
1082
/* ASSERT: channel is idle */
1083
if (atc_chan_is_enabled(atchan)) {
1084
dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1085
return -EIO;
1086
}
1087
1088
cfg = ATC_DEFAULT_CFG;
1089
1090
atslave = chan->private;
1091
if (atslave) {
1092
/*
1093
* We need controller-specific data to set up slave
1094
* transfers.
1095
*/
1096
BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1097
1098
/* if a cfg configuration is specified, take it instead of the default */
1099
if (atslave->cfg)
1100
cfg = atslave->cfg;
1101
}
1102
1103
/* have we already been set up?
1104
* reconfigure channel but no need to reallocate descriptors */
1105
if (!list_empty(&atchan->free_list))
1106
return atchan->descs_allocated;
1107
1108
/* Allocate initial pool of descriptors */
1109
for (i = 0; i < init_nr_desc_per_channel; i++) {
1110
desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1111
if (!desc) {
1112
dev_err(atdma->dma_common.dev,
1113
"Only %d initial descriptors\n", i);
1114
break;
1115
}
1116
list_add_tail(&desc->desc_node, &tmp_list);
1117
}
1118
1119
spin_lock_bh(&atchan->lock);
1120
atchan->descs_allocated = i;
1121
list_splice(&tmp_list, &atchan->free_list);
1122
atchan->completed_cookie = chan->cookie = 1;
1123
spin_unlock_bh(&atchan->lock);
1124
1125
/* channel parameters */
1126
channel_writel(atchan, CFG, cfg);
1127
1128
dev_dbg(chan2dev(chan),
1129
"alloc_chan_resources: allocated %d descriptors\n",
1130
atchan->descs_allocated);
1131
1132
return atchan->descs_allocated;
1133
}
1134
1135
/**
1136
* atc_free_chan_resources - free all channel resources
1137
* @chan: DMA channel
1138
*/
1139
static void atc_free_chan_resources(struct dma_chan *chan)
1140
{
1141
struct at_dma_chan *atchan = to_at_dma_chan(chan);
1142
struct at_dma *atdma = to_at_dma(chan->device);
1143
struct at_desc *desc, *_desc;
1144
LIST_HEAD(list);
1145
1146
dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
1147
atchan->descs_allocated);
1148
1149
/* ASSERT: channel is idle */
1150
BUG_ON(!list_empty(&atchan->active_list));
1151
BUG_ON(!list_empty(&atchan->queue));
1152
BUG_ON(atc_chan_is_enabled(atchan));
1153
1154
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1155
dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1156
list_del(&desc->desc_node);
1157
/* free link descriptor */
1158
dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1159
}
1160
list_splice_init(&atchan->free_list, &list);
1161
atchan->descs_allocated = 0;
1162
atchan->status = 0;
1163
1164
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1165
}
1166
1167
1168
/*-- Module Management -----------------------------------------------*/
1169
1170
/**
1171
* at_dma_off - disable DMA controller
1172
* @atdma: the Atmel HDMAC device
1173
*/
1174
static void at_dma_off(struct at_dma *atdma)
1175
{
1176
dma_writel(atdma, EN, 0);
1177
1178
/* disable all interrupts */
1179
dma_writel(atdma, EBCIDR, -1L);
1180
1181
/* confirm that all channels are disabled */
1182
while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1183
cpu_relax();
1184
}
1185
1186
static int __init at_dma_probe(struct platform_device *pdev)
1187
{
1188
struct at_dma_platform_data *pdata;
1189
struct resource *io;
1190
struct at_dma *atdma;
1191
size_t size;
1192
int irq;
1193
int err;
1194
int i;
1195
1196
/* get DMA Controller parameters from platform */
1197
pdata = pdev->dev.platform_data;
1198
if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
1199
return -EINVAL;
1200
1201
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1202
if (!io)
1203
return -EINVAL;
1204
1205
irq = platform_get_irq(pdev, 0);
1206
if (irq < 0)
1207
return irq;
1208
1209
size = sizeof(struct at_dma);
1210
size += pdata->nr_channels * sizeof(struct at_dma_chan);
1211
atdma = kzalloc(size, GFP_KERNEL);
1212
if (!atdma)
1213
return -ENOMEM;
1214
1215
/* discover transaction capabilities from the platform data */
1216
atdma->dma_common.cap_mask = pdata->cap_mask;
1217
atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
1218
1219
size = io->end - io->start + 1;
1220
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1221
err = -EBUSY;
1222
goto err_kfree;
1223
}
1224
1225
atdma->regs = ioremap(io->start, size);
1226
if (!atdma->regs) {
1227
err = -ENOMEM;
1228
goto err_release_r;
1229
}
1230
1231
atdma->clk = clk_get(&pdev->dev, "dma_clk");
1232
if (IS_ERR(atdma->clk)) {
1233
err = PTR_ERR(atdma->clk);
1234
goto err_clk;
1235
}
1236
clk_enable(atdma->clk);
1237
1238
/* force dma off, just in case */
1239
at_dma_off(atdma);
1240
1241
err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1242
if (err)
1243
goto err_irq;
1244
1245
platform_set_drvdata(pdev, atdma);
1246
1247
/* create a pool of consistent memory blocks for hardware descriptors */
1248
atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1249
&pdev->dev, sizeof(struct at_desc),
1250
4 /* word alignment */, 0);
1251
if (!atdma->dma_desc_pool) {
1252
dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1253
err = -ENOMEM;
1254
goto err_pool_create;
1255
}
1256
1257
/* clear any pending interrupt */
1258
while (dma_readl(atdma, EBCISR))
1259
cpu_relax();
1260
1261
/* initialize channels related values */
1262
INIT_LIST_HEAD(&atdma->dma_common.channels);
1263
for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
1264
struct at_dma_chan *atchan = &atdma->chan[i];
1265
1266
atchan->chan_common.device = &atdma->dma_common;
1267
atchan->chan_common.cookie = atchan->completed_cookie = 1;
1268
atchan->chan_common.chan_id = i;
1269
list_add_tail(&atchan->chan_common.device_node,
1270
&atdma->dma_common.channels);
1271
1272
atchan->ch_regs = atdma->regs + ch_regs(i);
1273
spin_lock_init(&atchan->lock);
1274
atchan->mask = 1 << i;
1275
1276
INIT_LIST_HEAD(&atchan->active_list);
1277
INIT_LIST_HEAD(&atchan->queue);
1278
INIT_LIST_HEAD(&atchan->free_list);
1279
1280
tasklet_init(&atchan->tasklet, atc_tasklet,
1281
(unsigned long)atchan);
1282
atc_enable_irq(atchan);
1283
}
1284
1285
/* set base routines */
1286
atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1287
atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1288
atdma->dma_common.device_tx_status = atc_tx_status;
1289
atdma->dma_common.device_issue_pending = atc_issue_pending;
1290
atdma->dma_common.dev = &pdev->dev;
1291
1292
/* set prep routines based on capability */
1293
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1294
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1295
1296
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
1297
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1298
1299
if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
1300
atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1301
1302
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
1303
dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
1304
atdma->dma_common.device_control = atc_control;
1305
1306
dma_writel(atdma, EN, AT_DMA_ENABLE);
1307
1308
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
1309
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1310
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1311
atdma->dma_common.chancnt);
1312
1313
dma_async_device_register(&atdma->dma_common);
1314
1315
return 0;
1316
1317
err_pool_create:
1318
platform_set_drvdata(pdev, NULL);
1319
free_irq(platform_get_irq(pdev, 0), atdma);
1320
err_irq:
1321
clk_disable(atdma->clk);
1322
clk_put(atdma->clk);
1323
err_clk:
1324
iounmap(atdma->regs);
1325
atdma->regs = NULL;
1326
err_release_r:
1327
release_mem_region(io->start, size);
1328
err_kfree:
1329
kfree(atdma);
1330
return err;
1331
}
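/*
 * Illustrative board-code sketch (hypothetical, not part of this file):
 * the probe above expects dev.platform_data to point at a
 * struct at_dma_platform_data, plus one MEM resource for the register
 * window and one IRQ resource. Roughly:
 *
 *	static struct at_dma_platform_data at_hdmac_pdata = {
 *		.nr_channels = 8,		// example value
 *	};
 *
 *	// somewhere in the board init code:
 *	dma_cap_set(DMA_MEMCPY, at_hdmac_pdata.cap_mask);
 *	dma_cap_set(DMA_SLAVE, at_hdmac_pdata.cap_mask);
 *	// ... then register a platform_device named "at_hdmac" carrying
 *	// at_hdmac_pdata and the controller's MMIO/IRQ resources.
 */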
1332
1333
static int __exit at_dma_remove(struct platform_device *pdev)
1334
{
1335
struct at_dma *atdma = platform_get_drvdata(pdev);
1336
struct dma_chan *chan, *_chan;
1337
struct resource *io;
1338
1339
at_dma_off(atdma);
1340
dma_async_device_unregister(&atdma->dma_common);
1341
1342
dma_pool_destroy(atdma->dma_desc_pool);
1343
platform_set_drvdata(pdev, NULL);
1344
free_irq(platform_get_irq(pdev, 0), atdma);
1345
1346
list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1347
device_node) {
1348
struct at_dma_chan *atchan = to_at_dma_chan(chan);
1349
1350
/* Disable interrupts */
1351
atc_disable_irq(atchan);
1352
tasklet_disable(&atchan->tasklet);
1353
1354
tasklet_kill(&atchan->tasklet);
1355
list_del(&chan->device_node);
1356
}
1357
1358
clk_disable(atdma->clk);
1359
clk_put(atdma->clk);
1360
1361
iounmap(atdma->regs);
1362
atdma->regs = NULL;
1363
1364
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1365
release_mem_region(io->start, io->end - io->start + 1);
1366
1367
kfree(atdma);
1368
1369
return 0;
1370
}
1371
1372
static void at_dma_shutdown(struct platform_device *pdev)
1373
{
1374
struct at_dma *atdma = platform_get_drvdata(pdev);
1375
1376
at_dma_off(platform_get_drvdata(pdev));
1377
clk_disable(atdma->clk);
1378
}
1379
1380
static int at_dma_suspend_noirq(struct device *dev)
1381
{
1382
struct platform_device *pdev = to_platform_device(dev);
1383
struct at_dma *atdma = platform_get_drvdata(pdev);
1384
1385
at_dma_off(platform_get_drvdata(pdev));
1386
clk_disable(atdma->clk);
1387
return 0;
1388
}
1389
1390
static int at_dma_resume_noirq(struct device *dev)
1391
{
1392
struct platform_device *pdev = to_platform_device(dev);
1393
struct at_dma *atdma = platform_get_drvdata(pdev);
1394
1395
clk_enable(atdma->clk);
1396
dma_writel(atdma, EN, AT_DMA_ENABLE);
1397
return 0;
1398
}
1399
1400
static const struct dev_pm_ops at_dma_dev_pm_ops = {
1401
.suspend_noirq = at_dma_suspend_noirq,
1402
.resume_noirq = at_dma_resume_noirq,
1403
};
1404
1405
static struct platform_driver at_dma_driver = {
1406
.remove = __exit_p(at_dma_remove),
1407
.shutdown = at_dma_shutdown,
1408
.driver = {
1409
.name = "at_hdmac",
1410
.pm = &at_dma_dev_pm_ops,
1411
},
1412
};
1413
1414
static int __init at_dma_init(void)
1415
{
1416
return platform_driver_probe(&at_dma_driver, at_dma_probe);
1417
}
1418
subsys_initcall(at_dma_init);
1419
1420
static void __exit at_dma_exit(void)
1421
{
1422
platform_driver_unregister(&at_dma_driver);
1423
}
1424
module_exit(at_dma_exit);
1425
1426
MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
1427
MODULE_AUTHOR("Nicolas Ferre <[email protected]>");
1428
MODULE_LICENSE("GPL");
1429
MODULE_ALIAS("platform:at_hdmac");
1430
1431