GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/dw-edma/dw-edma-core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <[email protected]>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>
#include <linux/string_choices.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-hdma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

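/*
 * Translate a CPU physical address into the PCI bus address seen by the
 * eDMA engine. If the glue driver provides a pci_address() callback the
 * translation is delegated to it; otherwise the two address spaces are
 * assumed to be identical and the address is returned unchanged.
 */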
static inline
u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
{
	struct dw_edma_chip *chip = chan->dw->chip;

	if (chip->ops->pci_address)
		return chip->ops->pci_address(chip->dev, cpu_addr);

	return cpu_addr;
}

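/*
 * Transfers are described by a three-level hierarchy: a descriptor holds a
 * list of chunks (one per pass over the channel's linked-list region) and
 * each chunk holds a list of bursts (one per contiguous block to move).
 * The allocators below use GFP_NOWAIT because descriptor preparation may
 * run in atomic context.
 */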
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chip *chip = desc->chan->dw->chip;
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/*
	 * The change bit (CB) is toggled in each chunk; this is the mechanism
	 * that informs the eDMA HW block that a new linked list is ready to
	 * be consumed.
	 * - Odd chunks get CB equal to 0
	 * - Even chunks get CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	if (chan->dir == EDMA_DIR_WRITE) {
		chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
	} else {
		chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
	}

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

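/*
 * Kick off the next transfer on a channel: take the first chunk of the
 * descriptor at the head of the virt-dma queue, hand it to the eDMA core,
 * account its size and release it. Returns 1 if a chunk was started, 0 if
 * there is nothing left to do.
 */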
static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return 0;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return 0;

	dw_edma_core_start(dw, child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;

	return 1;
}

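/*
 * Report the supported transfer direction of a channel. When the eDMA is
 * controlled locally (DW_EDMA_CHIP_LOCAL), the read channel serves
 * DEV_TO_MEM and the write channel serves MEM_TO_DEV; when it is controlled
 * from the remote side of the PCIe link the mapping is inverted (see the
 * comment in dw_edma_device_transfer()).
 */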
static void dw_edma_device_caps(struct dma_chan *dchan,
				struct dma_slave_caps *caps)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		if (chan->dir == EDMA_DIR_READ)
			caps->directions = BIT(DMA_DEV_TO_MEM);
		else
			caps->directions = BIT(DMA_MEM_TO_DEV);
	} else {
		if (chan->dir == EDMA_DIR_WRITE)
			caps->directions = BIT(DMA_DEV_TO_MEM);
		else
			caps->directions = BIT(DMA_MEM_TO_DEV);
	}
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, probably because an
		 * interrupt was missed or lost
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	if (!chan->configured)
		return;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

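/*
 * dmaengine tx_status callback. On top of the generic cookie bookkeeping it
 * reports DMA_PAUSED while the channel is paused and, when the descriptor
 * is still queued, a residue computed as the allocated size minus the bytes
 * already handed to the hardware.
 */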
static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}

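/*
 * Common backend for the prep_slave_sg, prep_dma_cyclic and
 * prep_interleaved_dma callbacks. It validates the request against the
 * channel direction, then builds the descriptor/chunk/burst hierarchy that
 * describes the transfer, opening a new chunk whenever the current linked
 * list is full.
 */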
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u64 src_addr, dst_addr;
	size_t fsz = 0;
	u32 cnt = 0;
	int i;

	if (!chan->configured)
		return NULL;

	/*
	 * Local Root Port/End-point                Remote End-point
	 * +-----------------------+ PCIe bus    +----------------------+
	 * |                       |    +-+      |                      |
	 * |      DEV_TO_MEM   Rx Ch <----+ +---+ Tx Ch  DEV_TO_MEM     |
	 * |                       |      | |    |                      |
	 * |      MEM_TO_DEV   Tx Ch +----+ +---> Rx Ch  MEM_TO_DEV     |
	 * |                       |    +-+      |                      |
	 * +-----------------------+             +----------------------+
	 *
	 * 1. Normal logic:
	 * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
	 * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
	 * for the device read operations (DEV_TO_MEM) and the Tx channel
	 * (EDMA_DIR_WRITE) for the write operations (MEM_TO_DEV).
	 *
	 * 2. Inverted logic:
	 * If eDMA is embedded into a Remote PCIe EP and is controlled by the
	 * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
	 * channel (EDMA_DIR_WRITE) will be used for the device read operations
	 * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) for the write
	 * operations (MEM_TO_DEV).
	 *
	 * It is the client driver's responsibility to choose a proper channel
	 * for the DMA transfers.
	 */
	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
			return NULL;
	} else {
		if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
			return NULL;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1)
			return NULL;
		if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc)
			return NULL;
	} else {
		return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	if (xfer->type == EDMA_XFER_INTERLEAVED) {
		src_addr = xfer->xfer.il->src_start;
		dst_addr = xfer->xfer.il->dst_start;
	} else {
		src_addr = chan->config.src_addr;
		dst_addr = chan->config.dst_addr;
	}

	if (dir == DMA_DEV_TO_MEM)
		src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
	else
		dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);

	if (xfer->type == EDMA_XFER_CYCLIC) {
		cnt = xfer->xfer.cyclic.cnt;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size;
		fsz = xfer->xfer.il->frame_size;
	}

	for (i = 0; i < cnt; i++) {
		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->type == EDMA_XFER_CYCLIC)
			burst->sz = xfer->xfer.cyclic.len;
		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
			burst->sz = sg_dma_len(sg);
		else if (xfer->type == EDMA_XFER_INTERLEAVED)
			burst->sz = xfer->xfer.il->sgl[i % fsz].size;

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (dir == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				src_addr += sg_dma_len(sg);
				burst->dar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory here
				 * isn't a FIFO but linear memory, which is
				 * why the source and destination addresses
				 * are increased by the same amount (the
				 * data length).
				 */
			} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
				burst->dar = dst_addr;
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				dst_addr += sg_dma_len(sg);
				burst->sar = sg_dma_address(sg);
				/*
				 * Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory here
				 * isn't a FIFO but linear memory, which is
				 * why the source and destination addresses
				 * are increased by the same amount (the
				 * data length).
				 */
			} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
				burst->sar = src_addr;
			}
		}

		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
			sg = sg_next(sg);
		} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
			struct dma_interleaved_template *il = xfer->xfer.il;
			struct data_chunk *dc = &il->sgl[i % fsz];

			src_addr += burst->sz;
			if (il->src_sgl)
				src_addr += dmaengine_get_src_icg(il, dc);

			dst_addr += burst->sz;
			if (il->dst_sgl)
				dst_addr += dmaengine_get_dst_icg(il, dc);
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_SCATTER_GATHER;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_CYCLIC;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
				    struct dma_interleaved_template *ilt,
				    unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = ilt->dir;
	xfer.xfer.il = ilt;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_INTERLEAVED;

	return dw_edma_device_transfer(&xfer);
}

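/*
 * "Done" interrupt handler for a channel. If the current descriptor has no
 * chunks left it is completed; otherwise, or when further requests have been
 * issued, the next chunk is started. Pending EDMA_REQ_STOP/EDMA_REQ_PAUSE
 * requests are honoured here as well.
 */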
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (!desc->chunks_alloc) {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
			}

			/*
			 * Continue transferring if there are remaining
			 * chunks or issued requests.
			 */
			chan->status = dw_edma_start_transfer(chan) ? EDMA_ST_BUSY : EDMA_ST_IDLE;
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

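/*
 * "Abort" interrupt handler: complete the descriptor at the head of the
 * queue and return the channel to the idle state.
 */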
static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	struct dw_edma_irq *dw_irq = data;

	return dw_edma_core_handle_int(dw_irq, EDMA_DIR_WRITE,
				       dw_edma_done_interrupt,
				       dw_edma_abort_interrupt);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	struct dw_edma_irq *dw_irq = data;

	return dw_edma_core_handle_int(dw_irq, EDMA_DIR_READ,
				       dw_edma_done_interrupt,
				       dw_edma_abort_interrupt);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;

	ret |= dw_edma_interrupt_write(irq, data);
	ret |= dw_edma_interrupt_read(irq, data);

	return ret;
}

static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	return 0;
}

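/*
 * Channel teardown: keep retrying dw_edma_device_terminate_all() for up to
 * five seconds so that an in-flight transfer can drain before the channel
 * resources are released.
 */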
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}
}

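/*
 * Initialize every write and read channel (direction, linked-list depth,
 * IRQ/MSI assignment, virt-dma bookkeeping), then describe the engine's
 * capabilities and callbacks and register it with the dmaengine core.
 */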
static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct device *dev = chip->dev;
	struct dw_edma_chan *chan;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 i, ch_cnt;
	u32 pos;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
	dma = &dw->dma;

	INIT_LIST_HEAD(&dma->channels);

	for (i = 0; i < ch_cnt; i++) {
		chan = &dw->chan[i];

		chan->dw = dw;

		if (i < dw->wr_ch_cnt) {
			chan->id = i;
			chan->dir = EDMA_DIR_WRITE;
		} else {
			chan->id = i - dw->wr_ch_cnt;
			chan->dir = EDMA_DIR_READ;
		}

		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		if (chan->dir == EDMA_DIR_WRITE)
			chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
		else
			chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
		chan->ll_max -= 1;

		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
			 str_write_read(chan->dir == EDMA_DIR_WRITE),
			 chan->id, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else if (chan->dir == EDMA_DIR_WRITE)
			pos = chan->id % wr_alloc;
		else
			pos = wr_alloc + chan->id % rd_alloc;

		irq = &dw->irq[pos];

		if (chan->dir == EDMA_DIR_WRITE)
			irq->wr_mask |= BIT(chan->id);
		else
			irq->rd_mask |= BIT(chan->id);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 str_write_read(chan->dir == EDMA_DIR_WRITE),
			 chan->id,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
					&dw->chip->dt_region_wr[chan->id] :
					&dw->chip->dt_region_rd[chan->id];

		vchan_init(&chan->vc, dma);

		dw_edma_core_ch_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_caps = dw_edma_device_caps;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	return dma_async_device_register(dma);
}

static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}

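/*
 * Request the interrupt lines provided by the glue driver. With a single
 * vector one shared handler services both directions; with more vectors the
 * available IRQs are split between write and read channels and the per-IRQ
 * channel masks are built accordingly.
 */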
static int dw_edma_irq_request(struct dw_edma *dw,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct device *dev = dw->chip->dev;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
		return -EINVAL;

	dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
	if (!dw->irq)
		return -ENOMEM;

	if (chip->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = chip->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);

		dw->nr_irqs = 1;
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = chip->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = chip->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err)
				goto err_irq_free;

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return 0;

err_irq_free:
	for (i--; i >= 0; i--) {
		irq = chip->ops->irq_vector(dev, i);
		free_irq(irq, &dw->irq[i]);
	}

	return err;
}

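/**
 * dw_edma_probe() - Attach the eDMA core to a glue driver's chip description
 * @chip: chip-specific configuration filled in by the glue driver
 *
 * Registers the eDMA or HDMA register layer, determines the usable write and
 * read channel counts, requests the interrupts and registers the resulting
 * dmaengine device.
 *
 * Return: 0 on success or a negative errno otherwise.
 */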
int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev || !chip->ops)
		return -EINVAL;

	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->chip = chip;

	if (dw->chip->mf == EDMA_MF_HDMA_NATIVE)
		dw_hdma_v0_core_register(dw);
	else
		dw_edma_v0_core_register(dw);

	raw_spin_lock_init(&dw->lock);

	dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
			      dw_edma_core_ch_count(dw, EDMA_DIR_WRITE));
	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

	dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
			      dw_edma_core_ch_count(dw, EDMA_DIR_READ));
	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s",
		 dev_name(chip->dev));

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write/read channels */
	err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Turn debugfs on */
	dw_edma_core_debugfs_on(dw);

	chip->dw = dw;

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);

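/**
 * dw_edma_remove() - Detach the eDMA core from a chip
 * @chip: chip previously set up by dw_edma_probe()
 *
 * Disables the engine, frees the interrupts, unregisters the dmaengine
 * device and tears down the channel list.
 *
 * Return: 0 on success, -ENODEV if the chip has no private data attached.
 */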
int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Skip removal if no private data found */
	if (!dw)
		return -ENODEV;

	/* Disable eDMA */
	dw_edma_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->dma);
	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <[email protected]>");