GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/dma-jz4780.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <[email protected]>
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Global registers. */
#define JZ_DMA_REG_DMAC 0x00
#define JZ_DMA_REG_DIRQP 0x04
#define JZ_DMA_REG_DDR 0x08
#define JZ_DMA_REG_DDRS 0x0c
#define JZ_DMA_REG_DCKE 0x10
#define JZ_DMA_REG_DCKES 0x14
#define JZ_DMA_REG_DCKEC 0x18
#define JZ_DMA_REG_DMACP 0x1c
#define JZ_DMA_REG_DSIRQP 0x20
#define JZ_DMA_REG_DSIRQM 0x24
#define JZ_DMA_REG_DCIRQP 0x28
#define JZ_DMA_REG_DCIRQM 0x2c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n) ((n) * 0x20)
#define JZ_DMA_REG_DSA 0x00
#define JZ_DMA_REG_DTA 0x04
#define JZ_DMA_REG_DTC 0x08
#define JZ_DMA_REG_DRT 0x0c
#define JZ_DMA_REG_DCS 0x10
#define JZ_DMA_REG_DCM 0x14
#define JZ_DMA_REG_DDA 0x18
#define JZ_DMA_REG_DSD 0x1c

#define JZ_DMA_DMAC_DMAE BIT(0)
#define JZ_DMA_DMAC_AR BIT(2)
#define JZ_DMA_DMAC_HLT BIT(3)
#define JZ_DMA_DMAC_FAIC BIT(27)
#define JZ_DMA_DMAC_FMSC BIT(31)

#define JZ_DMA_DRT_AUTO 0x8

#define JZ_DMA_DCS_CTE BIT(0)
#define JZ_DMA_DCS_HLT BIT(2)
#define JZ_DMA_DCS_TT BIT(3)
#define JZ_DMA_DCS_AR BIT(4)
#define JZ_DMA_DCS_DES8 BIT(30)

#define JZ_DMA_DCM_LINK BIT(0)
#define JZ_DMA_DCM_TIE BIT(1)
#define JZ_DMA_DCM_STDE BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT 8
#define JZ_DMA_DCM_TSZ_MASK (0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT 12
#define JZ_DMA_DCM_SP_SHIFT 14
#define JZ_DMA_DCM_DAI BIT(22)
#define JZ_DMA_DCM_SAI BIT(23)

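/*
 * DCM.TSZ transfer size encodings. Note the values are not in size order:
 * 4 bytes is 0x0, while 1 and 2 bytes are 0x1 and 0x2.
 */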
#define JZ_DMA_SIZE_4_BYTE 0x0
#define JZ_DMA_SIZE_1_BYTE 0x1
#define JZ_DMA_SIZE_2_BYTE 0x2
#define JZ_DMA_SIZE_16_BYTE 0x3
#define JZ_DMA_SIZE_32_BYTE 0x4
#define JZ_DMA_SIZE_64_BYTE 0x5
#define JZ_DMA_SIZE_128_BYTE 0x6

#define JZ_DMA_WIDTH_32_BIT 0x0
#define JZ_DMA_WIDTH_8_BIT 0x1
#define JZ_DMA_WIDTH_16_BIT 0x2

#define JZ_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
			  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
			  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define JZ4780_DMA_CTRL_OFFSET 0x1000

/* macros for use with jz4780_dma_soc_data.flags */
#define JZ_SOC_DATA_ALLOW_LEGACY_DT BIT(0)
#define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
#define JZ_SOC_DATA_BREAK_LINKS BIT(4)

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 */
struct jz4780_dma_hwdesc {
	u32 dcm;
	u32 dsa;
	u32 dta;
	u32 dtc;
};

/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE PAGE_SIZE
#define JZ_DMA_MAX_DESC \
	(JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))

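/*
 * Software descriptor: wraps one virt-dma descriptor around the block of
 * hardware descriptors allocated from the channel's dma_pool.
 */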
struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	u32 transfer_type;
	u32 status;
};

struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	u32 transfer_type_tx, transfer_type_rx;
	u32 transfer_shift;
	struct dma_slave_config config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};

struct jz4780_dma_soc_data {
	unsigned int nb_channels;
	unsigned int transfer_ord_max;
	unsigned long flags;
};

struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *chn_base;
	void __iomem *ctrl_base;
	struct clk *clk;
	unsigned int irq;
	const struct jz4780_dma_soc_data *soc_data;

	u32 chan_reserved;
	struct jz4780_dma_chan chan[];
};

struct jz4780_dma_filter_data {
	u32 transfer_type_tx, transfer_type_rx;
	int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}

static inline u32 jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg)
{
	return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg, u32 val)
{
	writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline u32 jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, u32 val)
{
	writel(val, jzdma->ctrl_base + reg);
}

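/*
 * Per-channel clock gating. On SoCs with the set/clear registers, DCKES
 * ungates a channel's clock and DCKEC gates it again; SoCs with the
 * JZ_SOC_DATA_NO_DCKES_DCKEC quirk only have DCKE, so the clock is enabled
 * through that register and never gated off again (disable is a no-op there).
 */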
static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
		unsigned int reg;

		if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
			reg = JZ_DMA_REG_DCKE;
		else
			reg = JZ_DMA_REG_DCKES;

		jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
	}
}

static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
	    !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}

static struct jz4780_dma_desc *
jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count,
		      enum dma_transaction_type type,
		      enum dma_transfer_direction direction)
{
	struct jz4780_dma_desc *desc;

	if (count > JZ_DMA_MAX_DESC)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
				    &desc->desc_phys);
	if (!desc->desc) {
		kfree(desc);
		return NULL;
	}

	desc->count = count;
	desc->type = type;

	if (direction == DMA_DEV_TO_MEM)
		desc->transfer_type = jzchan->transfer_type_rx;
	else
		desc->transfer_type = jzchan->transfer_type_tx;

	return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}

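/*
 * Select the DCM.TSZ encoding for a transfer. @val is the bitwise OR of
 * every address, length and (width * burst) constraint, so ffs() yields the
 * largest power-of-two unit that all of them are aligned to; the chosen
 * order is also stored in *shift for converting byte counts to DTC units.
 */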
static u32 jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
	unsigned long val, u32 *shift)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	int ord = ffs(val) - 1;

	/*
	 * 8-byte transfer sizes are unsupported, so fall back on 4 bytes. If
	 * the size is larger than the maximum, just limit it. It is perfectly
	 * safe to fall back in this way since we won't exceed the maximum
	 * burst size supported by the device; the only effect is reduced
	 * efficiency. This is better than refusing to perform the request at
	 * all.
	 */
	if (ord == 3)
		ord = 2;
	else if (ord > jzdma->soc_data->transfer_ord_max)
		ord = jzdma->soc_data->transfer_ord_max;

	*shift = ord;

	switch (ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	default:
		return JZ_DMA_SIZE_128_BYTE;
	}
}

static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	u32 width, maxburst, tsz;

	if (direction == DMA_MEM_TO_DEV) {
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * This calculates the maximum transfer size that can be used with the
	 * given address, length, width and maximum burst size. The address
	 * must be aligned to the transfer size, the total length must be
	 * divisible by the transfer size, and we must not use more than the
	 * maximum burst specified by the user.
	 */
	tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
				       &jzchan->transfer_shift);

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
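		/*
		 * The generic DMA_SLAVE_BUSWIDTH_{1_BYTE,2_BYTES} values (1
		 * and 2) already match the hardware's JZ_DMA_WIDTH_8_BIT and
		 * JZ_DMA_WIDTH_16_BIT encodings, so width is used unchanged.
		 */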
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	desc->dtc = len >> jzchan->transfer_shift;
	return 0;
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_desc *desc;
	unsigned int i;
	int err;

	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction);
	if (!desc)
		return NULL;

	for (i = 0; i < sg_len; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
					      sg_dma_address(&sgl[i]),
					      sg_dma_len(&sgl[i]),
					      direction);
		if (err < 0) {
			jz4780_dma_desc_free(&jzchan->desc->vdesc);
			return NULL;
		}

		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

		if (i != (sg_len - 1) &&
		    !(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
			/* Automatically proceed to the next descriptor. */
			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

			/*
			 * The upper 8 bits of the DTC field in the descriptor
			 * must be set to (offset from descriptor base of next
			 * descriptor >> 4).
			 */
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int periods, i;
	int err;

	if (buf_len % period_len)
		return NULL;

	periods = buf_len / period_len;

	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction);
	if (!desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
					      period_len, direction);
		if (err < 0) {
			jz4780_dma_desc_free(&jzchan->desc->vdesc);
			return NULL;
		}

		buf_addr += period_len;

		/*
		 * Set the link bit to indicate that the controller should
		 * automatically proceed to the next descriptor. In
		 * jz4780_dma_begin(), this will be cleared if we need to issue
		 * an interrupt after each period.
		 */
		desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

		/*
		 * The upper 8 bits of the DTC field in the descriptor must be
		 * set to (offset from descriptor base of next descriptor >> 4).
		 * If this is the last descriptor, link it back to the first,
		 * i.e. leave offset set to 0, otherwise point to the next one.
		 */
		if (i != (periods - 1)) {
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	u32 tsz;

	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0);
	if (!desc)
		return NULL;

	tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
				       &jzchan->transfer_shift);

	desc->transfer_type = JZ_DMA_DRT_AUTO;

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> jzchan->transfer_shift;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

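/*
 * Start (or advance) the current transfer on the hardware. Must be called
 * with the vchan lock held; callers are jz4780_dma_issue_pending() and the
 * per-channel interrupt handler.
 */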
static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The DMA controller doesn't support triggering an
			 * interrupt after processing each descriptor, only
			 * after processing an entire terminated list of
			 * descriptors. For a cyclic DMA setup the list of
			 * descriptors is not terminated, so we can never get
			 * an interrupt.
			 *
			 * If the user requested a callback for a cyclic DMA
			 * setup then we work around this hardware limitation
			 * here by degrading to a set of unlinked descriptors
			 * which we will submit in sequence in response to the
			 * completion of processing the previous descriptor.
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, therefore this must be one
		 * for which we unlinked the descriptors above. Advance to the
		 * next one in the list.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Enable the channel's clock. */
	jz4780_dma_chan_enable(jzdma, jzchan->id);

	/* Use 4-word descriptors. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	/* Set transfer type. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
			      jzchan->desc->transfer_type);

	/*
	 * Set the transfer count. This is redundant for a descriptor-driven
	 * transfer. However, there can be a delay between the transfer start
	 * time and when the DTCn register contains the new transfer count.
	 * Setting it explicitly ensures residue is computed correctly at all
	 * times.
	 */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
			      jzchan->desc->desc[jzchan->curr_hwdesc].dtc);

	/* Write descriptor address and initiate descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
			      JZ_DMA_DCS_CTE);
}

static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
		jz4780_dma_begin(jzchan);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
	if (jzchan->desc) {
		vchan_terminate_vdesc(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	jz4780_dma_chan_disable(jzdma, jzchan->id);

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);
	return 0;
}

static void jz4780_dma_synchronize(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);

	vchan_synchronize(&jzchan->vchan);
	jz4780_dma_chan_disable(jzdma, jzchan->id);
}

static int jz4780_dma_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
		return -EINVAL;

	/* Copy the rest of the slave configuration; it is used later. */
	memcpy(&jzchan->config, config, sizeof(jzchan->config));

	return 0;
}

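/*
 * Residue in bytes for a transfer: the sum of the counts of the hardware
 * descriptors not yet processed, plus (once the transfer is under way) the
 * count remaining in the channel's DTC register. Counts are in transfer-size
 * units, hence the final shift.
 */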
static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int count = 0;
	unsigned int i;

	for (i = next_sg; i < desc->count; i++)
		count += desc->desc[i].dtc & GENMASK(23, 0);

	if (next_sg != 0)
		count += jz4780_dma_chn_readl(jzdma, jzchan->id,
					      JZ_DMA_REG_DTC);

	return count << jzchan->transfer_shift;
}

static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	unsigned long residue = 0;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);
	if ((status == DMA_COMPLETE) || (txstate == NULL))
		goto out_unlock_irqrestore;

	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
	if (vdesc) {
		/* On the issued list, so hasn't been processed yet */
		residue = jz4780_dma_desc_residue(jzchan,
					to_jz4780_dma_desc(vdesc), 0);
	} else if (cookie == jzchan->desc->vdesc.tx.cookie) {
		residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
					jzchan->curr_hwdesc + 1);
	}
	dma_set_residue(txstate, residue);

	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
		status = DMA_ERROR;

out_unlock_irqrestore:
	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
	return status;
}

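/*
 * Handle an interrupt on one channel. Returns true if the channel's bit in
 * DIRQP should be acknowledged (cleared), false if the interrupt was a
 * false positive and the transfer is being resumed.
 */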
static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
	struct jz4780_dma_chan *jzchan)
{
	const unsigned int soc_flags = jzdma->soc_data->flags;
	struct jz4780_dma_desc *desc = jzchan->desc;
	u32 dcs;
	bool ack = true;

	spin_lock(&jzchan->vchan.lock);

	dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	if (dcs & JZ_DMA_DCS_AR) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "address error (DCS=0x%x)\n", dcs);
	}

	if (dcs & JZ_DMA_DCS_HLT) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "channel halt (DCS=0x%x)\n", dcs);
	}

	if (jzchan->desc) {
		jzchan->desc->status = dcs;

		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
			if (jzchan->desc->type == DMA_CYCLIC) {
				vchan_cyclic_callback(&jzchan->desc->vdesc);

				jz4780_dma_begin(jzchan);
			} else if (dcs & JZ_DMA_DCS_TT) {
				if (!(soc_flags & JZ_SOC_DATA_BREAK_LINKS) ||
				    (jzchan->curr_hwdesc + 1 == desc->count)) {
					vchan_cookie_complete(&desc->vdesc);
					jzchan->desc = NULL;
				}

				jz4780_dma_begin(jzchan);
			} else {
				/* False positive - continue the transfer */
				ack = false;
				jz4780_dma_chn_writel(jzdma, jzchan->id,
						      JZ_DMA_REG_DCS,
						      JZ_DMA_DCS_CTE);
			}
		}
	} else {
		dev_err(&jzchan->vchan.chan.dev->device,
			"channel IRQ with no active transfer\n");
	}

	spin_unlock(&jzchan->vchan.lock);

	return ack;
}

static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
	struct jz4780_dma_dev *jzdma = data;
	unsigned int nb_channels = jzdma->soc_data->nb_channels;
	unsigned long pending;
	u32 dmac;
	int i;

	pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);

	for_each_set_bit(i, &pending, nb_channels) {
		if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
			pending &= ~BIT(i);
	}

	/* Clear halt and address error status of all channels. */
	dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

	/* Clear interrupt pending status. */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);

	return IRQ_HANDLED;
}

static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
					    chan->device->dev,
					    JZ_DMA_DESC_BLOCK_SIZE,
					    PAGE_SIZE, 0);
	if (!jzchan->desc_pool) {
		dev_err(&chan->dev->device,
			"failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	vchan_free_chan_resources(&jzchan->vchan);
	dma_pool_destroy(jzchan->desc_pool);
	jzchan->desc_pool = NULL;
}

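/*
 * dma_filter_fn used for dynamically allocated requests: match the exact
 * channel if one was specified, otherwise any channel that has not been
 * reserved via the "ingenic,reserved-channels" property.
 */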
static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_filter_data *data = param;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
			return false;
	} else if (jzdma->chan_reserved & BIT(jzchan->id)) {
		return false;
	}

	jzchan->transfer_type_tx = data->transfer_type_tx;
	jzchan->transfer_type_rx = data->transfer_type_rx;

	return true;
}

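/*
 * Translate a DT dma-spec into a channel. Two forms are accepted: two cells
 * (transfer type, channel), where the type is used for both directions, or
 * three cells (TX type, RX type, channel). An illustrative consumer node,
 * assuming the dt-bindings request-type macros and a channel cell of
 * 0xffffffff (i.e. -1) to let the driver pick any unreserved channel:
 *
 *	dmas = <&dma JZ4780_DMA_MSC0_RX 0xffffffff>;
 */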
static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_filter_data data;

	if (dma_spec->args_count == 2) {
		data.transfer_type_tx = dma_spec->args[0];
		data.transfer_type_rx = dma_spec->args[0];
		data.channel = dma_spec->args[1];
	} else if (dma_spec->args_count == 3) {
		data.transfer_type_tx = dma_spec->args[0];
		data.transfer_type_rx = dma_spec->args[1];
		data.channel = dma_spec->args[2];
	} else {
		return NULL;
	}

	if (data.channel > -1) {
		if (data.channel >= jzdma->soc_data->nb_channels) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %u\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %u\n",
				data.channel);
			return NULL;
		}

		jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx;
		jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx;

		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	} else {
		return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
					     ofdma->of_node);
	}
}

static int jz4780_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct jz4780_dma_soc_data *soc_data;
	struct jz4780_dma_dev *jzdma;
	struct jz4780_dma_chan *jzchan;
	struct dma_device *dd;
	struct resource *res;
	int i, ret;

	if (!dev->of_node) {
		dev_err(dev, "This driver must be probed from devicetree\n");
		return -EINVAL;
	}

	soc_data = device_get_match_data(dev);
	if (!soc_data)
		return -EINVAL;

	jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
			     soc_data->nb_channels), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;

	jzdma->soc_data = soc_data;
	platform_set_drvdata(pdev, jzdma);

	jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(jzdma->chn_base))
		return PTR_ERR(jzdma->chn_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		jzdma->ctrl_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(jzdma->ctrl_base))
			return PTR_ERR(jzdma->ctrl_base);
	} else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
		/*
		 * On JZ4780, if the second memory resource was not supplied,
		 * assume we're using an old devicetree, and calculate the
		 * offset to the control registers.
		 */
		jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
	} else {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	jzdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(jzdma->clk)) {
		dev_err(dev, "failed to get clock\n");
		ret = PTR_ERR(jzdma->clk);
		return ret;
	}

	clk_prepare_enable(jzdma->clk);

	/* The property is optional; if it doesn't exist the value remains 0. */
	of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
				   0, &jzdma->chan_reserved);

	dd = &jzdma->dma_device;

	/*
	 * The real segment size limit is dependent on the size unit selected
	 * for the transfer. Because the size unit is selected automatically
	 * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to
	 * ensure the 24-bit transfer count in the descriptor cannot overflow.
	 */
	dma_set_max_seg_size(dev, 0xffffff);

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	dd->dev = dev;
	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
	dd->device_config = jz4780_dma_config;
	dd->device_terminate_all = jz4780_dma_terminate_all;
	dd->device_synchronize = jz4780_dma_synchronize;
	dd->device_tx_status = jz4780_dma_tx_status;
	dd->device_issue_pending = jz4780_dma_issue_pending;
	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_sg_burst = JZ_DMA_MAX_DESC;

	/*
	 * Enable DMA controller, mark all channels as not programmable.
	 * Also set the FMSC bit - it increases MSC performance, so it makes
	 * little sense not to enable it.
	 */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
			       JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);

	if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);

	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < soc_data->nb_channels; i++) {
		jzchan = &jzdma->chan[i];
		jzchan->id = i;

		vchan_init(&jzchan->vchan, dd);
		jzchan->vchan.desc_free = jz4780_dma_desc_free;
	}

	/*
	 * On JZ4760, chan0 won't enable properly the first time.
	 * Enabling then disabling chan1 will magically make chan0 work
	 * correctly.
	 */
	jz4780_dma_chan_enable(jzdma, 1);
	jz4780_dma_chan_disable(jzdma, 1);

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_disable_clk;

	jzdma->irq = ret;

	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
			  jzdma);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
		goto err_disable_clk;
	}

	ret = dmaenginem_async_device_register(dd);
	if (ret) {
		dev_err(dev, "failed to register device\n");
		goto err_free_irq;
	}

	/* Register with OF DMA helpers. */
	ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
					 jzdma);
	if (ret) {
		dev_err(dev, "failed to register OF DMA controller\n");
		goto err_free_irq;
	}

	dev_info(dev, "JZ4780 DMA controller initialised\n");
	return 0;

err_free_irq:
	free_irq(jzdma->irq, jzdma);

err_disable_clk:
	clk_disable_unprepare(jzdma->clk);
	return ret;
}

static void jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(jzdma->clk);
	free_irq(jzdma->irq, jzdma);

	for (i = 0; i < jzdma->soc_data->nb_channels; i++)
		tasklet_kill(&jzdma->chan[i].vchan.task);
}

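/*
 * Per-SoC data: number of channels, the maximum transfer order (log2 of the
 * largest transfer unit in bytes, e.g. 7 = 128 bytes), and quirk flags.
 */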
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
		 JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4755_dma_soc_data = {
	.nb_channels = 4,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
		 JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760_mdma_soc_data = {
	.nb_channels = 2,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760_bdma_soc_data = {
	.nb_channels = 3,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
	.nb_channels = 5,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4760b_mdma_soc_data = {
	.nb_channels = 2,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4760b_bdma_soc_data = {
	.nb_channels = 3,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
	.nb_channels = 8,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
	{ .compatible = "ingenic,jz4755-dma", .data = &jz4755_dma_soc_data },
	{ .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
	{ .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data },
	{ .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data },
	{ .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
	{ .compatible = "ingenic,jz4760b-mdma", .data = &jz4760b_mdma_soc_data },
	{ .compatible = "ingenic,jz4760b-bdma", .data = &jz4760b_bdma_soc_data },
	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
	{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
	{ .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
	.probe = jz4780_dma_probe,
	.remove = jz4780_dma_remove,
	.driver = {
		.name = "jz4780-dma",
		.of_match_table = jz4780_dma_dt_match,
	},
};

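/*
 * Registered at subsys_initcall time (rather than via module_init) so the
 * DMA controller is available before the client drivers that depend on it
 * probe.
 */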
static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");