GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/mips/alchemy/common/dbdma.c
/*
 *
 * BRIEF MODULE DESCRIPTION
 *	The Descriptor Based DMA channel manager that first appeared
 *	on the Au1550. I started with dma.c, but I think all that is
 *	left is this initial comment :-)
 *
 * Copyright 2004 Embedded Edge, LLC
 *	[email protected]
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>

#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)

/*
 * The Descriptor Based DMA supports up to 16 channels.
 *
 * There are 32 devices defined. We keep an internal structure
 * of devices using these channels, along with additional
 * information.
 *
 * We allocate the descriptors and allow access to them through various
 * functions. The drivers allocate the data buffers and assign them
 * to the descriptors.
 */
static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);

/* I couldn't find a macro that did this... */
#define ALIGN_ADDR(x, a)	((((u32)(x)) + (a-1)) & ~(a-1))
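/*
 * Illustrative note (not in the original source): ALIGN_ADDR() rounds an
 * address up to the next multiple of 'a', which must be a power of two,
 * e.g. ALIGN_ADDR(0x1004, 32) == 0x1020.  It is used below to place the
 * descriptor ring on a descriptor-sized boundary.
 */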

static dbdma_global_t *dbdma_gptr =
			(dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
static int dbdma_initialized;

static dbdev_tab_t dbdev_tab[] = {
#ifdef CONFIG_SOC_AU1550
	/* UARTS */
	{ DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
	{ DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 },
	{ DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 },

	/* EXT DMA */
	{ DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 },

	/* USB DEV */
	{ DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 },
	{ DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 },
	{ DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 },
	{ DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 },
	{ DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 },
	{ DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 },

	/* PSC 0 */
	{ DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
	{ DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 },

	/* PSC 1 */
	{ DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 },
	{ DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 },

	/* PSC 2 */
	{ DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 },
	{ DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 },

	/* PSC 3 */
	{ DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 },
	{ DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 },

	{ DSCR_CMD0_PCI_WRITE, 0, 0, 0, 0x00000000, 0, 0 },	/* PCI */
	{ DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 },	/* NAND */

	/* MAC 0 */
	{ DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

	/* MAC 1 */
	{ DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },

#endif /* CONFIG_SOC_AU1550 */

#ifdef CONFIG_SOC_AU1200
	{ DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 },
	{ DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 },
	{ DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 },
	{ DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x11200000, 0, 0 },

	{ DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
	{ DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
	{ DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
	{ DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 },

	{ DSCR_CMD0_AES_RX, DEV_FLAGS_IN, 4, 32, 0x10300008, 0, 0 },
	{ DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },

	{ DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 },
	{ DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x11a0001c, 0, 0 },
	{ DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 },
	{ DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x11b0001c, 0, 0 },
	{ DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
	{ DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
	{ DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
	{ DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	{ DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },

#endif /* CONFIG_SOC_AU1200 */

	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
	{ DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },

	/* Provide 16 user definable device types */
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
	{ ~0, 0, 0, 0, 0, 0, 0 },
};

#define DBDEV_TAB_SIZE		ARRAY_SIZE(dbdev_tab)


static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];

static dbdev_tab_t *find_dbdev_id(u32 id)
{
	int i;
	dbdev_tab_t *p;
	for (i = 0; i < DBDEV_TAB_SIZE; ++i) {
		p = &dbdev_tab[i];
		if (p->dev_id == id)
			return p;
	}
	return NULL;
}

void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp)
{
	return phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
}
EXPORT_SYMBOL(au1xxx_ddma_get_nextptr_virt);

u32 au1xxx_ddma_add_device(dbdev_tab_t *dev)
{
	u32 ret = 0;
	dbdev_tab_t *p;
	static u16 new_id = 0x1000;

	p = find_dbdev_id(~0);
	if (NULL != p) {
		memcpy(p, dev, sizeof(dbdev_tab_t));
		p->dev_id = DSCR_DEV2CUSTOM_ID(new_id, dev->dev_id);
		ret = p->dev_id;
		new_id++;
#if 0
		printk(KERN_DEBUG "add_device: id:%x flags:%x padd:%x\n",
				  p->dev_id, p->dev_flags, p->dev_physaddr);
#endif
	}

	return ret;
}
EXPORT_SYMBOL(au1xxx_ddma_add_device);
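/*
 * Usage sketch (illustrative only, not part of the original file): a board
 * or driver could describe its own target FIFO in one of the 16 spare
 * slots above and then use the returned custom id with
 * au1xxx_dbdma_chan_alloc().  All field values below are hypothetical.
 *
 *	dbdev_tab_t my_dev = {
 *		.dev_id       = DSCR_CMD0_ALWAYS,
 *		.dev_flags    = DEV_FLAGS_OUT,
 *		.dev_tsize    = 4,
 *		.dev_devwidth = 32,
 *		.dev_physaddr = 0x14000000,	-- hypothetical FIFO address
 *	};
 *	u32 my_id = au1xxx_ddma_add_device(&my_dev);
 */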

void au1xxx_ddma_del_device(u32 devid)
{
	dbdev_tab_t *p = find_dbdev_id(devid);

	if (p != NULL) {
		memset(p, 0, sizeof(dbdev_tab_t));
		p->dev_id = ~0;
	}
}
EXPORT_SYMBOL(au1xxx_ddma_del_device);

/* Allocate a channel and return a non-zero descriptor if successful. */
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
	void (*callback)(int, void *), void *callparam)
{
	unsigned long flags;
	u32 used, chan;
	u32 dcp;
	int i;
	dbdev_tab_t *stp, *dtp;
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;

	/*
	 * We do the initialization on the first channel allocation.
	 * We have to wait because of the interrupt handler initialization
	 * which can't be done successfully during board setup.
	 */
	if (!dbdma_initialized)
		return 0;

	stp = find_dbdev_id(srcid);
	if (stp == NULL)
		return 0;
	dtp = find_dbdev_id(destid);
	if (dtp == NULL)
		return 0;

	used = 0;

	/* Check to see if we can get both channels. */
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	if (!(stp->dev_flags & DEV_FLAGS_INUSE) ||
	     (stp->dev_flags & DEV_FLAGS_ANYUSE)) {
		/* Got source */
		stp->dev_flags |= DEV_FLAGS_INUSE;
		if (!(dtp->dev_flags & DEV_FLAGS_INUSE) ||
		     (dtp->dev_flags & DEV_FLAGS_ANYUSE)) {
			/* Got destination */
			dtp->dev_flags |= DEV_FLAGS_INUSE;
		} else {
			/* Can't get dest.  Release src. */
			stp->dev_flags &= ~DEV_FLAGS_INUSE;
			used++;
		}
	} else
		used++;
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

	if (used)
		return 0;

	/* Let's see if we can allocate a channel for it. */
	ctp = NULL;
	chan = 0;
	spin_lock_irqsave(&au1xxx_dbdma_spin_lock, flags);
	for (i = 0; i < NUM_DBDMA_CHANS; i++)
		if (chan_tab_ptr[i] == NULL) {
			/*
			 * If kmalloc fails, it is caught below same
			 * as a channel not available.
			 */
			ctp = kmalloc(sizeof(chan_tab_t), GFP_ATOMIC);
			chan_tab_ptr[i] = ctp;
			break;
		}
	spin_unlock_irqrestore(&au1xxx_dbdma_spin_lock, flags);

	if (ctp != NULL) {
		memset(ctp, 0, sizeof(chan_tab_t));
		ctp->chan_index = chan = i;
		dcp = KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
		dcp += (0x0100 * chan);
		ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
		cp = (au1x_dma_chan_t *)dcp;
		ctp->chan_src = stp;
		ctp->chan_dest = dtp;
		ctp->chan_callback = callback;
		ctp->chan_callparam = callparam;

		/* Initialize channel configuration. */
		i = 0;
		if (stp->dev_intlevel)
			i |= DDMA_CFG_SED;
		if (stp->dev_intpolarity)
			i |= DDMA_CFG_SP;
		if (dtp->dev_intlevel)
			i |= DDMA_CFG_DED;
		if (dtp->dev_intpolarity)
			i |= DDMA_CFG_DP;
		if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
			(dtp->dev_flags & DEV_FLAGS_SYNC))
			i |= DDMA_CFG_SYNC;
		cp->ddma_cfg = i;
		au_sync();

		/*
		 * Return a non-zero value that can be used to find the channel
		 * information in subsequent operations.
		 */
		return (u32)(&chan_tab_ptr[chan]);
	}

	/* Release devices */
	stp->dev_flags &= ~DEV_FLAGS_INUSE;
	dtp->dev_flags &= ~DEV_FLAGS_INUSE;

	return 0;
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
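/*
 * Illustrative note (not in the original source): the returned "chanid" is
 * the address of the chan_tab_ptr[] slot cast to u32; every API call below
 * recovers the channel with *(chan_tab_t **)chanid.  For a pure memory to
 * memory channel, both ids can name one of the always-request pseudo
 * devices, e.g. (hypothetical call):
 *
 *	u32 chanid = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
 *					     DSCR_CMD0_ALWAYS, NULL, NULL);
 */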

/*
 * Set the device width if source or destination is a FIFO.
 * Should be 8, 16, or 32 bits.
 */
u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
{
	u32 rv;
	chan_tab_t *ctp;
	dbdev_tab_t *stp, *dtp;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;
	rv = 0;

	if (stp->dev_flags & DEV_FLAGS_IN) {	/* Source in fifo */
		rv = stp->dev_devwidth;
		stp->dev_devwidth = bits;
	}
	if (dtp->dev_flags & DEV_FLAGS_OUT) {	/* Destination out fifo */
		rv = dtp->dev_devwidth;
		dtp->dev_devwidth = bits;
	}

	return rv;
}
EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);

/* Allocate a descriptor ring, initializing as much as possible. */
u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
{
	int i;
	u32 desc_base, srcid, destid;
	u32 cmd0, cmd1, src1, dest1;
	u32 src0, dest0;
	chan_tab_t *ctp;
	dbdev_tab_t *stp, *dtp;
	au1x_ddma_desc_t *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table...
	 */
	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;

	/*
	 * The descriptors must be 32-byte aligned.  There is a
	 * possibility the allocation will give us such an address,
	 * and if we try that first we are likely to not waste larger
	 * slabs of memory.
	 */
	desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
				 GFP_KERNEL|GFP_DMA);
	if (desc_base == 0)
		return 0;

	if (desc_base & 0x1f) {
		/*
		 * Lost... do it again, allocate extra, and round
		 * the address base.
		 */
		kfree((const void *)desc_base);
		i = entries * sizeof(au1x_ddma_desc_t);
		i += (sizeof(au1x_ddma_desc_t) - 1);
		desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA);
		if (desc_base == 0)
			return 0;

		ctp->cdb_membase = desc_base;
		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
	} else
		ctp->cdb_membase = desc_base;

	dp = (au1x_ddma_desc_t *)desc_base;

	/* Keep track of the base descriptor. */
	ctp->chan_desc_base = dp;

	/* Initialize the rings with as much information as we know. */
	srcid = stp->dev_id;
	destid = dtp->dev_id;

	cmd0 = cmd1 = src1 = dest1 = 0;
	src0 = dest0 = 0;

	cmd0 |= DSCR_CMD0_SID(srcid);
	cmd0 |= DSCR_CMD0_DID(destid);
	cmd0 |= DSCR_CMD0_IE | DSCR_CMD0_CV;
	cmd0 |= DSCR_CMD0_ST(DSCR_CMD0_ST_NOCHANGE);

	/* Is it mem to mem transfer? */
	if (((DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(srcid) == DSCR_CMD0_ALWAYS)) &&
	    ((DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_THROTTLE) ||
	     (DSCR_CUSTOM2DEV_ID(destid) == DSCR_CMD0_ALWAYS)))
		cmd0 |= DSCR_CMD0_MEM;

	switch (stp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_SW(DSCR_CMD0_WORD);
		break;
	}

	switch (dtp->dev_devwidth) {
	case 8:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_BYTE);
		break;
	case 16:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_HALFWORD);
		break;
	case 32:
	default:
		cmd0 |= DSCR_CMD0_DW(DSCR_CMD0_WORD);
		break;
	}

	/*
	 * If the device is marked as an in/out FIFO, ensure it is
	 * set non-coherent.
	 */
	if (stp->dev_flags & DEV_FLAGS_IN)
		cmd0 |= DSCR_CMD0_SN;		/* Source in FIFO */
	if (dtp->dev_flags & DEV_FLAGS_OUT)
		cmd0 |= DSCR_CMD0_DN;		/* Destination out FIFO */

	/*
	 * Set up source1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (stp->dev_tsize) {
	case 1:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE1);
		break;
	case 2:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE2);
		break;
	case 4:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		src1 |= DSCR_SRC1_STS(DSCR_xTS_SIZE8);
		break;
	}

	/* If source input is FIFO, set static address. */
	if (stp->dev_flags & DEV_FLAGS_IN) {
		if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
		else
			src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
	}

	if (stp->dev_physaddr)
		src0 = stp->dev_physaddr;

	/*
	 * Set up dest1.  For now, assume no stride and increment.
	 * A channel attribute update can change this later.
	 */
	switch (dtp->dev_tsize) {
	case 1:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE1);
		break;
	case 2:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE2);
		break;
	case 4:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE4);
		break;
	case 8:
	default:
		dest1 |= DSCR_DEST1_DTS(DSCR_xTS_SIZE8);
		break;
	}

	/* If destination output is FIFO, set static address. */
	if (dtp->dev_flags & DEV_FLAGS_OUT) {
		if (dtp->dev_flags & DEV_FLAGS_BURSTABLE)
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
		else
			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
	}

	if (dtp->dev_physaddr)
		dest0 = dtp->dev_physaddr;

#if 0
	printk(KERN_DEBUG "did:%x sid:%x cmd0:%x cmd1:%x source0:%x "
			  "source1:%x dest0:%x dest1:%x\n",
			  dtp->dev_id, stp->dev_id, cmd0, cmd1, src0,
			  src1, dest0, dest1);
#endif
	for (i = 0; i < entries; i++) {
		dp->dscr_cmd0 = cmd0;
		dp->dscr_cmd1 = cmd1;
		dp->dscr_source0 = src0;
		dp->dscr_source1 = src1;
		dp->dscr_dest0 = dest0;
		dp->dscr_dest1 = dest1;
		dp->dscr_stat = 0;
		dp->sw_context = 0;
		dp->sw_status = 0;
		dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
		dp++;
	}

	/* Make last descriptor point to the first. */
	dp--;
	dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(ctp->chan_desc_base));
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	return (u32)ctp->chan_desc_base;
}
EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
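/*
 * Added note (not in the original source): the retry path above allocates
 * entries * sizeof(au1x_ddma_desc_t) plus (sizeof(au1x_ddma_desc_t) - 1)
 * spare bytes, so rounding the base up with ALIGN_ADDR() is guaranteed to
 * stay inside the allocation, while the original unaligned pointer is kept
 * in cdb_membase for the later kfree() in au1xxx_dbdma_chan_free().
 */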

/*
 * Put a source buffer into the DMA ring.
 * This updates the source pointer and byte count.  Normally used
 * for memory to fifo transfers.
 */
u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	/*
	 * I guess we could check this to be within the
	 * range of the table...
	 */
	ctp = *(chan_tab_t **)chanid;

	/*
	 * We could have multiple callers for a particular channel, but
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer address and byte count. */
	dp->dscr_source0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/*
	 * There is an erratum on the Au1200/Au1550 parts that could result
	 * in "stale" data being DMA'ed. It has to do with the snoop logic on
	 * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
	 * these parts. If it is fixed in the future, these dma_cache_inv will
	 * just be nothing more than empty macros. See io.h.
	 */
	dma_cache_wback_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	au_sync();
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_source);
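/*
 * Usage sketch (illustrative only, not part of the original file): a
 * typical memory-to-FIFO transmit posts a physically addressed buffer and
 * then rings the channel.  The names below are hypothetical.
 *
 *	dma_addr_t pa = virt_to_phys(tx_buf);	-- or a DMA-mapped handle
 *	if (au1xxx_dbdma_put_source(chanid, pa, tx_len, DDMA_FLAGS_IE))
 *		au1xxx_dbdma_start(chanid);	-- 0 would mean the ring is full
 */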

/* Put a destination buffer into the DMA ring.
 * This updates the destination pointer and byte count.  Normally used
 * to place an empty buffer into the ring for fifo to memory transfers.
 */
u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	/* I guess we could check this to be within the
	 * range of the table...
	 */
	ctp = *((chan_tab_t **)chanid);

	/* We could have multiple callers for a particular channel, but
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/* If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Check flags */
	if (flags & DDMA_FLAGS_IE)
		dp->dscr_cmd0 |= DSCR_CMD0_IE;
	if (flags & DDMA_FLAGS_NOIE)
		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;

	/* Load up buffer address and byte count. */
	dp->dscr_dest0 = buf & ~0UL;
	dp->dscr_cmd1 = nbytes;
#if 0
	printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
			  dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
			  dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
#endif
	/*
	 * There is an erratum on the Au1200/Au1550 parts that could result in
	 * "stale" data being DMA'ed. It has to do with the snoop logic on the
	 * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
	 * parts. If it is fixed in the future, these dma_cache_inv will just
	 * be nothing more than empty macros. See io.h.
	 */
	dma_cache_inv((unsigned long)buf, nbytes);
	dp->dscr_cmd0 |= DSCR_CMD0_V;	/* Let it rip */
	au_sync();
	dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}
EXPORT_SYMBOL(au1xxx_dbdma_put_dest);

/*
 * Get a destination buffer from the DMA ring.
 * Normally used to get a full buffer from the ring during fifo
 * to memory transfers.  This does not set the valid bit, you will
 * have to put another destination buffer to keep the DMA going.
 */
u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	u32 rv;

	/*
	 * I guess we could check this to be within the
	 * range of the table...
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We could have multiple callers for a particular channel, but
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->get_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Return buffer address and byte count. */
	*buf = (void *)(phys_to_virt(dp->dscr_dest0));
	*nbytes = dp->dscr_cmd1;
	rv = dp->dscr_stat;

	/* Get next descriptor pointer. */
	ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_dbdma_get_dest);
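/*
 * Usage sketch (illustrative only, not part of the original file): a FIFO
 * to memory receiver typically pre-loads empty buffers with put_dest(),
 * then, in its completion callback, drains a finished buffer with
 * get_dest() and immediately replaces it.  The names are hypothetical.
 *
 *	static void my_rx_callback(int irq, void *param)
 *	{
 *		void *buf;
 *		int len;
 *
 *		if (au1xxx_dbdma_get_dest(chanid, &buf, &len)) {
 *			-- process the data at 'buf', then recycle the buffer
 *			au1xxx_dbdma_put_dest(chanid, virt_to_phys(buf),
 *					      RX_BUF_SIZE, DDMA_FLAGS_IE);
 *		}
 *	}
 */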

void au1xxx_dbdma_stop(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;
	int halt_timeout = 0;

	ctp = *((chan_tab_t **)chanid);

	cp = ctp->chan_ptr;
	cp->ddma_cfg &= ~DDMA_CFG_EN;	/* Disable channel */
	au_sync();
	while (!(cp->ddma_stat & DDMA_STAT_H)) {
		udelay(1);
		halt_timeout++;
		if (halt_timeout > 100) {
			printk(KERN_WARNING "warning: DMA channel won't halt\n");
			break;
		}
	}
	/* clear current desc valid and doorbell */
	cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
	au_sync();
}
EXPORT_SYMBOL(au1xxx_dbdma_stop);

/*
 * Start using the current descriptor pointer.  If the DBDMA encounters
 * a non-valid descriptor, it will stop.  In this case, we can just
 * continue by adding a buffer to the list and starting again.
 */
void au1xxx_dbdma_start(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;

	ctp = *((chan_tab_t **)chanid);
	cp = ctp->chan_ptr;
	cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
	cp->ddma_cfg |= DDMA_CFG_EN;	/* Enable channel */
	au_sync();
	cp->ddma_dbell = 0;
	au_sync();
}
EXPORT_SYMBOL(au1xxx_dbdma_start);

void au1xxx_dbdma_reset(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;

	au1xxx_dbdma_stop(chanid);

	ctp = *((chan_tab_t **)chanid);
	ctp->get_ptr = ctp->put_ptr = ctp->cur_ptr = ctp->chan_desc_base;

	/* Run through the descriptors and reset the valid indicator. */
	dp = ctp->chan_desc_base;

	do {
		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
		/*
		 * Reset our software status -- this is used to determine
		 * if a descriptor is in use by upper level software, since
		 * posting can reset the 'V' bit.
		 */
		dp->sw_status = 0;
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}
EXPORT_SYMBOL(au1xxx_dbdma_reset);

u32 au1xxx_get_dma_residue(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_dma_chan_t *cp;
	u32 rv;

	ctp = *((chan_tab_t **)chanid);
	cp = ctp->chan_ptr;

	/* This is only valid if the channel is stopped. */
	rv = cp->ddma_bytecnt;
	au_sync();

	return rv;
}
EXPORT_SYMBOL_GPL(au1xxx_get_dma_residue);

void au1xxx_dbdma_chan_free(u32 chanid)
{
	chan_tab_t *ctp;
	dbdev_tab_t *stp, *dtp;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;

	au1xxx_dbdma_stop(chanid);

	kfree((void *)ctp->cdb_membase);

	stp->dev_flags &= ~DEV_FLAGS_INUSE;
	dtp->dev_flags &= ~DEV_FLAGS_INUSE;
	chan_tab_ptr[ctp->chan_index] = NULL;

	kfree(ctp);
}
EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
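/*
 * End-to-end usage sketch (illustrative only, not part of the original
 * file), showing how the exported calls above fit together for a
 * memory-to-FIFO channel; device ids, widths, sizes and names are
 * hypothetical.
 *
 *	u32 chanid = au1xxx_dbdma_chan_alloc(DSCR_CMD0_ALWAYS,
 *					     DSCR_CMD0_PSC0_TX,
 *					     my_tx_callback, my_dev);
 *	if (chanid) {
 *		au1xxx_dbdma_set_devwidth(chanid, 16);
 *		au1xxx_dbdma_ring_alloc(chanid, NUM_DESCRIPTORS);
 *		au1xxx_dbdma_put_source(chanid, virt_to_phys(tx_buf),
 *					tx_len, DDMA_FLAGS_IE);
 *		au1xxx_dbdma_start(chanid);
 *		...
 *		au1xxx_dbdma_stop(chanid);
 *		au1xxx_dbdma_chan_free(chanid);
 *	}
 */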

static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
{
	u32 intstat;
	u32 chan_index;
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	au1x_dma_chan_t *cp;

	intstat = dbdma_gptr->ddma_intstat;
	au_sync();
	chan_index = __ffs(intstat);

	ctp = chan_tab_ptr[chan_index];
	cp = ctp->chan_ptr;
	dp = ctp->cur_ptr;

	/* Reset interrupt. */
	cp->ddma_irq = 0;
	au_sync();

	if (ctp->chan_callback)
		ctp->chan_callback(irq, ctp->chan_callparam);

	ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	return IRQ_RETVAL(1);
}

void au1xxx_dbdma_dump(u32 chanid)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	dbdev_tab_t *stp, *dtp;
	au1x_dma_chan_t *cp;
	u32 i = 0;

	ctp = *((chan_tab_t **)chanid);
	stp = ctp->chan_src;
	dtp = ctp->chan_dest;
	cp = ctp->chan_ptr;

	printk(KERN_DEBUG "Chan %x, stp %x (dev %d)  dtp %x (dev %d)\n",
			  (u32)ctp, (u32)stp, stp - dbdev_tab, (u32)dtp,
			  dtp - dbdev_tab);
	printk(KERN_DEBUG "desc base %x, get %x, put %x, cur %x\n",
			  (u32)(ctp->chan_desc_base), (u32)(ctp->get_ptr),
			  (u32)(ctp->put_ptr), (u32)(ctp->cur_ptr));

	printk(KERN_DEBUG "dbdma chan %x\n", (u32)cp);
	printk(KERN_DEBUG "cfg %08x, desptr %08x, statptr %08x\n",
			  cp->ddma_cfg, cp->ddma_desptr, cp->ddma_statptr);
	printk(KERN_DEBUG "dbell %08x, irq %08x, stat %08x, bytecnt %08x\n",
			  cp->ddma_dbell, cp->ddma_irq, cp->ddma_stat,
			  cp->ddma_bytecnt);

	/* Run through the descriptors */
	dp = ctp->chan_desc_base;

	do {
		printk(KERN_DEBUG "Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
				  i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
		printk(KERN_DEBUG "src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
				  dp->dscr_source0, dp->dscr_source1,
				  dp->dscr_dest0, dp->dscr_dest1);
		printk(KERN_DEBUG "stat %08x, nxtptr %08x\n",
				  dp->dscr_stat, dp->dscr_nxtptr);
		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
	} while (dp != ctp->chan_desc_base);
}

/* Put a descriptor into the DMA ring.
 * This updates the source/destination pointers and byte count.
 */
u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
{
	chan_tab_t *ctp;
	au1x_ddma_desc_t *dp;
	u32 nbytes = 0;

	/*
	 * I guess we could check this to be within the
	 * range of the table...
	 */
	ctp = *((chan_tab_t **)chanid);

	/*
	 * We could have multiple callers for a particular channel, but
	 * an interrupt doesn't affect this pointer nor the descriptor,
	 * so no locking should be needed.
	 */
	dp = ctp->put_ptr;

	/*
	 * If the descriptor is valid, we are way ahead of the DMA
	 * engine, so just return an error condition.
	 */
	if (dp->dscr_cmd0 & DSCR_CMD0_V)
		return 0;

	/* Load up buffer addresses and byte count. */
	dp->dscr_dest0 = dscr->dscr_dest0;
	dp->dscr_source0 = dscr->dscr_source0;
	dp->dscr_dest1 = dscr->dscr_dest1;
	dp->dscr_source1 = dscr->dscr_source1;
	dp->dscr_cmd1 = dscr->dscr_cmd1;
	nbytes = dscr->dscr_cmd1;
	/* Allow the caller to specify if an interrupt is generated */
	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
	ctp->chan_ptr->ddma_dbell = 0;

	/* Get next descriptor pointer. */
	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));

	/* Return something non-zero. */
	return nbytes;
}


static unsigned long alchemy_dbdma_pm_data[NUM_DBDMA_CHANS + 1][6];

static int alchemy_dbdma_suspend(void)
{
	int i;
	void __iomem *addr;

	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
	alchemy_dbdma_pm_data[0][0] = __raw_readl(addr + 0x00);
	alchemy_dbdma_pm_data[0][1] = __raw_readl(addr + 0x04);
	alchemy_dbdma_pm_data[0][2] = __raw_readl(addr + 0x08);
	alchemy_dbdma_pm_data[0][3] = __raw_readl(addr + 0x0c);

	/* save channel configurations */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
		alchemy_dbdma_pm_data[i][0] = __raw_readl(addr + 0x00);
		alchemy_dbdma_pm_data[i][1] = __raw_readl(addr + 0x04);
		alchemy_dbdma_pm_data[i][2] = __raw_readl(addr + 0x08);
		alchemy_dbdma_pm_data[i][3] = __raw_readl(addr + 0x0c);
		alchemy_dbdma_pm_data[i][4] = __raw_readl(addr + 0x10);
		alchemy_dbdma_pm_data[i][5] = __raw_readl(addr + 0x14);

		/* halt channel */
		__raw_writel(alchemy_dbdma_pm_data[i][0] & ~1, addr + 0x00);
		wmb();
		while (!(__raw_readl(addr + 0x14) & 1))
			wmb();

		addr += 0x100;	/* next channel base */
	}
	/* disable channel interrupts */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
	__raw_writel(0, addr + 0x0c);
	wmb();

	return 0;
}

static void alchemy_dbdma_resume(void)
{
	int i;
	void __iomem *addr;

	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR);
	__raw_writel(alchemy_dbdma_pm_data[0][0], addr + 0x00);
	__raw_writel(alchemy_dbdma_pm_data[0][1], addr + 0x04);
	__raw_writel(alchemy_dbdma_pm_data[0][2], addr + 0x08);
	__raw_writel(alchemy_dbdma_pm_data[0][3], addr + 0x0c);

	/* restore channel configurations */
	addr = (void __iomem *)KSEG1ADDR(AU1550_DBDMA_PHYS_ADDR);
	for (i = 1; i <= NUM_DBDMA_CHANS; i++) {
		__raw_writel(alchemy_dbdma_pm_data[i][0], addr + 0x00);
		__raw_writel(alchemy_dbdma_pm_data[i][1], addr + 0x04);
		__raw_writel(alchemy_dbdma_pm_data[i][2], addr + 0x08);
		__raw_writel(alchemy_dbdma_pm_data[i][3], addr + 0x0c);
		__raw_writel(alchemy_dbdma_pm_data[i][4], addr + 0x10);
		__raw_writel(alchemy_dbdma_pm_data[i][5], addr + 0x14);
		wmb();
		addr += 0x100;	/* next channel base */
	}
}

static struct syscore_ops alchemy_dbdma_syscore_ops = {
	.suspend	= alchemy_dbdma_suspend,
	.resume		= alchemy_dbdma_resume,
};

static int __init au1xxx_dbdma_init(void)
{
	int irq_nr, ret;

	dbdma_gptr->ddma_config = 0;
	dbdma_gptr->ddma_throttle = 0;
	dbdma_gptr->ddma_inten = 0xffff;
	au_sync();

	switch (alchemy_get_cputype()) {
	case ALCHEMY_CPU_AU1550:
		irq_nr = AU1550_DDMA_INT;
		break;
	case ALCHEMY_CPU_AU1200:
		irq_nr = AU1200_DDMA_INT;
		break;
	default:
		return -ENODEV;
	}

	ret = request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
			  "Au1xxx dbdma", (void *)dbdma_gptr);
	if (ret)
		printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
	else {
		dbdma_initialized = 1;
		printk(KERN_INFO "Alchemy DBDMA initialized\n");
		register_syscore_ops(&alchemy_dbdma_syscore_ops);
	}

	return ret;
}
subsys_initcall(au1xxx_dbdma_init);

#endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */