Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm/ti/ti_sdma.c
39481 views
1
/*-
2
* SPDX-License-Identifier: BSD-2-Clause
3
*
4
* Copyright (c) 2011
5
* Ben Gray <[email protected]>.
6
* All rights reserved.
7
*
8
* Redistribution and use in source and binary forms, with or without
9
* modification, are permitted provided that the following conditions
10
* are met:
11
* 1. Redistributions of source code must retain the above copyright
12
* notice, this list of conditions and the following disclaimer.
13
* 2. Redistributions in binary form must reproduce the above copyright
14
* notice, this list of conditions and the following disclaimer in the
15
* documentation and/or other materials provided with the distribution.
16
*
17
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
* SUCH DAMAGE.
28
*/
29
30
#include <sys/param.h>
31
#include <sys/systm.h>
32
#include <sys/bus.h>
33
#include <sys/kernel.h>
34
#include <sys/lock.h>
35
#include <sys/interrupt.h>
36
#include <sys/module.h>
37
#include <sys/malloc.h>
38
#include <sys/mutex.h>
39
#include <sys/rman.h>
40
#include <sys/queue.h>
41
#include <sys/taskqueue.h>
42
#include <sys/timetc.h>
43
#include <machine/bus.h>
44
#include <machine/intr.h>
45
46
#include <dev/ofw/openfirm.h>
47
#include <dev/ofw/ofw_bus.h>
48
#include <dev/ofw/ofw_bus_subr.h>
49
50
#include <arm/ti/ti_cpuid.h>
51
#include <arm/ti/ti_sysc.h>
52
#include <arm/ti/ti_sdma.h>
53
#include <arm/ti/ti_sdmareg.h>
54
55
/**
56
* Kernel functions for using the DMA controller
57
*
58
*
59
* DMA TRANSFERS:
60
* A DMA transfer block consists of a number of frames (FN). Each frame
61
* consists of a number of elements, and each element can have a size of 8, 16,
62
* or 32 bits.
63
*
64
* OMAP44xx and newer chips support linked list (aka scatter gather) transfers,
65
* where a linked list of source/destination pairs can be placed in memory
66
* for the H/W to process. Earlier chips only allowed you to chain multiple
67
* channels together. However currently this linked list feature is not
68
* supported by the driver.
69
*
70
*/
71
72
/**
 * Per-channel DMA state.
 *
 * Each of the controller's channels gets one of these; they live in the
 * softc's sc_channel[] array and are protected by the driver mutex.
 */
struct ti_sdma_channel {
	/*
	 * The configuration registers for the given channel, these are modified
	 * by the set functions and only written to the actual registers when a
	 * transaction is started.
	 */
	uint32_t reg_csdp;	/* shadow of the channel's CSDP register */
	uint32_t reg_ccr;	/* shadow of the channel's CCR register */
	uint32_t reg_cicr;	/* shadow of the channel's CICR register */

	/* Set when one of the configuration registers above change */
	uint32_t need_reg_write;

	/* Callback function used when an interrupt is tripped on the given channel */
	void (*callback)(unsigned int ch, uint32_t ch_status, void *data);

	/* Callback data passed in the callback ... duh */
	void* callback_data;

};
97
98
/**
 * DMA driver context, allocated and stored globally, this driver is not
 * intended to ever be unloaded (see ti_sdma_sc).
 *
 */
struct ti_sdma_softc {
	device_t		sc_dev;		/* newbus device handle */
	struct resource*	sc_irq_res;	/* IRQ resource (one shared line) */
	struct resource*	sc_mem_res;	/* register window */

	/*
	 * I guess in theory we should have a mutex per DMA channel for register
	 * modifications. But since we know we are never going to be run on a SMP
	 * system, we can use just the single lock for all channels.
	 */
	struct mtx		sc_mtx;

	/* Stores the H/W revision read from the registers */
	uint32_t		sc_hw_rev;

	/*
	 * Bits in the sc_active_channels data field indicate if the channel has
	 * been activated.
	 */
	uint32_t		sc_active_channels;

	struct ti_sdma_channel sc_channel[NUM_DMA_CHANNELS];

};
127
128
/* Global driver context; set once at attach, never cleared (no unload). */
static struct ti_sdma_softc *ti_sdma_sc = NULL;

/**
 * Macros for driver mutex locking.
 *
 * The macro bodies deliberately have no trailing semicolon so that an
 * invocation reads as an ordinary statement; the originals ended the
 * DESTROY/ASSERT variants with ';', which expands to a stray empty
 * statement and breaks use inside an un-braced if/else.  The macro
 * argument is also consistently parenthesized now.
 */
#define TI_SDMA_LOCK(_sc)             mtx_lock_spin(&(_sc)->sc_mtx)
#define TI_SDMA_UNLOCK(_sc)           mtx_unlock_spin(&(_sc)->sc_mtx)
#define TI_SDMA_LOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
	    "ti_sdma", MTX_SPIN)
#define TI_SDMA_LOCK_DESTROY(_sc)     mtx_destroy(&(_sc)->sc_mtx)
#define TI_SDMA_ASSERT_LOCKED(_sc)    mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define TI_SDMA_ASSERT_UNLOCKED(_sc)  mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
141
142
/**
143
* Function prototypes
144
*
145
*/
146
static void ti_sdma_intr(void *);
147
148
/**
149
* ti_sdma_read_4 - reads a 32-bit value from one of the DMA registers
150
* @sc: DMA device context
151
* @off: The offset of a register from the DMA register address range
152
*
153
*
154
* RETURNS:
155
* 32-bit value read from the register.
156
*/
157
static inline uint32_t
158
ti_sdma_read_4(struct ti_sdma_softc *sc, bus_size_t off)
159
{
160
return bus_read_4(sc->sc_mem_res, off);
161
}
162
163
/**
164
* ti_sdma_write_4 - writes a 32-bit value to one of the DMA registers
165
* @sc: DMA device context
166
* @off: The offset of a register from the DMA register address range
167
*
168
*
169
* RETURNS:
170
* 32-bit value read from the register.
171
*/
172
static inline void
173
ti_sdma_write_4(struct ti_sdma_softc *sc, bus_size_t off, uint32_t val)
174
{
175
bus_write_4(sc->sc_mem_res, off, val);
176
}
177
178
/**
179
* ti_sdma_is_omap3_rev - returns true if H/W is from OMAP3 series
180
* @sc: DMA device context
181
*
182
*/
183
static inline int
184
ti_sdma_is_omap3_rev(struct ti_sdma_softc *sc)
185
{
186
return (sc->sc_hw_rev == DMA4_OMAP3_REV);
187
}
188
189
/**
190
* ti_sdma_is_omap4_rev - returns true if H/W is from OMAP4 series
191
* @sc: DMA device context
192
*
193
*/
194
static inline int
195
ti_sdma_is_omap4_rev(struct ti_sdma_softc *sc)
196
{
197
return (sc->sc_hw_rev == DMA4_OMAP4_REV);
198
}
199
200
/**
 * ti_sdma_intr - interrupt handler for all 4 DMA IRQs
 * @arg: ignored
 *
 * Called when any of the four DMA IRQs are triggered.  Walks each IRQ
 * line's status register, and for every pending+enabled channel bit:
 * validates the interrupt, logs any error conditions flagged in CSR,
 * acknowledges the interrupt, and finally invokes the channel callback
 * (if one was registered) with the raw CSR value.
 *
 * LOCKING:
 * DMA registers protected by internal mutex (held for the whole walk,
 * including across the callback — callbacks therefore run with the
 * driver spin mutex held).
 *
 * RETURNS:
 * nothing
 */
static void
ti_sdma_intr(void *arg)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t intr;
	uint32_t csr;
	unsigned int ch, j;
	struct ti_sdma_channel* channel;

	TI_SDMA_LOCK(sc);

	for (j = 0; j < NUM_DMA_IRQS; j++) {
		/* Get the flag interrupts (enabled) */
		intr = ti_sdma_read_4(sc, DMA4_IRQSTATUS_L(j));
		intr &= ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
		if (intr == 0x00000000)
			continue;

		/* Loop through checking the status bits */
		for (ch = 0; ch < NUM_DMA_CHANNELS; ch++) {
			if (intr & (1 << ch)) {
				channel = &sc->sc_channel[ch];

				/* Read the CSR register and verify we don't have a spurious IRQ */
				csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
				if (csr == 0) {
					device_printf(sc->sc_dev, "Spurious DMA IRQ for channel "
					    "%d\n", ch);
					continue;
				}

				/* Sanity check this channel is active */
				if ((sc->sc_active_channels & (1 << ch)) == 0) {
					device_printf(sc->sc_dev, "IRQ %d for a non-activated "
					    "channel %d\n", j, ch);
					continue;
				}

				/* Check the status error codes */
				if (csr & DMA4_CSR_DROP)
					device_printf(sc->sc_dev, "Synchronization event drop "
					    "occurred during the transfer on channel %u\n",
					    ch);
				if (csr & DMA4_CSR_SECURE_ERR)
					device_printf(sc->sc_dev, "Secure transaction error event "
					    "on channel %u\n", ch);
				if (csr & DMA4_CSR_MISALIGNED_ADRS_ERR)
					device_printf(sc->sc_dev, "Misaligned address error event "
					    "on channel %u\n", ch);
				if (csr & DMA4_CSR_TRANS_ERR) {
					device_printf(sc->sc_dev, "Transaction error event on "
					    "channel %u\n", ch);
					/*
					 * Apparently according to linux code, there is an errata
					 * that says the channel is not disabled upon this error.
					 * They explicitly disable the channel here .. since I
					 * haven't seen the errata, I'm going to ignore for now.
					 */
				}

				/* Clear the status flags for the IRQ */
				ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
				ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));

				/* Call the callback for the given channel */
				if (channel->callback)
					channel->callback(ch, csr, channel->callback_data);
			}
		}
	}

	TI_SDMA_UNLOCK(sc);

	return;
}
287
288
/**
289
* ti_sdma_activate_channel - activates a DMA channel
290
* @ch: upon return contains the channel allocated
291
* @callback: a callback function to associate with the channel
292
* @data: optional data supplied when the callback is called
293
*
294
* Simply activates a channel be enabling and writing default values to the
295
* channel's register set. It doesn't start a transaction, just populates the
296
* internal data structures and sets defaults.
297
*
298
* Note this function doesn't enable interrupts, for that you need to call
299
* ti_sdma_enable_channel_irq(). If not using IRQ to detect the end of the
300
* transfer, you can use ti_sdma_status_poll() to detect a change in the
301
* status.
302
*
303
* A channel must be activated before any of the other DMA functions can be
304
* called on it.
305
*
306
* LOCKING:
307
* DMA registers protected by internal mutex
308
*
309
* RETURNS:
310
* 0 on success, otherwise an error code
311
*/
312
int
313
ti_sdma_activate_channel(unsigned int *ch,
314
void (*callback)(unsigned int ch, uint32_t status, void *data),
315
void *data)
316
{
317
struct ti_sdma_softc *sc = ti_sdma_sc;
318
struct ti_sdma_channel *channel = NULL;
319
uint32_t addr;
320
unsigned int i;
321
322
/* Sanity check */
323
if (sc == NULL)
324
return (ENOMEM);
325
326
if (ch == NULL)
327
return (EINVAL);
328
329
TI_SDMA_LOCK(sc);
330
331
/* Check to see if all channels are in use */
332
if (sc->sc_active_channels == 0xffffffff) {
333
TI_SDMA_UNLOCK(sc);
334
return (ENOMEM);
335
}
336
337
/* Find the first non-active channel */
338
for (i = 0; i < NUM_DMA_CHANNELS; i++) {
339
if (!(sc->sc_active_channels & (0x1 << i))) {
340
sc->sc_active_channels |= (0x1 << i);
341
*ch = i;
342
break;
343
}
344
}
345
346
/* Get the channel struct and populate the fields */
347
channel = &sc->sc_channel[*ch];
348
349
channel->callback = callback;
350
channel->callback_data = data;
351
352
channel->need_reg_write = 1;
353
354
/* Set the default configuration for the DMA channel */
355
channel->reg_csdp = DMA4_CSDP_DATA_TYPE(0x2)
356
| DMA4_CSDP_SRC_BURST_MODE(0)
357
| DMA4_CSDP_DST_BURST_MODE(0)
358
| DMA4_CSDP_SRC_ENDIANISM(0)
359
| DMA4_CSDP_DST_ENDIANISM(0)
360
| DMA4_CSDP_WRITE_MODE(0)
361
| DMA4_CSDP_SRC_PACKED(0)
362
| DMA4_CSDP_DST_PACKED(0);
363
364
channel->reg_ccr = DMA4_CCR_DST_ADDRESS_MODE(1)
365
| DMA4_CCR_SRC_ADDRESS_MODE(1)
366
| DMA4_CCR_READ_PRIORITY(0)
367
| DMA4_CCR_WRITE_PRIORITY(0)
368
| DMA4_CCR_SYNC_TRIGGER(0)
369
| DMA4_CCR_FRAME_SYNC(0)
370
| DMA4_CCR_BLOCK_SYNC(0);
371
372
channel->reg_cicr = DMA4_CICR_TRANS_ERR_IE
373
| DMA4_CICR_SECURE_ERR_IE
374
| DMA4_CICR_SUPERVISOR_ERR_IE
375
| DMA4_CICR_MISALIGNED_ADRS_ERR_IE;
376
377
/* Clear all the channel registers, this should abort any transaction */
378
for (addr = DMA4_CCR(*ch); addr <= DMA4_COLOR(*ch); addr += 4)
379
ti_sdma_write_4(sc, addr, 0x00000000);
380
381
TI_SDMA_UNLOCK(sc);
382
383
return 0;
384
}
385
386
/**
 * ti_sdma_deactivate_channel - deactivates a channel
 * @ch: the channel to deactivate
 *
 * Marks the channel as free, disables its interrupts, stops any transfer
 * in progress and clears out all the channel registers.
 *
 * LOCKING:
 * DMA registers protected by internal mutex
 *
 * RETURNS:
 * 0 on success, ENOMEM if the driver hasn't attached, EBUSY if the
 * channel was not active.
 * (NOTE(review): EBUSY for a not-active channel is inconsistent with the
 * EINVAL used by the other channel functions — confirm callers before
 * changing.)
 */
int
ti_sdma_deactivate_channel(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	unsigned int j;
	unsigned int addr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	/* First check if the channel is currently active */
	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EBUSY);
	}

	/* Mark the channel as inactive */
	sc->sc_active_channels &= ~(1 << ch);

	/* Disable all DMA interrupts for the channel. */
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);

	/* Make sure the DMA transfer is stopped. */
	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);

	/* Clear the CSR register and IRQ status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
	}

	/* Clear all the channel registers, this should abort any transaction */
	for (addr = DMA4_CCR(ch); addr <= DMA4_COLOR(ch); addr += 4)
		ti_sdma_write_4(sc, addr, 0x00000000);

	TI_SDMA_UNLOCK(sc);

	return 0;
}
440
441
/**
 * ti_sdma_disable_channel_irq - disables IRQ's on the given channel
 * @ch: the channel to disable IRQ's on
 *
 * Disable interrupt generation for the given channel: clears the shadow
 * and live CICR register and removes the channel's bit from every IRQ
 * line's enable register.
 *
 * LOCKING:
 * DMA registers protected by internal mutex
 *
 * RETURNS:
 * 0 on success, ENOMEM if the driver hasn't attached, EINVAL if the
 * channel is not active.
 */
int
ti_sdma_disable_channel_irq(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t irq_enable;
	unsigned int j;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Disable all the individual error conditions */
	sc->sc_channel[ch].reg_cicr = 0x0000;
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0x0000);

	/* Disable the channel interrupt enable */
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
		irq_enable &= ~(1 << ch);

		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(j), irq_enable);
	}

	/* Indicate the registers need to be rewritten on the next transaction */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
490
491
/**
 * ti_sdma_enable_channel_irq - enables IRQ's on the given channel
 * @ch: the channel to enable IRQ's on
 * @flags: bitmask of interrupt types to enable
 *
 * Flags can be a bitmask of the following options:
 * DMA_IRQ_FLAG_DROP
 * DMA_IRQ_FLAG_HALF_FRAME_COMPL
 * DMA_IRQ_FLAG_FRAME_COMPL
 * DMA_IRQ_FLAG_START_LAST_FRAME
 * DMA_IRQ_FLAG_BLOCK_COMPL
 * DMA_IRQ_FLAG_ENDOF_PKT
 * DMA_IRQ_FLAG_DRAIN
 *
 * The four error interrupts are OR'd in unconditionally.
 *
 * NOTE(review): this only sets the enable bit on IRQ line 0, while
 * ti_sdma_disable_channel_irq() clears it on all lines — presumably
 * because attach only installs a handler for one line; confirm.
 *
 * LOCKING:
 * DMA registers protected by internal mutex
 *
 * RETURNS:
 * 0 on success, ENOMEM if the driver hasn't attached, EINVAL if the
 * channel is not active.
 */
int
ti_sdma_enable_channel_irq(unsigned int ch, uint32_t flags)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t irq_enable;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Always enable the error interrupts if we have interrupts enabled */
	flags |= DMA4_CICR_TRANS_ERR_IE | DMA4_CICR_SECURE_ERR_IE |
	    DMA4_CICR_SUPERVISOR_ERR_IE | DMA4_CICR_MISALIGNED_ADRS_ERR_IE;

	sc->sc_channel[ch].reg_cicr = flags;

	/* Write the values to the register */
	ti_sdma_write_4(sc, DMA4_CICR(ch), flags);

	/* Enable the channel interrupt enable */
	irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(0));
	irq_enable |= (1 << ch);

	ti_sdma_write_4(sc, DMA4_IRQENABLE_L(0), irq_enable);

	/* Indicate the registers need to be rewritten on the next transaction */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
551
552
/**
553
* ti_sdma_get_channel_status - returns the status of a given channel
554
* @ch: the channel number to get the status of
555
* @status: upon return will contain the status bitmask, see below for possible
556
* values.
557
*
558
* DMA_STATUS_DROP
559
* DMA_STATUS_HALF
560
* DMA_STATUS_FRAME
561
* DMA_STATUS_LAST
562
* DMA_STATUS_BLOCK
563
* DMA_STATUS_SYNC
564
* DMA_STATUS_PKT
565
* DMA_STATUS_TRANS_ERR
566
* DMA_STATUS_SECURE_ERR
567
* DMA_STATUS_SUPERVISOR_ERR
568
* DMA_STATUS_MISALIGNED_ADRS_ERR
569
* DMA_STATUS_DRAIN_END
570
*
571
*
572
* LOCKING:
573
* DMA registers protected by internal mutex
574
*
575
* RETURNS:
576
* EH_HANDLED or EH_NOT_HANDLED
577
*/
578
int
579
ti_sdma_get_channel_status(unsigned int ch, uint32_t *status)
580
{
581
struct ti_sdma_softc *sc = ti_sdma_sc;
582
uint32_t csr;
583
584
/* Sanity check */
585
if (sc == NULL)
586
return (ENOMEM);
587
588
TI_SDMA_LOCK(sc);
589
590
if ((sc->sc_active_channels & (1 << ch)) == 0) {
591
TI_SDMA_UNLOCK(sc);
592
return (EINVAL);
593
}
594
595
TI_SDMA_UNLOCK(sc);
596
597
csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
598
599
if (status != NULL)
600
*status = csr;
601
602
return (0);
603
}
604
605
/**
 * ti_sdma_start_xfer - starts a DMA transfer
 * @ch: the channel number to start the transfer on
 * @src_paddr: the source physical address
 * @dst_paddr: the destination physical address
 * @frmcnt: the number of frames per block
 * @elmcnt: the number of elements in a frame, an element is either an 8, 16
 *          or 32-bit value as defined by ti_sdma_set_xfer_burst()
 *
 * Flushes the shadow channel registers to the hardware, programs the
 * transfer geometry and addresses, then sets the CCR enable bit.
 *
 * NOTE(review): unlike ti_sdma_start_xfer_packet(), this writes CSDP
 * unconditionally rather than checking need_reg_write — presumably
 * harmless, but confirm before unifying.
 *
 * LOCKING:
 * DMA registers protected by internal mutex
 *
 * RETURNS:
 * 0 on success, ENOMEM if the driver hasn't attached, EINVAL if the
 * channel is not active.
 */
int
ti_sdma_start_xfer(unsigned int ch, unsigned int src_paddr,
    unsigned long dst_paddr,
    unsigned int frmcnt, unsigned int elmcnt)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register */
	ti_sdma_write_4(sc, DMA4_CSDP(ch),
	    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of element per frame CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frame per block CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register */
	ti_sdma_write_4(sc, DMA4_CCR(ch), channel->reg_ccr);

	/* f) - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/* - Set the source frame index increment CSFI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSF(ch), 0x0001);

	/* - Set the destination element index increment CDEI[15:0]*/
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/* - Set the destination frame index increment CDFI[31:0] */
	ti_sdma_write_4(sc, DMA4_CDF(ch), 0x0001);

	/* Clear the status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit (CCR bit 7 = ENABLE) and away we go */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Clear the reg write flag */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
687
688
/**
 * ti_sdma_start_xfer_packet - starts a packet DMA transfer
 * @ch: the channel number to use for the transfer
 * @src_paddr: the source physical address
 * @dst_paddr: the destination physical address
 * @frmcnt: the number of frames to transfer
 * @elmcnt: the number of elements in a frame, an element is either an 8, 16
 *          or 32-bit value as defined by ti_sdma_set_xfer_burst()
 * @pktsize: the number of elements in each transfer packet
 *
 * The @frmcnt and @elmcnt define the overall number of bytes to transfer,
 * typically @frmcnt is 1 and @elmcnt contains the total number of elements.
 * @pktsize is the size of each individual packet, there might be multiple
 * packets per transfer. i.e. for the following with element size of 32-bits
 *
 *   frmcnt = 1, elmcnt = 512, pktsize = 128
 *
 *   Total transfer bytes = 1 * 512 = 512 elements or 2048 bytes
 *   Packets transferred = 512 / 128 = 4
 *
 * LOCKING:
 * DMA registers protected by internal mutex
 *
 * RETURNS:
 * 0 on success, ENOMEM if the driver hasn't attached, EINVAL if the
 * channel is not active.
 */
int
ti_sdma_start_xfer_packet(unsigned int ch, unsigned int src_paddr,
    unsigned long dst_paddr, unsigned int frmcnt,
    unsigned int elmcnt, unsigned int pktsize)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register (only if the shadow copy changed) */
	if (channel->need_reg_write)
		ti_sdma_write_4(sc, DMA4_CSDP(ch),
		    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of elements to transfer CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frames to transfer CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register */
	ti_sdma_write_4(sc, DMA4_CCR(ch),
	    channel->reg_ccr | DMA4_CCR_PACKET_TRANS);

	/* f) - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/* - Set the packet size, this is dependent on the sync source */
	if (channel->reg_ccr & DMA4_CCR_SEL_SRC_DST_SYNC(1))
		ti_sdma_write_4(sc, DMA4_CSF(ch), pktsize);
	else
		ti_sdma_write_4(sc, DMA4_CDF(ch), pktsize);

	/* - Set the destination element index increment CDEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/* Clear the status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit (CCR bit 7 = ENABLE) and away we go */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Clear the reg write flag */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
783
784
/**
 * ti_sdma_stop_xfer - stops any currently active transfers
 * @ch: the channel number to stop the transfer on
 *
 * This function call is effectively a NOP if no transaction is in progress.
 * The channel remains active (allocated) afterwards; only the transfer is
 * halted and the interrupt/status state cleared.
 *
 * LOCKING:
 * DMA registers protected by internal mutex
 *
 * RETURNS:
 * 0 on success, ENOMEM if the driver hasn't attached, EINVAL if the
 * channel is not active.
 */
int
ti_sdma_stop_xfer(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	unsigned int j;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Disable all DMA interrupts for the channel. */
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);

	/* Make sure the DMA transfer is stopped. */
	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);

	/* Clear the CSR register and IRQ status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
	}

	/* Configuration registers need to be re-written on the next xfer */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
832
833
/**
834
* ti_sdma_set_xfer_endianess - sets the endianness of subsequent transfers
835
* @ch: the channel number to set the endianness of
836
* @src: the source endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
837
* @dst: the destination endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
838
*
839
*
840
* LOCKING:
841
* DMA registers protected by internal mutex
842
*
843
* RETURNS:
844
* EH_HANDLED or EH_NOT_HANDLED
845
*/
846
int
847
ti_sdma_set_xfer_endianess(unsigned int ch, unsigned int src, unsigned int dst)
848
{
849
struct ti_sdma_softc *sc = ti_sdma_sc;
850
851
/* Sanity check */
852
if (sc == NULL)
853
return (ENOMEM);
854
855
TI_SDMA_LOCK(sc);
856
857
if ((sc->sc_active_channels & (1 << ch)) == 0) {
858
TI_SDMA_UNLOCK(sc);
859
return (EINVAL);
860
}
861
862
sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_ENDIANISM(1);
863
sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_ENDIANISM(src);
864
865
sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_ENDIANISM(1);
866
sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_ENDIANISM(dst);
867
868
sc->sc_channel[ch].need_reg_write = 1;
869
870
TI_SDMA_UNLOCK(sc);
871
872
return 0;
873
}
874
875
/**
876
* ti_sdma_set_xfer_burst - sets the source and destination element size
877
* @ch: the channel number to set the burst settings of
878
* @src: the source endianness (either DMA_BURST_NONE, DMA_BURST_16, DMA_BURST_32
879
* or DMA_BURST_64)
880
* @dst: the destination endianness (either DMA_BURST_NONE, DMA_BURST_16,
881
* DMA_BURST_32 or DMA_BURST_64)
882
*
883
* This function sets the size of the elements for all subsequent transfers.
884
*
885
* LOCKING:
886
* DMA registers protected by internal mutex
887
*
888
* RETURNS:
889
* EH_HANDLED or EH_NOT_HANDLED
890
*/
891
int
892
ti_sdma_set_xfer_burst(unsigned int ch, unsigned int src, unsigned int dst)
893
{
894
struct ti_sdma_softc *sc = ti_sdma_sc;
895
896
/* Sanity check */
897
if (sc == NULL)
898
return (ENOMEM);
899
900
TI_SDMA_LOCK(sc);
901
902
if ((sc->sc_active_channels & (1 << ch)) == 0) {
903
TI_SDMA_UNLOCK(sc);
904
return (EINVAL);
905
}
906
907
sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_BURST_MODE(0x3);
908
sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_BURST_MODE(src);
909
910
sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_BURST_MODE(0x3);
911
sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_BURST_MODE(dst);
912
913
sc->sc_channel[ch].need_reg_write = 1;
914
915
TI_SDMA_UNLOCK(sc);
916
917
return 0;
918
}
919
920
/**
921
* ti_sdma_set_xfer_data_type - driver attach function
922
* @ch: the channel number to set the endianness of
923
* @type: the xfer data type (either DMA_DATA_8BITS_SCALAR, DMA_DATA_16BITS_SCALAR
924
* or DMA_DATA_32BITS_SCALAR)
925
*
926
*
927
* LOCKING:
928
* DMA registers protected by internal mutex
929
*
930
* RETURNS:
931
* EH_HANDLED or EH_NOT_HANDLED
932
*/
933
int
934
ti_sdma_set_xfer_data_type(unsigned int ch, unsigned int type)
935
{
936
struct ti_sdma_softc *sc = ti_sdma_sc;
937
938
/* Sanity check */
939
if (sc == NULL)
940
return (ENOMEM);
941
942
TI_SDMA_LOCK(sc);
943
944
if ((sc->sc_active_channels & (1 << ch)) == 0) {
945
TI_SDMA_UNLOCK(sc);
946
return (EINVAL);
947
}
948
949
sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DATA_TYPE(0x3);
950
sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DATA_TYPE(type);
951
952
sc->sc_channel[ch].need_reg_write = 1;
953
954
TI_SDMA_UNLOCK(sc);
955
956
return 0;
957
}
958
959
/**
960
* ti_sdma_set_callback - driver attach function
961
* @dev: dma device handle
962
*
963
*
964
*
965
* LOCKING:
966
* DMA registers protected by internal mutex
967
*
968
* RETURNS:
969
* EH_HANDLED or EH_NOT_HANDLED
970
*/
971
int
972
ti_sdma_set_callback(unsigned int ch,
973
void (*callback)(unsigned int ch, uint32_t status, void *data),
974
void *data)
975
{
976
struct ti_sdma_softc *sc = ti_sdma_sc;
977
978
/* Sanity check */
979
if (sc == NULL)
980
return (ENOMEM);
981
982
TI_SDMA_LOCK(sc);
983
984
if ((sc->sc_active_channels & (1 << ch)) == 0) {
985
TI_SDMA_UNLOCK(sc);
986
return (EINVAL);
987
}
988
989
sc->sc_channel[ch].callback = callback;
990
sc->sc_channel[ch].callback_data = data;
991
992
sc->sc_channel[ch].need_reg_write = 1;
993
994
TI_SDMA_UNLOCK(sc);
995
996
return 0;
997
}
998
999
/**
 * ti_sdma_sync_params - sets channel sync settings
 * @ch: the channel number to set the sync on
 * @trigger: the number of the sync trigger, this depends on what other H/W
 *           module is triggering/receiving the DMA transactions
 * @mode: flags describing the sync mode to use, it may have one or more of
 *        the following bits set; TI_SDMA_SYNC_FRAME,
 *        TI_SDMA_SYNC_BLOCK, TI_SDMA_SYNC_TRIG_ON_SRC.
 *
 * Only the shadow CCR value is updated; it is flushed to the hardware
 * when the next transfer is started.
 *
 * LOCKING:
 * DMA registers protected by internal mutex
 *
 * RETURNS:
 * 0 on success, ENOMEM if the driver hasn't attached, EINVAL if the
 * channel is not active.
 */
int
ti_sdma_sync_params(unsigned int ch, unsigned int trigger, unsigned int mode)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	ccr = sc->sc_channel[ch].reg_ccr;

	/*
	 * Program the trigger number.  NOTE(review): the H/W field is written
	 * as (trigger + 1) — presumably the register is 1-based while callers
	 * pass a 0-based trigger ID; confirm against the TRM.
	 */
	ccr &= ~DMA4_CCR_SYNC_TRIGGER(0x7F);
	ccr |= DMA4_CCR_SYNC_TRIGGER(trigger + 1);

	if (mode & TI_SDMA_SYNC_FRAME)
		ccr |= DMA4_CCR_FRAME_SYNC(1);
	else
		ccr &= ~DMA4_CCR_FRAME_SYNC(1);

	if (mode & TI_SDMA_SYNC_BLOCK)
		ccr |= DMA4_CCR_BLOCK_SYNC(1);
	else
		ccr &= ~DMA4_CCR_BLOCK_SYNC(1);

	if (mode & TI_SDMA_SYNC_TRIG_ON_SRC)
		ccr |= DMA4_CCR_SEL_SRC_DST_SYNC(1);
	else
		ccr &= ~DMA4_CCR_SEL_SRC_DST_SYNC(1);

	sc->sc_channel[ch].reg_ccr = ccr;

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}
1061
1062
/**
1063
* ti_sdma_set_addr_mode - driver attach function
1064
* @ch: the channel number to set the endianness of
1065
* @rd_mode: the xfer source addressing mode (either DMA_ADDR_CONSTANT,
1066
* DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1067
* DMA_ADDR_DOUBLE_INDEX)
1068
* @wr_mode: the xfer destination addressing mode (either DMA_ADDR_CONSTANT,
1069
* DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
1070
* DMA_ADDR_DOUBLE_INDEX)
1071
*
1072
*
1073
* LOCKING:
1074
* DMA registers protected by internal mutex
1075
*
1076
* RETURNS:
1077
* EH_HANDLED or EH_NOT_HANDLED
1078
*/
1079
int
1080
ti_sdma_set_addr_mode(unsigned int ch, unsigned int src_mode,
1081
unsigned int dst_mode)
1082
{
1083
struct ti_sdma_softc *sc = ti_sdma_sc;
1084
uint32_t ccr;
1085
1086
/* Sanity check */
1087
if (sc == NULL)
1088
return (ENOMEM);
1089
1090
TI_SDMA_LOCK(sc);
1091
1092
if ((sc->sc_active_channels & (1 << ch)) == 0) {
1093
TI_SDMA_UNLOCK(sc);
1094
return (EINVAL);
1095
}
1096
1097
ccr = sc->sc_channel[ch].reg_ccr;
1098
1099
ccr &= ~DMA4_CCR_SRC_ADDRESS_MODE(0x3);
1100
ccr |= DMA4_CCR_SRC_ADDRESS_MODE(src_mode);
1101
1102
ccr &= ~DMA4_CCR_DST_ADDRESS_MODE(0x3);
1103
ccr |= DMA4_CCR_DST_ADDRESS_MODE(dst_mode);
1104
1105
sc->sc_channel[ch].reg_ccr = ccr;
1106
1107
sc->sc_channel[ch].need_reg_write = 1;
1108
1109
TI_SDMA_UNLOCK(sc);
1110
1111
return 0;
1112
}
1113
1114
/**
1115
* ti_sdma_probe - driver probe function
1116
* @dev: dma device handle
1117
*
1118
*
1119
*
1120
* RETURNS:
1121
* Always returns 0.
1122
*/
1123
static int
1124
ti_sdma_probe(device_t dev)
1125
{
1126
1127
if (!ofw_bus_status_okay(dev))
1128
return (ENXIO);
1129
1130
if (!ofw_bus_is_compatible(dev, "ti,omap4430-sdma"))
1131
return (ENXIO);
1132
1133
device_set_desc(dev, "TI sDMA Controller");
1134
return (0);
1135
}
1136
1137
/**
 * ti_sdma_attach - driver attach function
 * @dev: dma device handle
 *
 * Initialises memory mapping/pointers to the DMA register set and requests
 * IRQs. This is effectively the setup function for the driver.
 *
 * RETURNS:
 * 0 on success or an error code on failure.
 */
static int
ti_sdma_attach(device_t dev)
{
	struct ti_sdma_softc *sc = device_get_softc(dev);
	unsigned int timeout;
	unsigned int i;
	int rid;
	void *ihl;
	int err;

	/* Setup the basics */
	sc->sc_dev = dev;

	/* No channels active at the moment */
	sc->sc_active_channels = 0x00000000;

	/* Mutex to protect the shared data structures */
	TI_SDMA_LOCK_INIT(sc);

	/* Get the memory resource for the register mapping */
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Enable the interface and functional clocks */
	ti_sysc_clock_enable(device_get_parent(dev));

	/* Read the sDMA revision register and sanity check it's known */
	sc->sc_hw_rev = ti_sdma_read_4(sc,
	    ti_sysc_get_rev_address_offset_host(device_get_parent(dev)));
	device_printf(dev, "sDMA revision %08x\n", sc->sc_hw_rev);

	if (!ti_sdma_is_omap4_rev(sc) && !ti_sdma_is_omap3_rev(sc)) {
		device_printf(sc->sc_dev, "error - unknown sDMA H/W revision\n");
		/*
		 * NOTE(review): sc_mem_res and the mutex are not released on
		 * this (or the reset-timeout) error path — a leak if attach
		 * fails; acceptable only because the device never detaches.
		 */
		return (EINVAL);
	}

	/* Disable all interrupts */
	for (i = 0; i < NUM_DMA_IRQS; i++) {
		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(i), 0x00000000);
	}

	/* Soft-reset is only supported on pre-OMAP44xx devices */
	if (ti_sdma_is_omap3_rev(sc)) {
		/* Soft-reset */
		ti_sdma_write_4(sc, DMA4_OCP_SYSCONFIG, 0x0002);

		/* Set the timeout to 100ms (in ticks, minimum 1) */
		timeout = (hz < 10) ? 1 : ((100 * hz) / 1000);

		/* Wait for DMA reset to complete */
		while ((ti_sdma_read_4(sc, DMA4_SYSSTATUS) & 0x1) == 0x0) {
			/* Sleep for a tick */
			pause("DMARESET", 1);

			if (timeout-- == 0) {
				device_printf(sc->sc_dev, "sDMA reset operation timed out\n");
				return (EINVAL);
			}
		}
	}

	/*
	 * Install interrupt handlers for the four possible interrupts. Any channel
	 * can trip one of the four IRQs
	 */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL)
		panic("Unable to setup the dma irq handler.\n");

	err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, ti_sdma_intr, NULL, &ihl);
	if (err)
		panic("%s: Cannot register IRQ", device_get_name(dev));

	/* Store the DMA structure globally ... this driver should never be unloaded */
	ti_sdma_sc = sc;

	return (0);
}
1230
1231
/* newbus device interface: only probe and attach — no detach, the driver
 * is never unloaded (see ti_sdma_sc). */
static device_method_t ti_sdma_methods[] = {
	DEVMETHOD(device_probe, ti_sdma_probe),
	DEVMETHOD(device_attach, ti_sdma_attach),
	{0, 0},
};

/* Driver declaration; softc is allocated per-device by newbus. */
static driver_t ti_sdma_driver = {
	"ti_sdma",
	ti_sdma_methods,
	sizeof(struct ti_sdma_softc),
};

/* Attach under simplebus; depends on ti_sysc for clock enabling. */
DRIVER_MODULE(ti_sdma, simplebus, ti_sdma_driver, 0, 0);
MODULE_DEPEND(ti_sdma, ti_sysc, 1, 1, 1);
1245
1246