Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/sh/drivers/dma/dma-sh.c
26481 views
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* arch/sh/drivers/dma/dma-sh.c
4
*
5
* SuperH On-chip DMAC Support
6
*
7
* Copyright (C) 2000 Takashi YOSHII
8
* Copyright (C) 2003, 2004 Paul Mundt
9
* Copyright (C) 2005 Andriy Skulysh
10
*/
11
#include <linux/init.h>
12
#include <linux/interrupt.h>
13
#include <linux/module.h>
14
#include <linux/io.h>
15
#include <mach-dreamcast/mach/dma.h>
16
#include <asm/dma.h>
17
#include <asm/dma-register.h>
18
#include <cpu/dma-register.h>
19
#include <cpu/dma.h>
20
21
/*
 * Some of the SoCs feature two DMAC modules. In such a case, the channels are
 * distributed equally among them.
 */
#ifdef SH_DMAC_BASE1
#define SH_DMAC_NR_MD_CH	(CONFIG_NR_ONCHIP_DMA_CHANNELS / 2)
#else
#define SH_DMAC_NR_MD_CH	CONFIG_NR_ONCHIP_DMA_CHANNELS
#endif

/* Stride between consecutive channel register blocks, in bytes. */
#define SH_DMAC_CH_SZ		0x10

/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 */
#define RS_DUAL	(DM_INC | SM_INC | RS_AUTO | TS_INDEX2VAL(XMIT_SZ_32BIT))
38
39
static unsigned long dma_find_base(unsigned int chan)
40
{
41
unsigned long base = SH_DMAC_BASE0;
42
43
#ifdef SH_DMAC_BASE1
44
if (chan >= SH_DMAC_NR_MD_CH)
45
base = SH_DMAC_BASE1;
46
#endif
47
48
return base;
49
}
50
51
static unsigned long dma_base_addr(unsigned int chan)
52
{
53
unsigned long base = dma_find_base(chan);
54
55
chan = (chan % SH_DMAC_NR_MD_CH) * SH_DMAC_CH_SZ;
56
57
/* DMAOR is placed inside the channel register space. Step over it. */
58
if (chan >= DMAOR)
59
base += SH_DMAC_CH_SZ;
60
61
return base + chan;
62
}
63
64
#ifdef CONFIG_SH_DMA_IRQ_MULTI
/*
 * Multiplexed IRQs: channels 0..5 share the DMTE0 vector, channels 6
 * and up share the DMTE6 vector.
 */
static inline unsigned int get_dmte_irq(unsigned int chan)
{
	return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ;
}
#else

/*
 * Per-channel transfer-end IRQ vectors. Entries beyond the first four
 * exist only when the CPU header defines the corresponding vector.
 * NOTE(review): no bounds check is done in get_dmte_irq() -- assumes
 * the configured channel count never exceeds the entries built here.
 */
static unsigned int dmte_irq_map[] = {
	DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3,

#ifdef DMTE4_IRQ
	DMTE4_IRQ, DMTE4_IRQ + 1,
#endif

#ifdef DMTE6_IRQ
	DMTE6_IRQ, DMTE6_IRQ + 1,
#endif

#ifdef DMTE8_IRQ
	DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ,
#endif
};

/* Direct table lookup of a channel's transfer-end IRQ. */
static inline unsigned int get_dmte_irq(unsigned int chan)
{
	return dmte_irq_map[chan];
}
#endif
92
93
/*
 * We determine the correct shift size based off of the CHCR transmit size
 * for the given channel. Since we know that it will take:
 *
 *	info->count >> ts_shift[transmit_size]
 *
 * iterations to complete the transfer.
 */
/* Indexed by the combined CHCR TS field; see calc_xmit_shift(). */
static unsigned int ts_shift[] = TS_SHIFT;
102
103
static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
104
{
105
u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
106
int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
107
((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
108
109
return ts_shift[cnt];
110
}
111
112
/*
113
* The transfer end interrupt must read the chcr register to end the
114
* hardware interrupt active condition.
115
* Besides that it needs to waken any waiting process, which should handle
116
* setting up the next transfer.
117
*/
118
static irqreturn_t dma_tei(int irq, void *dev_id)
119
{
120
struct dma_channel *chan = dev_id;
121
u32 chcr;
122
123
chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
124
125
if (!(chcr & CHCR_TE))
126
return IRQ_NONE;
127
128
chcr &= ~(CHCR_IE | CHCR_DE);
129
__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
130
131
wake_up(&chan->wait_queue);
132
133
return IRQ_HANDLED;
134
}
135
136
static int sh_dmac_request_dma(struct dma_channel *chan)
137
{
138
if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
139
return 0;
140
141
return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
142
chan->dev_id, chan);
143
}
144
145
/*
 * Release the channel's transfer-end IRQ.
 *
 * NOTE(review): sh_dmac_request_dma() only requests the IRQ when
 * DMA_TEI_CAPABLE is set, but this frees unconditionally -- and
 * sh_dmac_configure_channel() may have changed that flag in between.
 * Verify request/free stay balanced for non-TEI channels.
 */
static void sh_dmac_free_dma(struct dma_channel *chan)
{
	free_irq(get_dmte_irq(chan->chan), chan);
}
149
150
static int
151
sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
152
{
153
if (!chcr)
154
chcr = RS_DUAL | CHCR_IE;
155
156
if (chcr & CHCR_IE) {
157
chcr &= ~CHCR_IE;
158
chan->flags |= DMA_TEI_CAPABLE;
159
} else {
160
chan->flags &= ~DMA_TEI_CAPABLE;
161
}
162
163
__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
164
165
chan->flags |= DMA_CONFIGURED;
166
return 0;
167
}
168
169
static void sh_dmac_enable_dma(struct dma_channel *chan)
170
{
171
int irq;
172
u32 chcr;
173
174
chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
175
chcr |= CHCR_DE;
176
177
if (chan->flags & DMA_TEI_CAPABLE)
178
chcr |= CHCR_IE;
179
180
__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
181
182
if (chan->flags & DMA_TEI_CAPABLE) {
183
irq = get_dmte_irq(chan->chan);
184
enable_irq(irq);
185
}
186
}
187
188
static void sh_dmac_disable_dma(struct dma_channel *chan)
189
{
190
int irq;
191
u32 chcr;
192
193
if (chan->flags & DMA_TEI_CAPABLE) {
194
irq = get_dmte_irq(chan->chan);
195
disable_irq(irq);
196
}
197
198
chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
199
chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
200
__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
201
}
202
203
static int sh_dmac_xfer_dma(struct dma_channel *chan)
204
{
205
/*
206
* If we haven't pre-configured the channel with special flags, use
207
* the defaults.
208
*/
209
if (unlikely(!(chan->flags & DMA_CONFIGURED)))
210
sh_dmac_configure_channel(chan, 0);
211
212
sh_dmac_disable_dma(chan);
213
214
/*
215
* Single-address mode usage note!
216
*
217
* It's important that we don't accidentally write any value to SAR/DAR
218
* (this includes 0) that hasn't been directly specified by the user if
219
* we're in single-address mode.
220
*
221
* In this case, only one address can be defined, anything else will
222
* result in a DMA address error interrupt (at least on the SH-4),
223
* which will subsequently halt the transfer.
224
*
225
* Channel 2 on the Dreamcast is a special case, as this is used for
226
* cascading to the PVR2 DMAC. In this case, we still need to write
227
* SAR and DAR, regardless of value, in order for cascading to work.
228
*/
229
if (chan->sar || (mach_is_dreamcast() &&
230
chan->chan == PVR2_CASCADE_CHAN))
231
__raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR));
232
if (chan->dar || (mach_is_dreamcast() &&
233
chan->chan == PVR2_CASCADE_CHAN))
234
__raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR));
235
236
__raw_writel(chan->count >> calc_xmit_shift(chan),
237
(dma_base_addr(chan->chan) + TCR));
238
239
sh_dmac_enable_dma(chan);
240
241
return 0;
242
}
243
244
static int sh_dmac_get_dma_residue(struct dma_channel *chan)
245
{
246
if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE))
247
return 0;
248
249
return __raw_readl(dma_base_addr(chan->chan) + TCR)
250
<< calc_xmit_shift(chan);
251
}
252
253
/*
 * DMAOR handling
 *
 * These parts carry two DMAC modules, each with its own DMAOR.
 */
#if defined(CONFIG_CPU_SUBTYPE_SH7723) || \
    defined(CONFIG_CPU_SUBTYPE_SH7724) || \
    defined(CONFIG_CPU_SUBTYPE_SH7780) || \
    defined(CONFIG_CPU_SUBTYPE_SH7785)
#define NR_DMAOR	2
#else
#define NR_DMAOR	1
#endif

/*
 * Select module n's DMAOR by asking dma_find_base() for the base of
 * that module's first channel. Macro arguments are fully parenthesized
 * (the original expanded 'data' bare) so arbitrary expressions expand
 * safely.
 */
#define dmaor_read_reg(n)		__raw_readw(dma_find_base((n) * \
						    SH_DMAC_NR_MD_CH) + DMAOR)
#define dmaor_write_reg(n, data)	__raw_writew((data), \
						     dma_find_base((n) * \
						     SH_DMAC_NR_MD_CH) + DMAOR)
270
271
static inline int dmaor_reset(int no)
272
{
273
unsigned long dmaor = dmaor_read_reg(no);
274
275
/* Try to clear the error flags first, incase they are set */
276
dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
277
dmaor_write_reg(no, dmaor);
278
279
dmaor |= DMAOR_INIT;
280
dmaor_write_reg(no, dmaor);
281
282
/* See if we got an error again */
283
if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
284
printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
285
return -EINVAL;
286
}
287
288
return 0;
289
}
290
291
/*
 * DMAE handling
 */
#ifdef CONFIG_CPU_SH4

/* A second address-error vector exists only when DMAE1_IRQ is defined. */
#if defined(DMAE1_IRQ)
#define NR_DMAE		2
#else
#define NR_DMAE		1
#endif

/* These strings also serve as the request_irq() dev_id cookies. */
static const char *dmae_name[] = {
	"DMAC Address Error0",
	"DMAC Address Error1"
};

#ifdef CONFIG_SH_DMA_IRQ_MULTI
/*
 * Multiplexed IRQs: module n's address-error IRQ is the shared vector
 * used by channel n * 6 (see get_dmte_irq()).
 */
static inline unsigned int get_dma_error_irq(int n)
{
	return get_dmte_irq(n * 6);
}
#else

/* Dedicated address-error vectors, one per DMAC module. */
static unsigned int dmae_irq_map[] = {
	DMAE0_IRQ,

#ifdef DMAE1_IRQ
	DMAE1_IRQ,
#endif
};

static inline unsigned int get_dma_error_irq(int n)
{
	return dmae_irq_map[n];
}
#endif
327
328
static irqreturn_t dma_err(int irq, void *dummy)
329
{
330
int i;
331
332
for (i = 0; i < NR_DMAOR; i++)
333
dmaor_reset(i);
334
335
disable_irq(irq);
336
337
return IRQ_HANDLED;
338
}
339
340
static int dmae_irq_init(void)
341
{
342
int n;
343
344
for (n = 0; n < NR_DMAE; n++) {
345
int i = request_irq(get_dma_error_irq(n), dma_err,
346
IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]);
347
if (unlikely(i < 0)) {
348
printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
349
return i;
350
}
351
}
352
353
return 0;
354
}
355
356
static void dmae_irq_free(void)
357
{
358
int n;
359
360
for (n = 0; n < NR_DMAE; n++)
361
free_irq(get_dma_error_irq(n), NULL);
362
}
363
#else
/* Parts without the DMAE address-error vectors: nothing to set up. */
static inline int dmae_irq_init(void)
{
	return 0;
}

/* Matching no-op teardown for parts without DMAE vectors. */
static void dmae_irq_free(void)
{
}
#endif
373
374
/* Operations implemented by this driver, handed to the SH DMA core. */
static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};
381
382
/* Controller description passed to register_dmac() at init time. */
static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};
388
389
/*
 * Bring up the on-chip DMAC: hook the address-error IRQs (on parts
 * that have them), reset every DMAOR, and register the controller
 * with the SH DMA core.
 *
 * Fix vs. original: on any failure after dmae_irq_init() succeeded,
 * the error IRQs are released again instead of being leaked.
 */
static int __init sh_dmac_init(void)
{
	struct dma_info *info = &sh_dmac_info;
	int i, rc;

	/*
	 * Initialize DMAE, for parts that support it.
	 */
	rc = dmae_irq_init();
	if (unlikely(rc != 0))
		return rc;

	/*
	 * Initialize DMAOR, and clean up any error flags that may have
	 * been set.
	 */
	for (i = 0; i < NR_DMAOR; i++) {
		rc = dmaor_reset(i);
		if (unlikely(rc != 0))
			goto err_free_irqs;
	}

	rc = register_dmac(info);
	if (unlikely(rc != 0))
		goto err_free_irqs;

	return 0;

err_free_irqs:
	dmae_irq_free();
	return rc;
}
413
414
/* Module teardown: release DMAE IRQs, then drop the DMAC registration. */
static void __exit sh_dmac_exit(void)
{
	dmae_irq_free();
	unregister_dmac(&sh_dmac_info);
}
419
420
/* subsys_initcall: runs before ordinary device/module initcalls. */
subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL v2");
426
427