GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/dma/pch_dma.c
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE 0x0
#define DMA_CTL0_SG 0x1
#define DMA_CTL0_ONESHOT 0x2
#define DMA_CTL0_MODE_MASK_BITS 0x3
#define DMA_CTL0_DIR_SHIFT_BITS 2
#define DMA_CTL0_BITS_PER_CH 4

#define DMA_CTL2_START_SHIFT_BITS 8
#define DMA_CTL2_IRQ_ENABLE_MASK ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE 0x0
#define DMA_STATUS_DESC_READ 0x1
#define DMA_STATUS_WAIT 0x2
#define DMA_STATUS_ACCESS 0x3
#define DMA_STATUS_BITS_PER_CH 2
#define DMA_STATUS_MASK_BITS 0x3
#define DMA_STATUS_SHIFT_BITS 16
#define DMA_STATUS_IRQ(x) (0x1 << (x))
#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8))

#define DMA_DESC_WIDTH_SHIFT_BITS 12
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE 0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES 0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES 0x7FF
#define DMA_DESC_END_WITHOUT_IRQ 0x0
#define DMA_DESC_END_WITH_IRQ 0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
#define DMA_DESC_FOLLOW_WITH_IRQ 0x3

#define MAX_CHAN_NR 8

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32 dev_addr;
	u32 mem_addr;
	u32 size;
	u32 next;
};

struct pch_dma_regs {
	u32 dma_ctl0;
	u32 dma_ctl1;
	u32 dma_ctl2;
	u32 dma_ctl3;
	u32 dma_sts0;
	u32 dma_sts1;
	u32 dma_sts2;
	u32 reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head desc_node;
	struct list_head tx_list;
};

struct pch_dma_chan {
	struct dma_chan chan;
	void __iomem *membase;
	enum dma_data_direction dir;
	struct tasklet_struct tasklet;
	unsigned long err_status;

	spinlock_t lock;

	dma_cookie_t completed_cookie;
	struct list_head active_list;
	struct list_head queue;
	struct list_head free_list;
	unsigned int descs_allocated;
};

#define PDC_DEV_ADDR 0x00
#define PDC_MEM_ADDR 0x04
#define PDC_SIZE 0x08
#define PDC_NEXT 0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device dma;
	void __iomem *membase;
	struct pci_pool *pool;
	struct pch_dma_regs regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0 0x00
#define PCH_DMA_CTL1 0x04
#define PCH_DMA_CTL2 0x08
#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

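/*
 * Enable or disable the interrupt for one channel by setting or clearing
 * the channel's bit in the CTL2 register.
 */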
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << chan->chan_id;
	else
		val &= ~(0x1 << chan->chan_id);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

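/*
 * Program the transfer direction bit for this channel. Channels 0-7 are
 * configured through CTL0, channels 8-11 through CTL3.
 */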
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		val = dma_readl(pd, CTL3);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

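/*
 * Select the channel transfer mode (disable, scatter-gather or one-shot)
 * in CTL0 for channels 0-7 or CTL3 for channels 8-11.
 */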
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			(DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		val = dma_readl(pd, CTL3);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			(DMA_CTL0_BITS_PER_CH * ch));
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);

		dma_writel(pd, CTL3, val);

	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
		return true;
	else
		return false;
}

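/*
 * Start a transfer on an idle channel. A single descriptor is written
 * directly into the channel registers and run in one-shot mode; a
 * descriptor chain is started by pointing NEXT at the first descriptor
 * and switching to scatter-gather mode.
 */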
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
				      struct pch_dma_desc *desc)
{
	dma_cookie_t cookie = pd_chan->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	pd_chan->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

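/*
 * dmaengine tx_submit callback: assign a cookie to the descriptor and
 * either start it immediately (if the channel has no active work) or
 * add it to the pending queue.
 */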
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock(&pd_chan->lock);
	cookie = pdc_assign_cookie(pd_chan, desc);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

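/*
 * Get a descriptor for a new transfer: reuse an ACKed descriptor from
 * the channel's free list, or fall back to allocating a fresh one from
 * the PCI pool.
 */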
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

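/*
 * dmaengine alloc_chan_resources callback: pre-allocate
 * init_nr_desc_per_channel descriptors for the channel, reset the
 * cookies and enable the channel interrupt.
 */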
static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				"Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	pd_chan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_bh(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_bh(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	int ret;

	spin_lock_bh(&pd_chan->lock);
	last_completed = pd_chan->completed_cookie;
	last_used = chan->cookie;
	spin_unlock_bh(&pd_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	return ret;
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

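/*
 * dmaengine prep_slave_sg callback: build a descriptor (chain) from the
 * scatterlist, encoding the slave register address, the transfer width
 * and the per-width size limit into each descriptor. The last descriptor
 * raises an interrupt only if DMA_PREP_INTERRUPT is requested.
 */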
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_data_direction direction, unsigned long flags)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_FROM_DEVICE)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_TO_DEVICE)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_phys(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

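/*
 * dmaengine device_control callback: only DMA_TERMINATE_ALL is
 * supported; it disables the channel and completes every active and
 * queued descriptor.
 */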
static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_bh(&pd_chan->lock);

	return 0;
}

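/*
 * Per-channel tasklet, scheduled from the interrupt handler: handle a
 * flagged error or advance to the next queued descriptor.
 */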
static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

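/*
 * Shared interrupt handler: scan STS0 for per-channel interrupt and
 * error bits, flag errors, schedule the channel tasklets and clear the
 * handled status bits.
 */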
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	int i;
	int ret = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (sts0 & DMA_STATUS_IRQ(i)) {
			if (sts0 & DMA_STATUS_ERR(i))
				set_bit(0, &pd_chan->err_status);

			tasklet_schedule(&pd_chan->tasklet);
			ret = IRQ_HANDLED;
		}

	}

	/* clear interrupt bits in status register */
	dma_writel(pd, STS0, sts0);

	return ret;
}

#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
#endif

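/*
 * PCI probe: map BAR 1, request the shared IRQ, create the descriptor
 * pool, set up one pch_dma_chan per hardware channel and register the
 * dmaengine device.
 */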
static int __devinit pch_dma_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(struct pch_dma)+
		sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	pd->dma.dev = &pdev->dev;
	pd->dma.chancnt = nr_channels;

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		pd_chan->chan.cookie = 1;
		pd_chan->chan.chan_id = i;

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_control = pd_device_control;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	return err;
}

static void __devexit pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_disable(&pd_chan->tasklet);
			tasklet_kill(&pd_chan->tasklet);
		}

		pci_pool_destroy(pd->pool);
		free_irq(pdev->irq, pd);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B

DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
	{ 0, },
};

static struct pci_driver pch_dma_driver = {
	.name = DRV_NAME,
	.id_table = pch_dma_id_table,
	.probe = pch_dma_probe,
	.remove = __devexit_p(pch_dma_remove),
#ifdef CONFIG_PM
	.suspend = pch_dma_suspend,
	.resume = pch_dma_resume,
#endif
};

static int __init pch_dma_init(void)
{
	return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
	pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);

MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <[email protected]>");
MODULE_LICENSE("GPL v2");