// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <[email protected]>
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_dma.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN U32_MAX
#define MSGDMA_DESC_NUM 1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *                 bit 23:16 read burst
 *                 bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *          bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
	u32 read_addr_lo;
	u32 write_addr_lo;
	u32 len;
	u32 burst_seq_num;
	u32 stride;
	u32 read_addr_hi;
	u32 write_addr_hi;
	u32 control;
};

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s).
 */
#define MSGDMA_DESC_CTL_GO BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
				  MSGDMA_DESC_CTL_TR_ERR_IRQ | \
				  MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
				   MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
				 MSGDMA_DESC_CTL_TR_COMP_IRQ | \
				 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
				 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
				   MSGDMA_DESC_CTL_GEN_EOP | \
				   MSGDMA_DESC_CTL_TR_COMP_IRQ | \
				   MSGDMA_DESC_CTL_TR_ERR_IRQ | \
				   MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \
				   MSGDMA_DESC_CTL_END_ON_LEN | \
				   MSGDMA_DESC_CTL_TR_COMP_IRQ | \
				   MSGDMA_DESC_CTL_EARLY_IRQ | \
				   MSGDMA_DESC_CTL_TR_ERR_IRQ | \
				   MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD 0x00000001
#define MSGDMA_DESC_STRIDE_WR 0x00010000
#define MSGDMA_DESC_STRIDE_RW 0x00010001

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS 0x00		/* Read / Clear */
#define MSGDMA_CSR_CONTROL 0x04		/* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL 0x08	/* 31:16 - write fill level */
					/* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL 0x0c	/* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM 0x10	/* 31:16 - write seq number */
					/* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4)
#define MSGDMA_CSR_STAT_STOPPED BIT(5)
#define MSGDMA_CSR_STAT_RESETTING BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8)
#define MSGDMA_CSR_STAT_IRQ BIT(9)
#define MSGDMA_CSR_STAT_MASK GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ GENMASK(8, 0)

#define DESC_EMPTY (MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
		    MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP BIT(0)
#define MSGDMA_CSR_CTL_RESET BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v) (((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED 0x00
#define MSGDMA_RESP_STATUS 0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM BIT(8)
#define MSGDMA_RESP_ERR_MASK 0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node to move from the free list to the tx list
 * @tx_list: transmit list node
 */
struct msgdma_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct msgdma_extended_desc hw_desc;
	struct list_head node;
	struct list_head tx_list;
};

/**
 * struct msgdma_device - DMA device structure
 * @lock: protects the descriptor lists and the free descriptor count
 * @dev: device associated with this DMA controller
 * @irq_tasklet: tasklet that completes finished descriptors
 * @pending_list: descriptors submitted but not yet handed to the hardware
 * @free_list: unused software descriptors
 * @active_list: descriptors currently being processed by the hardware
 * @done_list: completed descriptors awaiting callback invocation
 * @desc_free_cnt: number of descriptors on @free_list
 * @idle: true when the hardware is not processing any descriptor
 * @dmadev: dmaengine device structure
 * @dmachan: dmaengine channel (this controller exposes a single channel)
 * @hw_desq: descriptor queue DMA address
 * @sw_desq: software descriptor pool
 * @npendings: number of pending transfers
 * @slave_cfg: DMA slave configuration
 * @irq: interrupt number
 * @csr: mSGDMA dispatcher control and status register region
 * @desc: mSGDMA descriptor slave port region
 * @resp: mSGDMA response slave port region (optional)
 */
struct msgdma_device {
	spinlock_t lock;
	struct device *dev;
	struct tasklet_struct irq_tasklet;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct list_head done_list;
	u32 desc_free_cnt;
	bool idle;

	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct msgdma_sw_desc *sw_desq;
	unsigned int npendings;

	struct dma_slave_config slave_cfg;

	int irq;

	/* mSGDMA controller */
	void __iomem *csr;

	/* mSGDMA descriptors */
	void __iomem *desc;

	/* mSGDMA response */
	void __iomem *resp;
};

#define to_mdev(chan) container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx)

/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&mdev->lock, flags);

	INIT_LIST_HEAD(&desc->tx_list);

	return desc;
}

/**
 * msgdma_free_descriptor - Free a transaction descriptor
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 *
 * Return the descriptor and all descriptors on its tx_list to the free list.
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
				   struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *child, *next;

	mdev->desc_free_cnt++;
	list_move_tail(&desc->node, &mdev->free_list);
	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
		mdev->desc_free_cnt++;
		list_move_tail(&child->node, &mdev->free_list);
	}
}

/**
 * msgdma_free_desc_list - Free a list of descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List of descriptors to free
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
				  struct list_head *list)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
			       dma_addr_t dst, dma_addr_t src, size_t len,
			       u32 stride)
{
	/* Set lower 32bits of src & dst addresses in the descriptor */
	desc->read_addr_lo = lower_32_bits(src);
	desc->write_addr_lo = lower_32_bits(dst);

	/* Set upper 32bits of src & dst addresses in the descriptor */
	desc->read_addr_hi = upper_32_bits(src);
	desc->write_addr_hi = upper_32_bits(dst);

	desc->len = len;
	desc->stride = stride;
	desc->burst_seq_num = 0;	/* 0 will result in max burst length */

	/*
	 * Don't set interrupt on xfer end yet, this will be done later
	 * for the "last" descriptor
	 */
	desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
			MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
	desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct msgdma_device *mdev = to_mdev(tx->chan);
	struct msgdma_sw_desc *new;
	dma_cookie_t cookie;
	unsigned long flags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&mdev->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&new->node, &mdev->pending_list);
	spin_unlock_irqrestore(&mdev->lock, flags);

	return cookie;
}

/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
		   dma_addr_t dma_src, size_t len, ulong flags)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *new, *first = NULL;
	struct msgdma_extended_desc *desc;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
		desc = &new->hw_desc;
		msgdma_desc_config(desc, dma_dst, dma_src, copy,
				   MSGDMA_DESC_STRIDE_RW);
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	msgdma_desc_config_eod(desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
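
/*
 * Editorial note: the sketch below is not part of the driver. It is an
 * illustrative example of how a generic dmaengine client could exercise the
 * memcpy path implemented above; the variable names and the assumption that
 * src_dma/dst_dma are already DMA-mapped are placeholders, while the client
 * API calls themselves (dma_request_chan_by_mask(), dmaengine_prep_dma_memcpy(),
 * dmaengine_submit(), dma_async_issue_pending()) are the standard dmaengine
 * interfaces.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *
 *	cookie = dmaengine_submit(tx);		// ends up in msgdma_tx_submit()
 *	dma_async_issue_pending(chan);		// ends up in msgdma_issue_pending()
 */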

/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: Scatterlist of memory buffers
 * @sg_len: Number of entries in the scatterlist
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		     unsigned int sg_len, enum dma_transfer_direction dir,
		     unsigned long flags, void *context)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct dma_slave_config *cfg = &mdev->slave_cfg;
	struct msgdma_sw_desc *new, *first = NULL;
	void *desc = NULL;
	size_t len, avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt = 0, i;
	struct scatterlist *sg;
	u32 stride;
	unsigned long irqflags;

	for_each_sg(sgl, sg, sg_len, i)
		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	avail = sg_dma_len(sgl);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		desc = &new->hw_desc;
		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

		if (dir == DMA_MEM_TO_DEV) {
			dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			dma_dst = cfg->dst_addr;
			stride = MSGDMA_DESC_STRIDE_RD;
		} else {
			dma_src = cfg->src_addr;
			dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			stride = MSGDMA_DESC_STRIDE_WR;
		}
		msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
		avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);

		/* Fetch the next scatterlist entry */
		if (avail == 0) {
			if (sg_len == 0)
				break;
			sgl = sg_next(sgl);
			if (sgl == NULL)
				break;
			sg_len--;
			avail = sg_dma_len(sgl);
		}
	}

	msgdma_desc_config_eod(desc);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
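
/*
 * Editorial note: illustrative sketch, not driver code. It shows how a typical
 * dmaengine client might drive the slave path above; the channel name "rx",
 * the FIFO address and the buffer handling are assumed placeholders, while the
 * client API calls (dmaengine_slave_config(), dmaengine_prep_slave_single(),
 * dmaengine_submit(), dma_async_issue_pending()) are the standard dmaengine
 * interfaces.
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_phys_addr,	// device data register/FIFO
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	chan = dma_request_chan(dev, "rx");	// hypothetical dma-names entry
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	dmaengine_slave_config(chan, &cfg);	// reaches msgdma_dma_config()
 *
 *	tx = dmaengine_prep_slave_single(chan, buf_dma, buf_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */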

static int msgdma_dma_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct msgdma_device *mdev = to_mdev(dchan);

	memcpy(&mdev->slave_cfg, config, sizeof(*config));

	return 0;
}

static void msgdma_reset(struct msgdma_device *mdev)
{
	u32 val;
	int ret;

	/* Reset mSGDMA */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
				 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
				 1, 10000);
	if (ret)
		dev_err(mdev->dev, "DMA channel did not reset\n");

	/* Clear all status bits */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

	/* Enable the DMA controller including interrupts */
	iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

	mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
			    struct msgdma_sw_desc *desc)
{
	void __iomem *hw_desc = mdev->desc;

	/*
	 * Check if the DESC FIFO is not full. If it is full, we need to
	 * wait for at least one entry to become free again.
	 */
	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
	       MSGDMA_CSR_STAT_DESC_BUF_FULL)
		mdelay(1);

	/*
	 * The descriptor needs to get copied into the descriptor FIFO
	 * of the DMA controller. The descriptor will get flushed to the
	 * FIFO once the last word (the control word) is written. Since we
	 * are not 100% sure that memcpy() writes all words in the "correct"
	 * order (addresses from low to high) on all architectures, we make
	 * sure this control word is written last by writing it separately
	 * and adding write barriers here.
	 */
	memcpy((void __force *)hw_desc, &desc->hw_desc,
	       sizeof(desc->hw_desc) - sizeof(u32));

	/* Write control word last to flush this descriptor into the FIFO */
	mdev->idle = false;
	wmb();
	iowrite32(desc->hw_desc.control, hw_desc +
		  offsetof(struct msgdma_extended_desc, control));
	wmb();
}

/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
				     struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *sdesc, *next;

	msgdma_copy_one(mdev, desc);

	list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
		msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	if (!mdev->idle)
		return;

	desc = list_first_entry_or_null(&mdev->pending_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;

	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
	msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
	struct msgdma_device *mdev = to_mdev(chan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_start_transfer(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc, *next;
	unsigned long irqflags;

	spin_lock_irqsave(&mdev->lock, irqflags);

	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
		struct dmaengine_desc_callback cb;

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&mdev->lock, irqflags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&mdev->lock, irqflags);
		}

		/* Run any dependencies, then free the descriptor */
		msgdma_free_descriptor(mdev, desc);
	}

	spin_unlock_irqrestore(&mdev->lock, irqflags);
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	desc = list_first_entry_or_null(&mdev->active_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &mdev->done_list);
}

/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
	msgdma_free_desc_list(mdev, &mdev->active_list);
	msgdma_free_desc_list(mdev, &mdev->pending_list);
	msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_free_descriptors(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
	kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *desc;
	int i;

	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
	if (!mdev->sw_desq)
		return -ENOMEM;

	mdev->idle = true;
	mdev->desc_free_cnt = MSGDMA_DESC_NUM;

	INIT_LIST_HEAD(&mdev->free_list);

	for (i = 0; i < MSGDMA_DESC_NUM; i++) {
		desc = mdev->sw_desq + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
		desc->async_tx.tx_submit = msgdma_tx_submit;
		list_add_tail(&desc->node, &mdev->free_list);
	}

	return MSGDMA_DESC_NUM;
}

/**
 * msgdma_tasklet - Tasklet handler for descriptor completion
 * @t: Pointer to the Altera mSGDMA device's completion tasklet
 */
static void msgdma_tasklet(struct tasklet_struct *t)
{
	struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
	u32 count;
	u32 __maybe_unused size;
	u32 __maybe_unused status;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);

	if (mdev->resp) {
		/* Read number of responses that are available */
		count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
		dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
			__func__, __LINE__, count);
	} else {
		count = 1;
	}

	while (count--) {
		/*
		 * Read both longwords to purge this response from the FIFO.
		 * On Avalon-MM implementations, size and status do not
		 * have any real values, like transferred bytes or error
		 * bits. So we need to just drop these values.
		 */
		if (mdev->resp) {
			size = ioread32(mdev->resp +
					MSGDMA_RESP_BYTES_TRANSFERRED);
			status = ioread32(mdev->resp +
					  MSGDMA_RESP_STATUS);
		}

		msgdma_complete_descriptor(mdev);
	}

	spin_unlock_irqrestore(&mdev->lock, flags);

	msgdma_chan_desc_cleanup(mdev);
}

/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
	struct msgdma_device *mdev = data;
	u32 status;

	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
	if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
		/* Start next transfer if the DMA controller is idle */
		spin_lock(&mdev->lock);
		mdev->idle = true;
		msgdma_start_transfer(mdev);
		spin_unlock(&mdev->lock);
	}

	tasklet_schedule(&mdev->irq_tasklet);

	/* Clear interrupt in mSGDMA controller */
	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

	return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove() - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
	if (!mdev)
		return;

	devm_free_irq(mdev->dev, mdev->irq, mdev);
	tasklet_kill(&mdev->irq_tasklet);
	list_del(&mdev->dmachan.device_node);
}

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr,
			   bool optional)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		if (optional) {
			*ptr = NULL;
			dev_info(device, "optional resource %s not defined\n",
				 name);
			return 0;
		}
		dev_err(device, "mandatory resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap(device, region->start,
			    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap of %s failed!", name);
		return -ENOMEM;
	}

	return 0;
}

/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
	struct msgdma_device *mdev;
	struct dma_device *dma_dev;
	struct resource *dma_res;
	int ret;

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
	if (!mdev)
		return -ENOMEM;

	mdev->dev = &pdev->dev;

	/* Map CSR space */
	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false);
	if (ret)
		return ret;

	/* Map (extended) descriptor space */
	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false);
	if (ret)
		return ret;

	/* Map response space */
	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);

	/* Get interrupt nr from platform data */
	mdev->irq = platform_get_irq(pdev, 0);
	if (mdev->irq < 0)
		return -ENXIO;

	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
			       0, dev_name(&pdev->dev), mdev);
	if (ret)
		return ret;

	tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);

	dma_cookie_init(&mdev->dmachan);

	spin_lock_init(&mdev->lock);

	INIT_LIST_HEAD(&mdev->active_list);
	INIT_LIST_HEAD(&mdev->pending_list);
	INIT_LIST_HEAD(&mdev->done_list);
	INIT_LIST_HEAD(&mdev->free_list);

	dma_dev = &mdev->dmadev;

	/* Set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
			      BIT(DMA_MEM_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Init DMA link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* Set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = msgdma_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
	dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
	dma_dev->device_config = msgdma_dma_config;

	dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

	mdev->dmachan.device = dma_dev;
	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

	/* Set DMA mask to 64 bits */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		goto fail;
	}

	msgdma_reset(mdev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto fail;

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret == -EINVAL)
		dev_warn(&pdev->dev, "device was not probed from DT");
	else if (ret && ret != -ENODEV)
		goto fail;

	dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

	return 0;

fail:
	msgdma_dev_remove(mdev);

	return ret;
}

/**
 * msgdma_remove() - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void msgdma_remove(struct platform_device *pdev)
{
	struct msgdma_device *mdev = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->dmadev);
	msgdma_dev_remove(mdev);

	dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");
}

#ifdef CONFIG_OF
static const struct of_device_id msgdma_match[] = {
	{ .compatible = "altr,socfpga-msgdma", },
	{ }
};

MODULE_DEVICE_TABLE(of, msgdma_match);
#endif
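
/*
 * Editorial note: illustrative device tree snippet, not part of the driver.
 * The node name, unit address, register sizes and interrupt specifier are
 * placeholders; the compatible string, the "csr"/"desc"/"resp" reg-names,
 * the optional "resp" region and the single interrupt match what
 * msgdma_probe() looks up above, and #dma-cells = <1> matches the
 * of_dma_xlate_by_chan_id() translation it registers. Consult the DT binding
 * for the authoritative format.
 *
 *	msgdma0: dma-controller@ff200000 {
 *		compatible = "altr,socfpga-msgdma";
 *		reg = <0xff200000 0x20>,	// csr
 *		      <0xff200020 0x20>,	// desc
 *		      <0xff200040 0x8>;		// resp (optional)
 *		reg-names = "csr", "desc", "resp";
 *		interrupts = <0 40 4>;
 *		#dma-cells = <1>;
 *	};
 */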

static struct platform_driver msgdma_driver = {
	.driver = {
		.name = "altera-msgdma",
		.of_match_table = of_match_ptr(msgdma_match),
	},
	.probe = msgdma_probe,
	.remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <[email protected]>");
MODULE_LICENSE("GPL");