GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/loongson2-apb-dma.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Loongson-2 APB DMA Controller
 *
 * Copyright (C) 2017-2023 Loongson Corporation
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Global Configuration Register */
#define LDMA_ORDER_ERG		0x0

/* Bitfield definitions */

/* Bitfields in Global Configuration Register */
#define LDMA_64BIT_EN		BIT(0) /* 1: 64 bit support */
#define LDMA_UNCOHERENT_EN	BIT(1) /* 0: cache, 1: uncache */
#define LDMA_ASK_VALID		BIT(2)
#define LDMA_START		BIT(3) /* DMA start operation */
#define LDMA_STOP		BIT(4) /* DMA stop operation */
#define LDMA_CONFIG_MASK	GENMASK_ULL(4, 0) /* DMA controller config bits mask */
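
/*
 * Note: the low LDMA_CONFIG_MASK bits of LDMA_ORDER_ERG carry the
 * control flags above, while the remaining bits hold the bus address
 * of the first hardware descriptor; see ls2x_dma_start_transfer(),
 * which masks the config bits off the descriptor address before ORing
 * the flags back in.
 */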

/* Bitfields in ndesc_addr field of HW descriptor */
#define LDMA_DESC_EN		BIT(0) /* 1: the next descriptor is valid */
#define LDMA_DESC_ADDR_LOW	GENMASK(31, 1)

/* Bitfields in cmd field of HW descriptor */
#define LDMA_INT		BIT(1)  /* Enable DMA interrupts */
#define LDMA_DATA_DIRECTION	BIT(12) /* 1: write to device, 0: read from device */

#define LDMA_SLAVE_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

#define LDMA_MAX_TRANS_LEN	U32_MAX

/*-- descriptors -----------------------------------------------------*/

/*
 * struct ls2x_dma_hw_desc - DMA HW descriptor
 * @ndesc_addr: the next descriptor low address.
 * @mem_addr: memory low address.
 * @apb_addr: device buffer address.
 * @len: length of a piece of carried content, in words.
 * @step_len: length between two moved memory data blocks.
 * @step_times: number of blocks to be carried in a single DMA operation.
 * @cmd: descriptor command or state.
 * @stats: DMA status.
 * @high_ndesc_addr: the next descriptor high address.
 * @high_mem_addr: memory high address.
 * @reserved: reserved
 */
struct ls2x_dma_hw_desc {
	u32 ndesc_addr;
	u32 mem_addr;
	u32 apb_addr;
	u32 len;
	u32 step_len;
	u32 step_times;
	u32 cmd;
	u32 stats;
	u32 high_ndesc_addr;
	u32 high_mem_addr;
	u32 reserved[2];
} __packed;
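
/*
 * The controller follows ndesc_addr/high_ndesc_addr on its own, so the
 * struct is __packed to pin the in-memory layout to what the hardware
 * expects; instances are allocated from a dma_pool so they are safe
 * for the engine to fetch.
 */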

/*
 * struct ls2x_dma_sg - ls2x dma scatter gather entry
 * @hw: the pointer to DMA HW descriptor.
 * @llp: physical address of the DMA HW descriptor.
 * @phys: destination or source memory address.
 * @len: number of bytes to transfer.
 */
struct ls2x_dma_sg {
	struct ls2x_dma_hw_desc	*hw;
	dma_addr_t		llp;
	dma_addr_t		phys;
	u32			len;
};

/*
 * struct ls2x_dma_desc - software descriptor
 * @vdesc: embedded virtual DMA descriptor.
 * @cyclic: set when the transfer is cyclic.
 * @burst_size: burst size of transaction, in words.
 * @desc_num: number of sg entries.
 * @direction: transfer direction, to or from device.
 * @status: dma controller status.
 * @sg: array of sgs.
 */
struct ls2x_dma_desc {
	struct virt_dma_desc		vdesc;
	bool				cyclic;
	size_t				burst_size;
	u32				desc_num;
	enum dma_transfer_direction	direction;
	enum dma_status			status;
	struct ls2x_dma_sg		sg[] __counted_by(desc_num);
};

/*-- Channels --------------------------------------------------------*/

/*
 * struct ls2x_dma_chan - internal representation of an LS2X APB DMA channel
 * @vchan: virtual dma channel entry.
 * @desc: currently active ls2x software descriptor.
 * @pool: DMA pool for hardware descriptors.
 * @irq: irq line.
 * @sconfig: configuration for slave transfers, passed via .device_config
 */
struct ls2x_dma_chan {
	struct virt_dma_chan	vchan;
	struct ls2x_dma_desc	*desc;
	void			*pool;
	int			irq;
	struct dma_slave_config	sconfig;
};

/*-- Controller ------------------------------------------------------*/

/*
 * struct ls2x_dma_priv - LS2X APB DMAC specific information
 * @ddev: dmaengine dma_device object.
 * @dma_clk: DMAC clock source.
 * @regs: memory mapped register base.
 * @lchan: the controller's single DMA channel.
 */
struct ls2x_dma_priv {
	struct dma_device	ddev;
	struct clk		*dma_clk;
	void __iomem		*regs;
	struct ls2x_dma_chan	lchan;
};

/*-- Helper functions ------------------------------------------------*/

static inline struct ls2x_dma_desc *to_ldma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct ls2x_dma_desc, vdesc);
}

static inline struct ls2x_dma_chan *to_ldma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ls2x_dma_chan, vchan.chan);
}

static inline struct ls2x_dma_priv *to_ldma_priv(struct dma_device *ddev)
{
	return container_of(ddev, struct ls2x_dma_priv, ddev);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static void ls2x_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(vdesc->tx.chan);
	struct ls2x_dma_desc *desc = to_ldma_desc(vdesc);
	int i;

	for (i = 0; i < desc->desc_num; i++) {
		if (desc->sg[i].hw)
			dma_pool_free(lchan->pool, desc->sg[i].hw,
				      desc->sg[i].llp);
	}

	kfree(desc);
}

static void ls2x_dma_write_cmd(struct ls2x_dma_chan *lchan, bool cmd)
{
	struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
	u64 val;

	val = lo_hi_readq(priv->regs + LDMA_ORDER_ERG) & ~LDMA_CONFIG_MASK;
	val |= LDMA_64BIT_EN | cmd;
	lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
}

static void ls2x_dma_start_transfer(struct ls2x_dma_chan *lchan)
{
	struct ls2x_dma_priv *priv = to_ldma_priv(lchan->vchan.chan.device);
	struct ls2x_dma_sg *ldma_sg;
	struct virt_dma_desc *vdesc;
	u64 val;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&lchan->vchan);
	if (!vdesc) {
		lchan->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	lchan->desc = to_ldma_desc(vdesc);
	ldma_sg = &lchan->desc->sg[0];

	/* Start DMA */
	lo_hi_writeq(0, priv->regs + LDMA_ORDER_ERG);
	val = (ldma_sg->llp & ~LDMA_CONFIG_MASK) | LDMA_64BIT_EN | LDMA_START;
	lo_hi_writeq(val, priv->regs + LDMA_ORDER_ERG);
}

static size_t ls2x_dmac_detect_burst(struct ls2x_dma_chan *lchan)
{
	u32 maxburst, buswidth;

	/* Reject definitely invalid configurations */
	if ((lchan->sconfig.src_addr_width & LDMA_SLAVE_BUSWIDTHS) &&
	    (lchan->sconfig.dst_addr_width & LDMA_SLAVE_BUSWIDTHS))
		return 0;

	if (lchan->sconfig.direction == DMA_MEM_TO_DEV) {
		maxburst = lchan->sconfig.dst_maxburst;
		buswidth = lchan->sconfig.dst_addr_width;
	} else {
		maxburst = lchan->sconfig.src_maxburst;
		buswidth = lchan->sconfig.src_addr_width;
	}

	/* If maxburst is zero, fall back to LDMA_MAX_TRANS_LEN */
	return maxburst ? (maxburst * buswidth) >> 2 : LDMA_MAX_TRANS_LEN;
}
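
/*
 * Worked example: dst_maxburst = 4 with dst_addr_width =
 * DMA_SLAVE_BUSWIDTH_8_BYTES gives (4 * 8) >> 2 = 8, i.e. a burst of
 * eight 32-bit words.
 */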

static void ls2x_dma_fill_desc(struct ls2x_dma_chan *lchan, u32 sg_index,
			       struct ls2x_dma_desc *desc)
{
	struct ls2x_dma_sg *ldma_sg = &desc->sg[sg_index];
	u32 num_segments, segment_size;

	if (desc->direction == DMA_MEM_TO_DEV) {
		ldma_sg->hw->cmd = LDMA_INT | LDMA_DATA_DIRECTION;
		ldma_sg->hw->apb_addr = lchan->sconfig.dst_addr;
	} else {
		ldma_sg->hw->cmd = LDMA_INT;
		ldma_sg->hw->apb_addr = lchan->sconfig.src_addr;
	}

	ldma_sg->hw->mem_addr = lower_32_bits(ldma_sg->phys);
	ldma_sg->hw->high_mem_addr = upper_32_bits(ldma_sg->phys);

	/* Split into multiple equally sized segments if necessary */
	num_segments = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, desc->burst_size);
	segment_size = DIV_ROUND_UP((ldma_sg->len + 3) >> 2, num_segments);

	/* Word count register takes input in words */
	ldma_sg->hw->len = segment_size;
	ldma_sg->hw->step_times = num_segments;
	ldma_sg->hw->step_len = 0;

	/* Let's make a linked list */
	if (sg_index) {
		desc->sg[sg_index - 1].hw->ndesc_addr = ldma_sg->llp | LDMA_DESC_EN;
		desc->sg[sg_index - 1].hw->high_ndesc_addr = upper_32_bits(ldma_sg->llp);
	}
}
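
/*
 * Worked example for the split above: an 8 KiB sg entry is
 * (8192 + 3) >> 2 = 2048 words; with a burst size of 8 words that is
 * num_segments = DIV_ROUND_UP(2048, 8) = 256 and segment_size =
 * DIV_ROUND_UP(2048, 256) = 8, i.e. 256 blocks of 8 words.
 */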

/*-- DMA Engine API --------------------------------------------------*/

/*
 * ls2x_dma_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * Return: the number of allocated descriptors
 */
static int ls2x_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	lchan->pool = dma_pool_create(dev_name(chan2dev(chan)),
				      chan->device->dev, PAGE_SIZE,
				      __alignof__(struct ls2x_dma_hw_desc), 0);
	if (!lchan->pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}

	return 1;
}

/*
 * ls2x_dma_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void ls2x_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);

	vchan_free_chan_resources(to_virt_chan(chan));
	dma_pool_destroy(lchan->pool);
	lchan->pool = NULL;
}

/*
 * ls2x_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
ls2x_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       u32 sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
	struct ls2x_dma_desc *desc;
	struct scatterlist *sg;
	size_t burst_size;
	int i;

	if (unlikely(!sg_len || !is_slave_direction(direction)))
		return NULL;

	burst_size = ls2x_dmac_detect_burst(lchan);
	if (!burst_size)
		return NULL;

	desc = kzalloc(struct_size(desc, sg, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc_num = sg_len;
	desc->direction = direction;
	desc->burst_size = burst_size;

	for_each_sg(sgl, sg, sg_len, i) {
		struct ls2x_dma_sg *ldma_sg = &desc->sg[i];

		/* Allocate DMA capable memory for hardware descriptor */
		ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
		if (!ldma_sg->hw) {
			desc->desc_num = i;
			ls2x_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		ldma_sg->phys = sg_dma_address(sg);
		ldma_sg->len = sg_dma_len(sg);

		ls2x_dma_fill_desc(lchan, i, desc);
	}

	/* Clear the enable bit in the last descriptor to terminate the list */
	desc->sg[sg_len - 1].hw->ndesc_addr &= ~LDMA_DESC_EN;
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
}
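
/*
 * Illustrative client usage (not part of this driver; "tx" and cfg are
 * hypothetical): a peripheral driver would typically do
 *
 *	chan = dma_request_chan(dev, "tx");
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * which reaches this function via .device_prep_slave_sg and is kicked
 * off by ls2x_dma_issue_pending() below.
 */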

/*
 * ls2x_dma_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
ls2x_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
			 size_t period_len, enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
	struct ls2x_dma_desc *desc;
	size_t burst_size;
	u32 num_periods;
	int i;

	if (unlikely(!buf_len || !period_len))
		return NULL;

	if (unlikely(!is_slave_direction(direction)))
		return NULL;

	burst_size = ls2x_dmac_detect_burst(lchan);
	if (!burst_size)
		return NULL;

	num_periods = buf_len / period_len;
	desc = kzalloc(struct_size(desc, sg, num_periods), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc_num = num_periods;
	desc->direction = direction;
	desc->burst_size = burst_size;

	/* Build cyclic linked list */
	for (i = 0; i < num_periods; i++) {
		struct ls2x_dma_sg *ldma_sg = &desc->sg[i];

		/* Allocate DMA capable memory for hardware descriptor */
		ldma_sg->hw = dma_pool_alloc(lchan->pool, GFP_NOWAIT, &ldma_sg->llp);
		if (!ldma_sg->hw) {
			desc->desc_num = i;
			ls2x_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		ldma_sg->phys = buf_addr + period_len * i;
		ldma_sg->len = period_len;

		ls2x_dma_fill_desc(lchan, i, desc);
	}

	/* Close the ring: link the last descriptor back to the first */
	desc->sg[num_periods - 1].hw->ndesc_addr = desc->sg[0].llp | LDMA_DESC_EN;
	desc->sg[num_periods - 1].hw->high_ndesc_addr = upper_32_bits(desc->sg[0].llp);
	desc->cyclic = true;
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
}
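
/*
 * num_periods above is integer division, so a buf_len that is not a
 * whole multiple of period_len silently loses its tail. For example,
 * buf_len = 4096 with period_len = 1024 builds a ring of four
 * descriptors, with vchan_cyclic_callback() fired from the ISR at the
 * end of each period (the usual ALSA-style ring buffer pattern).
 */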

/*
 * ls2x_dma_slave_config - set slave configuration for channel
 * @chan: dma channel
 * @config: slave configuration
 *
 * Sets slave configuration for channel
 */
static int ls2x_dma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *config)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);

	memcpy(&lchan->sconfig, config, sizeof(*config));
	return 0;
}

/*
 * ls2x_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ls2x_dma_issue_pending(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&lchan->vchan.lock, flags);
	if (vchan_issue_pending(&lchan->vchan) && !lchan->desc)
		ls2x_dma_start_transfer(lchan);
	spin_unlock_irqrestore(&lchan->vchan.lock, flags);
}

/*
 * ls2x_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions.
 */
static int ls2x_dma_terminate_all(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&lchan->vchan.lock, flags);
	/* Issue the stop command */
	ls2x_dma_write_cmd(lchan, LDMA_STOP);
	if (lchan->desc) {
		vchan_terminate_vdesc(&lchan->desc->vdesc);
		lchan->desc = NULL;
	}

	vchan_get_all_descriptors(&lchan->vchan, &head);
	spin_unlock_irqrestore(&lchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&lchan->vchan, &head);
	return 0;
}
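
/*
 * vchan_terminate_vdesc() only detaches the active descriptor for
 * deferred freeing; clients must call dmaengine_synchronize() (which
 * ends up in ls2x_dma_synchronize() below) before freeing buffers the
 * hardware may still reference.
 */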

/*
 * ls2x_dma_synchronize - Synchronizes the termination of transfers to the
 * current context.
 * @chan: channel
 */
static void ls2x_dma_synchronize(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);

	vchan_synchronize(&lchan->vchan);
}

static int ls2x_dma_pause(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&lchan->vchan.lock, flags);
	if (lchan->desc && lchan->desc->status == DMA_IN_PROGRESS) {
		ls2x_dma_write_cmd(lchan, LDMA_STOP);
		lchan->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&lchan->vchan.lock, flags);

	return 0;
}

static int ls2x_dma_resume(struct dma_chan *chan)
{
	struct ls2x_dma_chan *lchan = to_ldma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&lchan->vchan.lock, flags);
	if (lchan->desc && lchan->desc->status == DMA_PAUSED) {
		lchan->desc->status = DMA_IN_PROGRESS;
		ls2x_dma_write_cmd(lchan, LDMA_START);
	}
	spin_unlock_irqrestore(&lchan->vchan.lock, flags);

	return 0;
}

/*
 * ls2x_dma_isr - LS2X DMA Interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to ls2x_dma_chan
 *
 * Return: IRQ_HANDLED (the handler always claims the interrupt)
 */
static irqreturn_t ls2x_dma_isr(int irq, void *dev_id)
{
	struct ls2x_dma_chan *lchan = dev_id;
	struct ls2x_dma_desc *desc;

	spin_lock(&lchan->vchan.lock);
	desc = lchan->desc;
	if (desc) {
		if (desc->cyclic) {
			vchan_cyclic_callback(&desc->vdesc);
		} else {
			desc->status = DMA_COMPLETE;
			vchan_cookie_complete(&desc->vdesc);
			ls2x_dma_start_transfer(lchan);
		}

		/* ls2x_dma_start_transfer() updates lchan->desc */
		if (!lchan->desc)
			ls2x_dma_write_cmd(lchan, LDMA_STOP);
	}
	spin_unlock(&lchan->vchan.lock);

	return IRQ_HANDLED;
}

static int ls2x_dma_chan_init(struct platform_device *pdev,
			      struct ls2x_dma_priv *priv)
{
	struct ls2x_dma_chan *lchan = &priv->lchan;
	struct device *dev = &pdev->dev;
	int ret;

	lchan->irq = platform_get_irq(pdev, 0);
	if (lchan->irq < 0)
		return lchan->irq;

	ret = devm_request_irq(dev, lchan->irq, ls2x_dma_isr, IRQF_TRIGGER_RISING,
			       dev_name(&pdev->dev), lchan);
	if (ret)
		return ret;

	/* Initialize channel-related values */
	INIT_LIST_HEAD(&priv->ddev.channels);
	lchan->vchan.desc_free = ls2x_dma_desc_free;
	vchan_init(&lchan->vchan, &priv->ddev);

	return 0;
}

/*
 * ls2x_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int ls2x_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ls2x_dma_priv *priv;
	struct dma_device *ddev;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return dev_err_probe(dev, PTR_ERR(priv->regs),
				     "devm_platform_ioremap_resource failed.\n");

	priv->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->dma_clk))
		return dev_err_probe(dev, PTR_ERR(priv->dma_clk), "devm_clk_get failed.\n");

	ret = clk_prepare_enable(priv->dma_clk);
	if (ret)
		return dev_err_probe(dev, ret, "clk_prepare_enable failed.\n");

	ret = ls2x_dma_chan_init(pdev, priv);
	if (ret)
		goto disable_clk;

	ddev = &priv->ddev;
	ddev->dev = dev;
	dma_cap_zero(ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, ddev->cap_mask);

	ddev->device_alloc_chan_resources = ls2x_dma_alloc_chan_resources;
	ddev->device_free_chan_resources = ls2x_dma_free_chan_resources;
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = ls2x_dma_issue_pending;
	ddev->device_prep_slave_sg = ls2x_dma_prep_slave_sg;
	ddev->device_prep_dma_cyclic = ls2x_dma_prep_dma_cyclic;
	ddev->device_config = ls2x_dma_slave_config;
	ddev->device_terminate_all = ls2x_dma_terminate_all;
	ddev->device_synchronize = ls2x_dma_synchronize;
	ddev->device_pause = ls2x_dma_pause;
	ddev->device_resume = ls2x_dma_resume;

	ddev->src_addr_widths = LDMA_SLAVE_BUSWIDTHS;
	ddev->dst_addr_widths = LDMA_SLAVE_BUSWIDTHS;
	ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	ret = dma_async_device_register(&priv->ddev);
	if (ret < 0)
		goto disable_clk;

	ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, priv);
	if (ret < 0)
		goto unregister_dmac;

	platform_set_drvdata(pdev, priv);

	dev_info(dev, "Loongson LS2X APB DMA driver registered successfully.\n");
	return 0;

unregister_dmac:
	dma_async_device_unregister(&priv->ddev);
disable_clk:
	clk_disable_unprepare(priv->dma_clk);

	return ret;
}

/*
 * ls2x_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void ls2x_dma_remove(struct platform_device *pdev)
{
	struct ls2x_dma_priv *priv = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->ddev);
	clk_disable_unprepare(priv->dma_clk);
}

static const struct of_device_id ls2x_dma_of_match_table[] = {
	{ .compatible = "loongson,ls2k1000-apbdma" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls2x_dma_of_match_table);
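
/*
 * Illustrative device tree node (register address, interrupt and clock
 * values are placeholders; consult the loongson,ls2k1000-apbdma
 * binding for real ones). #dma-cells = <1> matches the
 * of_dma_xlate_by_chan_id translator registered in probe:
 *
 *	apbdma0: dma-controller@1fe00c00 {
 *		compatible = "loongson,ls2k1000-apbdma";
 *		reg = <0x1fe00c00 0x8>;
 *		interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&clk LOONGSON2_APB_CLK>;
 *		#dma-cells = <1>;
 *	};
 */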

static struct platform_driver ls2x_dmac_driver = {
	.probe = ls2x_dma_probe,
	.remove = ls2x_dma_remove,
	.driver = {
		.name = "ls2x-apbdma",
		.of_match_table = ls2x_dma_of_match_table,
	},
};
module_platform_driver(ls2x_dmac_driver);

MODULE_DESCRIPTION("Loongson-2 APB DMA Controller driver");
MODULE_AUTHOR("Loongson Technology Corporation Limited");
MODULE_LICENSE("GPL");