Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/loongson/loongson2-apb-cmc-dma.c
170890 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* Loongson-2 Chain Multi-Channel DMA Controller driver
4
*
5
* Copyright (C) 2024-2026 Loongson Technology Corporation Limited
6
*/
7
8
#include <linux/acpi.h>
9
#include <linux/acpi_dma.h>
10
#include <linux/bitfield.h>
11
#include <linux/clk.h>
12
#include <linux/dma-mapping.h>
13
#include <linux/dmapool.h>
14
#include <linux/interrupt.h>
15
#include <linux/io.h>
16
#include <linux/module.h>
17
#include <linux/of.h>
18
#include <linux/of_dma.h>
19
#include <linux/platform_device.h>
20
#include <linux/slab.h>
21
22
#include "../dmaengine.h"
23
#include "../virt-dma.h"
24
25
/* Global registers, shared by all channels */
#define LOONGSON2_CMCDMA_ISR 0x0 /* DMA Interrupt Status Register */
#define LOONGSON2_CMCDMA_IFCR 0x4 /* DMA Interrupt Flag Clear Register */
/* Per-channel registers, replicated every lddev->chan_reg_offset bytes */
#define LOONGSON2_CMCDMA_CCR 0x8 /* DMA Channel Configuration Register */
#define LOONGSON2_CMCDMA_CNDTR 0xc /* DMA Channel Transmit Count Register */
#define LOONGSON2_CMCDMA_CPAR 0x10 /* DMA Channel Peripheral Address Register */
#define LOONGSON2_CMCDMA_CMAR 0x14 /* DMA Channel Memory Address Register */

/* Bitfields of DMA interrupt status register (one 4-bit nibble per channel) */
#define LOONGSON2_CMCDMA_TCI BIT(1) /* Transfer Complete Interrupt */
#define LOONGSON2_CMCDMA_HTI BIT(2) /* Half Transfer Interrupt */
#define LOONGSON2_CMCDMA_TEI BIT(3) /* Transfer Error Interrupt */

/* All interrupt flags of one channel's nibble */
#define LOONGSON2_CMCDMA_MASKI \
	(LOONGSON2_CMCDMA_TCI | LOONGSON2_CMCDMA_HTI | LOONGSON2_CMCDMA_TEI)

/* Bitfields of DMA channel x Configuration Register */
#define LOONGSON2_CMCDMA_CCR_EN BIT(0) /* Stream Enable */
#define LOONGSON2_CMCDMA_CCR_TCIE BIT(1) /* Transfer Complete Interrupt Enable */
#define LOONGSON2_CMCDMA_CCR_HTIE BIT(2) /* Half Transfer Complete Interrupt Enable */
#define LOONGSON2_CMCDMA_CCR_TEIE BIT(3) /* Transfer Error Interrupt Enable */
#define LOONGSON2_CMCDMA_CCR_DIR BIT(4) /* Data Transfer Direction */
#define LOONGSON2_CMCDMA_CCR_CIRC BIT(5) /* Circular mode */
#define LOONGSON2_CMCDMA_CCR_PINC BIT(6) /* Peripheral increment mode */
#define LOONGSON2_CMCDMA_CCR_MINC BIT(7) /* Memory increment mode */
/* P/MSIZE hold the bus width encoded as log2(bytes), see loongson2_cmc_dma_get_width() */
#define LOONGSON2_CMCDMA_CCR_PSIZE_MASK GENMASK(9, 8)
#define LOONGSON2_CMCDMA_CCR_MSIZE_MASK GENMASK(11, 10)
#define LOONGSON2_CMCDMA_CCR_PL_MASK GENMASK(13, 12) /* presumably priority level — unused here */
#define LOONGSON2_CMCDMA_CCR_M2M BIT(14) /* presumably memory-to-memory mode — unused here */

/* CCR bits a client may request through the second DT/ACPI dma cell */
#define LOONGSON2_CMCDMA_CCR_CFG_MASK \
	(LOONGSON2_CMCDMA_CCR_PINC | LOONGSON2_CMCDMA_CCR_MINC | LOONGSON2_CMCDMA_CCR_PL_MASK)

#define LOONGSON2_CMCDMA_CCR_IRQ_MASK \
	(LOONGSON2_CMCDMA_CCR_TCIE | LOONGSON2_CMCDMA_CCR_HTIE | LOONGSON2_CMCDMA_CCR_TEIE)

#define LOONGSON2_CMCDMA_STREAM_MASK \
	(LOONGSON2_CMCDMA_CCR_CFG_MASK | LOONGSON2_CMCDMA_CCR_IRQ_MASK)

#define LOONGSON2_CMCDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				    BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				    BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

/* NOTE(review): "LOONSON2" is missing a 'G'; kept as-is since every user spells it this way */
#define LOONSON2_CMCDMA_MAX_DATA_ITEMS SZ_64K
68
69
/*
 * Software shadow of the four per-channel hardware registers
 * (LOONGSON2_CMCDMA_CCR/CNDTR/CPAR/CMAR), programmed in one go
 * by loongson2_cmc_dma_start_transfer().
 */
struct loongson2_cmc_dma_chan_reg {
	u32 ccr;	/* channel configuration */
	u32 cndtr;	/* transfer item count */
	u32 cpar;	/* peripheral address */
	u32 cmar;	/* memory address */
};
75
76
/* One scatter-gather segment (or one cyclic period) of a transfer */
struct loongson2_cmc_dma_sg_req {
	u32 len;					/* segment length in bytes */
	struct loongson2_cmc_dma_chan_reg chan_reg;	/* register values for this segment */
};
80
81
/* Transfer descriptor: a virt-dma descriptor plus its list of segments */
struct loongson2_cmc_dma_desc {
	struct virt_dma_desc vdesc;	/* must stay first-class for to_lmdma_desc() */
	bool cyclic;			/* true for prep_dma_cyclic descriptors */
	u32 num_sgs;			/* number of entries in sg_req[] */
	struct loongson2_cmc_dma_sg_req sg_req[] __counted_by(num_sgs);
};
87
88
struct loongson2_cmc_dma_chan {
89
struct virt_dma_chan vchan;
90
struct dma_slave_config dma_sconfig;
91
struct loongson2_cmc_dma_desc *desc;
92
u32 id;
93
u32 irq;
94
u32 next_sg;
95
struct loongson2_cmc_dma_chan_reg chan_reg;
96
};
97
98
/* Controller instance state */
struct loongson2_cmc_dma_dev {
	struct dma_device ddev;		/* dmaengine device (embedded; see lmdma_get_dev) */
	struct clk *dma_clk;		/* optional functional clock */
	void __iomem *base;		/* MMIO register base */
	u32 nr_channels;		/* number of entries in chan[] */
	u32 chan_reg_offset;		/* stride between per-channel register banks */
	struct loongson2_cmc_dma_chan chan[] __counted_by(nr_channels);
};
106
107
/* Per-SoC match data */
struct loongson2_cmc_dma_config {
	u32 max_channels;	/* upper bound (and fallback) for dma-channels */
	u32 chan_reg_offset;	/* per-channel register bank stride */
};
111
112
/* Loongson 2K0300: up to 8 channels, 0x14-byte register stride */
static const struct loongson2_cmc_dma_config ls2k0300_cmc_dma_config = {
	.max_channels = 8,
	.chan_reg_offset = 0x14,
};
116
117
/* Loongson 2K3000: up to 4 channels, 0x18-byte register stride */
static const struct loongson2_cmc_dma_config ls2k3000_cmc_dma_config = {
	.max_channels = 4,
	.chan_reg_offset = 0x18,
};
121
122
static struct loongson2_cmc_dma_dev *lmdma_get_dev(struct loongson2_cmc_dma_chan *lchan)
123
{
124
return container_of(lchan->vchan.chan.device, struct loongson2_cmc_dma_dev, ddev);
125
}
126
127
static struct loongson2_cmc_dma_chan *to_lmdma_chan(struct dma_chan *chan)
128
{
129
return container_of(chan, struct loongson2_cmc_dma_chan, vchan.chan);
130
}
131
132
static struct loongson2_cmc_dma_desc *to_lmdma_desc(struct virt_dma_desc *vdesc)
133
{
134
return container_of(vdesc, struct loongson2_cmc_dma_desc, vdesc);
135
}
136
137
static struct device *chan2dev(struct loongson2_cmc_dma_chan *lchan)
138
{
139
return &lchan->vchan.chan.dev->device;
140
}
141
142
/* Read register @reg of channel @id (channel banks are chan_reg_offset apart). */
static u32 loongson2_cmc_dma_read(struct loongson2_cmc_dma_dev *lddev, u32 reg, u32 id)
{
	u32 offset = lddev->chan_reg_offset * id + reg;

	return readl(lddev->base + offset);
}
146
147
/* Write @val to register @reg of channel @id. */
static void loongson2_cmc_dma_write(struct loongson2_cmc_dma_dev *lddev, u32 reg, u32 id, u32 val)
{
	u32 offset = lddev->chan_reg_offset * id + reg;

	writel(val, lddev->base + offset);
}
151
152
static int loongson2_cmc_dma_get_width(enum dma_slave_buswidth width)
153
{
154
switch (width) {
155
case DMA_SLAVE_BUSWIDTH_1_BYTE:
156
case DMA_SLAVE_BUSWIDTH_2_BYTES:
157
case DMA_SLAVE_BUSWIDTH_4_BYTES:
158
return ffs(width) - 1;
159
default:
160
return -EINVAL;
161
}
162
}
163
164
static int loongson2_cmc_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *config)
165
{
166
struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
167
168
memcpy(&lchan->dma_sconfig, config, sizeof(*config));
169
170
return 0;
171
}
172
173
/*
 * Acknowledge the interrupt @flags of this channel. IFCR is a single shared
 * register in which each channel owns a 4-bit nibble.
 */
static void loongson2_cmc_dma_irq_clear(struct loongson2_cmc_dma_chan *lchan, u32 flags)
{
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);

	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_IFCR, 0, flags << (4 * lchan->id));
}
181
182
static void loongson2_cmc_dma_stop(struct loongson2_cmc_dma_chan *lchan)
183
{
184
struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
185
u32 ccr;
186
187
ccr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CCR, lchan->id);
188
ccr &= ~(LOONGSON2_CMCDMA_CCR_IRQ_MASK | LOONGSON2_CMCDMA_CCR_EN);
189
loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, lchan->id, ccr);
190
191
loongson2_cmc_dma_irq_clear(lchan, LOONGSON2_CMCDMA_MASKI);
192
}
193
194
/*
 * dmaengine device_terminate_all: abort the active descriptor (if any),
 * quiesce the hardware and reclaim every queued descriptor.
 */
static int loongson2_cmc_dma_terminate_all(struct dma_chan *chan)
{
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);

	LIST_HEAD(head);

	scoped_guard(spinlock_irqsave, &lchan->vchan.lock) {
		if (lchan->desc) {
			/* Hand the active desc back to virt-dma for deferred freeing */
			vchan_terminate_vdesc(&lchan->desc->vdesc);
			loongson2_cmc_dma_stop(lchan);
			lchan->desc = NULL;
		}
		vchan_get_all_descriptors(&lchan->vchan, &head);
	}

	/* Free the collected descriptors outside the vchan lock */
	vchan_dma_desc_free_list(&lchan->vchan, &head);

	return 0;
}
213
214
static void loongson2_cmc_dma_synchronize(struct dma_chan *chan)
215
{
216
struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
217
218
vchan_synchronize(&lchan->vchan);
219
}
220
221
/*
 * Program the hardware with the next segment of the active descriptor,
 * pulling a new descriptor off the vchan queue if none is active.
 * Caller must hold the vchan lock.
 */
static void loongson2_cmc_dma_start_transfer(struct loongson2_cmc_dma_chan *lchan)
{
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
	struct loongson2_cmc_dma_sg_req *sg_req;
	struct loongson2_cmc_dma_chan_reg *reg;
	struct virt_dma_desc *vdesc;

	/* Make sure the channel is quiescent before reprogramming it */
	loongson2_cmc_dma_stop(lchan);

	if (!lchan->desc) {
		vdesc = vchan_next_desc(&lchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);
		lchan->desc = to_lmdma_desc(vdesc);
		lchan->next_sg = 0;
	}

	/* Wrap for cyclic descriptors that have consumed all their periods */
	if (lchan->next_sg == lchan->desc->num_sgs)
		lchan->next_sg = 0;

	sg_req = &lchan->desc->sg_req[lchan->next_sg];
	reg = &sg_req->chan_reg;

	/* Load the shadow registers for this segment */
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, lchan->id, reg->ccr);
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CNDTR, lchan->id, reg->cndtr);
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CPAR, lchan->id, reg->cpar);
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CMAR, lchan->id, reg->cmar);

	lchan->next_sg++;

	/* Start DMA */
	reg->ccr |= LOONGSON2_CMCDMA_CCR_EN;
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, lchan->id, reg->ccr);
}
257
258
/*
 * Swap in the next period's memory address without a full channel reload;
 * used between periods of a multi-period cyclic transfer. Caller holds the
 * vchan lock and guarantees lchan->desc is valid.
 */
static void loongson2_cmc_dma_configure_next_sg(struct loongson2_cmc_dma_chan *lchan)
{
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
	struct loongson2_cmc_dma_sg_req *sg_req;
	u32 ccr, id = lchan->id;

	/* Wrap around to the first period when all have been consumed */
	if (lchan->next_sg == lchan->desc->num_sgs)
		lchan->next_sg = 0;

	/* Stop to update mem addr */
	ccr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CCR, id);
	ccr &= ~LOONGSON2_CMCDMA_CCR_EN;
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, id, ccr);

	sg_req = &lchan->desc->sg_req[lchan->next_sg];
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CMAR, id, sg_req->chan_reg.cmar);

	/* Start transition */
	ccr |= LOONGSON2_CMCDMA_CCR_EN;
	loongson2_cmc_dma_write(lddev, LOONGSON2_CMCDMA_CCR, id, ccr);
}
279
280
/*
 * Transfer-complete handling, called from the IRQ handler with the vchan
 * lock held. Cyclic descriptors run forever (advance to the next period);
 * slave_sg descriptors complete once all segments are done and the next
 * queued descriptor is started.
 */
static void loongson2_cmc_dma_handle_chan_done(struct loongson2_cmc_dma_chan *lchan)
{
	if (!lchan->desc)
		return;

	if (lchan->desc->cyclic) {
		vchan_cyclic_callback(&lchan->desc->vdesc);
		/* Single-period transfers use hardware CIRC mode: no register update needed */
		if (lchan->desc->num_sgs == 1)
			return;
		loongson2_cmc_dma_configure_next_sg(lchan);
		lchan->next_sg++;
	} else {
		if (lchan->next_sg == lchan->desc->num_sgs) {
			/* All segments done: report completion and drop the desc */
			vchan_cookie_complete(&lchan->desc->vdesc);
			lchan->desc = NULL;
		}
		/* Program the next segment, or the next queued descriptor */
		loongson2_cmc_dma_start_transfer(lchan);
	}
}
300
301
/*
 * Per-channel interrupt handler (registered IRQF_SHARED in probe).
 * Extracts this channel's nibble from the shared ISR, acks it, and
 * dispatches completion/error handling.
 */
static irqreturn_t loongson2_cmc_dma_chan_irq(int irq, void *devid)
{
	struct loongson2_cmc_dma_chan *lchan = devid;
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
	struct device *dev = chan2dev(lchan);
	u32 ists, status, ccr;

	scoped_guard(spinlock, &lchan->vchan.lock) {
		ccr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CCR, lchan->id);
		ists = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_ISR, 0);
		/* Each channel owns a 4-bit nibble of the shared status register */
		status = (ists >> (4 * lchan->id)) & LOONGSON2_CMCDMA_MASKI;

		loongson2_cmc_dma_irq_clear(lchan, status);

		if (status & LOONGSON2_CMCDMA_TCI) {
			loongson2_cmc_dma_handle_chan_done(lchan);
			status &= ~LOONGSON2_CMCDMA_TCI;
		}

		/* Half-transfer events are acknowledged but otherwise unused */
		if (status & LOONGSON2_CMCDMA_HTI)
			status &= ~LOONGSON2_CMCDMA_HTI;

		if (status & LOONGSON2_CMCDMA_TEI) {
			dev_err(dev, "DMA Transform Error.\n");
			/* EN cleared without software doing so means the HW shut the stream down */
			if (!(ccr & LOONGSON2_CMCDMA_CCR_EN))
				dev_err(dev, "Channel disabled by HW.\n");
		}
	}

	/* NOTE(review): returns IRQ_HANDLED even when status was 0 — confirm desired for a shared IRQ */
	return IRQ_HANDLED;
}
332
333
/*
 * dmaengine device_issue_pending: move submitted descriptors to the issued
 * list and kick the hardware if the channel is currently idle.
 */
static void loongson2_cmc_dma_issue_pending(struct dma_chan *chan)
{
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);

	guard(spinlock_irqsave)(&lchan->vchan.lock);

	if (vchan_issue_pending(&lchan->vchan) && !lchan->desc) {
		dev_dbg(chan2dev(lchan), "vchan %pK: issued\n", &lchan->vchan);
		loongson2_cmc_dma_start_transfer(lchan);
	}
}
344
345
/*
 * Build the direction-dependent parts of the channel registers from the
 * latched slave config: peripheral address, transfer direction, and the
 * P/MSIZE width fields. Returns 0 or a negative errno for unsupported
 * directions/widths.
 *
 * @buswidth: out parameter, set to the peripheral bus width in bytes.
 * @buf_len: currently unused.
 */
static int loongson2_cmc_dma_set_xfer_param(struct loongson2_cmc_dma_chan *lchan,
					    enum dma_transfer_direction direction,
					    enum dma_slave_buswidth *buswidth, u32 buf_len)
{
	struct dma_slave_config sconfig = lchan->dma_sconfig;
	struct device *dev = chan2dev(lchan);
	int dev_width;
	u32 ccr;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dev_width = loongson2_cmc_dma_get_width(sconfig.dst_addr_width);
		if (dev_width < 0) {
			dev_err(dev, "DMA_MEM_TO_DEV bus width not supported\n");
			return dev_width;
		}
		lchan->chan_reg.cpar = sconfig.dst_addr;
		ccr = LOONGSON2_CMCDMA_CCR_DIR;
		*buswidth = sconfig.dst_addr_width;
		break;
	case DMA_DEV_TO_MEM:
		dev_width = loongson2_cmc_dma_get_width(sconfig.src_addr_width);
		if (dev_width < 0) {
			dev_err(dev, "DMA_DEV_TO_MEM bus width not supported\n");
			return dev_width;
		}
		lchan->chan_reg.cpar = sconfig.src_addr;
		ccr = LOONGSON2_CMCDMA_CCR_MINC;
		*buswidth = sconfig.src_addr_width;
		break;
	default:
		return -EINVAL;
	}

	/* Both size fields carry the same log2(bytes) encoding */
	ccr |= FIELD_PREP(LOONGSON2_CMCDMA_CCR_PSIZE_MASK, dev_width) |
	       FIELD_PREP(LOONGSON2_CMCDMA_CCR_MSIZE_MASK, dev_width);

	/* Set DMA control register */
	/* NOTE(review): only the size fields are cleared; DIR/MINC set by a previous
	 * prep in the other direction would persist — confirm intended. */
	lchan->chan_reg.ccr &= ~(LOONGSON2_CMCDMA_CCR_PSIZE_MASK | LOONGSON2_CMCDMA_CCR_MSIZE_MASK);
	lchan->chan_reg.ccr |= ccr;

	return 0;
}
388
389
static struct dma_async_tx_descriptor *
390
loongson2_cmc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, u32 sg_len,
391
enum dma_transfer_direction direction,
392
unsigned long flags, void *context)
393
{
394
struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
395
struct loongson2_cmc_dma_desc *desc;
396
enum dma_slave_buswidth buswidth;
397
struct scatterlist *sg;
398
u32 num_items, i;
399
int ret;
400
401
desc = kzalloc_flex(*desc, sg_req, sg_len, GFP_NOWAIT);
402
if (!desc)
403
return ERR_PTR(-ENOMEM);
404
405
for_each_sg(sgl, sg, sg_len, i) {
406
ret = loongson2_cmc_dma_set_xfer_param(lchan, direction, &buswidth, sg_dma_len(sg));
407
if (ret)
408
return ERR_PTR(ret);
409
410
num_items = DIV_ROUND_UP(sg_dma_len(sg), buswidth);
411
if (num_items >= LOONSON2_CMCDMA_MAX_DATA_ITEMS) {
412
dev_err(chan2dev(lchan), "Number of items not supported\n");
413
kfree(desc);
414
return ERR_PTR(-EINVAL);
415
}
416
417
desc->sg_req[i].len = sg_dma_len(sg);
418
desc->sg_req[i].chan_reg.ccr = lchan->chan_reg.ccr;
419
desc->sg_req[i].chan_reg.cpar = lchan->chan_reg.cpar;
420
desc->sg_req[i].chan_reg.cmar = sg_dma_address(sg);
421
desc->sg_req[i].chan_reg.cndtr = num_items;
422
}
423
424
desc->num_sgs = sg_len;
425
desc->cyclic = false;
426
427
return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
428
}
429
430
/*
 * dmaengine device_prep_dma_cyclic: split @buf_len into buf_len/period_len
 * periods, one sg_req each. A single-period buffer uses the hardware CIRC
 * mode so it restarts with no software intervention; multi-period buffers
 * are re-armed per period by the completion handler.
 */
static struct dma_async_tx_descriptor *
loongson2_cmc_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
				  size_t period_len, enum dma_transfer_direction direction,
				  unsigned long flags)
{
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
	struct loongson2_cmc_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, num_items, i;
	int ret;

	/* The buffer must divide evenly into periods */
	if (unlikely(buf_len % period_len))
		return ERR_PTR(-EINVAL);

	ret = loongson2_cmc_dma_set_xfer_param(lchan, direction, &buswidth, period_len);
	if (ret)
		return ERR_PTR(ret);

	/* CNDTR counts bus-width items, not bytes */
	num_items = DIV_ROUND_UP(period_len, buswidth);
	if (num_items >= LOONSON2_CMCDMA_MAX_DATA_ITEMS) {
		dev_err(chan2dev(lchan), "Number of items not supported\n");
		return ERR_PTR(-EINVAL);
	}

	/* Enable Circular mode */
	/* NOTE(review): CIRC is only ever set here and cleared by the memset in
	 * xlate/filter, so it may persist into later preps on this channel — confirm. */
	if (buf_len == period_len)
		lchan->chan_reg.ccr |= LOONGSON2_CMCDMA_CCR_CIRC;

	num_periods = DIV_ROUND_UP(buf_len, period_len);
	desc = kzalloc_flex(*desc, sg_req, num_periods, GFP_NOWAIT);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;
		desc->sg_req[i].chan_reg.ccr = lchan->chan_reg.ccr;
		desc->sg_req[i].chan_reg.cpar = lchan->chan_reg.cpar;
		desc->sg_req[i].chan_reg.cmar = buf_addr;
		desc->sg_req[i].chan_reg.cndtr = num_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&lchan->vchan, &desc->vdesc, flags);
}
477
478
/*
 * Compute the residue (bytes not yet transferred) of @desc starting from
 * segment @next_sg: the hardware's remaining item count for the current
 * segment plus the full length of all segments not yet programmed.
 *
 * Fix: the cyclic check read lchan->desc->cyclic instead of desc->cyclic.
 * tx_status also calls this for a still-queued descriptor (when no
 * descriptor is active, lchan->desc is NULL), which dereferenced a NULL
 * pointer and consulted the wrong descriptor.
 */
static size_t loongson2_cmc_dma_desc_residue(struct loongson2_cmc_dma_chan *lchan,
					     struct loongson2_cmc_dma_desc *desc, u32 next_sg)
{
	struct loongson2_cmc_dma_dev *lddev = lmdma_get_dev(lchan);
	u32 residue, width, ndtr, ccr, i;

	ccr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CCR, lchan->id);
	width = FIELD_GET(LOONGSON2_CMCDMA_CCR_PSIZE_MASK, ccr);

	/* Remaining items in the current segment, scaled to bytes */
	ndtr = loongson2_cmc_dma_read(lddev, LOONGSON2_CMCDMA_CNDTR, lchan->id);
	residue = ndtr << width;

	/* A wrapped cyclic transfer has no "remaining" segments to add */
	if (desc->cyclic && next_sg == 0)
		return residue;

	for (i = next_sg; i < desc->num_sgs; i++)
		residue += desc->sg_req[i].len;

	return residue;
}
498
499
/*
 * dmaengine device_tx_status: cookie state plus residue. The residue is
 * computed from the live hardware counters for the active descriptor, or
 * from the segment lengths for a descriptor still sitting in the queue.
 */
static enum dma_status loongson2_cmc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
						   struct dma_tx_state *state)
{
	struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	scoped_guard(spinlock_irqsave, &lchan->vchan.lock) {
		vdesc = vchan_find_desc(&lchan->vchan, cookie);
		if (lchan->desc && cookie == lchan->desc->vdesc.tx.cookie)
			/* Cookie belongs to the in-flight descriptor */
			state->residue = loongson2_cmc_dma_desc_residue(lchan, lchan->desc,
									lchan->next_sg);
		else if (vdesc)
			/* Still queued: nothing transferred yet */
			state->residue = loongson2_cmc_dma_desc_residue(lchan,
									to_lmdma_desc(vdesc), 0);
	}

	return status;
}
522
523
/* dmaengine device_free_chan_resources: drain and free virt-dma state. */
static void loongson2_cmc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct virt_dma_chan *vchan = to_virt_chan(chan);

	vchan_free_chan_resources(vchan);
}
527
528
/* virt-dma desc_free callback: release a descriptor and its sg_req tail. */
static void loongson2_cmc_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct loongson2_cmc_dma_desc *desc = to_lmdma_desc(vdesc);

	kfree(desc);
}
532
533
static bool loongson2_cmc_dma_acpi_filter(struct dma_chan *chan, void *param)
534
{
535
struct loongson2_cmc_dma_chan *lchan = to_lmdma_chan(chan);
536
struct acpi_dma_spec *dma_spec = param;
537
538
memset(&lchan->chan_reg, 0, sizeof(struct loongson2_cmc_dma_chan_reg));
539
lchan->chan_reg.ccr = dma_spec->chan_id & LOONGSON2_CMCDMA_STREAM_MASK;
540
541
return true;
542
}
543
544
static int loongson2_cmc_dma_acpi_controller_register(struct loongson2_cmc_dma_dev *lddev)
545
{
546
struct device *dev = lddev->ddev.dev;
547
struct acpi_dma_filter_info *info;
548
549
if (!is_acpi_node(dev_fwnode(dev)))
550
return 0;
551
552
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
553
if (!info)
554
return -ENOMEM;
555
556
dma_cap_zero(info->dma_cap);
557
info->dma_cap = lddev->ddev.cap_mask;
558
info->filter_fn = loongson2_cmc_dma_acpi_filter;
559
560
return devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
561
}
562
563
/*
 * DT xlate: cell 0 selects the channel index, cell 1 carries the
 * client-selectable CCR bits (masked by LOONGSON2_CMCDMA_STREAM_MASK).
 */
static struct dma_chan *loongson2_cmc_dma_of_xlate(struct of_phandle_args *dma_spec,
						   struct of_dma *ofdma)
{
	struct loongson2_cmc_dma_dev *lddev = ofdma->of_dma_data;
	struct device *dev = lddev->ddev.dev;
	struct loongson2_cmc_dma_chan *lchan;
	struct dma_chan *chan;

	if (dma_spec->args_count < 2)
		return ERR_PTR(-EINVAL);

	if (dma_spec->args[0] >= lddev->nr_channels) {
		dev_err(dev, "Invalid channel id.\n");
		return ERR_PTR(-EINVAL);
	}

	lchan = &lddev->chan[dma_spec->args[0]];
	chan = dma_get_slave_channel(&lchan->vchan.chan);
	if (!chan) {
		dev_err(dev, "No more channels available.\n");
		return ERR_PTR(-EINVAL);
	}

	/* Reset the register template and seed CCR from the second dma cell */
	memset(&lchan->chan_reg, 0, sizeof(struct loongson2_cmc_dma_chan_reg));
	lchan->chan_reg.ccr = dma_spec->args[1] & LOONGSON2_CMCDMA_STREAM_MASK;

	return chan;
}
591
592
static int loongson2_cmc_dma_of_controller_register(struct loongson2_cmc_dma_dev *lddev)
593
{
594
struct device *dev = lddev->ddev.dev;
595
596
if (!is_of_node(dev_fwnode(dev)))
597
return 0;
598
599
return of_dma_controller_register(dev->of_node, loongson2_cmc_dma_of_xlate, lddev);
600
}
601
602
static int loongson2_cmc_dma_probe(struct platform_device *pdev)
603
{
604
const struct loongson2_cmc_dma_config *config;
605
struct loongson2_cmc_dma_chan *lchan;
606
struct loongson2_cmc_dma_dev *lddev;
607
struct device *dev = &pdev->dev;
608
struct dma_device *ddev;
609
u32 nr_chans, i;
610
int ret;
611
612
config = (const struct loongson2_cmc_dma_config *)device_get_match_data(dev);
613
if (!config)
614
return -EINVAL;
615
616
ret = device_property_read_u32(dev, "dma-channels", &nr_chans);
617
if (ret || nr_chans > config->max_channels) {
618
dev_err(dev, "missing or invalid dma-channels property\n");
619
nr_chans = config->max_channels;
620
}
621
622
lddev = devm_kzalloc(dev, struct_size(lddev, chan, nr_chans), GFP_KERNEL);
623
if (!lddev)
624
return -ENOMEM;
625
626
lddev->base = devm_platform_ioremap_resource(pdev, 0);
627
if (IS_ERR(lddev->base))
628
return PTR_ERR(lddev->base);
629
630
platform_set_drvdata(pdev, lddev);
631
lddev->nr_channels = nr_chans;
632
lddev->chan_reg_offset = config->chan_reg_offset;
633
634
lddev->dma_clk = devm_clk_get_optional_enabled(dev, NULL);
635
if (IS_ERR(lddev->dma_clk))
636
return dev_err_probe(dev, PTR_ERR(lddev->dma_clk), "Failed to get dma clock\n");
637
638
ddev = &lddev->ddev;
639
ddev->dev = dev;
640
641
dma_cap_zero(ddev->cap_mask);
642
dma_cap_set(DMA_SLAVE, ddev->cap_mask);
643
dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
644
dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
645
646
ddev->device_free_chan_resources = loongson2_cmc_dma_free_chan_resources;
647
ddev->device_config = loongson2_cmc_dma_slave_config;
648
ddev->device_prep_slave_sg = loongson2_cmc_dma_prep_slave_sg;
649
ddev->device_prep_dma_cyclic = loongson2_cmc_dma_prep_dma_cyclic;
650
ddev->device_issue_pending = loongson2_cmc_dma_issue_pending;
651
ddev->device_synchronize = loongson2_cmc_dma_synchronize;
652
ddev->device_tx_status = loongson2_cmc_dma_tx_status;
653
ddev->device_terminate_all = loongson2_cmc_dma_terminate_all;
654
655
ddev->max_sg_burst = LOONSON2_CMCDMA_MAX_DATA_ITEMS;
656
ddev->src_addr_widths = LOONGSON2_CMCDMA_BUSWIDTHS;
657
ddev->dst_addr_widths = LOONGSON2_CMCDMA_BUSWIDTHS;
658
ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
659
INIT_LIST_HEAD(&ddev->channels);
660
661
for (i = 0; i < nr_chans; i++) {
662
lchan = &lddev->chan[i];
663
664
lchan->id = i;
665
lchan->vchan.desc_free = loongson2_cmc_dma_desc_free;
666
vchan_init(&lchan->vchan, ddev);
667
}
668
669
ret = dmaenginem_async_device_register(ddev);
670
if (ret)
671
return dev_err_probe(dev, ret, "Failed to register DMA engine device.\n");
672
673
for (i = 0; i < nr_chans; i++) {
674
lchan = &lddev->chan[i];
675
676
lchan->irq = platform_get_irq(pdev, i);
677
if (lchan->irq < 0)
678
return lchan->irq;
679
680
ret = devm_request_irq(dev, lchan->irq, loongson2_cmc_dma_chan_irq, IRQF_SHARED,
681
dev_name(chan2dev(lchan)), lchan);
682
if (ret)
683
return ret;
684
}
685
686
ret = loongson2_cmc_dma_acpi_controller_register(lddev);
687
if (ret)
688
return dev_err_probe(dev, ret, "Failed to register dma controller with ACPI.\n");
689
690
ret = loongson2_cmc_dma_of_controller_register(lddev);
691
if (ret)
692
return dev_err_probe(dev, ret, "Failed to register dma controller with FDT.\n");
693
694
dev_info(dev, "Loongson-2 Multi-Channel DMA Controller registered successfully.\n");
695
696
return 0;
697
}
698
699
static void loongson2_cmc_dma_remove(struct platform_device *pdev)
700
{
701
of_dma_controller_free(pdev->dev.of_node);
702
}
703
704
/* DT match table: per-SoC config carried as match data */
static const struct of_device_id loongson2_cmc_dma_of_match[] = {
	{ .compatible = "loongson,ls2k0300-dma", .data = &ls2k0300_cmc_dma_config },
	{ .compatible = "loongson,ls2k3000-dma", .data = &ls2k3000_cmc_dma_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, loongson2_cmc_dma_of_match);

/* ACPI match table: only the 2K3000 variant is enumerated via ACPI */
static const struct acpi_device_id loongson2_cmc_dma_acpi_match[] = {
	{ "LOON0014", .driver_data = (kernel_ulong_t)&ls2k3000_cmc_dma_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, loongson2_cmc_dma_acpi_match);
716
717
/* Platform driver glue; probe/remove above, DT and ACPI matching */
static struct platform_driver loongson2_cmc_dma_driver = {
	.driver = {
		.name = "loongson2-apb-cmc-dma",
		.of_match_table = loongson2_cmc_dma_of_match,
		.acpi_match_table = loongson2_cmc_dma_acpi_match,
	},
	.probe = loongson2_cmc_dma_probe,
	.remove = loongson2_cmc_dma_remove,
};
module_platform_driver(loongson2_cmc_dma_driver);

MODULE_DESCRIPTION("Loongson-2 Chain Multi-Channel DMA Controller driver");
MODULE_AUTHOR("Loongson Technology Corporation Limited");
MODULE_LICENSE("GPL");
731
732