GitHub Repository: torvalds/linux
Path: blob/master/drivers/dma/hsu/hsu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <[email protected]>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 */

/*
 * DMA channel allocation:
 * 1. Even-numbered channels are used for DMA Read (UART TX), odd-numbered
 *    channels for DMA Write (UART RX).
 * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1, channels
 *    4/5 to port 2, and so on.
 */
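
/*
 * For illustration (editorial note, not part of the upstream file): the
 * mapping above is linear, so a given port's channels can be computed as
 *
 *	tx_chan = port * 2;		// even: DMA Read (UART TX)
 *	rx_chan = port * 2 + 1;		// odd:  DMA Write (UART RX)
 *
 * e.g. port 1 uses channel 2 for TX and channel 3 for RX.
 */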

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>

#include "hsu.h"

#define HSU_DMA_BUSWIDTHS				\
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	|	\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_3_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)		|	\
	BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
	hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
	u32 cr = HSU_CH_CR_CHA;

	if (hsuc->direction == DMA_MEM_TO_DEV)
		cr &= ~HSU_CH_CR_CHD;
	else if (hsuc->direction == DMA_DEV_TO_MEM)
		cr |= HSU_CH_CR_CHD;

	hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
	struct dma_slave_config *config = &hsuc->config;
	struct hsu_dma_desc *desc = hsuc->desc;
	u32 bsr = 0, mtsr = 0;	/* to shut the compiler up */
	u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
	unsigned int i, count;

	if (hsuc->direction == DMA_MEM_TO_DEV) {
		bsr = config->dst_maxburst;
		mtsr = config->dst_addr_width;
	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
		bsr = config->src_maxburst;
		mtsr = config->src_addr_width;
	}

	hsu_chan_disable(hsuc);

	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
	hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
	hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

	/* Set descriptors */
	count = desc->nents - desc->active;
	for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
		hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
		hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

		/* Prepare value for DCR */
		dcr |= HSU_CH_DCR_DESCA(i);
		dcr |= HSU_CH_DCR_CHTOI(i);	/* timeout bit, see HSU Errata 1 */

		desc->active++;
	}
	/* Only for the last descriptor in the chain */
	dcr |= HSU_CH_DCR_CHSOD(count - 1);
	dcr |= HSU_CH_DCR_CHDI(count - 1);

	hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

	hsu_chan_enable(hsuc);
}
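
/*
 * Editorial note: a descriptor list longer than HSU_DMA_CHAN_NR_DESC is
 * programmed in batches. hsu_dma_chan_start() loads at most
 * HSU_DMA_CHAN_NR_DESC entries and advances desc->active; when a batch
 * completes, hsu_dma_do_irq() sees desc->active < desc->nents and calls
 * hsu_dma_start_channel() again to program the next batch.
 */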

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
	hsu_dma_chan_start(hsuc);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
	struct virt_dma_desc *vdesc;

	/* Get the next descriptor */
	vdesc = vchan_next_desc(&hsuc->vchan);
	if (!vdesc) {
		hsuc->desc = NULL;
		return;
	}

	list_del(&vdesc->node);
	hsuc->desc = to_hsu_dma_desc(vdesc);

	/* Start the channel with a new descriptor */
	hsu_dma_start_channel(hsuc);
}

/*
 * hsu_dma_get_status() - get DMA channel status
 * @chip: HSUART DMA chip
 * @nr: DMA channel number
 * @status: pointer for DMA Channel Status Register value
 *
 * Description:
 * The function reads and clears the DMA Channel Status Register, checks
 * if it was a timeout interrupt and returns a corresponding value.
 *
 * Caller should provide a valid pointer for the DMA Channel Status
 * Register value that will be returned in @status.
 *
 * Return:
 * 1 for DMA timeout status, 0 for other DMA status, or error code for
 * invalid parameters or no interrupt pending.
 */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
		       u32 *status)
{
	struct hsu_dma_chan *hsuc;
	unsigned long flags;
	u32 sr;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return -EINVAL;

	hsuc = &chip->hsu->chan[nr];

	/*
	 * The IRQ status must be read and cleared in any case; there is a
	 * hardware bug, see Errata 5, HSD 2900918.
	 */
	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	/* Check if any interrupt is pending */
	sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
	if (!sr)
		return -EIO;

	/* Timeout IRQ: need to wait some time, see Errata 2 */
	if (sr & HSU_CH_SR_DESCTO_ANY)
		udelay(2);

	/*
	 * At this point, at least one of Descriptor Time Out, Channel Error
	 * or Descriptor Done bits must be set. Clear the Descriptor Time Out
	 * bits and if sr is still non-zero, it must be channel error or
	 * descriptor done, which are higher priority than timeout and handled
	 * in hsu_dma_do_irq(). Otherwise, it must be a timeout.
	 */
	sr &= ~HSU_CH_SR_DESCTO_ANY;

	*status = sr;

	return sr ? 0 : 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_get_status);

/*
 * hsu_dma_do_irq() - DMA interrupt handler
 * @chip: HSUART DMA chip
 * @nr: DMA channel number
 * @status: Channel Status Register value
 *
 * Description:
 * This function handles Channel Error and Descriptor Done interrupts.
 * This function should be called after determining that the DMA interrupt
 * is not a normal timeout interrupt, i.e. hsu_dma_get_status() returned 0.
 *
 * Return:
 * 0 for invalid channel number, 1 otherwise.
 */
int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
	struct dma_chan_percpu *stat;
	struct hsu_dma_chan *hsuc;
	struct hsu_dma_desc *desc;
	unsigned long flags;

	/* Sanity check */
	if (nr >= chip->hsu->nr_channels)
		return 0;

	hsuc = &chip->hsu->chan[nr];
	stat = this_cpu_ptr(hsuc->vchan.chan.local);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	desc = hsuc->desc;
	if (desc) {
		if (status & HSU_CH_SR_CHE) {
			desc->status = DMA_ERROR;
		} else if (desc->active < desc->nents) {
			/* Program the next batch of descriptors */
			hsu_dma_start_channel(hsuc);
		} else {
			vchan_cookie_complete(&desc->vdesc);
			desc->status = DMA_COMPLETE;
			stat->bytes_transferred += desc->length;
			hsu_dma_start_transfer(hsuc);
		}
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
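
/*
 * Illustrative caller sketch (editorial addition; the names are
 * hypothetical, not part of this file). A UART driver's interrupt handler
 * would combine the two exported helpers above roughly like this, claiming
 * the timeout case itself and deferring error/completion handling to
 * hsu_dma_do_irq():
 *
 *	static irqreturn_t serial_hsu_dma_irq(int irq, void *dev_id)
 *	{
 *		struct hsu_dma_chip *chip = dev_id;
 *		u32 status;
 *		int ret;
 *
 *		ret = hsu_dma_get_status(chip, nr, &status);
 *		if (ret < 0)
 *			return IRQ_NONE;	// no interrupt pending
 *		if (ret > 0)
 *			return IRQ_HANDLED;	// timeout, handled on UART side
 *
 *		return hsu_dma_do_irq(chip, nr, status) ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */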

static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
	struct hsu_dma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
	if (!desc->sg) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

	kfree(desc->sg);
	kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct hsu_dma_desc *desc;
	struct scatterlist *sg;
	unsigned int i;

	desc = hsu_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		desc->sg[i].addr = sg_dma_address(sg);
		desc->sg[i].len = sg_dma_len(sg);

		desc->length += sg_dma_len(sg);
	}

	desc->nents = sg_len;
	desc->direction = direction;
	/* desc->active = 0 by kzalloc */
	desc->status = DMA_IN_PROGRESS;

	return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
		hsu_dma_start_transfer(hsuc);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}
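
/*
 * Illustrative client-side sketch (editorial addition; the variables are
 * hypothetical). A consumer drives this engine through the generic
 * dmaengine API, which dispatches into the device_* callbacks this driver
 * registers in hsu_dma_probe():
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_dma_addr,		// UART RX FIFO
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst = 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	// -> hsu_dma_slave_config()
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents,
 *				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);	// queue on the virtual channel
 *	dma_async_issue_pending(chan);		// -> hsu_dma_issue_pending()
 */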

static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
	struct hsu_dma_desc *desc = hsuc->desc;
	size_t bytes = 0;
	int i;

	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;

	i = HSU_DMA_CHAN_NR_DESC - 1;
	do {
		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
	} while (--i >= 0);

	return bytes;
}
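
/*
 * Editorial note: the residue reported for the active descriptor is the sum
 * of the sg entries not yet programmed (index desc->active onwards) plus
 * whatever HSU_CH_DxTSR still reports for the up-to-HSU_DMA_CHAN_NR_DESC
 * descriptors currently loaded in hardware.
 */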

static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	size_t bytes;
	unsigned long flags;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	vdesc = vchan_find_desc(&hsuc->vchan, cookie);
	if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
		bytes = hsu_dma_active_desc_size(hsuc);
		dma_set_residue(state, bytes);
		status = hsuc->desc->status;
	} else if (vdesc) {
		bytes = to_hsu_dma_desc(vdesc)->length;
		dma_set_residue(state, bytes);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return status;
}

static int hsu_dma_slave_config(struct dma_chan *chan,
				struct dma_slave_config *config)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	memcpy(&hsuc->config, config, sizeof(hsuc->config));

	return 0;
}

static int hsu_dma_pause(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
		hsu_chan_disable(hsuc);
		hsuc->desc->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
		hsuc->desc->status = DMA_IN_PROGRESS;
		hsu_chan_enable(hsuc);
	}
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

	return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&hsuc->vchan.lock, flags);

	hsu_dma_stop_channel(hsuc);
	if (hsuc->desc) {
		hsu_dma_desc_free(&hsuc->desc->vdesc);
		hsuc->desc = NULL;
	}

	vchan_get_all_descriptors(&hsuc->vchan, &head);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	vchan_dma_desc_free_list(&hsuc->vchan, &head);

	return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
	vchan_free_chan_resources(to_virt_chan(chan));
}

static void hsu_dma_synchronize(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

	vchan_synchronize(&hsuc->vchan);
}

int hsu_dma_probe(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu;
	void __iomem *addr = chip->regs + chip->offset;
	unsigned short i;
	int ret;

	hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
	if (!hsu)
		return -ENOMEM;

	chip->hsu = hsu;

	/* Calculate nr_channels from the IO space length */
	hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;

	hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
				 sizeof(*hsu->chan), GFP_KERNEL);
	if (!hsu->chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&hsu->dma.channels);
	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		hsuc->vchan.desc_free = hsu_dma_desc_free;
		vchan_init(&hsuc->vchan, &hsu->dma);

		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
	}

	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

	hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

	hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

	hsu->dma.device_issue_pending = hsu_dma_issue_pending;
	hsu->dma.device_tx_status = hsu_dma_tx_status;

	hsu->dma.device_config = hsu_dma_slave_config;
	hsu->dma.device_pause = hsu_dma_pause;
	hsu->dma.device_resume = hsu_dma_resume;
	hsu->dma.device_terminate_all = hsu_dma_terminate_all;
	hsu->dma.device_synchronize = hsu_dma_synchronize;

	hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
	hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	hsu->dma.dev = chip->dev;

	dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);

	ret = dma_async_device_register(&hsu->dma);
	if (ret)
		return ret;

	dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);
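
/*
 * Editorial example of the nr_channels calculation above, assuming the
 * usual HSU_DMA_CHAN_LENGTH of 0x40 from hsu.h: an MMIO window with
 * chip->length = 0x400 and chip->offset = 0x200 yields
 * (0x400 - 0x200) / 0x40 = 8 channels, i.e. four UART ports with one TX
 * and one RX channel each, matching the allocation comment at the top.
 */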

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
	struct hsu_dma *hsu = chip->hsu;
	unsigned short i;

	dma_async_device_unregister(&hsu->dma);

	for (i = 0; i < hsu->nr_channels; i++) {
		struct hsu_dma_chan *hsuc = &hsu->chan[i];

		tasklet_kill(&hsuc->vchan.task);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <[email protected]>");