GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * The driver for Freescale MPC512x LocalPlus Bus FIFO
 * (called SCLPC in the Reference Manual).
 *
 * Copyright (C) 2013-2015 Alexander Popov <[email protected]>.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <asm/mpc5121.h>
#include <asm/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>

#define DRV_NAME "mpc512x_lpbfifo"

struct cs_range {
        u32 csnum;
        u32 base; /* must be zero */
        u32 addr;
        u32 size;
};

static struct lpbfifo_data {
        spinlock_t lock; /* for protecting lpbfifo_data */
        phys_addr_t regs_phys;
        resource_size_t regs_size;
        struct mpc512x_lpbfifo __iomem *regs;
        int irq;
        struct cs_range *cs_ranges;
        size_t cs_n;
        struct dma_chan *chan;
        struct mpc512x_lpbfifo_request *req;
        dma_addr_t ram_bus_addr;
        bool wait_lpbfifo_irq;
        bool wait_lpbfifo_callback;
} lpbfifo;

/*
 * A data transfer from RAM to some device on the LPB is finished
 * when both mpc512x_lpbfifo_irq() and mpc512x_lpbfifo_callback()
 * have been called. We execute the callback registered in
 * mpc512x_lpbfifo_request just after that.
 * But for a data transfer from some device on the LPB to RAM we don't
 * enable the LPBFIFO interrupt, because clearing the MPC512X_SCLPC_SUCCESS
 * interrupt flag automatically disables the LPBFIFO read request to the
 * DMA controller and the data transfer hangs. So the callback registered
 * in mpc512x_lpbfifo_request is executed at the end of
 * mpc512x_lpbfifo_callback().
 */

/*
 * mpc512x_lpbfifo_irq - IRQ handler for LPB FIFO
 */
static irqreturn_t mpc512x_lpbfifo_irq(int irq, void *param)
{
        struct device *dev = (struct device *)param;
        struct mpc512x_lpbfifo_request *req = NULL;
        unsigned long flags;
        u32 status;

        spin_lock_irqsave(&lpbfifo.lock, flags);

        if (!lpbfifo.regs)
                goto end;

        req = lpbfifo.req;
        if (!req || req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) {
                dev_err(dev, "bogus LPBFIFO IRQ\n");
                goto end;
        }

        status = in_be32(&lpbfifo.regs->status);
        if (status != MPC512X_SCLPC_SUCCESS) {
                dev_err(dev, "DMA transfer from RAM to peripheral failed\n");
                out_be32(&lpbfifo.regs->enable,
                         MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
                goto end;
        }
        /* Clear the interrupt flag */
        out_be32(&lpbfifo.regs->status, MPC512X_SCLPC_SUCCESS);

        lpbfifo.wait_lpbfifo_irq = false;

        if (lpbfifo.wait_lpbfifo_callback)
                goto end;

        /* Transfer is finished, set the FIFO as idle */
        lpbfifo.req = NULL;

        spin_unlock_irqrestore(&lpbfifo.lock, flags);

        if (req->callback)
                req->callback(req);

        return IRQ_HANDLED;

end:
        spin_unlock_irqrestore(&lpbfifo.lock, flags);
        return IRQ_HANDLED;
}

/*
 * mpc512x_lpbfifo_callback is called by the DMA driver when
 * the DMA transaction is finished.
 */
static void mpc512x_lpbfifo_callback(void *param)
{
        unsigned long flags;
        struct mpc512x_lpbfifo_request *req = NULL;
        enum dma_data_direction dir;

        spin_lock_irqsave(&lpbfifo.lock, flags);

        if (!lpbfifo.regs) {
                spin_unlock_irqrestore(&lpbfifo.lock, flags);
                return;
        }

        req = lpbfifo.req;
        if (!req) {
                pr_err("bogus LPBFIFO callback\n");
                spin_unlock_irqrestore(&lpbfifo.lock, flags);
                return;
        }

        /* Release the mapping */
        if (req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
                dir = DMA_TO_DEVICE;
        else
                dir = DMA_FROM_DEVICE;
        dma_unmap_single(lpbfifo.chan->device->dev,
                         lpbfifo.ram_bus_addr, req->size, dir);

        lpbfifo.wait_lpbfifo_callback = false;

        if (!lpbfifo.wait_lpbfifo_irq) {
                /* Transfer is finished, set the FIFO as idle */
                lpbfifo.req = NULL;

                spin_unlock_irqrestore(&lpbfifo.lock, flags);

                if (req->callback)
                        req->callback(req);
        } else {
                spin_unlock_irqrestore(&lpbfifo.lock, flags);
        }
}

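/*
 * mpc512x_lpbfifo_kick - program the FIFO and the DMA channel for the
 * transfer described by lpbfifo.req and start it.
 * Must be called with lpbfifo.lock held.
 */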
static int mpc512x_lpbfifo_kick(void)
{
        u32 bits;
        bool no_incr = false;
        u32 bpt = 32; /* max bytes per LPBFIFO transaction involving DMA */
        u32 cs = 0;
        size_t i;
        struct dma_device *dma_dev = NULL;
        struct scatterlist sg;
        enum dma_data_direction dir;
        struct dma_slave_config dma_conf = {};
        struct dma_async_tx_descriptor *dma_tx = NULL;
        dma_cookie_t cookie;
        int ret;

        /*
         * 1. Fit the requirements:
         * - the packet size must be a multiple of 4, since the FIFO Data Word
         *   Register allows only full-word access according to the Reference
         *   Manual;
         * - the physical address of the device on the LPB and the packet size
         *   must be aligned on a BPT (bytes per transaction) or 8-byte
         *   boundary according to the Reference Manual;
         * - but we choose a DMA maxburst equal (or very close) to BPT to
         *   prevent the DMA controller from overtaking the FIFO and causing
         *   a FIFO underflow error. So we force the packet size to be aligned
         *   on a BPT boundary so as not to confuse the DMA driver, which
         *   requires the packet size to be aligned on a maxburst boundary;
         * - BPT should be set to the LPB device port size for operation with
         *   disabled auto-incrementing, according to the Reference Manual.
         */
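        /*
         * Worked example (illustrative values): for a 24-byte write to a
         * device at dev_phys_addr == 0xfc000008 with an undefined port size,
         * the loop below starts at bpt == 32 and halves it until both checks
         * pass: 32 and 16 fail the size check, 8 passes both checks, so
         * bpt == 8 and maxburst == max(8, 4) / 4 == 2 words.
         */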
        if (lpbfifo.req->size == 0 || !IS_ALIGNED(lpbfifo.req->size, 4))
                return -EINVAL;

        if (lpbfifo.req->portsize != LPB_DEV_PORTSIZE_UNDEFINED) {
                bpt = lpbfifo.req->portsize;
                no_incr = true;
        }

        while (bpt > 1) {
                if (IS_ALIGNED(lpbfifo.req->dev_phys_addr, min(bpt, 0x8u)) &&
                    IS_ALIGNED(lpbfifo.req->size, bpt)) {
                        break;
                }

                if (no_incr)
                        return -EINVAL;

                bpt >>= 1;
        }
        dma_conf.dst_maxburst = max(bpt, 0x4u) / 4;
        dma_conf.src_maxburst = max(bpt, 0x4u) / 4;

        for (i = 0; i < lpbfifo.cs_n; i++) {
                phys_addr_t cs_start = lpbfifo.cs_ranges[i].addr;
                phys_addr_t cs_end = cs_start + lpbfifo.cs_ranges[i].size;
                phys_addr_t access_start = lpbfifo.req->dev_phys_addr;
                phys_addr_t access_end = access_start + lpbfifo.req->size;

                if (access_start >= cs_start && access_end <= cs_end) {
                        cs = lpbfifo.cs_ranges[i].csnum;
                        break;
                }
        }
        if (i == lpbfifo.cs_n)
                return -EFAULT;

        /* 2. Prepare DMA */
        dma_dev = lpbfifo.chan->device;

        if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) {
                dir = DMA_TO_DEVICE;
                dma_conf.direction = DMA_MEM_TO_DEV;
                dma_conf.dst_addr = lpbfifo.regs_phys +
                                offsetof(struct mpc512x_lpbfifo, data_word);
        } else {
                dir = DMA_FROM_DEVICE;
                dma_conf.direction = DMA_DEV_TO_MEM;
                dma_conf.src_addr = lpbfifo.regs_phys +
                                offsetof(struct mpc512x_lpbfifo, data_word);
        }
        dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

        /* Make DMA channel work with LPB FIFO data register */
        if (dma_dev->device_config(lpbfifo.chan, &dma_conf))
                return -EINVAL;

        sg_init_table(&sg, 1);

        sg_dma_address(&sg) = dma_map_single(dma_dev->dev,
                        lpbfifo.req->ram_virt_addr, lpbfifo.req->size, dir);
        if (dma_mapping_error(dma_dev->dev, sg_dma_address(&sg)))
                return -EFAULT;

        lpbfifo.ram_bus_addr = sg_dma_address(&sg); /* For freeing later */

        sg_dma_len(&sg) = lpbfifo.req->size;

        dma_tx = dmaengine_prep_slave_sg(lpbfifo.chan, &sg,
                                         1, dma_conf.direction, 0);
        if (!dma_tx) {
                ret = -ENOSPC;
                goto err_dma_prep;
        }
        dma_tx->callback = mpc512x_lpbfifo_callback;
        dma_tx->callback_param = NULL;

        /* 3. Prepare FIFO */
        out_be32(&lpbfifo.regs->enable,
                 MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
        out_be32(&lpbfifo.regs->enable, 0x0);

        /*
         * Configure the watermarks for the write operation
         * (RAM->DMA->FIFO->dev):
         * - high watermark of 7 words, according to the Reference Manual,
         * - low watermark of 512 bytes (half of the FIFO).
         * These watermarks don't work for the read operation since the
         * MPC512X_SCLPC_FLUSH bit is set (according to the Reference Manual).
         */
        out_be32(&lpbfifo.regs->fifo_ctrl, MPC512X_SCLPC_FIFO_CTRL(0x7));
        out_be32(&lpbfifo.regs->fifo_alarm, MPC512X_SCLPC_FIFO_ALARM(0x200));

        /*
         * The start address is the physical address of the region which
         * belongs to the device on the LocalPlus Bus.
         */
        out_be32(&lpbfifo.regs->start_addr, lpbfifo.req->dev_phys_addr);

        /*
         * Configure the chip select, transfer direction, address increment
         * option and bytes per transaction option.
         */
        bits = MPC512X_SCLPC_CS(cs);
        if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_READ)
                bits |= MPC512X_SCLPC_READ | MPC512X_SCLPC_FLUSH;
        if (no_incr)
                bits |= MPC512X_SCLPC_DAI;
        bits |= MPC512X_SCLPC_BPT(bpt);
        out_be32(&lpbfifo.regs->ctrl, bits);

        /* Unmask irqs */
        bits = MPC512X_SCLPC_ENABLE | MPC512X_SCLPC_ABORT_INT_ENABLE;
        if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
                bits |= MPC512X_SCLPC_NORM_INT_ENABLE;
        else
                lpbfifo.wait_lpbfifo_irq = false;

        out_be32(&lpbfifo.regs->enable, bits);

        /* 4. Set the packet size and kick the FIFO off */
        bits = lpbfifo.req->size | MPC512X_SCLPC_START;
        out_be32(&lpbfifo.regs->pkt_size, bits);

        /* 5. Finally kick the DMA off */
        cookie = dma_tx->tx_submit(dma_tx);
        if (dma_submit_error(cookie)) {
                ret = -ENOSPC;
                goto err_dma_submit;
        }

        return 0;

err_dma_submit:
        out_be32(&lpbfifo.regs->enable,
                 MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
err_dma_prep:
        dma_unmap_single(dma_dev->dev, sg_dma_address(&sg),
                         lpbfifo.req->size, dir);
        return ret;
}

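/* Caller must hold lpbfifo.lock. */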
static int mpc512x_lpbfifo_submit_locked(struct mpc512x_lpbfifo_request *req)
{
        int ret = 0;

        if (!lpbfifo.regs)
                return -ENODEV;

        /* Check whether a transfer is in progress */
        if (lpbfifo.req)
                return -EBUSY;

        lpbfifo.wait_lpbfifo_irq = true;
        lpbfifo.wait_lpbfifo_callback = true;
        lpbfifo.req = req;

        ret = mpc512x_lpbfifo_kick();
        if (ret != 0)
                lpbfifo.req = NULL; /* Set the FIFO as idle */

        return ret;
}

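/*
 * mpc512x_lpbfifo_submit - submit a request for one LPB FIFO transfer.
 * Returns 0 if the transfer has been started, -EBUSY if a transfer is
 * already in progress, or another negative error code on failure.
 */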
int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&lpbfifo.lock, flags);
        ret = mpc512x_lpbfifo_submit_locked(req);
        spin_unlock_irqrestore(&lpbfifo.lock, flags);

        return ret;
}
EXPORT_SYMBOL(mpc512x_lpbfifo_submit);
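
/*
 * Usage sketch (illustrative only; field values are examples): a client
 * driver fills a struct mpc512x_lpbfifo_request and submits it:
 *
 *      static void my_write_done(struct mpc512x_lpbfifo_request *req)
 *      {
 *              ... the buffer may be reused here ...
 *      }
 *
 *      struct mpc512x_lpbfifo_request req = {
 *              .dir = MPC512X_LPBFIFO_REQ_DIR_WRITE,
 *              .dev_phys_addr = dev_addr,      (device address on the LPB)
 *              .ram_virt_addr = buf,           (size must be a multiple of 4)
 *              .size = len,
 *              .portsize = LPB_DEV_PORTSIZE_UNDEFINED,
 *              .callback = my_write_done,
 *      };
 *
 *      err = mpc512x_lpbfifo_submit(&req);
 *
 * The request must stay valid until the callback runs. For a write the
 * callback runs once both the FIFO IRQ and the DMA completion have been
 * seen; for a read it runs from the DMA completion callback alone.
 */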

/*
 * The LPBFIFO driver uses the "ranges" property of the "localbus" device
 * tree node to determine the chip select number of a client device
 * requesting a DMA transfer.
 */
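/*
 * For example (illustrative values), a localbus node with
 *      ranges = <0x0 0x0 0xfc000000 0x04000000>;
 * maps chip select 0 to the CPU address window starting at 0xfc000000,
 * so a request whose dev_phys_addr falls inside that window is issued
 * with chip select 0.
 */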
static int get_cs_ranges(struct device *dev)
{
        int ret = -ENODEV;
        struct device_node *lb_node;
        size_t i = 0;
        struct of_range_parser parser;
        struct of_range range;

        lb_node = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-localbus");
        if (!lb_node)
                return ret;

        of_range_parser_init(&parser, lb_node);
        lpbfifo.cs_n = of_range_count(&parser);

        lpbfifo.cs_ranges = devm_kcalloc(dev, lpbfifo.cs_n,
                                         sizeof(struct cs_range), GFP_KERNEL);
        if (!lpbfifo.cs_ranges)
                goto end;

        for_each_of_range(&parser, &range) {
                u32 base = lower_32_bits(range.bus_addr);
                if (base)
                        goto end;

                lpbfifo.cs_ranges[i].csnum = upper_32_bits(range.bus_addr);
                lpbfifo.cs_ranges[i].base = base;
                lpbfifo.cs_ranges[i].addr = range.cpu_addr;
                lpbfifo.cs_ranges[i].size = range.size;
                i++;
        }

        ret = 0;

end:
        of_node_put(lb_node);
        return ret;
}

static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
{
        struct resource r;
        int ret = 0;

        memset(&lpbfifo, 0, sizeof(struct lpbfifo_data));
        spin_lock_init(&lpbfifo.lock);

        lpbfifo.chan = dma_request_chan(&pdev->dev, "rx-tx");
        if (IS_ERR(lpbfifo.chan))
                return PTR_ERR(lpbfifo.chan);

        if (of_address_to_resource(pdev->dev.of_node, 0, &r) != 0) {
                dev_err(&pdev->dev, "bad 'reg' in 'sclpc' device tree node\n");
                ret = -ENODEV;
                goto err0;
        }

        lpbfifo.regs_phys = r.start;
        lpbfifo.regs_size = resource_size(&r);

        if (!devm_request_mem_region(&pdev->dev, lpbfifo.regs_phys,
                                     lpbfifo.regs_size, DRV_NAME)) {
                dev_err(&pdev->dev, "unable to request region\n");
                ret = -EBUSY;
                goto err0;
        }

        lpbfifo.regs = devm_ioremap(&pdev->dev,
                                    lpbfifo.regs_phys, lpbfifo.regs_size);
        if (!lpbfifo.regs) {
                dev_err(&pdev->dev, "mapping registers failed\n");
                ret = -ENOMEM;
                goto err0;
        }

        out_be32(&lpbfifo.regs->enable,
                 MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

        if (get_cs_ranges(&pdev->dev) != 0) {
                dev_err(&pdev->dev, "bad '/localbus' device tree node\n");
                ret = -ENODEV;
                goto err0;
        }

        lpbfifo.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (!lpbfifo.irq) {
                dev_err(&pdev->dev, "mapping irq failed\n");
                ret = -ENODEV;
                goto err0;
        }

        if (request_irq(lpbfifo.irq, mpc512x_lpbfifo_irq, 0,
                        DRV_NAME, &pdev->dev) != 0) {
                dev_err(&pdev->dev, "requesting irq failed\n");
                ret = -ENODEV;
                goto err1;
        }

        dev_info(&pdev->dev, "probe succeeded\n");
        return 0;

err1:
        irq_dispose_mapping(lpbfifo.irq);
err0:
        dma_release_channel(lpbfifo.chan);
        return ret;
}

static void mpc512x_lpbfifo_remove(struct platform_device *pdev)
{
        unsigned long flags;
        struct dma_device *dma_dev = lpbfifo.chan->device;
        struct mpc512x_lpbfifo __iomem *regs = NULL;

        spin_lock_irqsave(&lpbfifo.lock, flags);
        regs = lpbfifo.regs;
        lpbfifo.regs = NULL;
        spin_unlock_irqrestore(&lpbfifo.lock, flags);

        dma_dev->device_terminate_all(lpbfifo.chan);
        out_be32(&regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

        free_irq(lpbfifo.irq, &pdev->dev);
        irq_dispose_mapping(lpbfifo.irq);
        dma_release_channel(lpbfifo.chan);
}

static const struct of_device_id mpc512x_lpbfifo_match[] = {
        { .compatible = "fsl,mpc512x-lpbfifo", },
        {},
};
MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);
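
/*
 * Illustrative device tree node for this driver (the register offsets,
 * interrupt specifier and DMA request line are example values; the DMA
 * channel must be named "rx-tx"):
 *
 *      sclpc@10100 {
 *              compatible = "fsl,mpc512x-lpbfifo";
 *              reg = <0x10100 0x50>;
 *              interrupts = <7 0x8>;
 *              dmas = <&dma0 26>;
 *              dma-names = "rx-tx";
 *      };
 */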

static struct platform_driver mpc512x_lpbfifo_driver = {
        .probe = mpc512x_lpbfifo_probe,
        .remove = mpc512x_lpbfifo_remove,
        .driver = {
                .name = DRV_NAME,
                .of_match_table = mpc512x_lpbfifo_match,
        },
};

module_platform_driver(mpc512x_lpbfifo_driver);

MODULE_AUTHOR("Alexander Popov <[email protected]>");
MODULE_DESCRIPTION("MPC512x LocalPlus Bus FIFO device driver");
MODULE_LICENSE("GPL v2");