GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/platforms/pasemi/dma_lib.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Common functions for DMA access on PA Semi PWRficient
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched.h>

#include <asm/pasemi_dma.h>

#define MAX_TXCH 64
#define MAX_RXCH 64
#define MAX_FLAGS 64
#define MAX_FUN 8

static struct pasdma_status *dma_status;

static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;

static int base_hw_irq;

static int num_txch, num_rxch;

static struct pci_dev *dma_pdev;

/* Bitmaps to handle allocation of channels */

static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);
static DECLARE_BITMAP(flags_free, MAX_FLAGS);
static DECLARE_BITMAP(fun_free, MAX_FUN);

/* pasemi_read_iob_reg - read IOB register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
        return in_le32(iob_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_iob_reg);

/* pasemi_write_iob_reg - write IOB register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
        out_le32(iob_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_iob_reg);

/* pasemi_read_mac_reg - read MAC register
 * @intf: MAC interface
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
        return in_le32(mac_regs[intf]+reg);
}
EXPORT_SYMBOL(pasemi_read_mac_reg);

/* pasemi_write_mac_reg - write MAC register
 * @intf: MAC interface
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
        out_le32(mac_regs[intf]+reg, val);
}
EXPORT_SYMBOL(pasemi_write_mac_reg);

/* pasemi_read_dma_reg - read DMA register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
        return in_le32(dma_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_dma_reg);

/* pasemi_write_dma_reg - write DMA register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
{
        out_le32(dma_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_dma_reg);
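
/*
 * Usage sketch for the accessors above (illustrative, not part of the
 * original driver): a read-modify-write of a DMA configuration register.
 * PAS_DMA_COM_CFG comes from <asm/pasemi_dma.h>; the bit being set here is
 * arbitrary and only for the example.
 *
 *      u32 cfg;
 *
 *      cfg = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
 *      cfg |= 0x1;
 *      pasemi_write_dma_reg(PAS_DMA_COM_CFG, cfg);
 */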

static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
        int bit;
        int start, limit;

        switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
        case TXCHAN_EVT0:
                start = 0;
                limit = 10;
                break;
        case TXCHAN_EVT1:
                start = 10;
                limit = MAX_TXCH;
                break;
        default:
                start = 0;
                limit = MAX_TXCH;
                break;
        }
retry:
        bit = find_next_bit(txch_free, MAX_TXCH, start);
        if (bit >= limit)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, txch_free))
                goto retry;

        return bit;
}

static void pasemi_free_tx_chan(int chan)
{
        BUG_ON(test_bit(chan, txch_free));
        set_bit(chan, txch_free);
}

static int pasemi_alloc_rx_chan(void)
{
        int bit;
retry:
        bit = find_first_bit(rxch_free, MAX_RXCH);
        if (bit >= MAX_RXCH)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, rxch_free))
                goto retry;

        return bit;
}

static void pasemi_free_rx_chan(int chan)
{
        BUG_ON(test_bit(chan, rxch_free));
        set_bit(chan, rxch_free);
}

/* pasemi_dma_alloc_chan - Allocate a DMA channel
 * @type: Type of channel to allocate
 * @total_size: Total size of structure to allocate (to allow for more
 *              room behind the structure to be used by the client)
 * @offset: Offset in bytes from start of the total structure to the beginning
 *          of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
 *          not the first member of the client structure.
 *
 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
 * type argument specifies whether it's an RX or TX channel and, in the case
 * of TX channels, which group it needs to belong to (if any).
 *
 * Returns a pointer to the struct pasemi_dmachan embedded in the allocated
 * client structure on success, NULL on failure.
 */
void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
                            int total_size, int offset)
{
        void *buf;
        struct pasemi_dmachan *chan;
        int chno;

        BUG_ON(total_size < sizeof(struct pasemi_dmachan));

        buf = kzalloc(total_size, GFP_KERNEL);

        if (!buf)
                return NULL;
        chan = buf + offset;

        chan->priv = buf;

        switch (type & (TXCHAN|RXCHAN)) {
        case RXCHAN:
                chno = pasemi_alloc_rx_chan();
                chan->chno = chno;
                chan->irq = irq_create_mapping(NULL,
                                               base_hw_irq + num_txch + chno);
                chan->status = &dma_status->rx_sta[chno];
                break;
        case TXCHAN:
                chno = pasemi_alloc_tx_chan(type);
                chan->chno = chno;
                chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
                chan->status = &dma_status->tx_sta[chno];
                break;
        }

        chan->chan_type = type;

        return chan;
}
EXPORT_SYMBOL(pasemi_dma_alloc_chan);
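
/*
 * Usage sketch (illustrative, not part of the original driver): a client
 * embeds struct pasemi_dmachan in its own state, passes the total size and
 * the member offset, and recovers the enclosing structure with
 * container_of(). "struct demo_txring" and its fields are hypothetical.
 *
 *      struct demo_txring {
 *              unsigned int next_to_clean;
 *              struct pasemi_dmachan chan;
 *      };
 *
 *      struct pasemi_dmachan *chan;
 *      struct demo_txring *ring;
 *
 *      chan = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct demo_txring),
 *                                   offsetof(struct demo_txring, chan));
 *      if (!chan)
 *              return -ENOMEM;
 *      ring = container_of(chan, struct demo_txring, chan);
 *
 *      ... use ring and chan, then pasemi_dma_free_chan(chan) ...
 */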

/* pasemi_dma_free_chan - Free a previously allocated channel
 * @chan: Channel to free
 *
 * Frees a previously allocated channel. It will also deallocate any
 * descriptor ring associated with the channel, if allocated.
 */
void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
{
        if (chan->ring_virt)
                pasemi_dma_free_ring(chan);

        switch (chan->chan_type & (RXCHAN|TXCHAN)) {
        case RXCHAN:
                pasemi_free_rx_chan(chan->chno);
                break;
        case TXCHAN:
                pasemi_free_tx_chan(chan->chno);
                break;
        }

        kfree(chan->priv);
}
EXPORT_SYMBOL(pasemi_dma_free_chan);

/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
 * @chan: Channel for which to allocate
 * @ring_size: Ring size in 64-bit (8-byte) words
 *
 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
 * on failure. The passed in struct pasemi_dmachan is updated with the
 * virtual and DMA addresses of the ring.
 */
int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
{
        BUG_ON(chan->ring_virt);

        chan->ring_size = ring_size;

        chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
                                             ring_size * sizeof(u64),
                                             &chan->ring_dma, GFP_KERNEL);

        if (!chan->ring_virt)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);

/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
 * @chan: Channel for which to free the descriptor ring
 *
 * Frees a previously allocated descriptor ring for a channel.
 */
void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
{
        BUG_ON(!chan->ring_virt);

        dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
                          chan->ring_virt, chan->ring_dma);
        chan->ring_virt = NULL;
        chan->ring_size = 0;
        chan->ring_dma = 0;
}
EXPORT_SYMBOL(pasemi_dma_free_ring);

/* pasemi_dma_start_chan - Start a DMA channel
 * @chan: Channel to start
 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
 *
 * Enables (starts) a DMA channel with optional additional arguments.
 */
void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
{
        if (chan->chan_type == RXCHAN)
                pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
                                     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
        else
                pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
                                     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
}
EXPORT_SYMBOL(pasemi_dma_start_chan);

/* pasemi_dma_stop_chan - Stop a DMA channel
 * @chan: Channel to stop
 *
 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
 * CMDSTA register and waiting on the ACT (active) bit to clear, then
 * finally disabling the whole channel.
 *
 * This function only tries for a short while; if the channel does not stop
 * in that time, it returns failure.
 *
 * Returns 1 on success, 0 on failure.
 */
#define MAX_RETRIES 5000
int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
{
        int reg, retries;
        u32 sta;

        if (chan->chan_type == RXCHAN) {
                reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
                pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
                for (retries = 0; retries < MAX_RETRIES; retries++) {
                        sta = pasemi_read_dma_reg(reg);
                        if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
                                pasemi_write_dma_reg(reg, 0);
                                return 1;
                        }
                        cond_resched();
                }
        } else {
                reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
                pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
                for (retries = 0; retries < MAX_RETRIES; retries++) {
                        sta = pasemi_read_dma_reg(reg);
                        if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
                                pasemi_write_dma_reg(reg, 0);
                                return 1;
                        }
                        cond_resched();
                }
        }

        return 0;
}
EXPORT_SYMBOL(pasemi_dma_stop_chan);
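
/*
 * Channel lifecycle sketch (illustrative, not part of the original driver),
 * using the helpers above on an already allocated channel "chan". The ring
 * size of 64 words is arbitrary; real clients size it for their descriptor
 * format.
 *
 *      if (pasemi_dma_alloc_ring(chan, 64))
 *              goto err;
 *
 *      pasemi_dma_start_chan(chan, 0);
 *
 *      ... queue work through chan->ring_virt / chan->ring_dma ...
 *
 *      if (!pasemi_dma_stop_chan(chan))
 *              pr_err("channel %d did not stop\n", chan->chno);
 *      pasemi_dma_free_ring(chan);
 */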

/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
 * @chan: Channel to allocate for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Allocate a buffer to be used by the DMA engine for read/write,
 * similar to dma_alloc_coherent().
 *
 * Returns the virtual address of the buffer, or NULL in case of failure.
 */
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
                           dma_addr_t *handle)
{
        return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_alloc_buf);

/* pasemi_dma_free_buf - Free a buffer used for DMA
 * @chan: Channel the buffer was allocated for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Frees a previously allocated buffer.
 */
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
                         dma_addr_t *handle)
{
        dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);
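
/*
 * Buffer usage sketch (illustrative, not part of the original driver); the
 * 4096-byte size is arbitrary:
 *
 *      dma_addr_t buf_dma;
 *      void *buf;
 *
 *      buf = pasemi_dma_alloc_buf(chan, 4096, &buf_dma);
 *      if (!buf)
 *              return -ENOMEM;
 *
 *      ... hand buf_dma to the hardware, access buf from the CPU ...
 *
 *      pasemi_dma_free_buf(chan, 4096, &buf_dma);
 */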

/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
 *
 * Allocates a flag for use with channel synchronization (event descriptors).
 * Returns allocated flag (0-63), < 0 on error.
 */
int pasemi_dma_alloc_flag(void)
{
        int bit;

retry:
        bit = find_first_bit(flags_free, MAX_FLAGS);
        if (bit >= MAX_FLAGS)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, flags_free))
                goto retry;

        return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_flag);


/* pasemi_dma_free_flag - Deallocates a flag (event)
 * @flag: Flag number to deallocate
 *
 * Frees up a flag so it can be reused for other purposes.
 */
void pasemi_dma_free_flag(int flag)
{
        BUG_ON(test_bit(flag, flags_free));
        BUG_ON(flag >= MAX_FLAGS);
        set_bit(flag, flags_free);
}
EXPORT_SYMBOL(pasemi_dma_free_flag);


/* pasemi_dma_set_flag - Sets a flag (event) to 1
 * @flag: Flag number to set active
 *
 * Sets the flag provided to 1.
 */
void pasemi_dma_set_flag(int flag)
{
        BUG_ON(flag >= MAX_FLAGS);
        if (flag < 32)
                pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag);
        else
                pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_set_flag);

/* pasemi_dma_clear_flag - Sets a flag (event) to 0
 * @flag: Flag number to set inactive
 *
 * Sets the flag provided to 0.
 */
void pasemi_dma_clear_flag(int flag)
{
        BUG_ON(flag >= MAX_FLAGS);
        if (flag < 32)
                pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag);
        else
                pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_clear_flag);
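
/*
 * Flag usage sketch (illustrative, not part of the original driver): flags
 * come from a single pool of MAX_FLAGS (64) and are used for event-based
 * channel synchronization.
 *
 *      int flag;
 *
 *      flag = pasemi_dma_alloc_flag();
 *      if (flag < 0)
 *              return flag;
 *
 *      pasemi_dma_clear_flag(flag);
 *      ... queue descriptors that signal or wait on this flag ...
 *      pasemi_dma_set_flag(flag);
 *
 *      pasemi_dma_free_flag(flag);
 */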

/* pasemi_dma_alloc_fun - Allocate a function engine
 *
 * Allocates a function engine to use for crypto/checksum offload
 * Returns allocated engine (0-7), < 0 on error.
 */
int pasemi_dma_alloc_fun(void)
{
        int bit;

retry:
        bit = find_first_bit(fun_free, MAX_FUN);
        if (bit >= MAX_FUN)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, fun_free))
                goto retry;

        return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_fun);


/* pasemi_dma_free_fun - Deallocates a function engine
 * @fun: Engine number to deallocate
 *
 * Frees up a function engine so it can be used for other purposes.
 */
void pasemi_dma_free_fun(int fun)
{
        BUG_ON(test_bit(fun, fun_free));
        BUG_ON(fun >= MAX_FUN);
        set_bit(fun, fun_free);
}
EXPORT_SYMBOL(pasemi_dma_free_fun);
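
/*
 * Function engine sketch (illustrative, not part of the original driver):
 *
 *      int fun;
 *
 *      fun = pasemi_dma_alloc_fun();
 *      if (fun < 0)
 *              return fun;
 *      ... point crypto/checksum offload descriptors at engine "fun" ...
 *      pasemi_dma_free_fun(fun);
 */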


static void *map_onedev(struct pci_dev *p, int index)
{
        struct device_node *dn;
        void __iomem *ret;

        dn = pci_device_to_OF_node(p);
        if (!dn)
                goto fallback;

        ret = of_iomap(dn, index);
        if (!ret)
                goto fallback;

        return ret;
fallback:
        /* This is hardcoded and ugly, but we have some firmware versions
         * that don't provide the register space in the device tree. Luckily
         * they are at well-known locations so we can just do the math here.
         */
        return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}

/* pasemi_dma_init - Initialize the PA Semi DMA library
 *
 * This function initializes the DMA library. It must be called before
 * any other function in the library.
 *
 * Returns 0 on success, errno on failure.
 */
int pasemi_dma_init(void)
{
        static DEFINE_SPINLOCK(init_lock);
        struct pci_dev *iob_pdev;
        struct pci_dev *pdev;
        struct resource res;
        struct device_node *dn;
        int i, intf, err = 0;
        unsigned long timeout;
        u32 tmp;

        if (!machine_is(pasemi))
                return -ENODEV;

        spin_lock(&init_lock);

        /* Make sure we haven't already initialized */
        if (dma_pdev)
                goto out;

        iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
        if (!iob_pdev) {
                BUG();
                pr_warn("Can't find I/O Bridge\n");
                err = -ENODEV;
                goto out;
        }
        iob_regs = map_onedev(iob_pdev, 0);

        dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
        if (!dma_pdev) {
                BUG();
                pr_warn("Can't find DMA controller\n");
                err = -ENODEV;
                goto out;
        }
        dma_regs = map_onedev(dma_pdev, 0);
        base_hw_irq = virq_to_hw(dma_pdev->irq);

        pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
        num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

        pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
        num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;

        intf = 0;
        for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
             pdev;
             pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
                mac_regs[intf++] = map_onedev(pdev, 0);

        pci_dev_put(pdev);

        for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
             pdev;
             pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
                mac_regs[intf++] = map_onedev(pdev, 0);

        pci_dev_put(pdev);

        dn = pci_device_to_OF_node(iob_pdev);
        if (dn)
                err = of_address_to_resource(dn, 1, &res);
        if (!dn || err) {
                /* Fallback for old firmware */
                res.start = 0xfd800000;
                res.end = res.start + 0x1000;
        }
        dma_status = ioremap_cache(res.start, resource_size(&res));
        pci_dev_put(iob_pdev);

        for (i = 0; i < MAX_TXCH; i++)
                __set_bit(i, txch_free);

        for (i = 0; i < MAX_RXCH; i++)
                __set_bit(i, rxch_free);

        timeout = jiffies + HZ;
        pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
        while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
                if (time_after(jiffies, timeout)) {
                        pr_warn("Warning: Could not disable RX section\n");
                        break;
                }
        }

        timeout = jiffies + HZ;
        pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
        while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
                if (time_after(jiffies, timeout)) {
                        pr_warn("Warning: Could not disable TX section\n");
                        break;
                }
        }

        /* setup resource allocations for the different DMA sections */
        tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
        pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);

        /* enable tx section */
        pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

        /* enable rx section */
        pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

        for (i = 0; i < MAX_FLAGS; i++)
                __set_bit(i, flags_free);

        for (i = 0; i < MAX_FUN; i++)
                __set_bit(i, fun_free);

        /* clear all status flags */
        pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
        pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);

        pr_info("PA Semi PWRficient DMA library initialized "
                "(%d tx, %d rx channels)\n", num_txch, num_rxch);

out:
        spin_unlock(&init_lock);
        return err;
}
EXPORT_SYMBOL(pasemi_dma_init);
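
/*
 * Initialization sketch (illustrative, not part of the original driver):
 * client drivers call pasemi_dma_init() from their probe path before using
 * any of the other helpers in this file; repeated calls are harmless since
 * the function bails out once dma_pdev is set.
 *
 *      static int demo_probe(struct pci_dev *pdev,
 *                            const struct pci_device_id *id)
 *      {
 *              int err;
 *
 *              err = pasemi_dma_init();
 *              if (err)
 *                      return err;
 *
 *              ... allocate channels, rings, and flags as shown above ...
 *              return 0;
 *      }
 */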