GitHub Repository: torvalds/linux
Path: blob/master/arch/sparc/kernel/iommu.c
// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller ([email protected])
 * Copyright (C) 1999, 2000 Jakub Jelinek ([email protected])
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}
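
/* Set up the software state for one IOMMU: the allocation lock and
 * context allocator, the free-area map used by the range allocator,
 * the dummy page that inactive IO PTEs point at, and the TSB itself.
 */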
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    (tlb_type != hypervisor ? iommu_flushall : NULL),
			    false, 1, false);

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
}
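
/* Reserve @npages contiguous TSB entries and return a pointer to the
 * first IO PTE, or NULL if the range allocator has no room.
 */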
static inline iopte_t *alloc_npages(struct device *dev,
				    struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}
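
/* Allocate a DVMA context number.  Context 0 is reserved to mean "no
 * context", so a full bitmap falls back to 0 with a warning.  Freeing
 * remembers the lowest released context to shorten the next search.
 */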
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->tbl.table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
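
/* Map a physically contiguous range for streaming DMA.  The range is
 * backed by freshly allocated TSB entries and, when the IOMMU supports
 * context flushing, tagged with a newly allocated context.
 */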
static dma_addr_t dma_4u_map_phys(struct device *dev, phys_addr_t phys,
				  size_t sz, enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	if (unlikely(attrs & DMA_ATTR_MMIO))
		/*
		 * This check is included because older versions of the code
		 * lacked MMIO path support, and my ability to test this path
		 * is limited. However, from a software technical standpoint,
		 * there is no restriction, as the following code operates
		 * solely on physical addresses.
		 */
		goto bad_no_ctx;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(phys_to_virt(phys));
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, phys += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | phys;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;
}
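
/* Flush the streaming buffer for a DVMA range, either by context match
 * (when both the STC and the IOMMU support it) or one page at a time,
 * and synchronize on the flush flag whenever the device may have put
 * dirty data into the streaming cache.
 */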
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

static void dma_4u_unmap_phys(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
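
/* Map a scatterlist.  TSB entries are allocated per segment, and
 * adjacent DVMA allocations are merged into a single DMA segment as
 * long as the device's max segment size and boundary mask allow it.
 */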
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return -EINVAL;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base +
			   (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);

			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return -EINVAL;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;
		struct iommu_map_table *tbl = &iommu->tbl;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		entry = ((dma_handle - iommu->tbl.table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
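
/* The sync-for-cpu handlers only have work to do when a streaming
 * buffer is enabled: dirty data must be flushed out of the streaming
 * cache before the CPU reads the buffer.
 */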
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4u_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;

	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

static const struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_phys		= dma_4u_map_phys,
	.unmap_phys		= dma_4u_unmap_phys,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
	.dma_supported		= dma_4u_supported,
};

const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);