GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/sparc/kernel/iommu.c

/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

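/* Raw accessors for the IOMMU control registers.  These bypass the MMU
 * with physical-address ASIs (ASI_PHYS_BYPASS_EC_E), which is how the
 * sun4u IOMMU and streaming buffer registers are reached.
 */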
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

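/* IOPTE protection templates.  "Consistent" mappings are valid and
 * cacheable and bypass the streaming buffer; "streaming" mappings add
 * IOPTE_STBUF and must be flushed before device writes become visible
 * to the CPU.  The context number, when used, lives at bit 47 and up.
 */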
#define IOPTE_CONSISTENT(CTX)	\
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX)	\
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

/* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
 * facility it must all be done in one pass while under the iommu lock.
 *
 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 * over the entire page table doing allocations. Therefore we only ever advance
 * the hint and cannot backtrack it.
 */
unsigned long iommu_range_alloc(struct device *dev,
				struct iommu *iommu,
				unsigned long npages,
				unsigned long *handle)
{
	unsigned long n, end, start, limit, boundary_size;
	struct iommu_arena *arena = &iommu->arena;
	int pass = 0;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = arena->hint;

	limit = arena->limit;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the beginning and flush.
	 */
	if (start >= limit) {
		start = 0;
		if (iommu->flush_all)
			iommu->flush_all(iommu);
	}

 again:

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IO_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);

	n = iommu_area_alloc(arena->map, limit, start, npages,
			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
			     boundary_size >> IO_PAGE_SHIFT, 0);
	if (n == -1) {
		if (likely(pass < 1)) {
			/* First failure, rescan from the beginning. */
			start = 0;
			if (iommu->flush_all)
				iommu->flush_all(iommu);
			pass++;
			goto again;
		} else {
			/* Second failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	arena->hint = end;

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

167
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
168
{
169
struct iommu_arena *arena = &iommu->arena;
170
unsigned long entry;
171
172
entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
173
174
bitmap_clear(arena->map, entry, npages);
175
}
176
177
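/* Set up the software state for one IOMMU: the allocation bitmap, the
 * dummy page that inactive IOPTEs point at, and the TSB (the IOMMU
 * page table itself).  All allocations honor @numa_node.
 */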
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->page_table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->arena.map) {
		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	memset(iommu->arena.map, 0, sz);
	iommu->arena.limit = num_tsb_entries;

	if (tlb_type != hypervisor)
		iommu->flush_all = iommu_flushall;

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->arena.map);
	iommu->arena.map = NULL;

	return -ENOMEM;
}

static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_range_alloc(dev, iommu, npages, NULL);
	if (unlikely(entry == DMA_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

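/* Allocate a DMA context number under the IOMMU lock.  Context 0 means
 * "no context" and is never handed out: the search starts at the lowest
 * free bit, wraps around once, and only falls back to 0 (with a
 * warning) when every context is in use.
 */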
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

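/* Allocate a DMA-coherent buffer: grab physically contiguous pages on
 * the device's NUMA node, then map them with consistent (non-streaming)
 * IOPTEs.  Allocations of order 10 or above are refused.
 */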
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp)
{
	unsigned long flags, order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma)
{
	struct iommu *iommu;
	unsigned long flags, order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	spin_lock_irqsave(&iommu->lock, flags);

	iommu_range_free(iommu, dvma, npages);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

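/* Map a single page for streaming DMA.  A context is allocated when the
 * IOMMU supports context flushing, and the IOPTEs are made streaming
 * unless the streaming buffer is disabled.  DMA_TO_DEVICE mappings are
 * left read-only from the device's point of view.
 */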
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);
	base = alloc_npages(dev, iommu, npages);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;
}

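/* Push dirty data out of the streaming buffer.  A context flush is
 * preferred when both the streaming buffer and the IOMMU support it,
 * with a page-by-page flush as the fallback.  For anything other than
 * DMA_TO_DEVICE we then write the flush-sync flag and poll (up to
 * roughly 100ms) until the hardware sets it, signalling completion.
 */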
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

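/* Tear down a single-page streaming mapping: flush the streaming buffer
 * if needed, point the IOPTEs back at the dummy page, then release the
 * allocated range and the context.
 */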
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      struct dma_attrs *attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_range_free(iommu, bus_addr, npages);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

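/* Map a scatterlist.  Each segment gets its own IOMMU range, but
 * adjacent segments are merged into one DMA segment when the allocated
 * bus addresses are contiguous and neither the device's maximum segment
 * size nor its segment boundary would be violated.
 */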
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_range_alloc(dev, iommu, npages, &handle);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->page_table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_range_free(iommu, vaddr, npages);

			entry = (vaddr - iommu->page_table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
		iommu_range_free(iommu, dma_handle, npages);

		entry = ((dma_handle - iommu->page_table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled)
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

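/* Make device writes visible to the CPU for one mapping: recover the
 * context from the IOPTE and flush the streaming buffer.  This is a
 * no-op when the streaming buffer is disabled.
 */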
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

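/* As above, but for a whole scatterlist: find the last segment with a
 * nonzero dma_length and flush the span from the first segment's page
 * through the end of that one.
 */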
static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4u_dma_ops = {
	.alloc_coherent		= dma_4u_alloc_coherent,
	.free_coherent		= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
};

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);

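/* Masks with bits at or above bit 32 are rejected outright; a mask that
 * covers the IOMMU's own DMA address mask is accepted; otherwise PCI
 * devices get a final chance through pci64_dma_supported().
 */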
int dma_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;
	u64 dma_addr_mask = iommu->dma_addr_mask;

	if (device_mask >= (1UL << 32UL))
		return 0;

	if ((device_mask & dma_addr_mask) == dma_addr_mask)
		return 1;

#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

	return 0;
}
EXPORT_SYMBOL(dma_supported);