// SPDX-License-Identifier: GPL-2.0-only
/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/setup.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
        USE_DYNAMIC_DMA = 1,
#else
        USE_DYNAMIC_DMA = 0,
#endif
};

enum {
        PAGE_SHIFT_4K = 12U,
        PAGE_SHIFT_64K = 16U,
        PAGE_SHIFT_16M = 24U,
};

static unsigned long __init make_page_sizes(unsigned long a, unsigned long b)
{
        return (a << 56) | (b << 48);
}
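
/*
 * Example (illustrative): make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K)
 * returns 0x1810000000000000 -- 24 (16M) in bits 56-63 and 16 (64K) in
 * bits 48-55 -- the packed page-size encoding passed below to
 * lv1_construct_virtual_address_space().
 */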

enum {
        ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
        ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
        HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
        HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 * @destroy: flag if region should be destroyed upon shutdown
 */

struct mem_region {
        u64 base;
        u64 size;
        unsigned long offset;
        int destroy;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: highmem region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
        u64 total;
        u64 vas_id;
        u64 htab_size;
        struct mem_region rm;
        struct mem_region r1;
};

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
        const char *func, int line)
{
        DBG("%s:%d: map.total = %llxh\n", func, line, m->total);
        DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size);
        DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id);
        DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
        DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base);
        DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
        DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
        BUG_ON(is_kernel_addr(phys_addr));
        return (phys_addr < map.rm.size || phys_addr >= map.total)
                ? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
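
/*
 * Worked example (numbers illustrative, not fixed by the HV): with a
 * 128M boot region (map.rm.size = 0x8000000) and a second region
 * created at lpar address 0x700000000, map.r1.offset is
 * 0x700000000 - 0x8000000 = 0x6f8000000, so Linux physical address
 * 0x9000000 translates to lpar address 0x701000000.  Addresses below
 * map.rm.size, or at and above map.total, pass through unchanged.
 */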

/**
 * ps3_mm_vas_create - create the virtual address space
 * @htab_size: returned htab size in bytes
 */

void __init ps3_mm_vas_create(unsigned long *htab_size)
{
        int result;
        u64 start_address;
        u64 size;
        u64 access_right;
        u64 max_page_size;
        u64 flags;

        result = lv1_query_logical_partition_address_region_info(0,
                        &start_address, &size, &access_right, &max_page_size,
                        &flags);

        if (result) {
                DBG("%s:%d: lv1_query_logical_partition_address_region_info "
                        "failed: %s\n", __func__, __LINE__,
                        ps3_result(result));
                goto fail;
        }

        if (max_page_size < PAGE_SHIFT_16M) {
                DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
                        max_page_size);
                goto fail;
        }

        BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
        BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

        result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
                        2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
                        &map.vas_id, &map.htab_size);

        if (result) {
                DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail;
        }

        result = lv1_select_virtual_address_space(map.vas_id);

        if (result) {
                DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail;
        }

        *htab_size = map.htab_size;

        debug_dump_map(&map);

        return;

fail:
        panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the virtual address space
 *
 * called during kexec sequence with MMU off.
 */

notrace void ps3_mm_vas_destroy(void)
{
        int result;

        if (map.vas_id) {
                result = lv1_select_virtual_address_space(0);
                result += lv1_destruct_virtual_address_space(map.vas_id);

                if (result) {
                        lv1_panic(0);
                }

                map.vas_id = 0;
        }
}
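
/*
 * Note: ps3_mm_vas_destroy() runs from the kexec path with the MMU off
 * and tracing disabled (notrace), so failures cannot be logged; a failed
 * HV call can only be reported by halting the partition via lv1_panic(0).
 */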

static int __init ps3_mm_get_repository_highmem(struct mem_region *r)
{
        int result;

        /* Assume a single highmem region. */

        result = ps3_repository_read_highmem_info(0, &r->base, &r->size);

        if (result)
                goto zero_region;

        if (!r->base || !r->size) {
                result = -1;
                goto zero_region;
        }

        r->offset = r->base - map.rm.size;

        DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
            __func__, __LINE__, r->base, r->size);

        return 0;

zero_region:
        DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);

        r->size = r->base = r->offset = 0;
        return result;
}

static int ps3_mm_set_repository_highmem(const struct mem_region *r)
{
        /* Assume a single highmem region. */

        return r ? ps3_repository_write_highmem_info(0, r->base, r->size) :
                ps3_repository_write_highmem_info(0, 0, 0);
}

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
        int result;
        u64 muid;

        r->size = ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

        DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
        DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
        DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
            size - r->size, (size - r->size) / 1024 / 1024);

        if (r->size == 0) {
                DBG("%s:%d: size == 0\n", __func__, __LINE__);
                result = -1;
                goto zero_region;
        }

        result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
                ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

        if (result || r->base < map.rm.size) {
                DBG("%s:%d: lv1_allocate_memory failed: %s\n",
                    __func__, __LINE__, ps3_result(result));
                goto zero_region;
        }

        r->destroy = 1;
        r->offset = r->base - map.rm.size;
        return result;

zero_region:
        r->size = r->base = r->offset = 0;
        return result;
}
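
/*
 * Example (illustrative): a request of 200MB (0xc800000 bytes) is
 * rounded down to 192MB (0xc000000), the nearest multiple of the 16MB
 * vas large page size; a request smaller than 16MB rounds to zero and
 * the create fails.
 */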

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
        int result;

        if (!r->destroy) {
                return;
        }

        if (r->base) {
                result = lv1_release_memory(r->base);

                if (result) {
                        lv1_panic(0);
                }

                r->size = r->base = r->offset = 0;
                map.total = map.rm.size;
        }

        ps3_mm_set_repository_highmem(NULL);
}

/*============================================================================*/
/* dma routines */
/*============================================================================*/

/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
        unsigned long lpar_addr)
{
        if (lpar_addr >= map.rm.size)
                lpar_addr -= map.r1.offset;
        BUG_ON(lpar_addr < r->offset);
        BUG_ON(lpar_addr >= r->offset + r->len);
        return r->bus_addr + lpar_addr - r->offset;
}
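
/*
 * This first undoes the ps3_mm_phys_to_lpar() translation (lpar
 * addresses at or above the boot region map back into the linear Linux
 * physical space), then rebases the result into the region's ioc bus
 * window.  Illustrative numbers: with r->offset = 0 and
 * r->bus_addr = 0x80000000, phys/lpar address 0x1000000 becomes bus
 * address 0x81000000.
 */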

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
        const char *func, int line)
{
        DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
            r->dev->dev_id);
        DBG("%s:%d: page_size %u\n", func, line, r->page_size);
        DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
        DBG("%s:%d: len %lxh\n", func, line, r->len);
        DBG("%s:%d: offset %lxh\n", func, line, r->offset);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure.  This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
        struct ps3_dma_region *region;
        unsigned long lpar_addr;
        unsigned long bus_addr;
        unsigned long len;
        struct list_head link;
        unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
        int line)
{
        DBG("%s:%d: r.dev %llu:%llu\n", func, line,
            c->region->dev->bus_id, c->region->dev->dev_id);
        DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
        DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
        DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
        DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
        DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
        DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
        DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}

static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
        unsigned long bus_addr, unsigned long len)
{
        struct dma_chunk *c;
        unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size);
        unsigned long aligned_len = ALIGN(len + bus_addr - aligned_bus,
                                          1 << r->page_size);

        list_for_each_entry(c, &r->chunk_list.head, link) {
                /* intersection */
                if (aligned_bus >= c->bus_addr &&
                    aligned_bus + aligned_len <= c->bus_addr + c->len)
                        return c;

                /* below */
                if (aligned_bus + aligned_len <= c->bus_addr)
                        continue;

                /* above */
                if (aligned_bus >= c->bus_addr + c->len)
                        continue;

                /* we don't handle the multi-chunk case for now */
                dma_dump_chunk(c);
                BUG();
        }
        return NULL;
}
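
/*
 * Alignment example (illustrative): with 4K io pages, a lookup for
 * bus_addr 0x80001234, len 0x100 is widened to the page-aligned span
 * 0x80001000..0x80002000 before the list walk, so a partial overlap
 * with an existing chunk hits the BUG() above rather than being
 * silently split across chunks.
 */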

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
        unsigned long lpar_addr, unsigned long len)
{
        struct dma_chunk *c;
        unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size);
        unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar,
                                          1 << r->page_size);

        list_for_each_entry(c, &r->chunk_list.head, link) {
                /* intersection */
                if (c->lpar_addr <= aligned_lpar &&
                    aligned_lpar < c->lpar_addr + c->len) {
                        if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
                                return c;
                        else {
                                dma_dump_chunk(c);
                                BUG();
                        }
                }
                /* below */
                if (aligned_lpar + aligned_len <= c->lpar_addr) {
                        continue;
                }
                /* above */
                if (c->lpar_addr + c->len <= aligned_lpar) {
                        continue;
                }
        }
        return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
        int result = 0;

        if (c->bus_addr) {
                result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
                        c->region->dev->dev_id, c->bus_addr, c->len);
                BUG_ON(result);
        }

        kfree(c);
        return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
        int result = 0;
        int iopage;
        unsigned long offset;
        struct ps3_dma_region *r = c->region;

        DBG("%s:start\n", __func__);
        for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
                offset = (1 << r->page_size) * iopage;
                /* put INVALID entry */
                result = lv1_put_iopte(0,
                                       c->bus_addr + offset,
                                       c->lpar_addr + offset,
                                       r->ioid,
                                       0);
                DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
                    c->bus_addr + offset,
                    c->lpar_addr + offset,
                    r->ioid);

                if (result) {
                        DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
                            __LINE__, ps3_result(result));
                }
        }
        kfree(c);
        DBG("%s:end\n", __func__);
        return result;
}

/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
        unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
        int result;
        struct dma_chunk *c;

        c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c) {
                result = -ENOMEM;
                goto fail_alloc;
        }

        c->region = r;
        c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
        c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
        c->len = len;

        BUG_ON(iopte_flag != 0xf800000000000000UL);
        result = lv1_map_device_dma_region(c->region->dev->bus_id,
                                           c->region->dev->dev_id, c->lpar_addr,
                                           c->bus_addr, c->len, iopte_flag);
        if (result) {
                DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
                    __func__, __LINE__, ps3_result(result));
                goto fail_map;
        }

        list_add(&c->link, &r->chunk_list.head);

        *c_out = c;
        return 0;

fail_map:
        kfree(c);
fail_alloc:
        *c_out = NULL;
        DBG(" <- %s:%d\n", __func__, __LINE__);
        return result;
}
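
/*
 * The BUG_ON above documents that callers currently always pass
 * CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW | CBE_IOPTE_M
 * (0xf800000000000000) -- the flag combination used by the linear
 * mapping setup in dma_sb_region_create_linear() below.
 */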

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
                              unsigned long len, struct dma_chunk **c_out,
                              u64 iopte_flag)
{
        int result;
        struct dma_chunk *c, *last;
        int iopage, pages;
        unsigned long offset;

        DBG("%s: phy=%#lx, lpar=%#lx, len=%#lx\n", __func__,
            phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
        c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c) {
                result = -ENOMEM;
                goto fail_alloc;
        }

        c->region = r;
        c->len = len;
        c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
        /* allocate IO address */
        if (list_empty(&r->chunk_list.head)) {
                /* first one */
                c->bus_addr = r->bus_addr;
        } else {
                /* derive from last bus addr */
                last = list_entry(r->chunk_list.head.next,
                                  struct dma_chunk, link);
                c->bus_addr = last->bus_addr + last->len;
                DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
                    last->bus_addr, last->len);
        }

        /* FIXME: check whether length exceeds region size */

        /* build ioptes for the area */
        pages = len >> r->page_size;
        DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
            r->page_size, r->len, pages, iopte_flag);
        for (iopage = 0; iopage < pages; iopage++) {
                offset = (1 << r->page_size) * iopage;
                result = lv1_put_iopte(0,
                                       c->bus_addr + offset,
                                       c->lpar_addr + offset,
                                       r->ioid,
                                       iopte_flag);
                if (result) {
                        pr_warn("%s:%d: lv1_put_iopte failed: %s\n",
                                __func__, __LINE__, ps3_result(result));
                        goto fail_map;
                }
                DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
                    iopage, c->bus_addr + offset, c->lpar_addr + offset,
                    r->ioid);
        }

        /* be sure that last allocated one is inserted at head */
        list_add(&c->link, &r->chunk_list.head);

        *c_out = c;
        DBG("%s: end\n", __func__);
        return 0;

fail_map:
        /* invalidate each iopte installed so far */
        for (iopage--; 0 <= iopage; iopage--) {
                offset = (1 << r->page_size) * iopage;
                lv1_put_iopte(0,
                              c->bus_addr + offset,
                              c->lpar_addr + offset,
                              r->ioid,
                              0);
        }
        kfree(c);
fail_alloc:
        *c_out = NULL;
        return result;
}

/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
        int result;
        u64 bus_addr;

        DBG(" -> %s:%d:\n", __func__, __LINE__);

        BUG_ON(!r);

        if (!r->dev->bus_id) {
                pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
                        r->dev->bus_id, r->dev->dev_id);
                return 0;
        }

        DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
            __LINE__, r->len, r->page_size, r->offset);

        BUG_ON(!r->len);
        BUG_ON(!r->page_size);
        BUG_ON(!r->region_ops);

        INIT_LIST_HEAD(&r->chunk_list.head);
        spin_lock_init(&r->chunk_list.lock);

        result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
                roundup_pow_of_two(r->len), r->page_size, r->region_type,
                &bus_addr);
        r->bus_addr = bus_addr;

        if (result) {
                DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
                    __func__, __LINE__, ps3_result(result));
                r->len = r->bus_addr = 0;
        }

        return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
        int result;
        u64 bus_addr;

        INIT_LIST_HEAD(&r->chunk_list.head);
        spin_lock_init(&r->chunk_list.lock);

        result = lv1_allocate_io_segment(0,
                                         r->len,
                                         r->page_size,
                                         &bus_addr);
        r->bus_addr = bus_addr;
        if (result) {
                DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
                    __func__, __LINE__, ps3_result(result));
                r->len = r->bus_addr = 0;
        }
        DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
            r->len, r->page_size, r->bus_addr);
        return result;
}

/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
        int result;
        struct dma_chunk *c;
        struct dma_chunk *tmp;

        BUG_ON(!r);

        if (!r->dev->bus_id) {
                pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
                        r->dev->bus_id, r->dev->dev_id);
                return 0;
        }

        list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
                list_del(&c->link);
                dma_sb_free_chunk(c);
        }

        result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
                r->bus_addr);

        if (result)
                DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
                    __func__, __LINE__, ps3_result(result));

        r->bus_addr = 0;

        return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
        int result;
        struct dma_chunk *c, *n;

        DBG("%s: start\n", __func__);
        list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
                list_del(&c->link);
                dma_ioc0_free_chunk(c);
        }

        result = lv1_release_io_segment(0, r->bus_addr);

        if (result)
                DBG("%s:%d: lv1_release_io_segment failed: %s\n",
                    __func__, __LINE__, ps3_result(result));

        r->bus_addr = 0;
        DBG("%s: end\n", __func__);

        return result;
}

/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        int result;
        unsigned long flags;
        struct dma_chunk *c;
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
        unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
                                          1 << r->page_size);
        *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

        if (!USE_DYNAMIC_DMA) {
                unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
                DBG(" -> %s:%d\n", __func__, __LINE__);
                DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
                    virt_addr);
                DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
                    phys_addr);
                DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
                    lpar_addr);
                DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
                DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__,
                    *bus_addr, len);
        }

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, *bus_addr, len);

        if (c) {
                DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
                dma_dump_chunk(c);
                c->usage_count++;
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return 0;
        }

        result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

        if (result) {
                *bus_addr = 0;
                DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
                    __func__, __LINE__, result);
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return result;
        }

        c->usage_count = 1;

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
                             unsigned long len, dma_addr_t *bus_addr,
                             u64 iopte_flag)
{
        int result;
        unsigned long flags;
        struct dma_chunk *c;
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
        unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
                                          1 << r->page_size);

        DBG("%s: vaddr=%#lx, len=%#lx\n", __func__,
            virt_addr, len);
        DBG("%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
            phys_addr, aligned_phys, aligned_len);

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

        if (c) {
                /* FIXME */
                BUG();
                *bus_addr = c->bus_addr + phys_addr - aligned_phys;
                c->usage_count++;
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return 0;
        }

        result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
                                    iopte_flag);

        if (result) {
                *bus_addr = 0;
                DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
                    __func__, __LINE__, result);
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return result;
        }
        *bus_addr = c->bus_addr + phys_addr - aligned_phys;
        DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
            virt_addr, phys_addr, aligned_phys, *bus_addr);
        c->usage_count = 1;

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return result;
}

/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
        unsigned long len)
{
        unsigned long flags;
        struct dma_chunk *c;

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, bus_addr, len);

        if (!c) {
                unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
                unsigned long aligned_len = ALIGN(len + bus_addr
                        - aligned_bus, 1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
                    __func__, __LINE__, bus_addr);
                DBG("%s:%d: not found: len %lxh\n",
                    __func__, __LINE__, len);
                DBG("%s:%d: not found: aligned_bus %lxh\n",
                    __func__, __LINE__, aligned_bus);
                DBG("%s:%d: not found: aligned_len %lxh\n",
                    __func__, __LINE__, aligned_len);
                BUG();
        }

        c->usage_count--;

        if (!c->usage_count) {
                list_del(&c->link);
                dma_sb_free_chunk(c);
        }

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
        dma_addr_t bus_addr, unsigned long len)
{
        unsigned long flags;
        struct dma_chunk *c;

        DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, bus_addr, len);

        if (!c) {
                unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
                unsigned long aligned_len = ALIGN(len + bus_addr
                        - aligned_bus,
                        1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
                    __func__, __LINE__, bus_addr);
                DBG("%s:%d: not found: len %lxh\n",
                    __func__, __LINE__, len);
                DBG("%s:%d: not found: aligned_bus %lxh\n",
                    __func__, __LINE__, aligned_bus);
                DBG("%s:%d: not found: aligned_len %lxh\n",
                    __func__, __LINE__, aligned_len);
                BUG();
        }

        c->usage_count--;

        if (!c->usage_count) {
                list_del(&c->link);
                dma_ioc0_free_chunk(c);
        }

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        DBG("%s: end\n", __func__);
        return 0;
}

/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
        int result;
        unsigned long virt_addr, len;
        dma_addr_t tmp;

        if (r->len > 16*1024*1024) {    /* FIXME: need proper fix */
                /* force 16M dma pages for linear mapping */
                if (r->page_size != PS3_DMA_16M) {
                        pr_info("%s:%d: forcing 16M pages for linear map\n",
                                __func__, __LINE__);
                        r->page_size = PS3_DMA_16M;
                        r->len = ALIGN(r->len, 1 << r->page_size);
                }
        }

        result = dma_sb_region_create(r);
        BUG_ON(result);

        if (r->offset < map.rm.size) {
                /* Map (part of) 1st RAM chunk */
                virt_addr = map.rm.base + r->offset;
                len = map.rm.size - r->offset;
                if (len > r->len)
                        len = r->len;
                result = dma_sb_map_area(r, virt_addr, len, &tmp,
                        CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
                        CBE_IOPTE_M);
                BUG_ON(result);
        }

        if (r->offset + r->len > map.rm.size) {
                /* Map (part of) 2nd RAM chunk */
                virt_addr = map.rm.size;
                len = r->len;
                if (r->offset >= map.rm.size)
                        virt_addr += r->offset - map.rm.size;
                else
                        len -= map.rm.size - r->offset;
                result = dma_sb_map_area(r, virt_addr, len, &tmp,
                        CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
                        CBE_IOPTE_M);
                BUG_ON(result);
        }

        return result;
}

/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
        int result;
        dma_addr_t bus_addr;
        unsigned long len, lpar_addr;

        if (r->offset < map.rm.size) {
                /* Unmap (part of) 1st RAM chunk */
                lpar_addr = map.rm.base + r->offset;
                len = map.rm.size - r->offset;
                if (len > r->len)
                        len = r->len;
                bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
                result = dma_sb_unmap_area(r, bus_addr, len);
                BUG_ON(result);
        }

        if (r->offset + r->len > map.rm.size) {
                /* Unmap (part of) 2nd RAM chunk */
                lpar_addr = map.r1.base;
                len = r->len;
                if (r->offset >= map.rm.size)
                        lpar_addr += r->offset - map.rm.size;
                else
                        len -= map.rm.size - r->offset;
                bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
                result = dma_sb_unmap_area(r, bus_addr, len);
                BUG_ON(result);
        }

        result = dma_sb_region_free(r);
        BUG_ON(result);

        return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address.  Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
        unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
        return 0;
}

/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing.  Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
        dma_addr_t bus_addr, unsigned long len)
{
        return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
        .create = dma_sb_region_create,
        .free = dma_sb_region_free,
        .map = dma_sb_map_area,
        .unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
        .create = dma_sb_region_create_linear,
        .free = dma_sb_region_free_linear,
        .map = dma_sb_map_area_linear,
        .unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
        .create = dma_ioc0_region_create,
        .free = dma_ioc0_region_free,
        .map = dma_ioc0_map_area,
        .unmap = dma_ioc0_unmap_area
};
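
/*
 * Three strategies behind one interface: sb devices use dynamic
 * per-mapping HV calls when CONFIG_PS3_DYNAMIC_DMA is enabled and a
 * one-shot linear mapping of all RAM otherwise, while ioc0 devices
 * always manage their ioptes directly.  ps3_dma_region_init() below
 * selects the appropriate ops table.
 */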

int ps3_dma_region_init(struct ps3_system_bus_device *dev,
        struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
        enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
        unsigned long lpar_addr;
        int result;

        lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

        r->dev = dev;
        r->page_size = page_size;
        r->region_type = region_type;
        r->offset = lpar_addr;
        if (r->offset >= map.rm.size)
                r->offset -= map.r1.offset;
        r->len = len ? len : ALIGN(map.total, 1 << r->page_size);

        dev->core.dma_mask = &r->dma_mask;

        result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));

        if (result < 0) {
                dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
                        __func__, __LINE__, result);
                return result;
        }

        switch (dev->dev_type) {
        case PS3_DEVICE_TYPE_SB:
                r->region_ops = (USE_DYNAMIC_DMA)
                        ? &ps3_dma_sb_region_ops
                        : &ps3_dma_sb_region_linear_ops;
                break;
        case PS3_DEVICE_TYPE_IOC0:
                r->region_ops = &ps3_dma_ioc0_region_ops;
                break;
        default:
                BUG();
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);

int ps3_dma_region_create(struct ps3_dma_region *r)
{
        BUG_ON(!r);
        BUG_ON(!r->region_ops);
        BUG_ON(!r->region_ops->create);
        return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
        BUG_ON(!r);
        BUG_ON(!r->region_ops);
        BUG_ON(!r->region_ops->free);
        return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
        unsigned long len)
{
        return r->region_ops->unmap(r, bus_addr, len);
}
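
/*
 * Typical driver usage (illustrative sketch; "dev", "buf", "len" and the
 * chosen page size and flags are hypothetical, not taken from this file):
 *
 *      struct ps3_dma_region r;
 *      dma_addr_t bus_addr;
 *
 *      ps3_dma_region_init(dev, &r, PS3_DMA_64K, PS3_DMA_OTHER, NULL, 0);
 *      ps3_dma_region_create(&r);
 *      ps3_dma_map(&r, (unsigned long)buf, len, &bus_addr,
 *                  CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M);
 *      ... device dma using bus_addr ...
 *      ps3_dma_unmap(&r, bus_addr, len);
 *      ps3_dma_region_free(&r);
 */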

/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
        int result;

        DBG(" -> %s:%d\n", __func__, __LINE__);

        result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
                        &map.total);

        if (result)
                panic("ps3_repository_read_mm_info() failed");

        map.rm.offset = map.rm.base;
        map.vas_id = map.htab_size = 0;

        /* this implementation assumes map.rm.base is zero */

        BUG_ON(map.rm.base);
        BUG_ON(!map.rm.size);

        /* Check if we got the highmem region from an earlier boot step */

        if (ps3_mm_get_repository_highmem(&map.r1)) {
                result = ps3_mm_region_create(&map.r1, map.total - map.rm.size);

                if (!result)
                        ps3_mm_set_repository_highmem(&map.r1);
        }

        /* correct map.total for the real total amount of memory we use */
        map.total = map.rm.size + map.r1.size;

        if (!map.r1.size) {
                DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
        } else {
                DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
                    __func__, __LINE__, map.rm.size,
                    map.total - map.rm.size);
                memblock_add(map.rm.size, map.total - map.rm.size);
        }

        DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 *
 * called during kexec sequence with MMU off.
 */

notrace void ps3_mm_shutdown(void)
{
        ps3_mm_region_destroy(&map.r1);
}