GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/powerpc/platforms/ps3/mm.c
/*
 * PS3 address space management.
 *
 * Copyright (C) 2006 Sony Computer Entertainment Inc.
 * Copyright 2006 Sony Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
        USE_DYNAMIC_DMA = 1,
#else
        USE_DYNAMIC_DMA = 0,
#endif
};

enum {
        PAGE_SHIFT_4K = 12U,
        PAGE_SHIFT_64K = 16U,
        PAGE_SHIFT_16M = 24U,
};

static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
        return (a << 56) | (b << 48);
}
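
/*
 * For example, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K) packs the
 * two shifts into the most significant bytes of the u64 passed to
 * lv1_construct_virtual_address_space() below:
 * (24UL << 56) | (16UL << 48) = 0x1810000000000000UL.
 */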

enum {
        ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
        ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
        HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
        HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 */

struct mem_region {
        u64 base;
        u64 size;
        unsigned long offset;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
        u64 total;
        u64 vas_id;
        u64 htab_size;
        struct mem_region rm;
        struct mem_region r1;
};

#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
        const char *func, int line)
{
        DBG("%s:%d: map.total = %llxh\n", func, line, m->total);
        DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size);
        DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id);
        DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
        DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base);
        DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
        DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
        BUG_ON(is_kernel_addr(phys_addr));
        return (phys_addr < map.rm.size || phys_addr >= map.total)
                ? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
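
/*
 * Worked example with hypothetical sizes (not values from the original
 * source): if map.rm.size = 0x8000000 and map.r1.offset = 0x10000000,
 * then phys_addr 0x1000000 (inside the real mode region) is returned
 * unchanged, while phys_addr 0x9000000 (inside the hotplug region)
 * translates to 0x9000000 + 0x10000000 = 0x19000000 in the HV address
 * space.
 */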

/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long *htab_size)
{
        int result;
        u64 start_address;
        u64 size;
        u64 access_right;
        u64 max_page_size;
        u64 flags;

        result = lv1_query_logical_partition_address_region_info(0,
                &start_address, &size, &access_right, &max_page_size,
                &flags);

        if (result) {
                DBG("%s:%d: lv1_query_logical_partition_address_region_info "
                        "failed: %s\n", __func__, __LINE__,
                        ps3_result(result));
                goto fail;
        }

        if (max_page_size < PAGE_SHIFT_16M) {
                DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
                        max_page_size);
                goto fail;
        }

        BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
        BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

        result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
                2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
                &map.vas_id, &map.htab_size);

        if (result) {
                DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail;
        }

        result = lv1_select_virtual_address_space(map.vas_id);

        if (result) {
                DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail;
        }

        *htab_size = map.htab_size;

        debug_dump_map(&map);

        return;

fail:
        panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the virtual address space
 */

void ps3_mm_vas_destroy(void)
{
        int result;

        DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);

        if (map.vas_id) {
                result = lv1_select_virtual_address_space(0);
                BUG_ON(result);
                result = lv1_destruct_virtual_address_space(map.vas_id);
                BUG_ON(result);
                map.vas_id = 0;
        }
}

/*============================================================================*/
/* memory hotplug routines */
/*============================================================================*/

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
        int result;
        u64 muid;

        r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

        DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
        DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
        DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
                size - r->size, (size - r->size) / 1024 / 1024);

        if (r->size == 0) {
                DBG("%s:%d: size == 0\n", __func__, __LINE__);
                result = -1;
                goto zero_region;
        }

        result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
                ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

        if (result || r->base < map.rm.size) {
                DBG("%s:%d: lv1_allocate_memory failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto zero_region;
        }

        r->offset = r->base - map.rm.size;
        return result;

zero_region:
        r->size = r->base = r->offset = 0;
        return result;
}
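
/*
 * Sizing example (hypothetical request, not from the original source):
 * _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M) keeps only whole 16MB large
 * pages, so a requested size of 0xc800000 (200MB) yields
 * r->size = 0xc000000 (192MB), and the 8MB remainder shows up as the
 * "difference" in the DBG trace above.
 */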

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
        int result;

        DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
        if (r->base) {
                result = lv1_release_memory(r->base);
                BUG_ON(result);
                r->size = r->base = r->offset = 0;
                map.total = map.rm.size;
        }
}

/**
 * ps3_mm_add_memory - hot add memory
 */

static int __init ps3_mm_add_memory(void)
{
        int result;
        unsigned long start_addr;
        unsigned long start_pfn;
        unsigned long nr_pages;

        if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
                return -ENODEV;

        BUG_ON(!mem_init_done);

        start_addr = map.rm.size;
        start_pfn = start_addr >> PAGE_SHIFT;
        nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
                __func__, __LINE__, start_addr, start_pfn, nr_pages);

        result = add_memory(0, start_addr, map.r1.size);

        if (result) {
                pr_err("%s:%d: add_memory failed: (%d)\n",
                        __func__, __LINE__, result);
                return result;
        }

        memblock_add(start_addr, map.r1.size);
        memblock_analyze();

        result = online_pages(start_pfn, nr_pages);

        if (result)
                pr_err("%s:%d: online_pages failed: (%d)\n",
                        __func__, __LINE__, result);

        return result;
}

device_initcall(ps3_mm_add_memory);

/*============================================================================*/
/* dma routines */
/*============================================================================*/

/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
        unsigned long lpar_addr)
{
        if (lpar_addr >= map.rm.size)
                lpar_addr -= map.r1.offset;
        BUG_ON(lpar_addr < r->offset);
        BUG_ON(lpar_addr >= r->offset + r->len);
        return r->bus_addr + lpar_addr - r->offset;
}
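
/*
 * Note that this composes the inverse of ps3_mm_phys_to_lpar() with the
 * region's bus mapping: lpar addresses at or above map.rm.size are first
 * pulled back by map.r1.offset into the contiguous "linux physical"
 * view, then rebased from the region offset onto r->bus_addr.
 */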

#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
        const char *func, int line)
{
        DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
                r->dev->dev_id);
        DBG("%s:%d: page_size %u\n", func, line, r->page_size);
        DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
        DBG("%s:%d: len %lxh\n", func, line, r->len);
        DBG("%s:%d: offset %lxh\n", func, line, r->offset);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure. This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
        struct ps3_dma_region *region;
        unsigned long lpar_addr;
        unsigned long bus_addr;
        unsigned long len;
        struct list_head link;
        unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
        int line)
{
        DBG("%s:%d: r.dev %llu:%llu\n", func, line,
                c->region->dev->bus_id, c->region->dev->dev_id);
        DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
        DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
        DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
        DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
        DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
        DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
        DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}

static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
        unsigned long bus_addr, unsigned long len)
{
        struct dma_chunk *c;
        unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
        unsigned long aligned_len = _ALIGN_UP(len + bus_addr - aligned_bus,
                1 << r->page_size);

        list_for_each_entry(c, &r->chunk_list.head, link) {
                /* intersection */
                if (aligned_bus >= c->bus_addr &&
                        aligned_bus + aligned_len <= c->bus_addr + c->len)
                        return c;

                /* below */
                if (aligned_bus + aligned_len <= c->bus_addr)
                        continue;

                /* above */
                if (aligned_bus >= c->bus_addr + c->len)
                        continue;

                /* we don't handle the multi-chunk case for now */
                dma_dump_chunk(c);
                BUG();
        }
        return NULL;
}

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
        unsigned long lpar_addr, unsigned long len)
{
        struct dma_chunk *c;
        unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
        unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
                1 << r->page_size);

        list_for_each_entry(c, &r->chunk_list.head, link) {
                /* intersection */
                if (c->lpar_addr <= aligned_lpar &&
                        aligned_lpar < c->lpar_addr + c->len) {
                        if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
                                return c;
                        else {
                                dma_dump_chunk(c);
                                BUG();
                        }
                }
                /* below */
                if (aligned_lpar + aligned_len <= c->lpar_addr) {
                        continue;
                }
                /* above */
                if (c->lpar_addr + c->len <= aligned_lpar) {
                        continue;
                }
        }
        return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
        int result = 0;

        if (c->bus_addr) {
                result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
                        c->region->dev->dev_id, c->bus_addr, c->len);
                BUG_ON(result);
        }

        kfree(c);
        return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
        int result = 0;
        int iopage;
        unsigned long offset;
        struct ps3_dma_region *r = c->region;

        DBG("%s:start\n", __func__);
        for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
                offset = (1 << r->page_size) * iopage;
                /* put INVALID entry */
                result = lv1_put_iopte(0,
                        c->bus_addr + offset,
                        c->lpar_addr + offset,
                        r->ioid,
                        0);
                DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
                        c->bus_addr + offset,
                        c->lpar_addr + offset,
                        r->ioid);

                if (result) {
                        DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
                                __LINE__, ps3_result(result));
                }
        }
        kfree(c);
        DBG("%s:end\n", __func__);
        return result;
}

/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
        unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
        int result;
        struct dma_chunk *c;

        c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

        if (!c) {
                result = -ENOMEM;
                goto fail_alloc;
        }

        c->region = r;
        c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
        c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
        c->len = len;

        BUG_ON(iopte_flag != 0xf800000000000000UL);
        result = lv1_map_device_dma_region(c->region->dev->bus_id,
                c->region->dev->dev_id, c->lpar_addr,
                c->bus_addr, c->len, iopte_flag);
        if (result) {
                DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                goto fail_map;
        }

        list_add(&c->link, &r->chunk_list.head);

        *c_out = c;
        return 0;

fail_map:
        kfree(c);
fail_alloc:
        *c_out = NULL;
        DBG(" <- %s:%d\n", __func__, __LINE__);
        return result;
}

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
        unsigned long len, struct dma_chunk **c_out,
        u64 iopte_flag)
{
        int result;
        struct dma_chunk *c, *last;
        int iopage, pages;
        unsigned long offset;

        DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__,
                phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
        c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

        if (!c) {
                result = -ENOMEM;
                goto fail_alloc;
        }

        c->region = r;
        c->len = len;
        c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
        /* allocate IO address */
        if (list_empty(&r->chunk_list.head)) {
                /* first one */
                c->bus_addr = r->bus_addr;
        } else {
                /* derive from last bus addr */
                last = list_entry(r->chunk_list.head.next,
                        struct dma_chunk, link);
                c->bus_addr = last->bus_addr + last->len;
                DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
                        last->bus_addr, last->len);
        }

        /* FIXME: check whether length exceeds region size */

        /* build ioptes for the area */
        pages = len >> r->page_size;
        DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
                r->page_size, r->len, pages, iopte_flag);
        for (iopage = 0; iopage < pages; iopage++) {
                offset = (1 << r->page_size) * iopage;
                result = lv1_put_iopte(0,
                        c->bus_addr + offset,
                        c->lpar_addr + offset,
                        r->ioid,
                        iopte_flag);
                if (result) {
                        pr_warning("%s:%d: lv1_put_iopte failed: %s\n",
                                __func__, __LINE__, ps3_result(result));
                        goto fail_map;
                }
                DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
                        iopage, c->bus_addr + offset, c->lpar_addr + offset,
                        r->ioid);
        }

        /* be sure that last allocated one is inserted at head */
        list_add(&c->link, &r->chunk_list.head);

        *c_out = c;
        DBG("%s: end\n", __func__);
        return 0;

fail_map:
        for (iopage--; 0 <= iopage; iopage--) {
                lv1_put_iopte(0,
                        c->bus_addr + offset,
                        c->lpar_addr + offset,
                        r->ioid,
                        0);
        }
        kfree(c);
fail_alloc:
        *c_out = NULL;
        return result;
}

/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
        int result;
        u64 bus_addr;

        DBG(" -> %s:%d:\n", __func__, __LINE__);

        BUG_ON(!r);

        if (!r->dev->bus_id) {
                pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
                        r->dev->bus_id, r->dev->dev_id);
                return 0;
        }

        DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
                __LINE__, r->len, r->page_size, r->offset);

        BUG_ON(!r->len);
        BUG_ON(!r->page_size);
        BUG_ON(!r->region_ops);

        INIT_LIST_HEAD(&r->chunk_list.head);
        spin_lock_init(&r->chunk_list.lock);

        result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
                roundup_pow_of_two(r->len), r->page_size, r->region_type,
                &bus_addr);
        r->bus_addr = bus_addr;

        if (result) {
                DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                r->len = r->bus_addr = 0;
        }

        return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
        int result;
        u64 bus_addr;

        INIT_LIST_HEAD(&r->chunk_list.head);
        spin_lock_init(&r->chunk_list.lock);

        result = lv1_allocate_io_segment(0,
                r->len,
                r->page_size,
                &bus_addr);
        r->bus_addr = bus_addr;
        if (result) {
                DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
                        __func__, __LINE__, ps3_result(result));
                r->len = r->bus_addr = 0;
        }
        DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
                r->len, r->page_size, r->bus_addr);
        return result;
}

/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
        int result;
        struct dma_chunk *c;
        struct dma_chunk *tmp;

        BUG_ON(!r);

        if (!r->dev->bus_id) {
                pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
                        r->dev->bus_id, r->dev->dev_id);
                return 0;
        }

        list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
                list_del(&c->link);
                dma_sb_free_chunk(c);
        }

        result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
                r->bus_addr);

        if (result)
                DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
                        __func__, __LINE__, ps3_result(result));

        r->bus_addr = 0;

        return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
        int result;
        struct dma_chunk *c, *n;

        DBG("%s: start\n", __func__);
        list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
                list_del(&c->link);
                dma_ioc0_free_chunk(c);
        }

        result = lv1_release_io_segment(0, r->bus_addr);

        if (result)
                DBG("%s:%d: lv1_release_io_segment failed: %s\n",
                        __func__, __LINE__, ps3_result(result));

        r->bus_addr = 0;
        DBG("%s: end\n", __func__);

        return result;
}

/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        int result;
        unsigned long flags;
        struct dma_chunk *c;
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
        unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
                1 << r->page_size);
        *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

        if (!USE_DYNAMIC_DMA) {
                unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
                DBG(" -> %s:%d\n", __func__, __LINE__);
                DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
                        virt_addr);
                DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
                        phys_addr);
                DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
                        lpar_addr);
                DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
                DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__,
                        *bus_addr, len);
        }

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, *bus_addr, len);

        if (c) {
                DBG("%s:%d: reusing mapped chunk\n", __func__, __LINE__);
                dma_dump_chunk(c);
                c->usage_count++;
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return 0;
        }

        result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

        if (result) {
                *bus_addr = 0;
                DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
                        __func__, __LINE__, result);
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return result;
        }

        c->usage_count = 1;

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        int result;
        unsigned long flags;
        struct dma_chunk *c;
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
        unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
                1 << r->page_size);

        DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
                virt_addr, len);
        DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
                phys_addr, aligned_phys, aligned_len);

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

        if (c) {
                /* FIXME */
                BUG();
                *bus_addr = c->bus_addr + phys_addr - aligned_phys;
                c->usage_count++;
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return 0;
        }

        result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
                iopte_flag);

        if (result) {
                *bus_addr = 0;
                DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
                        __func__, __LINE__, result);
                spin_unlock_irqrestore(&r->chunk_list.lock, flags);
                return result;
        }
        *bus_addr = c->bus_addr + phys_addr - aligned_phys;
        DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
                virt_addr, phys_addr, aligned_phys, *bus_addr);
        c->usage_count = 1;

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return result;
}

/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
        unsigned long len)
{
        unsigned long flags;
        struct dma_chunk *c;

        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, bus_addr, len);

        if (!c) {
                unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
                unsigned long aligned_len = _ALIGN_UP(len + bus_addr
                        - aligned_bus, 1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
                        __func__, __LINE__, bus_addr);
                DBG("%s:%d: not found: len %lxh\n",
                        __func__, __LINE__, len);
                DBG("%s:%d: not found: aligned_bus %lxh\n",
                        __func__, __LINE__, aligned_bus);
                DBG("%s:%d: not found: aligned_len %lxh\n",
                        __func__, __LINE__, aligned_len);
                BUG();
        }

        c->usage_count--;

        if (!c->usage_count) {
                list_del(&c->link);
                dma_sb_free_chunk(c);
        }

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
        dma_addr_t bus_addr, unsigned long len)
{
        unsigned long flags;
        struct dma_chunk *c;

        DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
        spin_lock_irqsave(&r->chunk_list.lock, flags);
        c = dma_find_chunk(r, bus_addr, len);

        if (!c) {
                unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
                        1 << r->page_size);
                unsigned long aligned_len = _ALIGN_UP(len + bus_addr
                        - aligned_bus,
                        1 << r->page_size);
                DBG("%s:%d: not found: bus_addr %llxh\n",
                        __func__, __LINE__, bus_addr);
                DBG("%s:%d: not found: len %lxh\n",
                        __func__, __LINE__, len);
                DBG("%s:%d: not found: aligned_bus %lxh\n",
                        __func__, __LINE__, aligned_bus);
                DBG("%s:%d: not found: aligned_len %lxh\n",
                        __func__, __LINE__, aligned_len);
                BUG();
        }

        c->usage_count--;

        if (!c->usage_count) {
                list_del(&c->link);
                dma_ioc0_free_chunk(c);
        }

        spin_unlock_irqrestore(&r->chunk_list.lock, flags);
        DBG("%s: end\n", __func__);
        return 0;
}

/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
        int result;
        unsigned long virt_addr, len;
        dma_addr_t tmp;

        if (r->len > 16*1024*1024) { /* FIXME: need proper fix */
                /* force 16M dma pages for linear mapping */
                if (r->page_size != PS3_DMA_16M) {
                        pr_info("%s:%d: forcing 16M pages for linear map\n",
                                __func__, __LINE__);
                        r->page_size = PS3_DMA_16M;
                        r->len = _ALIGN_UP(r->len, 1 << r->page_size);
                }
        }

        result = dma_sb_region_create(r);
        BUG_ON(result);

        if (r->offset < map.rm.size) {
                /* Map (part of) 1st RAM chunk */
                virt_addr = map.rm.base + r->offset;
                len = map.rm.size - r->offset;
                if (len > r->len)
                        len = r->len;
                result = dma_sb_map_area(r, virt_addr, len, &tmp,
                        CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
                        CBE_IOPTE_M);
                BUG_ON(result);
        }

        if (r->offset + r->len > map.rm.size) {
                /* Map (part of) 2nd RAM chunk */
                virt_addr = map.rm.size;
                len = r->len;
                if (r->offset >= map.rm.size)
                        virt_addr += r->offset - map.rm.size;
                else
                        len -= map.rm.size - r->offset;
                result = dma_sb_map_area(r, virt_addr, len, &tmp,
                        CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
                        CBE_IOPTE_M);
                BUG_ON(result);
        }

        return result;
}

/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
        int result;
        dma_addr_t bus_addr;
        unsigned long len, lpar_addr;

        if (r->offset < map.rm.size) {
                /* Unmap (part of) 1st RAM chunk */
                lpar_addr = map.rm.base + r->offset;
                len = map.rm.size - r->offset;
                if (len > r->len)
                        len = r->len;
                bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
                result = dma_sb_unmap_area(r, bus_addr, len);
                BUG_ON(result);
        }

        if (r->offset + r->len > map.rm.size) {
                /* Unmap (part of) 2nd RAM chunk */
                lpar_addr = map.r1.base;
                len = r->len;
                if (r->offset >= map.rm.size)
                        lpar_addr += r->offset - map.rm.size;
                else
                        len -= map.rm.size - r->offset;
                bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
                result = dma_sb_unmap_area(r, bus_addr, len);
                BUG_ON(result);
        }

        result = dma_sb_region_free(r);
        BUG_ON(result);

        return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address. Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
        unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
                : virt_addr;
        *bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
        return 0;
}

/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing. Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
        dma_addr_t bus_addr, unsigned long len)
{
        return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
        .create = dma_sb_region_create,
        .free = dma_sb_region_free,
        .map = dma_sb_map_area,
        .unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
        .create = dma_sb_region_create_linear,
        .free = dma_sb_region_free_linear,
        .map = dma_sb_map_area_linear,
        .unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
        .create = dma_ioc0_region_create,
        .free = dma_ioc0_region_free,
        .map = dma_ioc0_map_area,
        .unmap = dma_ioc0_unmap_area
};

int ps3_dma_region_init(struct ps3_system_bus_device *dev,
        struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
        enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
        unsigned long lpar_addr;

        lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

        r->dev = dev;
        r->page_size = page_size;
        r->region_type = region_type;
        r->offset = lpar_addr;
        if (r->offset >= map.rm.size)
                r->offset -= map.r1.offset;
        r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);

        switch (dev->dev_type) {
        case PS3_DEVICE_TYPE_SB:
                r->region_ops = (USE_DYNAMIC_DMA)
                        ? &ps3_dma_sb_region_ops
                        : &ps3_dma_sb_region_linear_ops;
                break;
        case PS3_DEVICE_TYPE_IOC0:
                r->region_ops = &ps3_dma_ioc0_region_ops;
                break;
        default:
                BUG();
                return -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);

int ps3_dma_region_create(struct ps3_dma_region *r)
{
        BUG_ON(!r);
        BUG_ON(!r->region_ops);
        BUG_ON(!r->region_ops->create);
        return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
        BUG_ON(!r);
        BUG_ON(!r->region_ops);
        BUG_ON(!r->region_ops->free);
        return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
        unsigned long len, dma_addr_t *bus_addr,
        u64 iopte_flag)
{
        return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
        unsigned long len)
{
        return r->region_ops->unmap(r, bus_addr, len);
}
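
/*
 * Typical driver usage of the exported API (a sketch with hypothetical
 * device and buffer variables, not taken from the original file):
 *
 *	struct ps3_dma_region r;
 *	dma_addr_t bus_addr;
 *
 *	ps3_dma_region_init(dev, &r, PS3_DMA_4K, PS3_DMA_OTHER, NULL, 0);
 *	ps3_dma_region_create(&r);
 *	ps3_dma_map(&r, (unsigned long)buf, len, &bus_addr,
 *		CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M);
 *	...
 *	ps3_dma_unmap(&r, bus_addr, len);
 *	ps3_dma_region_free(&r);
 */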

/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
        int result;

        DBG(" -> %s:%d\n", __func__, __LINE__);

        result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
                &map.total);

        if (result)
                panic("ps3_repository_read_mm_info() failed");

        map.rm.offset = map.rm.base;
        map.vas_id = map.htab_size = 0;

        /* this implementation assumes map.rm.base is zero */

        BUG_ON(map.rm.base);
        BUG_ON(!map.rm.size);

        /* arrange to do this in ps3_mm_add_memory */
        ps3_mm_region_create(&map.r1, map.total - map.rm.size);

        /* correct map.total for the real total amount of memory we use */
        map.total = map.rm.size + map.r1.size;

        DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
        ps3_mm_region_destroy(&map.r1);
}