GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/unicore32/mm/init.c
/*
 * linux/arch/unicore32/mm/init.c
 *
 * Copyright (C) 2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-mapping.h>

#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <mach/map.h>

#include "mm.h"
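
/*
 * Default initrd placement: 16MB (0x01000000) into physical memory,
 * 8MB long. Both values can be overridden at boot with the
 * "initrd=<start>,<size>" command-line parameter, parsed by
 * early_initrd() below.
 */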
static unsigned long phys_initrd_start __initdata = 0x01000000;
static unsigned long phys_initrd_size __initdata = SZ_8M;

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map. It is populated by uc32_add_memory().
 */
struct meminfo meminfo;
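
/*
 * Walk every registered memory bank and print a summary of page
 * usage (reserved, swap-cached, slab, free and shared pages).
 */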
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk(KERN_DEFAULT "Mem-info:\n");
	show_free_areas(filter);

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk(KERN_DEFAULT "%d pages of RAM\n", total);
	printk(KERN_DEFAULT "%d free pages\n", free);
	printk(KERN_DEFAULT "%d reserved pages\n", reserved);
	printk(KERN_DEFAULT "%d slab pages\n", slab);
	printk(KERN_DEFAULT "%d pages shared\n", shared);
	printk(KERN_DEFAULT "%d pages swap cached\n", cached);
}
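
/*
 * Find the lowest and highest page frame numbers spanned by the
 * memory banks. *max_low tracks the end of lowmem only; highmem
 * banks extend *max_high but are skipped for *max_low.
 */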
static void __init find_limits(unsigned long *min, unsigned long *max_low,
	unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}
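
/*
 * Bring up the bootmem allocator for the lowmem PFN range and seed
 * it from memblock: free memory regions are released to bootmem and
 * memblock reservations are re-reserved in bootmem.
 */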
static void __init uc32_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page. This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
		__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
			(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}
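
/*
 * Hand the memory over to the page allocator: compute per-zone sizes
 * and hole sizes from the memblock regions, then let
 * free_area_init_node() build the zone lists for node 0.
 */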
static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined. If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;

	/*
	 * Calculate the size of the holes.
	 * holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(zone_size, zhole_size);

	free_area_init_node(0, zone_size, min, zhole_size);
}
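
/* A PFN is valid iff its physical address lies in a memblock memory region. */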
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
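
/*
 * Stub: this port has no sparsemem sections to mark present, so
 * there is nothing to do here.
 */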
static void uc32_memory_present(void)
{
}
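
/* sort() comparator: order memory banks by ascending start PFN. */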
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
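
/*
 * Register all memory banks with memblock, then reserve the ranges
 * that must survive early allocation: the kernel image, the initrd
 * (when configured) and whatever uc32_mm_memblock_reserve() claims.
 */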
void __init uc32_memblock_init(struct meminfo *mi)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
		meminfo_cmp, NULL);

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(_text), _end - _text);

#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	uc32_mm_memblock_reserve();

	memblock_analyze();
	memblock_dump_all();
}
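
/*
 * Top-level early memory setup: find the PFN limits, initialize
 * bootmem, bring up swiotlb/sparsemem where configured, populate the
 * zones and record the lowmem/highmem page counts.
 */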
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	uc32_bootmem_init(min, max_low);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(1);
#endif
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	uc32_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	uc32_bootmem_free(min, max_low, max_high);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block. If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}
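
/*
 * Give the pages in [pfn, end) back to the buddy allocator: clear
 * PageReserved, reset the refcount and free each page. Returns the
 * number of pages released and logs the amount freed in KiB.
 */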
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}
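
/*
 * Free the struct page entries backing the PFN hole
 * [start_pfn, end_pfn); only whole pages of mem_map are released.
 */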
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in uc32_memblock_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT - 10),
		free_pages << (PAGE_SHIFT - 10),
		reserved_pages << (PAGE_SHIFT - 10),
		totalhigh_pages << (PAGE_SHIFT - 10));

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
		"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%p - 0x%p   (%4d kB)\n"
		"      .text : 0x%p - 0x%p   (%4d kB)\n"
		"      .data : 0x%p - 0x%p   (%4d kB)\n",

		VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
		DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
		VMALLOC_START, VMALLOC_END,
		DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
		PAGE_OFFSET, (unsigned long)high_memory,
		DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
		MODULES_VADDR, MODULES_END,
		DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),

		__init_begin, __init_end,
		DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
		_stext, _etext,
		DIV_ROUND_UP((_etext - _stext), SZ_1K),
		_sdata, _edata,
		DIV_ROUND_UP((_edata - _sdata), SZ_1K));

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
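
/* Free the memory occupied by the __init sections once boot is complete. */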
void free_initmem(void)
{
	totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
		__phys_to_pfn(__pa(__init_end)),
		"init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;
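
/*
 * Free the initrd pages unless "keepinitrd" was given on the kernel
 * command line (see keepinitrd_setup() below).
 */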
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
			__phys_to_pfn(__pa(end)),
			"initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif