GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/tile/kernel/setup.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>

/* <linux/smp.h> doesn't provide this definition. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif

static inline int ABS(int x) { return x >= 0 ? x : -x; }

/* Chip information */
char chip_model[64] __write_once;

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/* We only create bootmem data on node 0. */
static bootmem_data_t __initdata node0_bdata;

/* Information on the NUMA nodes that we compute early */
unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

static unsigned long __initdata node_percpu[MAX_NUMNODES];

#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];

/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;
#endif

/* Data on which physical memory controller corresponds to which NUMA node */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };

#ifdef CONFIG_HIGHMEM
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
        __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
        __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
#endif

/* Node number as a function of the high PA bits */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
EXPORT_SYMBOL(highbits_to_node);

static unsigned int __initdata maxmem_pfn = -1U;
static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
        [0 ... MAX_NUMNODES-1] = -1U
};
static nodemask_t __initdata isolnodes;

#ifdef CONFIG_PCI
enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
unsigned long __initdata pci_reserve_end_pfn = -1U;
#endif

static int __init setup_maxmem(char *str)
{
        long maxmem_mb;
        if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
            maxmem_mb == 0)
                return -EINVAL;

        maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
                (HPAGE_SHIFT - PAGE_SHIFT);
        pr_info("Forcing RAM used to no more than %dMB\n",
                maxmem_pfn >> (20 - PAGE_SHIFT));
        return 0;
}
early_param("maxmem", setup_maxmem);
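/*
 * Worked example (hypothetical values, assuming HPAGE_SHIFT == 24 and
 * PAGE_SHIFT == 12): "maxmem=1000" is first rounded down to a whole
 * number of 16MB huge pages (1000 >> 4 == 62 huge pages == 992MB), then
 * converted to small-page units (62 << 12 == 253952 pages), so the
 * pr_info() above would report 992MB.
 */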
static int __init setup_maxnodemem(char *str)
{
        char *endp;
        long maxnodemem_mb, node;

        node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
        if (node >= MAX_NUMNODES || *endp != ':' ||
            strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
                return -EINVAL;

        maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
                (HPAGE_SHIFT - PAGE_SHIFT);
        pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
                node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
        return 0;
}
early_param("maxnodemem", setup_maxnodemem);
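/*
 * Example (hypothetical): "maxnodemem=1:512" caps the memory used on
 * node 1 at 512MB, rounded down to whole huge pages as for "maxmem".
 */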
static int __init setup_isolnodes(char *str)
{
        char buf[MAX_NUMNODES * 5];
        if (str == NULL || nodelist_parse(str, isolnodes) != 0)
                return -EINVAL;

        nodelist_scnprintf(buf, sizeof(buf), isolnodes);
        pr_info("Set isolnodes value to '%s'\n", buf);
        return 0;
}
early_param("isolnodes", setup_isolnodes);
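/*
 * Example (hypothetical): "isolnodes=1-2" uses the standard nodelist
 * syntax to isolate nodes 1 and 2 from default kernel use; if every
 * online node were isolated, node 0 is forced back in as a default
 * (see setup_numa_mapping() below).
 */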
#ifdef CONFIG_PCI
static int __init setup_pci_reserve(char *str)
{
        unsigned long mb;

        if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
            mb > 3 * 1024)
                return -EINVAL;

        pci_reserve_mb = mb;
        pr_info("Reserving %dMB for PCIE root complex mappings\n",
                pci_reserve_mb);
        return 0;
}
early_param("pci_reserve", setup_pci_reserve);
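/*
 * Example (hypothetical): "pci_reserve=128" reserves a 128MB window
 * ending just below 4GB for PCIe root complex mappings; setup_arch()
 * converts this to pci_reserve_{start,end}_pfn.
 */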
#endif

#ifndef __tilegx__
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area.
 */
static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;

        VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

        /* See validate_va() for more on this test. */
        if ((long)_VMALLOC_START >= 0)
                early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
                            VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);

        return 0;
}
early_param("vmalloc", parse_vmalloc);
#endif
#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how much of
 * it is mapped there. On controller zero, the first few megabytes are
 * already mapped in as code at MEM_SV_INTRPT, so in principle we could
 * start our data mappings higher up, but for now we don't bother, to avoid
 * additional confusion.
 *
 * One question is whether, on systems with more than 768 MB and
 * controllers of different sizes, to map in a proportionate amount of
 * each one, or to try to map the same amount from each controller.
 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
 * respectively, do we map 256MB from each, or do we map 128 MB, 512
 * MB, and 128 MB respectively?) For now we use a proportionate
 * solution like the latter.
 *
 * The VA/PA mapping demands that we align our decisions at 16 MB
 * boundaries so that we can rapidly convert VA to PA.
 */
static void *__init setup_pa_va_mapping(void)
{
        unsigned long curr_pages = 0;
        unsigned long vaddr = PAGE_OFFSET;
        nodemask_t highonlynodes = isolnodes;
        int i, j;

        memset(pbase_map, -1, sizeof(pbase_map));
        memset(vbase_map, -1, sizeof(vbase_map));

        /* Node zero cannot be isolated for LOWMEM purposes. */
        node_clear(0, highonlynodes);

        /* Count up the number of pages on non-highonlynodes controllers. */
        mappable_physpages = 0;
        for_each_online_node(i) {
                if (!node_isset(i, highonlynodes))
                        mappable_physpages +=
                                node_end_pfn[i] - node_start_pfn[i];
        }

        for_each_online_node(i) {
                unsigned long start = node_start_pfn[i];
                unsigned long end = node_end_pfn[i];
                unsigned long size = end - start;
                unsigned long vaddr_end;

                if (node_isset(i, highonlynodes)) {
                        /* Mark this controller as having no lowmem. */
                        node_lowmem_end_pfn[i] = start;
                        continue;
                }

                curr_pages += size;
                if (mappable_physpages > MAXMEM_PFN) {
                        vaddr_end = PAGE_OFFSET +
                                (((u64)curr_pages * MAXMEM_PFN /
                                  mappable_physpages)
                                 << PAGE_SHIFT);
                } else {
                        vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
                }
                for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
                        unsigned long this_pfn =
                                start + (j << HUGETLB_PAGE_ORDER);
                        pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
                        if (vbase_map[__pfn_to_highbits(this_pfn)] ==
                            (void *)-1)
                                vbase_map[__pfn_to_highbits(this_pfn)] =
                                        (void *)(vaddr & HPAGE_MASK);
                }
                node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
                BUG_ON(node_lowmem_end_pfn[i] > end);
        }

        /* Return highest address of any mapped memory. */
        return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */
/*
 * Register our most important memory mappings with the debug stub.
 *
 * This is up to 4 mappings for lowmem, one mapping per memory
 * controller, plus one for our text segment.
 */
static void __cpuinit store_permanent_mappings(void)
{
        int i;

        for_each_online_node(i) {
                HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
                HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
#else
                HV_PhysAddr high_mapped_pa = node_end_pfn[i];
#endif

                unsigned long pages = high_mapped_pa - node_start_pfn[i];
                HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
                hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
        }

        hv_store_mapping((HV_VirtAddr)_stext,
                         (uint32_t)(_einittext - _stext), 0);
}
/*
 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
 * and node_online_map, doing suitable sanity-checking.
 * Also set min_low_pfn, max_low_pfn, and max_pfn.
 */
static void __init setup_memory(void)
{
        int i, j;
        int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
#ifdef CONFIG_HIGHMEM
        long highmem_pages;
#endif
#ifndef __tilegx__
        int cap;
#endif
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
        long lowmem_pages;
#endif

        /* We are using a char to hold the cpu_2_node[] mapping */
        BUILD_BUG_ON(MAX_NUMNODES > 127);

        /* Discover the ranges of memory available to us */
        for (i = 0; ; ++i) {
                unsigned long start, size, end, highbits;
                HV_PhysAddrRange range = hv_inquire_physical(i);
                if (range.size == 0)
                        break;
#ifdef CONFIG_FLATMEM
                if (i > 0) {
                        pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
#endif
#ifndef __tilegx__
                if ((unsigned long)range.start) {
                        pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
#endif
                if ((range.start & (HPAGE_SIZE-1)) != 0 ||
                    (range.size & (HPAGE_SIZE-1)) != 0) {
                        unsigned long long start_pa = range.start;
                        unsigned long long orig_size = range.size;
                        range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
                        range.size -= (range.start - start_pa);
                        range.size &= HPAGE_MASK;
                        pr_err("Range not hugepage-aligned: %#llx..%#llx:"
                               " now %#llx-%#llx\n",
                               start_pa, start_pa + orig_size,
                               range.start, range.start + range.size);
                }
                highbits = __pa_to_highbits(range.start);
                if (highbits >= NR_PA_HIGHBIT_VALUES) {
                        pr_err("PA high bits too high: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
                if (highbits_seen[highbits]) {
                        pr_err("Range overlaps in high bits: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
                highbits_seen[highbits] = 1;
                if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
                        int max_size = maxnodemem_pfn[i];
                        if (max_size > 0) {
                                pr_err("Maxnodemem reduced node %d to"
                                       " %d pages\n", i, max_size);
                                range.size = PFN_PHYS(max_size);
                        } else {
                                pr_err("Maxnodemem disabled node %d\n", i);
                                continue;
                        }
                }
                if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) {
                        int max_size = maxmem_pfn - num_physpages;
                        if (max_size > 0) {
                                pr_err("Maxmem reduced node %d to %d pages\n",
                                       i, max_size);
                                range.size = PFN_PHYS(max_size);
                        } else {
                                pr_err("Maxmem disabled node %d\n", i);
                                continue;
                        }
                }
                if (i >= MAX_NUMNODES) {
                        pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
                               i, range.start, range.start + range.size);
                        continue;
                }

                start = range.start >> PAGE_SHIFT;
                size = range.size >> PAGE_SHIFT;
                end = start + size;

#ifndef __tilegx__
                if (((HV_PhysAddr)end << PAGE_SHIFT) !=
                    (range.start + range.size)) {
                        pr_err("PAs too high to represent: %#llx..%#llx\n",
                               range.start, range.start + range.size);
                        continue;
                }
#endif
#ifdef CONFIG_PCI
                /*
                 * Blocks that overlap the pci reserved region must
                 * have enough space to hold the maximum percpu data
                 * region at the top of the range. If there isn't
                 * enough space above the reserved region, just
                 * truncate the node.
                 */
                if (start <= pci_reserve_start_pfn &&
                    end > pci_reserve_start_pfn) {
                        unsigned int per_cpu_size =
                                __per_cpu_end - __per_cpu_start;
                        unsigned int percpu_pages =
                                NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
                        if (end < pci_reserve_end_pfn + percpu_pages) {
                                end = pci_reserve_start_pfn;
                                pr_err("PCI mapping region reduced node %d to"
                                       " %ld pages\n", i, end - start);
                        }
                }
#endif

                for (j = __pfn_to_highbits(start);
                     j <= __pfn_to_highbits(end - 1); j++)
                        highbits_to_node[j] = i;

                node_start_pfn[i] = start;
                node_end_pfn[i] = end;
                node_controller[i] = range.controller;
                num_physpages += size;
                max_pfn = end;

                /* Mark node as online */
                node_set(i, node_online_map);
                node_set(i, node_possible_map);
        }

#ifndef __tilegx__
        /*
         * For 4KB pages, mem_map "struct page" data is 1% of the size
         * of the physical memory, so can be quite big (640 MB for
         * four 16G zones). These structures must be mapped in
         * lowmem, and since we currently cap out at about 768 MB,
         * it's impractical to try to use this much address space.
         * For now, arbitrarily cap the amount of physical memory
         * we're willing to use at 8 million pages (32GB of 4KB pages).
         */
        cap = 8 * 1024 * 1024; /* 8 million pages */
        if (num_physpages > cap) {
                int num_nodes = num_online_nodes();
                int cap_each = cap / num_nodes;
                unsigned long dropped_pages = 0;
                for (i = 0; i < num_nodes; ++i) {
                        int size = node_end_pfn[i] - node_start_pfn[i];
                        if (size > cap_each) {
                                dropped_pages += (size - cap_each);
                                node_end_pfn[i] = node_start_pfn[i] + cap_each;
                        }
                }
                num_physpages -= dropped_pages;
                pr_warning("Only using %ldMB memory;"
                           " ignoring %ldMB.\n",
                           num_physpages >> (20 - PAGE_SHIFT),
                           dropped_pages >> (20 - PAGE_SHIFT));
                pr_warning("Consider using a larger page size.\n");
        }
#endif

        /* Heap starts just above the last loaded address. */
        min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET);

#ifdef CONFIG_HIGHMEM
        /* Find where we map lowmem from each controller. */
        high_memory = setup_pa_va_mapping();

        /* Set max_low_pfn based on what node 0 can directly address. */
        max_low_pfn = node_lowmem_end_pfn[0];

        lowmem_pages = (mappable_physpages > MAXMEM_PFN) ?
                MAXMEM_PFN : mappable_physpages;
        highmem_pages = (long) (num_physpages - lowmem_pages);

        pr_notice("%ldMB HIGHMEM available.\n",
                  pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
        pr_notice("%ldMB LOWMEM available.\n",
                  pages_to_mb(lowmem_pages));
#else
        /* Set max_low_pfn based on what node 0 can directly address. */
        max_low_pfn = node_end_pfn[0];

#ifndef __tilegx__
        if (node_end_pfn[0] > MAXMEM_PFN) {
                pr_warning("Only using %ldMB LOWMEM.\n",
                           MAXMEM>>20);
                pr_warning("Use a HIGHMEM enabled kernel.\n");
                max_low_pfn = MAXMEM_PFN;
                max_pfn = MAXMEM_PFN;
                num_physpages = MAXMEM_PFN;
                node_end_pfn[0] = MAXMEM_PFN;
        } else {
                pr_notice("%ldMB memory available.\n",
                          pages_to_mb(node_end_pfn[0]));
        }
        for (i = 1; i < MAX_NUMNODES; ++i) {
                node_start_pfn[i] = 0;
                node_end_pfn[i] = 0;
        }
        high_memory = __va(node_end_pfn[0]);
#else
        lowmem_pages = 0;
        for (i = 0; i < MAX_NUMNODES; ++i) {
                int pages = node_end_pfn[i] - node_start_pfn[i];
                lowmem_pages += pages;
                if (pages)
                        high_memory = pfn_to_kaddr(node_end_pfn[i]);
        }
        pr_notice("%ldMB memory available.\n",
                  pages_to_mb(lowmem_pages));
#endif
#endif
}
static void __init setup_bootmem_allocator(void)
{
        unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;

        /* Provide a node 0 bdata. */
        NODE_DATA(0)->bdata = &node0_bdata;

#ifdef CONFIG_PCI
        /* Don't let boot memory alias the PCI region. */
        last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
#else
        last_alloc_pfn = max_low_pfn;
#endif

        /*
         * Initialize the boot-time allocator (with low memory only):
         * The first argument says where to put the bitmap, and the
         * second says where the end of allocatable memory is.
         */
        bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);

        /*
         * Let the bootmem allocator use all the space we've given it
         * except for its own bitmap.
         */
        first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
        if (first_alloc_pfn >= last_alloc_pfn)
                early_panic("Not enough memory on controller 0 for bootmem\n");

        free_bootmem(PFN_PHYS(first_alloc_pfn),
                     PFN_PHYS(last_alloc_pfn - first_alloc_pfn));

#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end)
                reserve_bootmem(crashk_res.start,
                                crashk_res.end - crashk_res.start + 1, 0);
#endif
}
void *__init alloc_remap(int nid, unsigned long size)
{
        int pages = node_end_pfn[nid] - node_start_pfn[nid];
        void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
        BUG_ON(size != pages * sizeof(struct page));
        memset(map, 0, size);
        return map;
}

static int __init percpu_size(void)
{
        int size = __per_cpu_end - __per_cpu_start;
        size += PERCPU_MODULE_RESERVE;
        size += PERCPU_DYNAMIC_EARLY_SIZE;
        if (size < PCPU_MIN_UNIT_SIZE)
                size = PCPU_MIN_UNIT_SIZE;
        size = roundup(size, PAGE_SIZE);

        /* In several places we assume the per-cpu data fits on a huge page. */
        BUG_ON(kdata_huge && size > HPAGE_SIZE);
        return size;
}

static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
{
        void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
        unsigned long pfn = kaddr_to_pfn(kva);
        BUG_ON(goal && PFN_PHYS(pfn) != goal);
        return pfn;
}
static void __init zone_sizes_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES] = { 0 };
        int size = percpu_size();
        int num_cpus = smp_height * smp_width;
        int i;

        for (i = 0; i < num_cpus; ++i)
                node_percpu[cpu_to_node(i)] += size;

        for_each_online_node(i) {
                unsigned long start = node_start_pfn[i];
                unsigned long end = node_end_pfn[i];
#ifdef CONFIG_HIGHMEM
                unsigned long lowmem_end = node_lowmem_end_pfn[i];
#else
                unsigned long lowmem_end = end;
#endif
                int memmap_size = (end - start) * sizeof(struct page);
                node_free_pfn[i] = start;

                /*
                 * Set aside pages for per-cpu data and the mem_map array.
                 *
                 * Since the per-cpu data requires special homecaching,
                 * if we are in kdata_huge mode, we put it at the end of
                 * the lowmem region. If we're not in kdata_huge mode,
                 * we take the per-cpu pages from the bottom of the
                 * controller, since that avoids fragmenting a huge page
                 * that users might want. We always take the memmap
                 * from the bottom of the controller, since with
                 * kdata_huge that lets it be under a huge TLB entry.
                 *
                 * If the user has requested isolnodes for a controller,
                 * though, there'll be no lowmem, so we just alloc_bootmem
                 * the memmap. There will be no percpu memory either.
                 */
                if (__pfn_to_highbits(start) == 0) {
                        /* In low PAs, allocate via bootmem. */
                        unsigned long goal = 0;
                        node_memmap_pfn[i] =
                                alloc_bootmem_pfn(memmap_size, goal);
                        if (kdata_huge)
                                goal = PFN_PHYS(lowmem_end) - node_percpu[i];
                        if (node_percpu[i])
                                node_percpu_pfn[i] =
                                        alloc_bootmem_pfn(node_percpu[i], goal);
                } else if (node_isset(i, isolnodes)) {
                        node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
                        BUG_ON(node_percpu[i] != 0);
                } else {
                        /* In high PAs, just reserve some pages. */
                        node_memmap_pfn[i] = node_free_pfn[i];
                        node_free_pfn[i] += PFN_UP(memmap_size);
                        if (!kdata_huge) {
                                node_percpu_pfn[i] = node_free_pfn[i];
                                node_free_pfn[i] += PFN_UP(node_percpu[i]);
                        } else {
                                node_percpu_pfn[i] =
                                        lowmem_end - PFN_UP(node_percpu[i]);
                        }
                }

#ifdef CONFIG_HIGHMEM
                if (start > lowmem_end) {
                        zones_size[ZONE_NORMAL] = 0;
                        zones_size[ZONE_HIGHMEM] = end - start;
                } else {
                        zones_size[ZONE_NORMAL] = lowmem_end - start;
                        zones_size[ZONE_HIGHMEM] = end - lowmem_end;
                }
#else
                zones_size[ZONE_NORMAL] = end - start;
#endif

                /*
                 * Everyone shares node 0's bootmem allocator, but
                 * we use alloc_remap(), above, to put the actual
                 * struct page array on the individual controllers,
                 * which is most of the data that we actually care about.
                 * We can't place bootmem allocators on the other
                 * controllers since the bootmem allocator can only
                 * operate on 32-bit physical addresses.
                 */
                NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;

                free_area_init_node(i, zones_size, start, NULL);
                printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
                       PFN_UP(node_percpu[i]));

                /* Track the type of memory on each node */
                if (zones_size[ZONE_NORMAL])
                        node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
                if (end != start)
                        node_set_state(i, N_HIGH_MEMORY);
#endif

                node_set_online(i);
        }
}
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
EXPORT_SYMBOL(node_2_cpu_mask);

/* which node each logical CPU is on */
char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);

/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */
static int __init cpu_to_bound_node(int cpu, struct cpumask *unbound_cpus)
{
        if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus))
                return -1;
        else
                return cpu_to_node(cpu);
}

/* Return number of immediately-adjacent tiles sharing the same NUMA node. */
static int __init node_neighbors(int node, int cpu,
                                 struct cpumask *unbound_cpus)
{
        int neighbors = 0;
        int w = smp_width;
        int h = smp_height;
        int x = cpu % w;
        int y = cpu / w;
        if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node)
                ++neighbors;
        if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node)
                ++neighbors;
        if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node)
                ++neighbors;
        if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node)
                ++neighbors;
        return neighbors;
}

static void __init setup_numa_mapping(void)
{
        int distance[MAX_NUMNODES][NR_CPUS];
        HV_Coord coord;
        int cpu, node, cpus, i, x, y;
        int num_nodes = num_online_nodes();
        struct cpumask unbound_cpus;
        nodemask_t default_nodes;

        cpumask_clear(&unbound_cpus);

        /* Get set of nodes we will use for defaults */
        nodes_andnot(default_nodes, node_online_map, isolnodes);
        if (nodes_empty(default_nodes)) {
                BUG_ON(!node_isset(0, node_online_map));
                pr_err("Forcing NUMA node zero available as a default node\n");
                node_set(0, default_nodes);
        }

        /* Populate the distance[] array */
        memset(distance, -1, sizeof(distance));
        cpu = 0;
        for (coord.y = 0; coord.y < smp_height; ++coord.y) {
                for (coord.x = 0; coord.x < smp_width;
                     ++coord.x, ++cpu) {
                        BUG_ON(cpu >= nr_cpu_ids);
                        if (!cpu_possible(cpu)) {
                                cpu_2_node[cpu] = -1;
                                continue;
                        }
                        for_each_node_mask(node, default_nodes) {
                                HV_MemoryControllerInfo info =
                                        hv_inquire_memory_controller(
                                                coord, node_controller[node]);
                                distance[node][cpu] =
                                        ABS(info.coord.x) + ABS(info.coord.y);
                        }
                        cpumask_set_cpu(cpu, &unbound_cpus);
                }
        }
        cpus = cpu;

        /*
         * Round-robin through the NUMA nodes until all the cpus are
         * assigned. We could be more clever here (e.g. create four
         * sorted linked lists on the same set of cpu nodes, and pull
         * off them in round-robin sequence, removing from all four
         * lists each time) but given the relatively small numbers
         * involved, O(n^2) seems OK for a one-time cost.
         */
        node = first_node(default_nodes);
        while (!cpumask_empty(&unbound_cpus)) {
                int best_cpu = -1;
                int best_distance = INT_MAX;
                for (cpu = 0; cpu < cpus; ++cpu) {
                        if (cpumask_test_cpu(cpu, &unbound_cpus)) {
                                /*
                                 * Compute metric, which is how much
                                 * closer the cpu is to this memory
                                 * controller than the others, shifted
                                 * up, and then the number of
                                 * neighbors already in the node as an
                                 * epsilon adjustment to try to keep
                                 * the nodes compact.
                                 */
                                int d = distance[node][cpu] * num_nodes;
                                for_each_node_mask(i, default_nodes) {
                                        if (i != node)
                                                d -= distance[i][cpu];
                                }
                                d *= 8;  /* allow space for epsilon */
                                d -= node_neighbors(node, cpu, &unbound_cpus);
                                if (d < best_distance) {
                                        best_cpu = cpu;
                                        best_distance = d;
                                }
                        }
                }
                BUG_ON(best_cpu < 0);
                cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
                cpu_2_node[best_cpu] = node;
                cpumask_clear_cpu(best_cpu, &unbound_cpus);
                node = next_node(node, default_nodes);
                if (node == MAX_NUMNODES)
                        node = first_node(default_nodes);
        }

        /* Print out node assignments and set defaults for disabled cpus */
        cpu = 0;
        for (y = 0; y < smp_height; ++y) {
                printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
                for (x = 0; x < smp_width; ++x, ++cpu) {
                        if (cpu_to_node(cpu) < 0) {
                                pr_cont(" -");
                                cpu_2_node[cpu] = first_node(default_nodes);
                        } else {
                                pr_cont(" %d", cpu_to_node(cpu));
                        }
                }
                pr_cont("\n");
        }
}

static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
        int i;

        for_each_online_node(i)
                register_one_node(i);

        for (i = 0; i < smp_height * smp_width; ++i)
                register_cpu(&cpu_devices[i], i);

        return 0;
}

subsys_initcall(topology_init);

#else /* !CONFIG_NUMA */

#define setup_numa_mapping() do { } while (0)

#endif /* CONFIG_NUMA */
/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void __cpuinit setup_cpu(int boot)
{
        /* The boot cpu sets up its permanent mappings much earlier. */
        if (!boot)
                store_permanent_mappings();

        /* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
        arch_local_irq_unmask(INT_DMATLB_MISS);
        arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
        arch_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
        arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

        /*
         * Allow user access to many generic SPRs, like the cycle
         * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
         */
        __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
        /* Static network is not restricted. */
        __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif
#if CHIP_HAS_SN_PROC()
        __insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
        __insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
#endif

        /*
         * Set the MPL for interrupt control 0 & 1 to the corresponding
         * values. This includes access to the SYSTEM_SAVE and EX_CONTEXT
         * SPRs, as well as the interrupt mask.
         */
        __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
        __insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

        /* Initialize IRQ support for this cpu. */
        setup_irq_regs();

#ifdef CONFIG_HARDWALL
        /* Reset the network state on this cpu. */
        reset_network_state();
#endif
}
#ifdef CONFIG_BLK_DEV_INITRD

static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs.cpio.gz";

static int __init setup_initramfs_file(char *str)
{
        if (str == NULL)
                return -EINVAL;
        strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
        set_initramfs_file = 1;

        return 0;
}
early_param("initramfs_file", setup_initramfs_file);
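/*
 * Example (hypothetical): booting with "initramfs_file=test.cpio.gz"
 * makes load_hv_initrd() below look up that name in the hvfs instead
 * of the default "initramfs.cpio.gz".
 */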
/*
 * We look for an additional "initramfs.cpio.gz" file in the hvfs.
 * If there is one, we allocate some memory for it and it will be
 * unpacked to the initramfs after any built-in initramfs_data.
 */
static void __init load_hv_initrd(void)
{
        HV_FS_StatInfo stat;
        int fd, rc;
        void *initrd;

        fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
        if (fd == HV_ENOENT) {
                if (set_initramfs_file)
                        pr_warning("No such hvfs initramfs file '%s'\n",
                                   initramfs_file);
                return;
        }
        BUG_ON(fd < 0);
        stat = hv_fs_fstat(fd);
        BUG_ON(stat.size < 0);
        if (stat.flags & HV_FS_ISDIR) {
                pr_warning("Ignoring hvfs file '%s': it's a directory.\n",
                           initramfs_file);
                return;
        }
        initrd = alloc_bootmem_pages(stat.size);
        rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0);
        if (rc != stat.size) {
                pr_err("Error reading %d bytes from hvfs file '%s': %d\n",
                       stat.size, initramfs_file, rc);
                free_initrd_mem((unsigned long) initrd, stat.size);
                return;
        }
        initrd_start = (unsigned long) initrd;
        initrd_end = initrd_start + stat.size;
}

void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
        free_bootmem(__pa(begin), end - begin);
}

#else
static inline void load_hv_initrd(void) {}
#endif /* CONFIG_BLK_DEV_INITRD */
static void __init validate_hv(void)
{
        /*
         * It may already be too late, but let's check our built-in
         * configuration against what the hypervisor is providing.
         */
        unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
        int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
        int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
        HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
        HV_Topology topology = hv_inquire_topology();
        BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
        if (topology.width != 1 || topology.height != 1) {
                pr_warning("Warning: booting UP kernel on %dx%d grid;"
                           " will ignore all but first tile.\n",
                           topology.width, topology.height);
        }
#endif

        if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
                early_panic("Hypervisor glue size %ld is too big!\n",
                            glue_size);
        if (hv_page_size != PAGE_SIZE)
                early_panic("Hypervisor page size %#x != our %#lx\n",
                            hv_page_size, PAGE_SIZE);
        if (hv_hpage_size != HPAGE_SIZE)
                early_panic("Hypervisor huge page size %#x != our %#lx\n",
                            hv_hpage_size, HPAGE_SIZE);

#ifdef CONFIG_SMP
        /*
         * Some hypervisor APIs take a pointer to a bitmap array
         * whose size is at least the number of cpus on the chip.
         * We use a struct cpumask for this, so it must be big enough.
         */
        if ((smp_height * smp_width) > nr_cpu_ids)
                early_panic("Hypervisor %d x %d grid too big for Linux"
                            " NR_CPUS %d\n", smp_height, smp_width,
                            nr_cpu_ids);
#endif

        /*
         * Check that we're using allowed ASIDs, and initialize the
         * various asid variables to their appropriate initial states.
         */
        asid_range = hv_inquire_asid(0);
        __get_cpu_var(current_asid) = min_asid = asid_range.start;
        max_asid = asid_range.start + asid_range.size - 1;

        if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
                       sizeof(chip_model)) < 0) {
                pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
                strlcpy(chip_model, "unknown", sizeof(chip_model));
        }
}
static void __init validate_va(void)
{
#ifndef __tilegx__   /* FIXME: GX: probably some validation relevant here */
        /*
         * Similarly, make sure we're only using allowed VAs.
         * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
         * and 0 .. KERNEL_HIGH_VADDR.
         * In addition, make sure we CAN'T use the end of memory, since
         * we use the last chunk of each pgd for the pgd_list.
         */
        int i, user_kernel_ok = 0;
        unsigned long max_va = 0;
        unsigned long list_va =
                ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

        for (i = 0; ; ++i) {
                HV_VirtAddrRange range = hv_inquire_virtual(i);
                if (range.size == 0)
                        break;
                if (range.start <= MEM_USER_INTRPT &&
                    range.start + range.size >= MEM_HV_INTRPT)
                        user_kernel_ok = 1;
                if (range.start == 0)
                        max_va = range.size;
                BUG_ON(range.start + range.size > list_va);
        }
        if (!user_kernel_ok)
                early_panic("Hypervisor not configured for user/kernel VAs\n");
        if (max_va == 0)
                early_panic("Hypervisor not configured for low VAs\n");
        if (max_va < KERNEL_HIGH_VADDR)
                early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
                            max_va, KERNEL_HIGH_VADDR);

        /* Kernel PCs must have their high bit set; see intvec.S. */
        if ((long)VMALLOC_START >= 0)
                early_panic(
                        "Linux VMALLOC region below the 2GB line (%#lx)!\n"
                        "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n"
                        "or smaller VMALLOC_RESERVE.\n",
                        VMALLOC_START);
#endif
}
/*
 * cpu_lotar_map lists all the cpus that are valid for the supervisor
 * to cache data on at a page level, i.e. what cpus can be placed in
 * the LOTAR field of a PTE. It is equivalent to the set of possible
 * cpus plus any other cpus that are willing to share their cache.
 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
 */
struct cpumask __write_once cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

#if CHIP_HAS_CBOX_HOME_MAP()
/*
 * hash_for_home_map lists all the tiles that hash-for-home data
 * will be cached on. Note that this may include tiles that are not
 * valid for this supervisor to use otherwise (e.g. if a hypervisor
 * device is being shared between multiple supervisors).
 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
 */
struct cpumask hash_for_home_map;
EXPORT_SYMBOL(hash_for_home_map);
#endif

/*
 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
 * flush on our behalf. It is set to cpu_possible_map OR'ed with
 * hash_for_home_map, and it is what should be passed to
 * hv_flush_remote() to flush all caches. Note that if there are
 * dedicated hypervisor driver tiles that have authorized use of their
 * cache, those tiles will only appear in cpu_lotar_map, NOT in
 * cpu_cacheable_map, as they are a special case.
 */
struct cpumask __write_once cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);

static __initdata struct cpumask disabled_map;

static int __init disabled_cpus(char *str)
{
        int boot_cpu = smp_processor_id();

        if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0)
                return -EINVAL;
        if (cpumask_test_cpu(boot_cpu, &disabled_map)) {
                pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu);
                cpumask_clear_cpu(boot_cpu, &disabled_map);
        }
        return 0;
}

early_param("disabled_cpus", disabled_cpus);

void __init print_disabled_cpus(void)
{
        if (!cpumask_empty(&disabled_map)) {
                char buf[100];
                cpulist_scnprintf(buf, sizeof(buf), &disabled_map);
                pr_info("CPUs not available for Linux: %s\n", buf);
        }
}

static void __init setup_cpu_maps(void)
{
        struct cpumask hv_disabled_map, cpu_possible_init;
        int boot_cpu = smp_processor_id();
        int cpus, i, rc;

        /* Learn which cpus are allowed by the hypervisor. */
        rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
                              (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
                              sizeof(cpu_cacheable_map));
        if (rc < 0)
                early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
        if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
                early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

        /* Compute the cpus disabled by the hvconfig file. */
        cpumask_complement(&hv_disabled_map, &cpu_possible_init);

        /* Include them with the cpus disabled by "disabled_cpus". */
        cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

        /*
         * Disable every cpu after "setup_max_cpus". But don't mark
         * as disabled the cpus that are outside of our initial rectangle,
         * since that turns out to be confusing.
         */
        cpus = 1;                                   /* this cpu */
        cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
        for (i = 0; cpus < setup_max_cpus; ++i)
                if (!cpumask_test_cpu(i, &disabled_map))
                        ++cpus;
        for (; i < smp_height * smp_width; ++i)
                cpumask_set_cpu(i, &disabled_map);
        cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
        for (i = smp_height * smp_width; i < NR_CPUS; ++i)
                cpumask_clear_cpu(i, &disabled_map);

        /*
         * Setup cpu_possible map as every cpu allocated to us, minus
         * the results of any "disabled_cpus" settings.
         */
        cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
        init_cpu_possible(&cpu_possible_init);

        /* Learn which cpus are valid for LOTAR caching. */
        rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
                              (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
                              sizeof(cpu_lotar_map));
        if (rc < 0) {
                pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
                cpu_lotar_map = cpu_possible_map;
        }

#if CHIP_HAS_CBOX_HOME_MAP()
        /* Retrieve set of CPUs used for hash-for-home caching */
        rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
                              (HV_VirtAddr) hash_for_home_map.bits,
                              sizeof(hash_for_home_map));
        if (rc < 0)
                early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
        cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
#else
        cpu_cacheable_map = cpu_possible_map;
#endif
}


static int __init dataplane(char *str)
{
        pr_warning("WARNING: dataplane support disabled in this kernel\n");
        return 0;
}

early_param("dataplane", dataplane);
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

void __init setup_arch(char **cmdline_p)
{
        int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
        len = hv_get_command_line((HV_VirtAddr) boot_command_line,
                                  COMMAND_LINE_SIZE);
        if (boot_command_line[0])
                pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
                           boot_command_line);
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
        if (builtin_cmdline[0]) {
                int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
                                          COMMAND_LINE_SIZE);
                if (builtin_len < COMMAND_LINE_SIZE-1)
                        boot_command_line[builtin_len++] = ' ';
                hv_cmdline = &boot_command_line[builtin_len];
                len = COMMAND_LINE_SIZE - builtin_len;
        } else
#endif
        {
                hv_cmdline = boot_command_line;
                len = COMMAND_LINE_SIZE;
        }
        len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
        if (len < 0 || len > COMMAND_LINE_SIZE)
                early_panic("hv_get_command_line failed: %d\n", len);
#endif

        *cmdline_p = boot_command_line;

        /* Set disabled_map and setup_max_cpus very early */
        parse_early_param();

        /* Make sure the kernel is compatible with the hypervisor. */
        validate_hv();
        validate_va();

        setup_cpu_maps();


#ifdef CONFIG_PCI
        /*
         * Initialize the PCI structures. This is done before memory
         * setup so that we know whether or not a pci_reserve region
         * is necessary.
         */
        if (tile_pci_init() == 0)
                pci_reserve_mb = 0;

        /* PCI systems reserve a region just below 4GB for mapping iomem. */
        pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
        pci_reserve_start_pfn = pci_reserve_end_pfn -
                (pci_reserve_mb << (20 - PAGE_SHIFT));
#endif

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) _end;

        setup_memory();
        store_permanent_mappings();
        setup_bootmem_allocator();

        /*
         * NOTE: before this point _nobody_ is allowed to allocate
         * any memory using the bootmem allocator.
         */

        paging_init();
        setup_numa_mapping();
        zone_sizes_init();
        set_page_homes();
        setup_cpu(1);
        setup_clock();
        load_hv_initrd();
}
/*
 * Set up per-cpu memory.
 */

unsigned long __per_cpu_offset[NR_CPUS] __write_once;
EXPORT_SYMBOL(__per_cpu_offset);

static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };

/*
 * As the percpu code allocates pages, we return the pages from the
 * end of the node for the specified cpu.
 */
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
        int nid = cpu_to_node(cpu);
        unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

        BUG_ON(size % PAGE_SIZE != 0);
        pfn_offset[nid] += size / PAGE_SIZE;
        BUG_ON(node_percpu[nid] < size);
        node_percpu[nid] -= size;
        if (percpu_pfn[cpu] == 0)
                percpu_pfn[cpu] = pfn;
        return pfn_to_kaddr(pfn);
}

/*
 * Pages reserved for percpu memory are not freeable, and in any case we are
 * on a short path to panic() in setup_per_cpu_area() at this point anyway.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
}

/*
 * Set up vmalloc page tables using bootmem for the percpu code.
 */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        BUG_ON(pgd_addr_invalid(addr));
        if (addr < VMALLOC_START || addr >= VMALLOC_END)
                panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
                      " try increasing CONFIG_VMALLOC_RESERVE\n",
                      addr, VMALLOC_START, VMALLOC_END);

        pgd = swapper_pg_dir + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        BUG_ON(!pud_present(*pud));
        pmd = pmd_offset(pud, addr);
        if (pmd_present(*pmd)) {
                BUG_ON(pmd_huge_page(*pmd));
        } else {
                pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
                                      HV_PAGE_TABLE_ALIGN, 0);
                pmd_populate_kernel(&init_mm, pmd, pte);
        }
}

void __init setup_per_cpu_areas(void)
{
        struct page *pg;
        unsigned long delta, pfn, lowmem_va;
        unsigned long size = percpu_size();
        char *ptr;
        int rc, cpu, i;

        rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
                                   pcpu_fc_free, pcpu_fc_populate_pte);
        if (rc < 0)
                panic("Cannot initialize percpu area (err=%d)", rc);

        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

                /* finv the copy out of cache so we can change homecache */
                ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
                __finv_buffer(ptr, size);
                pfn = percpu_pfn[cpu];

                /* Rewrite the page tables to cache on that cpu */
                pg = pfn_to_page(pfn);
                for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

                        /* Update the vmalloc mapping and page home. */
                        pte_t *ptep =
                                virt_to_pte(NULL, (unsigned long)ptr + i);
                        pte_t pte = *ptep;
                        BUG_ON(pfn != pte_pfn(pte));
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
                        pte = set_remote_cache_cpu(pte, cpu);
                        set_pte(ptep, pte);

                        /* Update the lowmem mapping for consistency. */
                        lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
                        ptep = virt_to_pte(NULL, lowmem_va);
                        if (pte_huge(*ptep)) {
                                printk(KERN_DEBUG "early shatter of huge page"
                                       " at %#lx\n", lowmem_va);
                                shatter_pmd((pmd_t *)ptep);
                                ptep = virt_to_pte(NULL, lowmem_va);
                                BUG_ON(pte_huge(*ptep));
                        }
                        BUG_ON(pfn != pte_pfn(*ptep));
                        set_pte(ptep, pte);
                }
        }

        /* Set our thread pointer appropriately. */
        set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

        /* Make sure the finv's have completed. */
        mb_incoherent();

        /* Flush the TLB so we reference it properly from here on out. */
        local_flush_tlb_all();
}
static struct resource data_resource = {
        .name   = "Kernel data",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
        .name   = "Kernel code",
        .start  = 0,
        .end    = 0,
        .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
};

/*
 * We reserve all resources above 4GB so that PCI won't try to put
 * mappings above 4GB; the standard allows that for some devices but
 * the probing code truncates values to 32 bits.
 */
#ifdef CONFIG_PCI
static struct resource* __init
insert_non_bus_resource(void)
{
        struct resource *res =
                kzalloc(sizeof(struct resource), GFP_ATOMIC);
        res->name = "Non-Bus Physical Address Space";
        res->start = (1ULL << 32);
        res->end = -1LL;
        res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
        if (insert_resource(&iomem_resource, res)) {
                kfree(res);
                return NULL;
        }
        return res;
}
#endif

static struct resource* __init
insert_ram_resource(u64 start_pfn, u64 end_pfn)
{
        struct resource *res =
                kzalloc(sizeof(struct resource), GFP_ATOMIC);
        res->name = "System RAM";
        res->start = start_pfn << PAGE_SHIFT;
        res->end = (end_pfn << PAGE_SHIFT) - 1;
        res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
        if (insert_resource(&iomem_resource, res)) {
                kfree(res);
                return NULL;
        }
        return res;
}

/*
 * Request address space for all standard resources
 *
 * If the system includes PCI root complex drivers, we need to create
 * a window just below 4GB where PCI BARs can be mapped.
 */
static int __init request_standard_resources(void)
{
        int i;
        enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

        iomem_resource.end = -1LL;
#ifdef CONFIG_PCI
        insert_non_bus_resource();
#endif

        for_each_online_node(i) {
                u64 start_pfn = node_start_pfn[i];
                u64 end_pfn = node_end_pfn[i];

#ifdef CONFIG_PCI
                if (start_pfn <= pci_reserve_start_pfn &&
                    end_pfn > pci_reserve_start_pfn) {
                        if (end_pfn > pci_reserve_end_pfn)
                                insert_ram_resource(pci_reserve_end_pfn,
                                                    end_pfn);
                        end_pfn = pci_reserve_start_pfn;
                }
#endif
                insert_ram_resource(start_pfn, end_pfn);
        }

        code_resource.start = __pa(_text - CODE_DELTA);
        code_resource.end = __pa(_etext - CODE_DELTA)-1;
        data_resource.start = __pa(_sdata);
        data_resource.end = __pa(_end)-1;

        insert_resource(&iomem_resource, &code_resource);
        insert_resource(&iomem_resource, &data_resource);

#ifdef CONFIG_KEXEC
        insert_resource(&iomem_resource, &crashk_res);
#endif

        return 0;
}

subsys_initcall(request_standard_resources);