GitHub Repository: torvalds/linux
Path: blob/master/arch/mips/sgi-ip27/ip27-memory.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000, 05 by Ralf Baechle ([email protected])
 * Copyright (C) 2000 by Silicon Graphics, Inc.
 * Copyright (C) 2004 by Christoph Hellwig
 *
 * On SGI IP27 the ARC memory configuration data is completely bogus but
 * alternate easier to use mechanisms are available.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/swap.h>
#include <linux/pfn.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/sgialib.h>

#include <asm/sn/arch.h>
#include <asm/sn/agent.h>
#include <asm/sn/klconfig.h>

#include "ip27-common.h"

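/*
 * Slot and NASID field shifts expressed in page-frame-number terms, so a
 * (nasid, slot) pair can be turned directly into a PFN (see slot_getbasepfn).
 */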
#define SLOT_PFNSHIFT		(SLOT_SHIFT - PAGE_SHIFT)
#define PFN_NASIDSHFT		(NASID_SHFT - PAGE_SHIFT)

struct node_data *__node_data[MAX_NUMNODES];
EXPORT_SYMBOL(__node_data);

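/*
 * Build a bitmask with one bit set for every region that contains at least
 * one online node; the result is written to each hub's PI_REGION_PRESENT
 * register in mlreset().
 */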
static u64 gen_region_mask(void)
{
	int region_shift;
	u64 region_mask;
	nasid_t nasid;

	region_shift = get_region_shift();
	region_mask = 0;
	for_each_online_node(nasid)
		region_mask |= BIT_ULL(nasid >> region_shift);

	return region_mask;
}

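/*
 * The rou_flags field of klrou_t is reused as a "visited" marker while
 * walking the router graph; router_distance holds the best hop count found
 * so far.
 */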
#define rou_rflag	rou_flags

static int router_distance;

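/*
 * Depth-first walk from router_a towards router_b, lowering router_distance
 * to the smallest depth at which router_b is reached.  Routers already
 * visited (rou_rflag set) and paths no shorter than the current best are
 * pruned.
 */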
static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
{
	klrou_t *router;
	lboard_t *brd;
	int port;

	if (router_a->rou_rflag == 1)
		return;

	if (depth >= router_distance)
		return;

	router_a->rou_rflag = 1;

	for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
		if (router_a->rou_port[port].port_nasid == INVALID_NASID)
			continue;

		brd = (lboard_t *)NODE_OFFSET_TO_K0(
			router_a->rou_port[port].port_nasid,
			router_a->rou_port[port].port_offset);

		if (brd->brd_type == KLTYPE_ROUTER) {
			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			if (router == router_b) {
				if (depth < router_distance)
					router_distance = depth;
			}
			else
				router_recurse(router, router_b, depth + 1);
		}
	}

	router_a->rou_rflag = 0;
}

unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);

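/*
 * Compute the NUMA distance between two nodes from the KLCONFIG router
 * topology: LOCAL_DISTANCE for the same node, LOCAL_DISTANCE + 1 for nodes
 * behind the same router, otherwise LOCAL_DISTANCE plus the hop count found
 * by router_recurse(); 255 if no router is attached to one of the nodes.
 */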
static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
{
	klrou_t *router, *router_a = NULL, *router_b = NULL;
	lboard_t *brd, *dest_brd;
	nasid_t nasid;
	int port;

	/* Figure out which routers nodes in question are connected to */
	for_each_online_node(nasid) {
		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);
			router->rou_rflag = 0;

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27) {
					if (dest_brd->brd_nasid == nasid_a)
						router_a = router;
					if (dest_brd->brd_nasid == nasid_b)
						router_b = router;
				}
			}

		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
	}

	if (nasid_a == nasid_b)
		return LOCAL_DISTANCE;

	if (router_a == router_b)
		return LOCAL_DISTANCE + 1;

	if (router_a == NULL) {
		pr_info("node_distance: router_a NULL\n");
		return 255;
	}
	if (router_b == NULL) {
		pr_info("node_distance: router_b NULL\n");
		return 255;
	}

	router_distance = 100;
	router_recurse(router_a, router_b, 2);

	return LOCAL_DISTANCE + router_distance;
}

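/*
 * Fill __node_distances for every pair of online nodes; entries involving
 * offline nodes stay at -1.
 */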
static void __init init_topology_matrix(void)
{
	nasid_t row, col;

	for (row = 0; row < MAX_NUMNODES; row++)
		for (col = 0; col < MAX_NUMNODES; col++)
			__node_distances[row][col] = -1;

	for_each_online_node(row) {
		for_each_online_node(col) {
			__node_distances[row][col] =
				compute_node_distance(row, col);
		}
	}
}

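/*
 * Print the node distance matrix and, for each router, which nodes (printed
 * as a NASID) and which other routers (printed as "r") its ports connect to.
 */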
static void __init dump_topology(void)
{
	nasid_t nasid;
	lboard_t *brd, *dest_brd;
	int port;
	int router_num = 0;
	klrou_t *router;
	nasid_t row, col;

	pr_info("************** Topology ********************\n");

	pr_info(" ");
	for_each_online_node(col)
		pr_cont("%02d ", col);
	pr_cont("\n");
	for_each_online_node(row) {
		pr_info("%02d ", row);
		for_each_online_node(col)
			pr_cont("%2d ", node_distance(row, col));
		pr_cont("\n");
	}

	for_each_online_node(nasid) {
		brd = find_lboard_class((lboard_t *)KL_CONFIG_INFO(nasid),
					KLTYPE_ROUTER);

		if (!brd)
			continue;

		do {
			if (brd->brd_flags & DUPLICATE_BOARD)
				continue;
			pr_cont("Router %d:", router_num);
			router_num++;

			router = (klrou_t *)NODE_OFFSET_TO_K0(NASID_GET(brd), brd->brd_compts[0]);

			for (port = 1; port <= MAX_ROUTER_PORTS; port++) {
				if (router->rou_port[port].port_nasid == INVALID_NASID)
					continue;

				dest_brd = (lboard_t *)NODE_OFFSET_TO_K0(
					router->rou_port[port].port_nasid,
					router->rou_port[port].port_offset);

				if (dest_brd->brd_type == KLTYPE_IP27)
					pr_cont(" %d", dest_brd->brd_nasid);
				if (dest_brd->brd_type == KLTYPE_ROUTER)
					pr_cont(" r");
			}
			pr_cont("\n");

		} while ((brd = find_lboard_class(KLCF_NEXT(brd), KLTYPE_ROUTER)));
	}
}

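/*
 * First PFN of a given memory slot on a given node: the NASID and the slot
 * number form the upper bits of the physical address.
 */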
static unsigned long __init slot_getbasepfn(nasid_t nasid, int slot)
{
	return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
}

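/*
 * Size of one memory slot in pages, taken from the klmembnk_t bank size
 * table in KLCONFIG (entries are in megabytes); returns 0 for empty slots.
 */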
static unsigned long __init slot_psize_compute(nasid_t nasid, int slot)
{
	lboard_t *brd;
	klmembnk_t *banks;
	unsigned long size;

	/* Find the node board */
	brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27);
	if (!brd)
		return 0;

	/* Get the memory bank structure */
	banks = (klmembnk_t *) find_first_component(brd, KLSTRUCT_MEMBNK);
	if (!banks)
		return 0;

	/* Size in _Megabytes_ */
	size = (unsigned long)banks->membnk_bnksz[slot/4];

	/* hack for 128 dimm banks */
	if (size <= 128) {
		if (slot % 4 == 0) {
			size <<= 20;		/* size in bytes */
			return size >> PAGE_SHIFT;
		} else
			return 0;
	} else {
		size /= 4;
		size <<= 20;
		return size >> PAGE_SHIFT;
	}
}

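/*
 * Early per-node hardware setup: probe CPUs, build the topology matrix and
 * program each hub's region mask and CALIAS size.
 */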
static void __init mlreset(void)
{
	u64 region_mask;
	nasid_t nasid;

	master_nasid = get_nasid();

	/*
	 * Probe for all CPUs - this creates the cpumask and sets up the
	 * mapping tables. We need to do this as early as possible.
	 */
#ifdef CONFIG_SMP
	cpu_node_probe();
#endif

	init_topology_matrix();
	dump_topology();

	region_mask = gen_region_mask();

	setup_replication_mask();

	/*
	 * Set all nodes' calias sizes to 8k
	 */
	for_each_online_node(nasid) {
		/*
		 * Always have node 0 in the region mask, otherwise
		 * CALIAS accesses get exceptions since the hub
		 * thinks it is a node 0 address.
		 */
		REMOTE_HUB_S(nasid, PI_REGION_PRESENT, (region_mask | 1));
		REMOTE_HUB_S(nasid, PI_CALIAS_SIZE, PI_CALIAS_SIZE_0);

#ifdef LATER
		/*
		 * Set up all hubs to have a big window pointing at
		 * widget 0. Memory mode, widget 0, offset 0
		 */
		REMOTE_HUB_S(nasid, IIO_ITTE(SWIN0_BIGWIN),
			((HUB_PIO_MAP_TO_MEM << IIO_ITTE_IOSP_SHIFT) |
			 (0 << IIO_ITTE_WIDGET_SHIFT)));
#endif
	}
}

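/*
 * Register every populated memory slot with memblock.  Slots are ignored
 * from the point where the struct page array for the node's memory would no
 * longer fit into slot 0.
 */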
static void __init szmem(void)
{
	unsigned long slot_psize, slot0sz = 0, nodebytes;	/* Hack to detect problem configs */
	int slot;
	nasid_t node;

	for_each_online_node(node) {
		nodebytes = 0;
		for (slot = 0; slot < MAX_MEM_SLOTS; slot++) {
			slot_psize = slot_psize_compute(node, slot);
			if (slot == 0)
				slot0sz = slot_psize;
			/*
			 * We need to refine the hack when we have replicated
			 * kernel text.
			 */
			nodebytes += (1LL << SLOT_SHIFT);

			if (!slot_psize)
				continue;

			if ((nodebytes >> PAGE_SHIFT) * (sizeof(struct page)) >
			    (slot0sz << PAGE_SHIFT)) {
				pr_info("Ignoring slot %d onwards on node %d\n",
					slot, node);
				slot = MAX_MEM_SLOTS;
				continue;
			}
			memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
					  PFN_PHYS(slot_psize), node,
					  MEMBLOCK_NONE);
		}
	}
}

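/*
 * Place the node's node_data (pglist_data plus hub_data) at the first free
 * page of slot 0 and memblock_reserve everything from the slot base up to
 * the end of that allocation.
 */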
static void __init node_mem_init(nasid_t node)
{
	unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
	unsigned long slot_freepfn = node_getfirstfree(node);
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(node, &start_pfn, &end_pfn);

	/*
	 * Allocate the node data structures on the node first.
	 */
	__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
	memset(__node_data[node], 0, PAGE_SIZE);
	node_data[node] = &__node_data[node]->pglist;

	NODE_DATA(node)->node_start_pfn = start_pfn;
	NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;

	cpumask_clear(&hub_data(node)->h_cpus);

	slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
			       sizeof(struct hub_data));

	memblock_reserve(slot_firstpfn << PAGE_SHIFT,
			 ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT));
}

/*
 * A node with nothing. We use it to avoid any special casing in
 * cpumask_of_node
 */
static struct node_data null_node = {
	.hub = {
		.h_cpus = CPU_MASK_NONE
	}
};

/*
 * Currently, the intranode memory hole support assumes that each slot
 * contains at least 32 MBytes of memory. We assume all bootmem data
 * fits on the first slot.
 */
void __init prom_meminit(void)
{
	nasid_t node;

	mlreset();
	szmem();
	max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());

	for (node = 0; node < MAX_NUMNODES; node++) {
		if (node_online(node)) {
			node_mem_init(node);
			continue;
		}
		__node_data[node] = &null_node;
	}
}

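/*
 * All IP27 memory is managed as ZONE_NORMAL, spanning up to max_low_pfn as
 * determined in prom_meminit().
 */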
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };

	pagetable_init();
	zones_size[ZONE_NORMAL] = max_low_pfn;
	free_area_init(zones_size);
}