GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/blackfin/kernel/setup.c
/*
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/delay.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/pfn.h>

#ifdef CONFIG_MTD_UCLINUX
#include <linux/mtd/map.h>
#include <linux/ext2_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/romfs_fs.h>
#endif

#include <asm/cplb.h>
#include <asm/cacheflush.h>
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
#include <asm/irq_handler.h>

u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);

unsigned long memory_start, memory_end, physical_mem_end;
unsigned long _rambase, _ramstart, _ramend;
unsigned long reserved_mem_dcache_on;
unsigned long reserved_mem_icache_on;
EXPORT_SYMBOL(memory_start);
EXPORT_SYMBOL(memory_end);
EXPORT_SYMBOL(physical_mem_end);
EXPORT_SYMBOL(_ramend);
EXPORT_SYMBOL(reserved_mem_dcache_on);

#ifdef CONFIG_MTD_UCLINUX
extern struct map_info uclinux_ram_map;
unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
unsigned long _ebss;
EXPORT_SYMBOL(memory_mtd_end);
EXPORT_SYMBOL(memory_mtd_start);
EXPORT_SYMBOL(mtd_size);
#endif

char __initdata command_line[COMMAND_LINE_SIZE];
void __initdata *init_retx, *init_saved_retx, *init_saved_seqstat,
        *init_saved_icplb_fault_addr, *init_saved_dcplb_fault_addr;

/* boot memmap, for parsing "memmap=" */
#define BFIN_MEMMAP_MAX		128 /* number of entries in bfin_memmap */
#define BFIN_MEMMAP_RAM		1
#define BFIN_MEMMAP_RESERVED	2
static struct bfin_memmap {
        int nr_map;
        struct bfin_memmap_entry {
                unsigned long long addr; /* start of memory segment */
                unsigned long long size;
                unsigned long type;
        } map[BFIN_MEMMAP_MAX];
} bfin_memmap __initdata;
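
/*
 * Editor's illustrative note (not part of the original source; the values
 * are hypothetical): after parsing "memmap=4M$0x1C00000" on a 32MB board,
 * the table would hold an entry such as
 *
 *     map[0] = { .addr = 0x1C00000, .size = 0x400000, .type = BFIN_MEMMAP_RESERVED };
 *
 * plus the RAM entry added later by setup_bootmem_allocator(), with nr_map
 * counting the valid slots.  sanitize_memmap() below resolves any overlap.
 */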

/* for memmap sanitization */
struct change_member {
        struct bfin_memmap_entry *pentry; /* pointer to original entry */
        unsigned long long addr; /* address for this change point */
};
static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;

DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);

static int early_init_clkin_hz(char *buf);

#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
void __init generate_cplb_tables(void)
{
        unsigned int cpu;

        generate_cplb_tables_all();
        /* Generate per-CPU I&D CPLB tables */
        for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
                generate_cplb_tables_cpu(cpu);
}
#endif

void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
        bfin_icache_init(icplb_tbl[cpu]);
#endif

#ifdef CONFIG_BFIN_DCACHE
        bfin_dcache_init(dcplb_tbl[cpu]);
#endif

        bfin_setup_cpudata(cpu);

        /*
         * In cache coherence emulation mode, we need to have the
         * D-cache enabled before running any atomic operation which
         * might involve cache invalidation (i.e. spinlock, rwlock).
         * So printk's are deferred until then.
         */
#ifdef CONFIG_BFIN_ICACHE
        printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
        printk(KERN_INFO " External memory:"
# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
               " cacheable"
# else
               " uncacheable"
# endif
               " in instruction cache\n");
        if (L2_LENGTH)
                printk(KERN_INFO " L2 SRAM :"
# ifdef CONFIG_BFIN_L2_ICACHEABLE
                       " cacheable"
# else
                       " uncacheable"
# endif
                       " in instruction cache\n");

#else
        printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif

#ifdef CONFIG_BFIN_DCACHE
        printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
        printk(KERN_INFO " External memory:"
# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
               " cacheable (write-back)"
# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
               " cacheable (write-through)"
# else
               " uncacheable"
# endif
               " in data cache\n");
        if (L2_LENGTH)
                printk(KERN_INFO " L2 SRAM :"
# if defined CONFIG_BFIN_L2_WRITEBACK
                       " cacheable (write-back)"
# elif defined CONFIG_BFIN_L2_WRITETHROUGH
                       " cacheable (write-through)"
# else
                       " uncacheable"
# endif
                       " in data cache\n");
#else
        printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}

void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
        struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);

        cpudata->imemctl = bfin_read_IMEM_CONTROL();
        cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}

void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
        generate_cplb_tables();
#endif
        bfin_setup_caches(0);
}

void __init bfin_relocate_l1_mem(void)
{
        unsigned long text_l1_len = (unsigned long)_text_l1_len;
        unsigned long data_l1_len = (unsigned long)_data_l1_len;
        unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
        unsigned long l2_len = (unsigned long)_l2_len;

        early_shadow_stamp();

        /*
         * Due to the ALIGN(4) in arch/blackfin/kernel/vmlinux.lds.S we
         * know that everything about L1 text/data is nicely aligned, so
         * copy in 4 byte chunks and don't worry about overlapping
         * src/dest.
         *
         * We can't use the dma_memcpy functions, since they can call
         * scheduler functions which might be in L1 :( and core writes
         * into L1 instruction memory cause bad access errors, so we are
         * stuck: we are required to use DMA, but can't use the common
         * DMA functions.  We can't use memcpy either, since that might
         * itself be in the L1 memory being relocated.
         */

        blackfin_dma_early_init();

        /* if necessary, copy L1 text to L1 instruction SRAM */
        if (L1_CODE_LENGTH && text_l1_len)
                early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);

        /* if necessary, copy L1 data to L1 data bank A SRAM */
        if (L1_DATA_A_LENGTH && data_l1_len)
                early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);

        /* if necessary, copy L1 data B to L1 data bank B SRAM */
        if (L1_DATA_B_LENGTH && data_b_l1_len)
                early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);

        early_dma_memcpy_done();

#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
        blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
#endif

        /* if necessary, copy L2 text/data to L2 SRAM */
        if (L2_LENGTH && l2_len)
                memcpy(_stext_l2, _l2_lma, l2_len);
}

#ifdef CONFIG_SMP
void __init bfin_relocate_coreb_l1_mem(void)
{
        unsigned long text_l1_len = (unsigned long)_text_l1_len;
        unsigned long data_l1_len = (unsigned long)_data_l1_len;
        unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;

        blackfin_dma_early_init();

        /* if necessary, copy L1 text to L1 instruction SRAM */
        if (L1_CODE_LENGTH && text_l1_len)
                early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
                                 text_l1_len);

        /* if necessary, copy L1 data to L1 data bank A SRAM */
        if (L1_DATA_A_LENGTH && data_l1_len)
                early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
                                 data_l1_len);

        /* if necessary, copy L1 data B to L1 data bank B SRAM */
        if (L1_DATA_B_LENGTH && data_b_l1_len)
                early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
                                 data_b_l1_len);

        early_dma_memcpy_done();

#ifdef CONFIG_ICACHE_FLUSH_L1
        blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
                        (unsigned long)_stext_l1 + COREB_L1_CODE_START;
#endif
}
#endif

#ifdef CONFIG_ROMKERNEL
void __init bfin_relocate_xip_data(void)
{
        early_shadow_stamp();

        memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
        memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
}
#endif

/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
                                     unsigned long long size, int type)
{
        int i;

        i = bfin_memmap.nr_map;

        if (i == BFIN_MEMMAP_MAX) {
                printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
                return;
        }

        bfin_memmap.map[i].addr = start;
        bfin_memmap.map[i].size = size;
        bfin_memmap.map[i].type = type;
        bfin_memmap.nr_map++;
}
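
/*
 * Usage sketch (editor's illustration, not in the original source; the
 * addresses are hypothetical, assuming a 64MB part): marking the first 64MB
 * usable and carving a 1MB hole out of the top of it would be
 *
 *     add_memory_region(0x0000000, 0x4000000, BFIN_MEMMAP_RAM);
 *     add_memory_region(0x3F00000, 0x0100000, BFIN_MEMMAP_RESERVED);
 *
 * Entries are allowed to overlap at this point; sanitize_memmap() below is
 * what resolves them into a non-overlapping map.
 */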

/*
 * Sanitize the boot memmap, removing overlaps.
 */
static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
{
        struct change_member *change_tmp;
        unsigned long current_type, last_type;
        unsigned long long last_addr;
        int chgidx, still_changing;
        int overlap_entries;
        int new_entry;
        int old_nr, new_nr, chg_nr;
        int i;

        /*
                Visually we're performing the following (1,2,3,4 = memory types)

                Sample memory map (w/overlaps):
                   ____22__________________
                   ______________________4_
                   ____1111________________
                   _44_____________________
                   11111111________________
                   ____________________33__
                   ___________44___________
                   __________33333_________
                   ______________22________
                   ___________________2222_
                   _________111111111______
                   _____________________11_
                   _________________4______

                Sanitized equivalent (no overlap):
                   1_______________________
                   _44_____________________
                   ___1____________________
                   ____22__________________
                   ______11________________
                   _________1______________
                   __________3_____________
                   ___________44___________
                   _____________33_________
                   _______________2________
                   ________________1_______
                   _________________4______
                   ___________________2____
                   ____________________33__
                   ______________________4_
        */
        /* if there's only one memory region, don't bother */
        if (*pnr_map < 2)
                return -1;

        old_nr = *pnr_map;

        /* bail out if we find any unreasonable addresses in memmap */
        for (i = 0; i < old_nr; i++)
                if (map[i].addr + map[i].size < map[i].addr)
                        return -1;

        /* create pointers for initial change-point information (for sorting) */
        for (i = 0; i < 2*old_nr; i++)
                change_point[i] = &change_point_list[i];

        /* record all known change-points (starting and ending addresses),
           omitting those that are for empty memory regions */
        chgidx = 0;
        for (i = 0; i < old_nr; i++) {
                if (map[i].size != 0) {
                        change_point[chgidx]->addr = map[i].addr;
                        change_point[chgidx++]->pentry = &map[i];
                        change_point[chgidx]->addr = map[i].addr + map[i].size;
                        change_point[chgidx++]->pentry = &map[i];
                }
        }
        chg_nr = chgidx;        /* true number of change-points */

        /* sort change-point list by memory addresses (low -> high) */
        still_changing = 1;
        while (still_changing) {
                still_changing = 0;
                for (i = 1; i < chg_nr; i++) {
                        /* if <current_addr> > <last_addr>, swap */
                        /* or, if current=<start_addr> & last=<end_addr>, swap */
                        if ((change_point[i]->addr < change_point[i-1]->addr) ||
                                ((change_point[i]->addr == change_point[i-1]->addr) &&
                                 (change_point[i]->addr == change_point[i]->pentry->addr) &&
                                 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
                           ) {
                                change_tmp = change_point[i];
                                change_point[i] = change_point[i-1];
                                change_point[i-1] = change_tmp;
                                still_changing = 1;
                        }
                }
        }

        /* create a new memmap, removing overlaps */
        overlap_entries = 0;    /* number of entries in the overlap table */
        new_entry = 0;          /* index for creating new memmap entries */
        last_type = 0;          /* start with undefined memory type */
        last_addr = 0;          /* start with 0 as last starting address */
        /* loop through change-points, determining effect on the new memmap */
        for (chgidx = 0; chgidx < chg_nr; chgidx++) {
                /* keep track of all overlapping memmap entries */
                if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
                        /* add map entry to overlap list (> 1 entry implies an overlap) */
                        overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
                } else {
                        /* remove entry from list (order independent, so swap with last) */
                        for (i = 0; i < overlap_entries; i++) {
                                if (overlap_list[i] == change_point[chgidx]->pentry)
                                        overlap_list[i] = overlap_list[overlap_entries-1];
                        }
                        overlap_entries--;
                }
                /* if there are overlapping entries, decide which "type" to use */
                /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
                current_type = 0;
                for (i = 0; i < overlap_entries; i++)
                        if (overlap_list[i]->type > current_type)
                                current_type = overlap_list[i]->type;
                /* continue building up new memmap based on this information */
                if (current_type != last_type) {
                        if (last_type != 0) {
                                new_map[new_entry].size =
                                        change_point[chgidx]->addr - last_addr;
                                /* move forward only if the new size was non-zero */
                                if (new_map[new_entry].size != 0)
                                        if (++new_entry >= BFIN_MEMMAP_MAX)
                                                break;  /* no more space left for new entries */
                        }
                        if (current_type != 0) {
                                new_map[new_entry].addr = change_point[chgidx]->addr;
                                new_map[new_entry].type = current_type;
                                last_addr = change_point[chgidx]->addr;
                        }
                        last_type = current_type;
                }
        }
        new_nr = new_entry;     /* retain count for new entries */

        /* copy new mapping into original location */
        memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
        *pnr_map = new_nr;

        return 0;
}
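
/*
 * Worked example (added by the editor for illustration, not in the original
 * source): starting from the two overlapping entries
 *
 *     { .addr = 0x0000000, .size = 0x800000, .type = BFIN_MEMMAP_RAM      }
 *     { .addr = 0x0400000, .size = 0x400000, .type = BFIN_MEMMAP_RESERVED }
 *
 * four change points are recorded (starts at 0x0 and 0x400000, ends at
 * 0x800000 twice).  Walking them in sorted order, RESERVED (type 2)
 * outranks RAM (type 1) inside the overlap, so the sanitized map becomes
 *
 *     { .addr = 0x0000000, .size = 0x400000, .type = BFIN_MEMMAP_RAM      }
 *     { .addr = 0x0400000, .size = 0x400000, .type = BFIN_MEMMAP_RESERVED }
 *
 * and *pnr_map remains 2.
 */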

static void __init print_memory_map(char *who)
{
        int i;

        for (i = 0; i < bfin_memmap.nr_map; i++) {
                printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
                        bfin_memmap.map[i].addr,
                        bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
                switch (bfin_memmap.map[i].type) {
                case BFIN_MEMMAP_RAM:
                        printk(KERN_CONT "(usable)\n");
                        break;
                case BFIN_MEMMAP_RESERVED:
                        printk(KERN_CONT "(reserved)\n");
                        break;
                default:
                        printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
                        break;
                }
        }
}

static __init int parse_memmap(char *arg)
{
        unsigned long long start_at, mem_size;

        if (!arg)
                return -EINVAL;

        mem_size = memparse(arg, &arg);
        if (*arg == '@') {
                start_at = memparse(arg+1, &arg);
                add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
        } else if (*arg == '$') {
                start_at = memparse(arg+1, &arg);
                add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
        }

        return 0;
}

/*
 * Initial parsing of the command line.  Currently, we support:
 *  - Controlling the Linux memory size: mem=xxx[KMG]
 *  - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
 *       $ -> reserved memory is dcacheable
 *       # -> reserved memory is icacheable
 *  - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
 *       @ from <start> to <start>+<mem>, type RAM
 *       $ from <start> to <start>+<mem>, type RESERVED
 */
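
/*
 * Example (editor's illustration, not in the original source): booting with
 *
 *     mem=32M max_mem=64M$ memmap=4M$0x1C00000
 *
 * caps the kernel-managed memory (_ramend) at 32MB, declares 64MB of
 * physical memory with the reserved portion dcacheable, and additionally
 * reserves the 4MB region starting at 0x1C00000.
 */
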
static __init void parse_cmdline_early(char *cmdline_p)
{
        char c = ' ', *to = cmdline_p;
        unsigned int memsize;
        for (;;) {
                if (c == ' ') {
                        if (!memcmp(to, "mem=", 4)) {
                                to += 4;
                                memsize = memparse(to, &to);
                                if (memsize)
                                        _ramend = memsize;

                        } else if (!memcmp(to, "max_mem=", 8)) {
                                to += 8;
                                memsize = memparse(to, &to);
                                if (memsize) {
                                        physical_mem_end = memsize;
                                        if (*to != ' ') {
                                                if (*to == '$'
                                                    || *(to + 1) == '$')
                                                        reserved_mem_dcache_on = 1;
                                                if (*to == '#'
                                                    || *(to + 1) == '#')
                                                        reserved_mem_icache_on = 1;
                                        }
                                }
                        } else if (!memcmp(to, "clkin_hz=", 9)) {
                                to += 9;
                                early_init_clkin_hz(to);
#ifdef CONFIG_EARLY_PRINTK
                        } else if (!memcmp(to, "earlyprintk=", 12)) {
                                to += 12;
                                setup_early_printk(to);
#endif
                        } else if (!memcmp(to, "memmap=", 7)) {
                                to += 7;
                                parse_memmap(to);
                        }
                }
                c = *(to++);
                if (!c)
                        break;
        }
}

/*
 * Setup memory defaults from user config.
 * The physical memory layout looks like:
 *
 *  [_rambase, _ramstart]:              kernel image
 *  [memory_start, memory_end]:         dynamic memory managed by kernel
 *  [memory_end, _ramend]:              reserved memory
 *  [memory_mtd_start(memory_end),
 *      memory_mtd_start + mtd_size]:   rootfs (if any)
 *  [_ramend - DMA_UNCACHED_REGION,
 *      _ramend]:                       uncached DMA region
 *  [_ramend, physical_mem_end]:        memory not managed by kernel
 */
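
/*
 * Concrete illustration (added by the editor; all numbers are hypothetical):
 * on a 64MB board with CONFIG_BOOT_LOAD = 0x1000, a ~4MB kernel image and a
 * 1MB DMA_UNCACHED_REGION, the layout above works out roughly to
 *
 *     _rambase     = 0x0001000      start of kernel image
 *     _ramstart    = 0x0400000      end of kernel image (_end)
 *     memory_start = 0x0400000      PAGE_ALIGN(_ramstart)
 *     memory_end   = 0x3F00000      _ramend - DMA_UNCACHED_REGION
 *     _ramend      = 0x4000000      end of kernel-managed RAM
 */
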
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
        unsigned long mtd_phys = 0;
#endif
        unsigned long max_mem;

        _rambase = CONFIG_BOOT_LOAD;
        _ramstart = (unsigned long)_end;

        if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
                console_init();
                panic("DMA region exceeds memory limit: %lu.",
                        _ramend - _ramstart);
        }
        max_mem = memory_end = _ramend - DMA_UNCACHED_REGION;

#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
        /* Due to a Hardware Anomaly we need to limit the size of usable
         * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
         * 05000263 - Hardware loop corrupted when taking an ICPLB exception
         */
# if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
        if (max_mem >= 56 * 1024 * 1024)
                max_mem = 56 * 1024 * 1024;
# else
        if (max_mem >= 60 * 1024 * 1024)
                max_mem = 60 * 1024 * 1024;
# endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */
#endif /* ANOMALY_05000263 */


#ifdef CONFIG_MPU
        /* Round up to multiple of 4MB */
        memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
        memory_start = PAGE_ALIGN(_ramstart);
#endif

#if defined(CONFIG_MTD_UCLINUX)
        /* generic memory mapped MTD driver */
        memory_mtd_end = memory_end;

        mtd_phys = _ramstart;
        mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));

# if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
        if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
                mtd_size =
                    PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
# endif

# if defined(CONFIG_CRAMFS)
        if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
                mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
# endif

# if defined(CONFIG_ROMFS_FS)
        if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
            && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) {
                mtd_size =
                    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));

                /* ROM_FS is XIP, so if we found it, we need to limit memory */
                if (memory_end > max_mem) {
                        pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
                        memory_end = max_mem;
                }
        }
# endif /* CONFIG_ROMFS_FS */

        /* Since the default MTD_UCLINUX has no magic number, we just blindly
         * read 8 past the end of the kernel's image, and look at it.
         * When no image is attached, mtd_size is set to a random number.
         * Do some basic sanity checks before operating on things.
         */
        if (mtd_size == 0 || memory_end <= mtd_size) {
                pr_emerg("Could not find valid ram mtd attached.\n");
        } else {
                memory_end -= mtd_size;

                /* Relocate MTD image to the top of memory after the uncached memory area */
                uclinux_ram_map.phys = memory_mtd_start = memory_end;
                uclinux_ram_map.size = mtd_size;
                pr_info("Found mtd partition at 0x%p, (len=0x%lx), moving to 0x%p\n",
                        _end, mtd_size, (void *)memory_mtd_start);
                dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
        }
#endif /* CONFIG_MTD_UCLINUX */

        /* We need to limit memory, since everything could have a text section
         * of userspace in it, and expose anomaly 05000263.  If the anomaly
         * doesn't exist, or we don't need to - then don't.
         */
        if (memory_end > max_mem) {
                pr_info("Limiting kernel memory to %liMB due to anomaly 05000263\n", max_mem >> 20);
                memory_end = max_mem;
        }

#ifdef CONFIG_MPU
#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
        page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
                ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
#else
        page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
#endif
        page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif

        init_mm.start_code = (unsigned long)_stext;
        init_mm.end_code = (unsigned long)_etext;
        init_mm.end_data = (unsigned long)_edata;
        init_mm.brk = (unsigned long)0;

        printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
        printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);

        printk(KERN_INFO "Memory map:\n"
                " fixedcode = 0x%p-0x%p\n"
                " text      = 0x%p-0x%p\n"
                " rodata    = 0x%p-0x%p\n"
                " bss       = 0x%p-0x%p\n"
                " data      = 0x%p-0x%p\n"
                " stack     = 0x%p-0x%p\n"
                " init      = 0x%p-0x%p\n"
                " available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
                " rootfs    = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
                " DMA Zone  = 0x%p-0x%p\n"
#endif
                , (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
                _stext, _etext,
                __start_rodata, __end_rodata,
                __bss_start, __bss_stop,
                _sdata, _edata,
                (void *)&init_thread_union,
                (void *)((int)(&init_thread_union) + THREAD_SIZE),
                __init_begin, __init_end,
                (void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
                , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
#endif
#if DMA_UNCACHED_REGION > 0
                , (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
#endif
        );
}

/*
 * Find the lowest and highest page frame numbers we have available
 */
void __init find_min_max_pfn(void)
{
        int i;

        max_pfn = 0;
        min_low_pfn = memory_end;

        for (i = 0; i < bfin_memmap.nr_map; i++) {
                unsigned long start, end;
                /* RAM? */
                if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
                        continue;
                start = PFN_UP(bfin_memmap.map[i].addr);
                end = PFN_DOWN(bfin_memmap.map[i].addr +
                        bfin_memmap.map[i].size);
                if (start >= end)
                        continue;
                if (end > max_pfn)
                        max_pfn = end;
                if (start < min_low_pfn)
                        min_low_pfn = start;
        }
}
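
/*
 * Rounding example (editor's note, assuming PAGE_SHIFT == 12): for a RAM
 * entry covering 0x1800-0xA00800, PFN_UP(0x1800) = 2 and
 * PFN_DOWN(0xA00800) = 0xA00; partial pages at either end are not counted
 * as available, so min_low_pfn/max_pfn only ever describe whole page
 * frames.
 */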

static __init void setup_bootmem_allocator(void)
{
        int bootmap_size;
        int i;
        unsigned long start_pfn, end_pfn;
        unsigned long curr_pfn, last_pfn, size;

        /* mark memory between memory_start and memory_end usable */
        add_memory_region(memory_start,
                memory_end - memory_start, BFIN_MEMMAP_RAM);
        /* sanity check for overlap */
        sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
        print_memory_map("boot memmap");

        /* initialize globals in linux/bootmem.h */
        find_min_max_pfn();
        /* pfn of the last usable page frame */
        if (max_pfn > memory_end >> PAGE_SHIFT)
                max_pfn = memory_end >> PAGE_SHIFT;
        /* pfn of last page frame directly mapped by kernel */
        max_low_pfn = max_pfn;
        /* pfn of the first usable page frame after kernel image */
        if (min_low_pfn < memory_start >> PAGE_SHIFT)
                min_low_pfn = memory_start >> PAGE_SHIFT;

        start_pfn = PAGE_OFFSET >> PAGE_SHIFT;
        end_pfn = memory_end >> PAGE_SHIFT;

        /*
         * give all the memory to the bootmap allocator, tell it to put the
         * boot mem_map at the start of memory.
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0),
                memory_start >> PAGE_SHIFT, /* map goes here */
                start_pfn, end_pfn);

        /* register the memmap regions with the bootmem allocator */
        for (i = 0; i < bfin_memmap.nr_map; i++) {
                /*
                 * Only hand usable (RAM) regions back as free memory
                 */
                if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
                        continue;
                /*
                 * We are rounding up the start address of usable memory:
                 */
                curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
                if (curr_pfn >= end_pfn)
                        continue;
                /*
                 * ... and at the end of the usable range downwards:
                 */
                last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
                        bfin_memmap.map[i].size);

                if (last_pfn > end_pfn)
                        last_pfn = end_pfn;

                /*
                 * ... finally, did all the rounding and playing
                 * around just make the area go away?
                 */
                if (last_pfn <= curr_pfn)
                        continue;

                size = last_pfn - curr_pfn;
                free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
        }

        /* reserve memory before memory_start, including bootmap */
        reserve_bootmem(PAGE_OFFSET,
                memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
                BOOTMEM_DEFAULT);
}

#define EBSZ_TO_MEG(ebsz) \
({ \
        int meg = 0; \
        switch (ebsz & 0xf) { \
        case 0x1: meg =  16; break; \
        case 0x3: meg =  32; break; \
        case 0x5: meg =  64; break; \
        case 0x7: meg = 128; break; \
        case 0x9: meg = 256; break; \
        case 0xb: meg = 512; break; \
        } \
        meg; \
})
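
/*
 * Decode example (editor's illustration, not in the original source):
 * EBSZ_TO_MEG reads the low nibble of an EBIU_SDBCTL bank field, so a field
 * value of 0x13 gives 0x13 & 0xf = 0x3, i.e. a 32MB bank; unrecognized
 * encodings fall out as 0.
 */
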
static inline int __init get_mem_size(void)
{
#if defined(EBIU_SDBCTL)
# if defined(BF561_FAMILY)
        int ret = 0;
        u32 sdbctl = bfin_read_EBIU_SDBCTL();
        ret += EBSZ_TO_MEG(sdbctl >> 0);
        ret += EBSZ_TO_MEG(sdbctl >> 8);
        ret += EBSZ_TO_MEG(sdbctl >> 16);
        ret += EBSZ_TO_MEG(sdbctl >> 24);
        return ret;
# else
        return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL());
# endif
#elif defined(EBIU_DDRCTL1)
        u32 ddrctl = bfin_read_EBIU_DDRCTL1();
        int ret = 0;
        switch (ddrctl & 0xc0000) {
        case DEVSZ_64:  ret =  64 / 8; break;
        case DEVSZ_128: ret = 128 / 8; break;
        case DEVSZ_256: ret = 256 / 8; break;
        case DEVSZ_512: ret = 512 / 8; break;
        }
        switch (ddrctl & 0x30000) {
        case DEVWD_4:  ret *= 2;        /* intentional fall through */
        case DEVWD_8:  ret *= 2;        /* intentional fall through */
        case DEVWD_16: break;
        }
        if ((ddrctl & 0xc000) == 0x4000)
                ret *= 2;
        return ret;
#endif
        BUG();
}
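
/*
 * Worked DDR example (editor's sketch; the DEVSZ/DEVWD macro values are
 * taken on faith from the register spec): a DDRCTL1 value encoding
 * DEVSZ_256 and DEVWD_8 gives ret = 256 / 8 = 32, which the intentional
 * fall-through in the DEVWD switch doubles once to 64 (MB); a matching
 * width field (bits 0xc000 == 0x4000) would double it again.
 */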

__attribute__((weak))
void __init native_machine_early_platform_add_devices(void)
{
}

void __init setup_arch(char **cmdline_p)
{
        u32 mmr;
        unsigned long sclk, cclk;

        native_machine_early_platform_add_devices();

        enable_shadow_console();

        /* Check to make sure we are running on the right processor */
        if (unlikely(CPUID != bfin_cpuid()))
                printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
                        CPU, bfin_cpuid(), bfin_revid());

#ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
#endif

#if defined(CONFIG_CMDLINE_BOOL)
        strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
        command_line[sizeof(command_line) - 1] = 0;
#endif

        /* Keep a copy of command line */
        *cmdline_p = &command_line[0];
        memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';

        memset(&bfin_memmap, 0, sizeof(bfin_memmap));

        /* If the user does not specify things on the command line, use
         * what the bootloader set up
         */
        physical_mem_end = 0;
        parse_cmdline_early(&command_line[0]);

        if (_ramend == 0)
                _ramend = get_mem_size() * 1024 * 1024;

        if (physical_mem_end == 0)
                physical_mem_end = _ramend;

        memory_setup();

        /* Initialize Async memory banks */
        bfin_write_EBIU_AMBCTL0(AMBCTL0VAL);
        bfin_write_EBIU_AMBCTL1(AMBCTL1VAL);
        bfin_write_EBIU_AMGCTL(AMGCTLVAL);
#ifdef CONFIG_EBIU_MBSCTLVAL
        bfin_write_EBIU_MBSCTL(CONFIG_EBIU_MBSCTLVAL);
        bfin_write_EBIU_MODE(CONFIG_EBIU_MODEVAL);
        bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
#endif
#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
        bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
        bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
        bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
        bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
                ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
#endif

        cclk = get_cclk();
        sclk = get_sclk();

        if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
                panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");

#ifdef BF561_FAMILY
        if (ANOMALY_05000266) {
                bfin_read_IMDMA_D0_IRQ_STATUS();
                bfin_read_IMDMA_D1_IRQ_STATUS();
        }
#endif

        mmr = bfin_read_TBUFCTL();
        printk(KERN_INFO "Hardware Trace %s and %sabled\n",
                (mmr & 0x1) ? "active" : "off",
                (mmr & 0x2) ? "en" : "dis");

        mmr = bfin_read_SYSCR();
        printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);

        /* Newer parts mirror SWRST bits in SYSCR */
#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
    defined(CONFIG_BF538) || defined(CONFIG_BF539)
        _bfin_swrst = bfin_read_SWRST();
#else
        /* Clear boot mode field */
        _bfin_swrst = mmr & ~0xf;
#endif

#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
        bfin_write_SWRST(_bfin_swrst & ~DOUBLE_FAULT);
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_RESET
        bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif

#ifdef CONFIG_SMP
        if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
        if (_bfin_swrst & RESET_DOUBLE) {
#endif
                printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
                /* We assume the crashing kernel and the current symbol table match */
                printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
                        (int)init_saved_seqstat & SEQSTAT_EXCAUSE, init_saved_retx);
                printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr);
                printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr);
#endif
                printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
                        init_retx);
        } else if (_bfin_swrst & RESET_WDOG)
                printk(KERN_INFO "Recovering from Watchdog event\n");
        else if (_bfin_swrst & RESET_SOFTWARE)
                printk(KERN_NOTICE "Reset caused by Software reset\n");

        printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n");
        if (bfin_compiled_revid() == 0xffff)
                printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
        else if (bfin_compiled_revid() == -1)
                printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
        else
                printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());

        if (likely(CPUID == bfin_cpuid())) {
                if (bfin_revid() != bfin_compiled_revid()) {
                        if (bfin_compiled_revid() == -1)
                                printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
                                        bfin_revid());
                        else if (bfin_compiled_revid() != 0xffff) {
                                printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
                                        bfin_compiled_revid(), bfin_revid());
                                if (bfin_compiled_revid() > bfin_revid())
                                        panic("Error: you are missing anomaly workarounds for this rev");
                        }
                }
                if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
                        printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
                                CPU, bfin_revid());
        }

        printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");

        printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
                cclk / 1000000, sclk / 1000000);

        setup_bootmem_allocator();

        paging_init();

        /* Copy atomic sequences to their fixed location, and sanity check that
           these locations are the ones that we advertise to userspace. */
        memcpy((void *)FIXED_CODE_START, &fixed_code_start,
                FIXED_CODE_END - FIXED_CODE_START);
        BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
                != SIGRETURN_STUB - FIXED_CODE_START);
        BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
                != ATOMIC_XCHG32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
                != ATOMIC_CAS32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
                != ATOMIC_ADD32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
                != ATOMIC_SUB32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
                != ATOMIC_IOR32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
                != ATOMIC_AND32 - FIXED_CODE_START);
        BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
                != ATOMIC_XOR32 - FIXED_CODE_START);
        BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
                != SAFE_USER_INSTRUCTION - FIXED_CODE_START);

#ifdef CONFIG_SMP
        platform_init_cpus();
#endif
        init_exception_vectors();
        bfin_cache_init();      /* Initialize caches for the boot CPU */
}

static int __init topology_init(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
        }

        return 0;
}

subsys_initcall(topology_init);

/* Get the input clock frequency */
static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
static u_long get_clkin_hz(void)
{
        return cached_clkin_hz;
}
static int __init early_init_clkin_hz(char *buf)
{
        cached_clkin_hz = simple_strtoul(buf, NULL, 0);
#ifdef BFIN_KERNEL_CLOCK
        if (cached_clkin_hz != CONFIG_CLKIN_HZ)
                panic("cannot change clkin_hz when reprogramming clocks");
#endif
        return 1;
}
early_param("clkin_hz=", early_init_clkin_hz);

/* Get the VCO (PLL output) frequency */
static u_long get_vco(void)
{
        static u_long cached_vco;
        u_long msel, pll_ctl;

        /* The assumption here is that VCO never changes at runtime.
         * If, someday, we support that, then we'll have to change this.
         */
        if (cached_vco)
                return cached_vco;

        pll_ctl = bfin_read_PLL_CTL();
        msel = (pll_ctl >> 9) & 0x3F;
        if (0 == msel)
                msel = 64;

        cached_vco = get_clkin_hz();
        cached_vco >>= (1 & pll_ctl);   /* DF bit */
        cached_vco *= msel;
        return cached_vco;
}
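
/*
 * Worked example (added by the editor for illustration): with a 25MHz
 * CLKIN, DF = 0 and MSEL = 16, the arithmetic above computes
 * VCO = 25MHz * 16 = 400MHz; an MSEL field of 0 is treated as a multiplier
 * of 64, hence the special case.
 */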

/* Get the Core clock */
u_long get_cclk(void)
{
        static u_long cached_cclk_pll_div, cached_cclk;
        u_long csel, ssel;

        if (bfin_read_PLL_STAT() & 0x1)
                return get_clkin_hz();

        ssel = bfin_read_PLL_DIV();
        if (ssel == cached_cclk_pll_div)
                return cached_cclk;
        else
                cached_cclk_pll_div = ssel;

        csel = ((ssel >> 4) & 0x03);
        ssel &= 0xf;
        if (ssel && ssel < (1 << csel)) /* SCLK > CCLK */
                cached_cclk = get_vco() / ssel;
        else
                cached_cclk = get_vco() >> csel;
        return cached_cclk;
}
EXPORT_SYMBOL(get_cclk);

/* Get the System clock */
u_long get_sclk(void)
{
        static u_long cached_sclk;
        u_long ssel;

        /* The assumption here is that SCLK never changes at runtime.
         * If, someday, we support that, then we'll have to change this.
         */
        if (cached_sclk)
                return cached_sclk;

        if (bfin_read_PLL_STAT() & 0x1)
                return get_clkin_hz();

        ssel = bfin_read_PLL_DIV() & 0xf;
        if (0 == ssel) {
                printk(KERN_WARNING "Invalid System Clock\n");
                ssel = 1;
        }

        cached_sclk = get_vco() / ssel;
        return cached_sclk;
}
EXPORT_SYMBOL(get_sclk);
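
/*
 * Worked example (editor's illustration): continuing the 400MHz VCO case,
 * CSEL = 0 in PLL_DIV yields CCLK = 400MHz >> 0 = 400MHz, and SSEL = 3
 * yields SCLK = 400MHz / 3 ~= 133MHz.  get_cclk() switches to the SSEL
 * divider when ssel < (1 << csel), i.e. when the CSEL divider would put
 * CCLK below SCLK, since the core then effectively runs at SCLK.
 */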

unsigned long sclk_to_usecs(unsigned long sclk)
{
        u64 tmp = USEC_PER_SEC * (u64)sclk;
        do_div(tmp, get_sclk());
        return tmp;
}
EXPORT_SYMBOL(sclk_to_usecs);

unsigned long usecs_to_sclk(unsigned long usecs)
{
        u64 tmp = get_sclk() * (u64)usecs;
        do_div(tmp, USEC_PER_SEC);
        return tmp;
}
EXPORT_SYMBOL(usecs_to_sclk);
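
/*
 * Conversion example (editor's note): with SCLK = 133MHz,
 * sclk_to_usecs(266) = 266 * 1000000 / 133000000 = 2us, and
 * usecs_to_sclk(2) = 2 * 133000000 / 1000000 = 266 ticks.  The u64
 * intermediate plus do_div() keep the multiplication from overflowing on
 * this 32-bit machine.
 */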

/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        char *cpu, *mmu, *fpu, *vendor, *cache;
        uint32_t revid;
        int cpu_num = *(unsigned int *)v;
        u_long sclk, cclk;
        u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
        struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);

        cpu = CPU;
        mmu = "none";
        fpu = "none";
        revid = bfin_revid();

        sclk = get_sclk();
        cclk = get_cclk();

        switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
        case 0xca:
                vendor = "Analog Devices";
                break;
        default:
                vendor = "unknown";
                break;
        }

        seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);

        if (CPUID == bfin_cpuid())
                seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
        else
                seq_printf(m, "cpu family\t: Compiled for:0x%04x, running on:0x%04x\n",
                        CPUID, bfin_cpuid());

        seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
                "stepping\t: %d ",
                cpu, cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
                "mpu on",
#else
                "mpu off",
#endif
                revid);

        if (bfin_revid() != bfin_compiled_revid()) {
                if (bfin_compiled_revid() == -1)
                        seq_printf(m, "(Compiled for Rev none)");
                else if (bfin_compiled_revid() == 0xffff)
                        seq_printf(m, "(Compiled for Rev any)");
                else
                        seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
        }

        seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
                cclk/1000000, cclk%1000000,
                sclk/1000000, sclk%1000000);
        seq_printf(m, "bogomips\t: %lu.%02lu\n"
                "Calibration\t: %lu loops\n",
                (loops_per_jiffy * HZ) / 500000,
                ((loops_per_jiffy * HZ) / 5000) % 100,
                (loops_per_jiffy * HZ));

        /* Check Cache configuration */
        switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
        case ACACHE_BSRAM:
                cache = "dbank-A/B\t: cache/sram";
                dcache_size = 16;
                dsup_banks = 1;
                break;
        case ACACHE_BCACHE:
                cache = "dbank-A/B\t: cache/cache";
                dcache_size = 32;
                dsup_banks = 2;
                break;
        case ASRAM_BSRAM:
                cache = "dbank-A/B\t: sram/sram";
                dcache_size = 0;
                dsup_banks = 0;
                break;
        default:
                cache = "unknown";
                dcache_size = 0;
                dsup_banks = 0;
                break;
        }

        /* Is it turned on? */
        if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
                dcache_size = 0;

        if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
                icache_size = 0;

        seq_printf(m, "cache size\t: %d KB(L1 icache) "
                "%d KB(L1 dcache) %d KB(L2 cache)\n",
                icache_size, dcache_size, 0);
        seq_printf(m, "%s\n", cache);
        seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
                "cacheable"
#else
                "uncacheable"
#endif
                " in instruction cache\n");
        seq_printf(m, "external memory\t: "
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
                "cacheable (write-back)"
#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
                "cacheable (write-through)"
#else
                "uncacheable"
#endif
                " in data cache\n");

        if (icache_size)
                seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
                        BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
        else
                seq_printf(m, "icache setup\t: off\n");

        seq_printf(m,
                "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
                dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
                BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
        seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
        seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
#endif

        seq_printf(m, "\n");

        if (cpu_num != num_possible_cpus() - 1)
                return 0;

        if (L2_LENGTH) {
                seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
                seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_ICACHEABLE)
                        "cacheable"
#else
                        "uncacheable"
#endif
                        " in instruction cache\n");
                seq_printf(m, "L2 SRAM\t\t: "
#if defined(CONFIG_BFIN_L2_WRITEBACK)
                        "cacheable (write-back)"
#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
                        "cacheable (write-through)"
#else
                        "uncacheable"
#endif
                        " in data cache\n");
        }
        seq_printf(m, "board name\t: %s\n", bfin_board_name);
        seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
                physical_mem_end >> 10, 0ul, physical_mem_end);
        seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
                ((int)memory_end - (int)_rambase) >> 10,
                _rambase, memory_end);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        if (*pos == 0)
                *pos = cpumask_first(cpu_online_mask);
        if (*pos >= num_online_cpus())
                return NULL;

        return pos;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        *pos = cpumask_next(*pos, cpu_online_mask);

        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};

void __init cmdline_init(const char *r0)
{
        early_shadow_stamp();
        if (r0)
                strncpy(command_line, r0, COMMAND_LINE_SIZE);
}