torvalds/linux: arch/arc/kernel/setup.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/root_dev.h>
#include <linux/clk.h>
#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/cpu.h>
#include <linux/of_clk.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
#include <linux/cache.h>
#include <uapi/linux/mount.h>
#include <asm/sections.h>
#include <asm/arcregs.h>
#include <asm/asserts.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <asm/unwind.h>
#include <asm/mach_desc.h>
#include <asm/smp.h>
#include <asm/dsp-impl.h>
#include <soc/arc/mcip.h>

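/*
 * FIX_PTR: an empty inline-asm statement with x as a read-write ("+r")
 * operand. It emits no instructions, but forces the compiler to assume
 * x may have been modified, i.e. an optimization barrier for that variable.
 */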
#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))

unsigned int intr_to_DE_cnt;

/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
int __initdata uboot_magic;
char __initdata *uboot_arg;

const struct machine_desc *machine_desc;

struct task_struct *_current_task[NR_CPUS];	/* For stack switching */

struct cpuinfo_arc {
	int arcver;
	unsigned int t0:1, t1:1;
	struct {
		unsigned long base;
		unsigned int sz;
	} iccm, dccm;
};

#ifdef CONFIG_ISA_ARCV2

static const struct id_to_str arc_hs_rel[] = {
	/* ID.ARCVER,	Release */
	{ 0x51,		"R2.0" },
	{ 0x52,		"R2.1" },
	{ 0x53,		"R3.0" },
};

static const struct id_to_str arc_hs_ver54_rel[] = {
	/* UARCH.MAJOR,	Release */
	{  0,		"R3.10a"},
	{  1,		"R3.50a"},
	{  2,		"R3.60a"},
	{  3,		"R4.00a"},
	{  0xFF,	NULL }
};
#endif

static int
arcompact_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
	int n = 0;
#ifdef CONFIG_ISA_ARCOMPACT
	char *cpu_nm, *isa_nm = "ARCompact";
	struct bcr_fp_arcompact fpu_sp, fpu_dp;
	int atomic = 0, be, present;
	int bpu_full, bpu_cache, bpu_pred;
	struct bcr_bpu_arcompact bpu;
	struct bcr_iccm_arcompact iccm;
	struct bcr_dccm_arcompact dccm;
	struct bcr_generic isa;

	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);

	if (!isa.ver)	/* ISA BCR absent, use Kconfig info */
		atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
	else {
		/* ARC700_BUILD only has 2 bits of isa info */
		atomic = isa.info & 1;
	}

	be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);

	if (info->arcver < 0x34)
		cpu_nm = "ARC750";
	else
		cpu_nm = "ARC770";

	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s (%s ISA) %s%s%s\n",
		       c, cpu_nm, isa_nm,
		       IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
		       IS_AVAIL1(be, "[Big-Endian]"));

	READ_BCR(ARC_REG_FP_BCR, fpu_sp);
	READ_BCR(ARC_REG_DPFP_BCR, fpu_dp);

	if (fpu_sp.ver | fpu_dp.ver)
		n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
			       IS_AVAIL1(fpu_sp.ver, "SP "),
			       IS_AVAIL1(fpu_dp.ver, "DP "));

	READ_BCR(ARC_REG_BPU_BCR, bpu);
	bpu_full = bpu.fam ? 1 : 0;
	bpu_cache = 256 << (bpu.ent - 1);
	bpu_pred = 256 << (bpu.ent - 1);

	n += scnprintf(buf + n, len - n,
		       "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
		       IS_AVAIL1(bpu_full, "full"),
		       IS_AVAIL1(!bpu_full, "partial"),
		       bpu_cache, bpu_pred);

	READ_BCR(ARC_REG_ICCM_BUILD, iccm);
	if (iccm.ver) {
		info->iccm.sz = 4096 << iccm.sz;	/* 8K to 512K */
		info->iccm.base = iccm.base << 16;
	}

	READ_BCR(ARC_REG_DCCM_BUILD, dccm);
	if (dccm.ver) {
		unsigned long base;
		info->dccm.sz = 2048 << dccm.sz;	/* 2K to 256K */

		base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
		info->dccm.base = base & ~0xF;
	}

	/* ARCompact ISA specific sanity checks */
	present = fpu_dp.ver;	/* SP has no arch visible regs */
	CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
#endif
	return n;
}

static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
	int n = 0;
#ifdef CONFIG_ISA_ARCV2
	const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2";
	int dual_issue = 0, dual_enb = 0, mpy_opt, present;
	int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
	char mpy_nm[16], lpb_nm[32];
	struct bcr_isa_arcv2 isa;
	struct bcr_mpy mpy;
	struct bcr_fp_arcv2 fpu;
	struct bcr_bpu_arcv2 bpu;
	struct bcr_lpb lpb;
	struct bcr_iccm_arcv2 iccm;
	struct bcr_dccm_arcv2 dccm;
	struct bcr_erp erp;

	/*
	 * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
	 * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
	 * releases only update it.
	 */

	if (info->arcver > 0x50 && info->arcver <= 0x53) {
		release = arc_hs_rel[info->arcver - 0x51].str;
	} else {
		const struct id_to_str *tbl;
		struct bcr_uarch_build uarch;

		READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);

		for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
			if (uarch.maj == tbl->id) {
				release = tbl->str;
				break;
			}
		}
		if (uarch.prod == 4) {
			unsigned int exec_ctrl;

			cpu_nm = "HS48";
			dual_issue = 1;
			/* if dual issue hardware, is it enabled ? */
			READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
			dual_enb = !(exec_ctrl & 1);
		}
	}

	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);

	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
		       c, cpu_nm, release, isa_nm,
		       IS_AVAIL1(isa.be, "[Big-Endian]"),
		       IS_AVAIL3(dual_issue, dual_enb, " Dual-Issue "));

	READ_BCR(ARC_REG_MPY_BCR, mpy);
	mpy_opt = 2;	/* stock MPY/MPYH */
	if (mpy.dsp)	/* OPT 7-9 */
		mpy_opt = mpy.dsp + 6;

	scnprintf(mpy_nm, 16, "mpy[opt %d] ", mpy_opt);

	READ_BCR(ARC_REG_FP_V2_BCR, fpu);

	n += scnprintf(buf + n, len - n, "ISA Extn\t: %s%s%s%s%s%s%s%s%s%s%s\n",
		       IS_AVAIL2(isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
		       IS_AVAIL2(isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
		       IS_AVAIL2(isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
		       IS_AVAIL1(mpy.ver, mpy_nm),
		       IS_AVAIL1(isa.div_rem, "div_rem "),
		       IS_AVAIL1((fpu.sp | fpu.dp), " FPU:"),
		       IS_AVAIL1(fpu.sp, " sp"),
		       IS_AVAIL1(fpu.dp, " dp"));

	READ_BCR(ARC_REG_BPU_BCR, bpu);
	bpu_full = bpu.ft;
	bpu_cache = 256 << bpu.bce;
	bpu_pred = 2048 << bpu.pte;
	bpu_ret_stk = 4 << bpu.rse;

	READ_BCR(ARC_REG_LPB_BUILD, lpb);
	if (lpb.ver) {
		unsigned int ctl;
		ctl = read_aux_reg(ARC_REG_LPB_CTRL);

		scnprintf(lpb_nm, sizeof(lpb_nm), " Loop Buffer:%d %s",
			  lpb.entries, IS_DISABLED_RUN(!ctl));
	}

	n += scnprintf(buf + n, len - n,
		       "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d%s\n",
		       IS_AVAIL1(bpu_full, "full"),
		       IS_AVAIL1(!bpu_full, "partial"),
		       bpu_cache, bpu_pred, bpu_ret_stk,
		       lpb_nm);

	READ_BCR(ARC_REG_ICCM_BUILD, iccm);
	if (iccm.ver) {
		unsigned long base;
		info->iccm.sz = 256 << iccm.sz00;	/* 512B to 16M */
		if (iccm.sz00 == 0xF && iccm.sz01 > 0)
			info->iccm.sz <<= iccm.sz01;
		base = read_aux_reg(ARC_REG_AUX_ICCM);
		info->iccm.base = base & 0xF0000000;
	}

	READ_BCR(ARC_REG_DCCM_BUILD, dccm);
	if (dccm.ver) {
		unsigned long base;
		info->dccm.sz = 256 << dccm.sz0;
		if (dccm.sz0 == 0xF && dccm.sz1 > 0)
			info->dccm.sz <<= dccm.sz1;
		base = read_aux_reg(ARC_REG_AUX_DCCM);
		info->dccm.base = base & 0xF0000000;
	}

	/* Error Protection: ECC/Parity */
	READ_BCR(ARC_REG_ERP_BUILD, erp);
	if (erp.ver) {
		struct ctl_erp ctl;
		READ_BCR(ARC_REG_ERP_CTRL, ctl);
		/* inverted bits: 0 means enabled */
		n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
			       IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
			       IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
			       IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
	}

	/* ARCv2 ISA specific sanity checks */
	present = fpu.sp | fpu.dp | mpy.dsp;	/* DSP and/or FPU */
	CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);

	dsp_config_check();
#endif
	return n;
}

static char *arc_cpu_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
	struct bcr_identity ident;
	struct bcr_timer timer;
	struct bcr_generic bcr;
	struct mcip_bcr mp;
	struct bcr_actionpoint ap;
	unsigned long vec_base;
	int ap_num, ap_full, smart, rtt, n;

	memset(info, 0, sizeof(struct cpuinfo_arc));

	READ_BCR(AUX_IDENTITY, ident);
	info->arcver = ident.family;

	n = scnprintf(buf, len,
		      "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
		      ident.family, ident.cpu_id, ident.chip_id);

	if (is_isa_arcompact()) {
		n += arcompact_mumbojumbo(c, info, buf + n, len - n);
	} else if (is_isa_arcv2()) {
		n += arcv2_mumbojumbo(c, info, buf + n, len - n);
	}

	n += arc_mmu_mumbojumbo(c, buf + n, len - n);
	n += arc_cache_mumbojumbo(c, buf + n, len - n);

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	info->t0 = timer.t0;
	info->t1 = timer.t1;

	READ_BCR(ARC_REG_MCIP_BCR, mp);
	vec_base = read_aux_reg(AUX_INTR_VEC_BASE);

	n += scnprintf(buf + n, len - n,
		       "Timers\t\t: %s%s%s%s%s%s\nVector Table\t: %#lx\n",
		       IS_AVAIL1(timer.t0, "Timer0 "),
		       IS_AVAIL1(timer.t1, "Timer1 "),
		       IS_AVAIL2(timer.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
		       IS_AVAIL2(mp.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
		       vec_base);

	READ_BCR(ARC_REG_AP_BCR, ap);
	if (ap.ver) {
		ap_num = 2 << ap.num;
		ap_full = !ap.min;
	}

	READ_BCR(ARC_REG_SMART_BCR, bcr);
	smart = bcr.ver ? 1 : 0;

	READ_BCR(ARC_REG_RTT_BCR, bcr);
	rtt = bcr.ver ? 1 : 0;

	if (ap.ver | smart | rtt) {
		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
			       IS_AVAIL1(smart, "smaRT "),
			       IS_AVAIL1(rtt, "RTT "));
		if (ap.ver) {
			n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
				       ap_num,
				       ap_full ? "full":"min");
		}
		n += scnprintf(buf + n, len - n, "\n");
	}

	if (info->dccm.sz || info->iccm.sz)
		n += scnprintf(buf + n, len - n,
			       "Extn [CCM]\t: DCCM @ %lx, %d KB / ICCM: @ %lx, %d KB\n",
			       info->dccm.base, TO_KB(info->dccm.sz),
			       info->iccm.base, TO_KB(info->iccm.sz));

	return buf;
}

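/*
 * Kconfig vs hardware sanity helpers: the "strict" variant warns when the
 * hardware exists but the matching Kconfig option is off, and panics when
 * the option is on but the hardware is absent; the "weak" variant only
 * does the latter.
 */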
void chk_opt_strict(char *opt_name, bool hw_exists, bool opt_ena)
{
	if (hw_exists && !opt_ena)
		pr_warn(" ! Enable %s for working apps\n", opt_name);
	else if (!hw_exists && opt_ena)
		panic("Disable %s, hardware NOT present\n", opt_name);
}

void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena)
{
	if (!hw_exists && opt_ena)
		panic("Disable %s, hardware NOT present\n", opt_name);
}

/*
 * ISA agnostic sanity checks
 */
static void arc_chk_core_config(struct cpuinfo_arc *info)
{
	if (!info->t0)
		panic("Timer0 is not present!\n");

	if (!info->t1)
		panic("Timer1 is not present!\n");

#ifdef CONFIG_ARC_HAS_DCCM
	/*
	 * DCCM can be arbit placed in hardware.
	 * Make sure its placement/sz matches what Linux is built with
	 */
	if ((unsigned int)__arc_dccm_base != info->dccm.base)
		panic("Linux built with incorrect DCCM Base address\n");

	if (CONFIG_ARC_DCCM_SZ * SZ_1K != info->dccm.sz)
		panic("Linux built with incorrect DCCM Size\n");
#endif

#ifdef CONFIG_ARC_HAS_ICCM
	if (CONFIG_ARC_ICCM_SZ * SZ_1K != info->iccm.sz)
		panic("Linux built with incorrect ICCM Size\n");
#endif
}

/*
 * Initialize and setup the processor core
 * This is called by all the CPUs thus should not do special case stuff
 *    such as only for boot CPU etc
 */
void setup_processor(void)
{
	struct cpuinfo_arc info;
	int c = smp_processor_id();
	char str[512];

	pr_info("%s", arc_cpu_mumbojumbo(c, &info, str, sizeof(str)));
	pr_info("%s", arc_platform_smp_cpuinfo());

	arc_chk_core_config(&info);

	arc_init_IRQ();
	arc_mmu_init();
	arc_cache_init();
}

static inline bool uboot_arg_invalid(unsigned long addr)
{
	/*
	 * Check that it is a untranslated address (although MMU is not enabled
	 * yet, it being a high address ensures this is not by fluke)
	 */
	if (addr < PAGE_OFFSET)
		return true;

	/* Check that address doesn't clobber resident kernel image */
	return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
}

#define IGNORE_ARGS		"Ignore U-boot args: "

/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
#define UBOOT_TAG_NONE		0
#define UBOOT_TAG_CMDLINE	1
#define UBOOT_TAG_DTB		2
/* We always pass 0 as magic from U-boot */
#define UBOOT_MAGIC_VALUE	0

void __init handle_uboot_args(void)
{
	bool use_embedded_dtb = true;
	bool append_cmdline = false;

	/* check that we know this tag */
	if (uboot_tag != UBOOT_TAG_NONE &&
	    uboot_tag != UBOOT_TAG_CMDLINE &&
	    uboot_tag != UBOOT_TAG_DTB) {
		pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
		goto ignore_uboot_args;
	}

	if (uboot_magic != UBOOT_MAGIC_VALUE) {
		pr_warn(IGNORE_ARGS "non zero uboot magic\n");
		goto ignore_uboot_args;
	}

	if (uboot_tag != UBOOT_TAG_NONE &&
	    uboot_arg_invalid((unsigned long)uboot_arg)) {
		pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
		goto ignore_uboot_args;
	}

	/* see if U-boot passed an external Device Tree blob */
	if (uboot_tag == UBOOT_TAG_DTB) {
		machine_desc = setup_machine_fdt((void *)uboot_arg);

		/* external Device Tree blob is invalid - use embedded one */
		use_embedded_dtb = !machine_desc;
	}

	if (uboot_tag == UBOOT_TAG_CMDLINE)
		append_cmdline = true;

ignore_uboot_args:

	if (use_embedded_dtb) {
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Embedded DT invalid\n");
	}

	/*
	 * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
	 * append processing can only happen after.
	 */
	if (append_cmdline) {
		/* Ensure a whitespace between the 2 cmdlines */
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
	}
}

void __init setup_arch(char **cmdline_p)
{
	handle_uboot_args();

	/* Save unparsed command line copy for /proc/cmdline */
	*cmdline_p = boot_command_line;

	/* To force early parsing of things like mem=xxx */
	parse_early_param();

	/* Platform/board specific: e.g. early console registration */
	if (machine_desc->init_early)
		machine_desc->init_early();

	smp_init_cpus();

	setup_processor();
	setup_arch_memory();

	/* copy flat DT out of .init and then unflatten it */
	unflatten_and_copy_device_tree();

	/* Can be issue if someone passes cmd line arg "ro"
	 * But that is unlikely so keeping it as it is
	 */
	root_mountflags &= ~MS_RDONLY;

	arc_unwind_init();
}

/*
 * Called from start_kernel() - boot CPU only
 */
void __init time_init(void)
{
	of_clk_init(NULL);
	timer_probe();
}

static int __init customize_machine(void)
{
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_late_machine(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();

	return 0;
}
late_initcall(init_late_machine);

/*
 * Get CPU information for use by the procfs.
 */

#define cpu_to_ptr(c)	((void *)(0xFFFF0000 | (unsigned int)(c)))
#define ptr_to_cpu(p)	(~0xFFFF0000UL & (unsigned int)(p))
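/* e.g. CPU 3 is handed to the iterator as (void *)0xFFFF0003 and decoded back to 3 */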

static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *str;
	int cpu_id = ptr_to_cpu(v);
	struct device *cpu_dev = get_cpu_device(cpu_id);
	struct cpuinfo_arc info;
	struct clk *cpu_clk;
	unsigned long freq = 0;

	if (!cpu_online(cpu_id)) {
		seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
		goto done;
	}

	str = (char *)__get_free_page(GFP_KERNEL);
	if (!str)
		goto done;

	seq_printf(m, arc_cpu_mumbojumbo(cpu_id, &info, str, PAGE_SIZE));

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		seq_printf(m, "CPU speed \t: Cannot get clock for processor [%d]\n",
			   cpu_id);
	} else {
		freq = clk_get_rate(cpu_clk);
	}
	if (freq)
		seq_printf(m, "CPU speed\t: %lu.%02lu Mhz\n",
			   freq / 1000000, (freq / 10000) % 100);

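	/*
	 * Fixed-point print of BogoMIPS = loops_per_jiffy * HZ / 500000:
	 * integer part, then two decimal digits.
	 */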
	seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100);

	seq_printf(m, arc_platform_smp_cpuinfo());

	free_page((unsigned long)str);
done:
	seq_printf(m, "\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	/*
	 * Callback returns cpu-id to iterator for show routine, NULL to stop.
	 * However since NULL is also a valid cpu-id (0), we use a round-about
	 * way to pass it w/o having to kmalloc/free a 2 byte string.
	 * Encode cpu-id as 0xFFcccc, which is decoded by show routine.
	 */
	return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo
};

static DEFINE_PER_CPU(struct cpu, cpu_topology);

static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		register_cpu(&per_cpu(cpu_topology, cpu), cpu);

	return 0;
}

subsys_initcall(topology_init);