/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 * Rewritten by Cort Dougan ([email protected]) for PReP
 * Copyright (C) 1996 Cort Dougan <[email protected]>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the entry point for the 64-bit kernel along
 * with some early initialization code common to all 64-bit powerpc
 * variants.
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/head-64.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
#include <asm/hw_irq.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif

/* The physical memory is laid out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
 * using the layout described in exceptions-64s.S
 */

/*
 * Entering into this code we make the following assumptions:
 *
 *  For pSeries or server processors:
 *  1. The MMU is off & open firmware is running in real mode.
 *  2. The primary CPU enters at __start.
 *  3. If the RTAS supports "query-cpu-stopped-state", then secondary
 *     CPUs will enter as directed by "start-cpu" RTAS call, which is
 *     generic_secondary_smp_init, with PIR in r3.
 *  4. Else the secondary CPUs will enter at secondary_hold (0x60) as
 *     directed by the "start-cpu" RTAS call, with PIR in r3.
 *  -or- For OPAL entry:
 *  1. The MMU is off, processor in HV mode.
 *  2. The primary CPU enters at 0 with device-tree in r3, OPAL base
 *     in r8, and entry in r9 for debugging purposes.
 *  3. Secondary CPUs enter as directed by OPAL_START_CPU call, which
 *     is at generic_secondary_smp_init, with PIR in r3.
 *
 *  For Book3E processors:
 *  1. The MMU is on running in AS0 in a state defined in ePAPR
 *  2. The kernel is entered at __start
 */

/*
 * boot_from_prom and prom_init run at the physical address. Everything
 * after prom and kexec entry run at the virtual address (PAGE_OFFSET).
 * Secondaries run at the virtual address from generic_secondary_common_init
 * onward.
 */

OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
USE_FIXED_SECTION(first_256B)
	/*
	 * Offsets are relative from the start of fixed section, and
	 * first_256B starts at 0. Offsets are a bit easier to use here
	 * than the fixed section entry macros.
	 */
	. = 0x0
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	FIXUP_ENDIAN
	b __start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it becomes non-zero.
	 * When non-zero, it contains the real address of the function the cpu
	 * should jump to.
	 */
	.balign 8
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.8byte	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below. */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.8byte	0x0

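	/*
	 * Note (added for context): the primary CPU releases the
	 * secondaries by storing the real address of the routine they
	 * should branch to into __secondary_hold_spinloop; see
	 * smp_release_cpus() in arch/powerpc/kernel/setup_64.c, which
	 * does roughly (simplified sketch):
	 *
	 *	ptr = (unsigned long *)
	 *		((unsigned long)&__secondary_hold_spinloop - PAGE_OFFSET);
	 *	*ptr = ppc_function_entry(generic_secondary_smp_init);
	 */
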
#ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
 * at the loaded address instead of the linked address. This
 * is used by kexec-tools to keep the kdump kernel in the
 * crash_kernel region. The loader is responsible for
 * observing the alignment requirement.
 */

#ifdef CONFIG_RELOCATABLE_TEST
#define RUN_AT_LOAD_DEFAULT 1 /* Test relocation, do not copy to 0 */
#else
#define RUN_AT_LOAD_DEFAULT 0x72756e30 /* "run0" -- relocate to 0 by default */
#endif

/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
DEFINE_FIXED_SYMBOL(__run_at_load, first_256B)
	.long RUN_AT_LOAD_DEFAULT
#endif

	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated. This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 */
	.globl	__secondary_hold
__secondary_hold:
	FIXUP_ENDIAN
#ifndef CONFIG_PPC_BOOK3E_64
	mfmsr r24
	ori r24,r24,MSR_RI
	mtmsrd r24 /* RI on */
#endif
	/* Grab our physical cpu number */
	mr r24,r3
	/* stash r4 for book3e */
	mr r25,r4

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset. */
	std r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(0)
	cmpdi 0,r12,0
	beq 100b

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
#ifdef CONFIG_PPC_BOOK3E_64
	tovirt(r12,r12)
#endif
	mtctr r12
	mr r3,r24
	/*
	 * it may be the case that other platforms have r4 right to
	 * begin with, this gives us some safety in case it is not
	 */
#ifdef CONFIG_PPC_BOOK3E_64
	mr r4,r25
#else
	li r4,0
#endif
	/* Make sure that patched code is visible */
	isync
	bctr
#else
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
#endif
CLOSE_FIXED_SECTION(first_256B)

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#else
OPEN_TEXT_SECTION(0x100)
#endif

USE_TEXT_SECTION()

#include "interrupt_64.S"

#ifdef CONFIG_PPC_BOOK3E_64
/*
 * The booting_thread_hwid holds the thread id we want to boot in cpu
 * hotplug case. It is set by cpu hotplug code, and is invalid by default.
 * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
 * bit field.
 */
	.globl	booting_thread_hwid
booting_thread_hwid:
	.long INVALID_THREAD_HWID
	.align 3
/*
 * start a thread in the same core
 * input parameters:
 * r3 = the thread physical id
 * r4 = the entry point where thread starts
 */
_GLOBAL(book3e_start_thread)
	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
	cmpwi r3, 0
	beq 10f
	cmpwi r3, 1
	beq 11f
	/* If the thread id is invalid, just exit. */
	b 13f
10:
	MTTMR(TMRN_IMSR0, 5)
	MTTMR(TMRN_INIA0, 4)
	b 12f
11:
	MTTMR(TMRN_IMSR1, 5)
	MTTMR(TMRN_INIA1, 4)
12:
	isync
	li r6, 1
	sld r6, r6, r3
	mtspr SPRN_TENS, r6
13:
	blr

/*
 * stop a thread in the same core
 * input parameter:
 * r3 = the thread physical id
 */
_GLOBAL(book3e_stop_thread)
	cmpwi r3, 0
	beq 10f
	cmpwi r3, 1
	beq 10f
	/* If the thread id is invalid, just exit. */
	b 13f
10:
	li r4, 1
	sld r4, r4, r3
	mtspr SPRN_TENC, r4
13:
	blr

_GLOBAL(fsl_secondary_thread_init)
	mfspr r4,SPRN_BUCSR

	/* Enable branch prediction */
	lis r3,BUCSR_INIT@h
	ori r3,r3,BUCSR_INIT@l
	mtspr SPRN_BUCSR,r3
	isync

	/*
	 * Fix PIR to match the linear numbering in the device tree.
	 *
	 * On e6500, the reset value of PIR uses the low three bits for
	 * the thread within a core, and the upper bits for the core
	 * number. There are two threads per core, so shift everything
	 * but the low bit right by two bits so that the cpu numbering is
	 * continuous.
	 *
	 * If the old value of BUCSR is non-zero, this thread has run
	 * before. Thus, we assume we are coming from kexec or a similar
	 * scenario, and PIR is already set to the correct value. This
	 * is a bit of a hack, but there are limited opportunities for
	 * getting information into the thread and the alternatives
	 * seemed like they'd be overkill. We can't tell just by looking
	 * at the old PIR value which state it's in, since the same value
	 * could be valid for one thread out of reset and for a different
	 * thread in Linux.
	 */

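	/*
	 * Worked example (added): with two threads per core the reset PIR
	 * is roughly (core << 3) | thread. For core 3, thread 1 that is
	 * 0x19; the rlwimi below shifts everything except the low bit
	 * right by two, giving (3 << 1) | 1 = 7, i.e. a continuous linear
	 * cpu number.
	 */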
	mfspr r3, SPRN_PIR
	cmpwi r4,0
	bne 1f
	rlwimi r3, r3, 30, 2, 30
	mtspr SPRN_PIR, r3
1:
	mr r24,r3

	/* turn on 64-bit mode */
	bl enable_64b_mode

	/* Book3E initialization */
	mr r3,r24
	bl book3e_secondary_thread_init
	bl relative_toc

	b generic_secondary_common_init

#endif /* CONFIG_PPC_BOOK3E_64 */

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 *
 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
 * this core already exists (setup via some other mechanism such
 * as SCOM before entry).
 */
_GLOBAL(generic_secondary_smp_init)
	FIXUP_ENDIAN

	li r13,0

	/* Poison TOC */
	li r2,-1

	mr r24,r3
	mr r25,r4

	/* turn on 64-bit mode */
	bl enable_64b_mode

#ifdef CONFIG_PPC_BOOK3E_64
	/* Book3E initialization */
	mr r3,r24
	mr r4,r25
	bl book3e_secondary_core_init
	/* Now NIA and r2 are relocated to PAGE_OFFSET if not already */
	/*
	 * After common core init has finished, check if the current thread is the
	 * one we wanted to boot. If not, start the specified thread and stop the
	 * current thread.
	 */
	LOAD_REG_ADDR(r4, booting_thread_hwid)
	lwz r3, 0(r4)
	li r5, INVALID_THREAD_HWID
	cmpw r3, r5
	beq 20f

	/*
	 * The value of booting_thread_hwid has been stored in r3,
	 * so make it invalid.
	 */
	stw r5, 0(r4)

	/*
	 * Get the current thread id and check if it is the one we wanted.
	 * If not, start the one specified in booting_thread_hwid and stop
	 * the current thread.
	 */
	mfspr r8, SPRN_TIR
	cmpw r3, r8
	beq 20f

	/* start the specified thread */
	LOAD_REG_ADDR(r5, DOTSYM(fsl_secondary_thread_init))
	bl book3e_start_thread

	/* stop the current thread */
	mr r3, r8
	bl book3e_stop_thread
10:
	b 10b
20:
#else
	/* Now the MMU is off, can branch to our PAGE_OFFSET address */
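	/*
	 * Note (added): "bcl 20,31,$+4" is the usual PowerPC idiom for a
	 * branch-and-link to the very next instruction; it puts the
	 * current address in LR without polluting the link-stack branch
	 * predictor, so the mflr below yields our runtime address.
	 */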
	bcl 20,31,$+4
1:	mflr r11
	addi r11,r11,(2f - 1b)
	tovirt(r11, r11)
	mtctr r11
	bctr
2:
	bl relative_toc
#endif

generic_secondary_common_init:
	/* Set up a paca value for this processor. Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
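	/*
	 * Roughly, in C (sketch added for clarity; hw_cpu_id is the paca
	 * field behind the PACAHWCPUID offset used below):
	 *
	 *	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
	 *		if (paca_ptrs[cpu]->hw_cpu_id == phys_id)
	 *			break;		// r13 = paca_ptrs[cpu]
	 *	if (cpu == nr_cpu_ids)
	 *		kexec_wait();		// no match, give up this CPU
	 */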
#ifndef CONFIG_SMP
	b kexec_wait /* wait for next kernel if !SMP */
#else
	LOAD_REG_ADDR(r8, paca_ptrs) /* Load paca_ptrs pointer */
	ld r8,0(r8) /* Get base vaddr of array */
#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
	LOAD_REG_IMMEDIATE(r7, NR_CPUS)
#else
	LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
	lwz r7,0(r7) /* also the max paca allocated */
#endif
	li r5,0 /* logical cpu id */
1:
	sldi r9,r5,3 /* get paca_ptrs[] index from cpu id */
	ldx r13,r9,r8 /* r13 = paca_ptrs[cpu id] */
	lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */
	cmpw r6,r24 /* Compare to our id */
	beq 2f
	addi r5,r5,1
	cmpw r5,r7 /* Check if more pacas exist */
	blt 1b

	mr r3,r24 /* not found, copy phys to r3 */
	b kexec_wait /* next kernel might do better */

2:	SET_PACA(r13)
#ifdef CONFIG_PPC_BOOK3E_64
	addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */
	mtspr SPRN_SPRG_TLB_EXFRAME,r12
#endif

	/* From now on, r24 is expected to be logical cpuid */
	mr r24,r5

	/* Create a temp kernel stack for use before relocation is on. */
	ld r1,PACAEMERGSP(r13)
	subi r1,r1,STACK_FRAME_MIN_SIZE

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld r23,0(r23)
	ld r12,CPU_SPEC_RESTORE(r23)
	cmpdi 0,r12,0
	beq 3f
#ifdef CONFIG_PPC64_ELF_ABI_V1
	ld r12,0(r12)
#endif
	mtctr r12
	bctrl

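	/*
	 * Note (added): the lwarx/stwcx. pair below is a standard
	 * load-reserve/store-conditional retry loop, i.e. an atomic
	 * decrement of spinning_secondaries even if several secondaries
	 * arrive here at the same time.
	 */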
3:	LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */
	lwarx r4,0,r3
	subi r4,r4,1
	stwcx. r4,0,r3
	bne 3b
	isync

4:	HMT_LOW
	lbz r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start. */
	cmpwi 0,r23,0
	beq 4b /* Loop until told to go */

	sync /* order paca.run and cur_cpu_spec */
	isync /* In case code patching happened */

	b __secondary_start
#endif /* SMP */

/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
#ifdef CONFIG_PPC_BOOK3S
SYM_FUNC_START_LOCAL(__mmu_off)
	mfmsr r3
	andi. r0,r3,MSR_IR|MSR_DR
	beqlr
	mflr r4
	andc r3,r3,r0
	mtspr SPRN_SRR0,r4
	mtspr SPRN_SRR1,r3
	sync
	rfid
	b . /* prevent speculative execution */
SYM_FUNC_END(__mmu_off)

SYM_FUNC_START_LOCAL(start_initialization_book3s)
	mflr r25

	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr r0,SPRN_PVR
	srwi r0,r0,16
	cmpwi r0,0x39 /* 970 */
	beq 1f
	cmpwi r0,0x3c /* 970FX */
	beq 1f
	cmpwi r0,0x44 /* 970MP */
	beq 1f
	cmpwi r0,0x45 /* 970GX */
	bne 2f
1:	bl __cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl __mmu_off

	/* Now the MMU is off, can return to our PAGE_OFFSET address */
	tovirt(r25,r25)
	mtlr r25
	blr
SYM_FUNC_END(start_initialization_book3s)
#endif

/*
 * Here is our main kernel entry point. We currently support two kinds of
 * entries, depending on the value of r5.
 *
 * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *               in r3...r7
 *
 * r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *               DT block, r4 is a physical pointer to the kernel itself
 *
 */
__start_initialization_multiplatform:
	/* Make sure we are running in 64 bits mode */
	bl enable_64b_mode

	/* Zero r13 (paca) so early program check / mce don't use it */
	li r13,0

	/* Poison TOC */
	li r2,-1

	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi cr0,r5,0
	beq 1f
	b __boot_from_prom /* yes -> prom */
1:
	/* Save parameters */
	mr r31,r3
	mr r30,r4
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Save OPAL entry */
	mr r28,r8
	mr r29,r9
#endif

	/* Get TOC pointer (current runtime address) */
	bl relative_toc

	/* These functions return to the virtual (PAGE_OFFSET) address */
#ifdef CONFIG_PPC_BOOK3E_64
	bl start_initialization_book3e
#else
	bl start_initialization_book3s
#endif /* CONFIG_PPC_BOOK3E_64 */

	/* Get TOC pointer, virtual */
	bl relative_toc

	/* find out where we are now */

	/* OPAL doesn't pass base address in r4, have to derive it. */
	bcl 20,31,$+4
0:	mflr r26 /* r26 = runtime addr here */
	addis r26,r26,(_stext - 0b)@ha
	addi r26,r26,(_stext - 0b)@l /* current runtime base addr */

	b __after_prom_start

__REF
__boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Get TOC pointer, non-virtual */
	bl relative_toc

	/* find out where we are now */
	bcl 20,31,$+4
0:	mflr r26 /* r26 = runtime addr here */
	addis r26,r26,(_stext - 0b)@ha
	addi r26,r26,(_stext - 0b)@l /* current runtime base addr */

	/* Save parameters */
	mr r31,r3
	mr r30,r4
	mr r29,r5
	mr r28,r6
	mr r27,r7

	/*
	 * Align the stack to 16-byte boundary
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac
	 */
	rldicr r1,r1,0,59

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr r3,r26
	bl relocate
#endif

	/* Restore parameters */
	mr r3,r31
	mr r4,r30
	mr r5,r29
	mr r6,r28
	mr r7,r27

	/* Do all of the interaction with OF client interface */
	mr r8,r26
	bl CFUNC(prom_init)
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap
	.previous

__after_prom_start:
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
	cmplwi cr0,r7,1 /* flagged to stay where we are ? */
	mr r25,r26 /* then use current kernel base */
	beq 1f
	LOAD_REG_IMMEDIATE(r25, PAGE_OFFSET) /* else use static kernel base */
1:	mr r3,r25
	bl relocate
#if defined(CONFIG_PPC_BOOK3E_64)
	/* IVPR needs to be set after relocation. */
	bl init_core_book3e
#endif
#endif

	/*
	 * We need to run with _stext at physical address PHYSICAL_START.
	 * This will leave some code in the first 256B of
	 * real memory, which are reserved for software use.
	 *
	 * Note: This process overwrites the OF exception vectors.
	 */
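	/*
	 * (Added summary) Normal case below: copy everything up to
	 * copy_to_here down to the final physical base, jump into that
	 * copy at label 4:, extend the limit to _end and copy the rest,
	 * then branch to start_here_multiplatform. If __run_at_load is
	 * set, only the exception vectors are copied down (label 5:) and
	 * the kernel keeps running at the load address.
	 */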
	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET)
	mr r4,r26 /* Load the virtual source address into r4 */
	cmpld r3,r4 /* Check if source == dest */
	beq 9f /* If so skip the copy */
	li r6,0x100		/* Start offset, the first 0x100 */
				/* bytes were copied earlier. */

#ifdef CONFIG_RELOCATABLE
	/*
	 * Check if the kernel has to be running as relocatable kernel based on the
	 * variable __run_at_load, if it is set the kernel is treated as relocatable
	 * kernel, otherwise it will be moved to PHYSICAL_START
	 */
	lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
	cmplwi cr0,r7,1
	bne 3f

#ifdef CONFIG_PPC_BOOK3E_64
	LOAD_REG_ADDR(r5, __end_interrupts)
	LOAD_REG_ADDR(r11, _stext)
	sub r5,r5,r11
#else
	/* just copy interrupts */
	LOAD_REG_IMMEDIATE_SYM(r5, r11, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
#endif
	b 5f
3:
#endif
	/* # bytes of memory to copy */
	lis r5,(ABS_ADDR(copy_to_here, text))@ha
	addi r5,r5,(ABS_ADDR(copy_to_here, text))@l

	bl copy_and_flush	/* copy the first n bytes */
				/* this includes the code being */
				/* executed here. */
	/* Jump to the copy of this code that we just made */
	addis r8,r3,(ABS_ADDR(4f, text))@ha
	addi r12,r8,(ABS_ADDR(4f, text))@l
	mtctr r12
	bctr

	.balign 8
p_end:	.8byte _end - copy_to_here

4:
	/*
	 * Now copy the rest of the kernel up to _end, add
	 * _end - copy_to_here to the copy limit and run again.
	 */
	addis r8,r26,(ABS_ADDR(p_end, text))@ha
	ld r8,(ABS_ADDR(p_end, text))@l(r8)
	add r5,r5,r8
5:	bl copy_and_flush /* copy the rest */

9:	b start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
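/*
 * Rough C-level sketch of the loop below (added for clarity):
 *
 *	while (offset < limit) {
 *		for (i = 0; i < 8; i++, offset += 8)	// one 64-byte chunk
 *			*(u64 *)(dest + offset) = *(u64 *)(src + offset);
 *		dcbst(dest + offset);	// push the line out to memory
 *		sync();
 *		icbi(dest + offset);	// toss any stale icache copy
 *	}
 */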
_GLOBAL(copy_and_flush)
	addi r5,r5,-8
	addi r6,r6,-8
4:	li r0,8			/* Use the smallest common */
				/* denominator cache line */
				/* size. This results in */
				/* extra cache line flushes */
				/* but operation is correct. */
				/* Can't get cache line size */
				/* from NACA as it is being */
				/* moved too. */

	mtctr r0 /* put # words/line in ctr */
3:	addi r6,r6,8 /* copy a cache line */
	ldx r0,r6,r4
	stdx r0,r6,r3
	bdnz 3b
	dcbst r6,r3 /* write it to memory */
	sync
	icbi r6,r3 /* flush the icache line */
	cmpld 0,r6,r5
	blt 4b
	sync
	addi r5,r5,8
	addi r6,r6,8
	isync
	blr

_ASM_NOKPROBE_SYMBOL(copy_and_flush); /* Called in real mode */

	.align 8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
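	/* (Added note) Each li + b pair below is exactly 8 bytes, which is
	 * what lets the reset vector be pointed at
	 * __secondary_start_pmac_0 + 8 * cpu for a given secondary. */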
	li r24,0
	b 1f
	li r24,1
	b 1f
	li r24,2
	b 1f
	li r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl enable_64b_mode

	li r0,0
	mfspr r3,SPRN_HID4
	rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */
	sync
	mtspr SPRN_HID4,r3
	isync
	sync
	slbia

	/* Branch to our PAGE_OFFSET address */
	bcl 20,31,$+4
1:	mflr r11
	addi r11,r11,(2f - 1b)
	tovirt(r11, r11)
	mtctr r11
	bctr
2:
	bl relative_toc

	/* Copy some CPU settings from CPU 0 */
	bl __restore_cpu_ppc970

	/* pSeries do that early though I don't think we really need it */
	mfmsr r3
	ori r3,r3,MSR_RI
	mtmsrd r3 /* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca_ptrs) /* Load paca pointer */
	ld r4,0(r4) /* Get base vaddr of paca_ptrs array */
	sldi r5,r24,3 /* get paca_ptrs[] index from cpu id */
	ldx r13,r5,r4 /* r13 = paca_ptrs[cpu id] */
	SET_PACA(r13) /* Save vaddr of paca in an SPRG*/

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li r0,IRQS_DISABLED
	stb r0,PACAIRQSOFTMASK(r13)
	li r0,PACA_IRQ_HARD_DIS
	stb r0,PACAIRQHAPPENED(r13)

	/* Create a temp kernel stack for use before relocation is on. */
	ld r1,PACAEMERGSP(r13)
	subi r1,r1,STACK_FRAME_MIN_SIZE

	b __secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors. The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 * 1. Processor number
 * 2. Segment table pointer (virtual address)
 * On entry the following are set:
 * r1 = stack pointer (real addr of temp stack)
 * r24 = cpu# (in Linux terms)
 * r13 = paca virtual address
 * SPRG_PACA = paca virtual address
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/*
	 * Do early setup for this CPU, in particular initialising the MMU so we
	 * can turn it on below. This is a call to C, which is OK, we're still
	 * running on the emergency stack.
	 */
	bl CFUNC(early_setup_secondary)

	/*
	 * The primary has initialized our kernel stack for us in the paca, grab
	 * it and put it in r1. We must *not* use it until we turn on the MMU
	 * below, because it may not be inside the RMO.
	 */
	ld r1, PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li r7,0
	mtlr r7

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li r7,IRQS_DISABLED
	stb r7,PACAIRQSOFTMASK(r13)
	li r0,PACA_IRQ_HARD_DIS
	stb r0,PACAIRQHAPPENED(r13)

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)

	mtspr SPRN_SRR0,r3
	mtspr SPRN_SRR1,r4
	RFI_TO_KERNEL
	b . /* prevent speculative execution */

/*
 * Running with relocation on at this point. All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
start_secondary_prolog:
	LOAD_PACA_TOC()
	li r3,0
	std r3,0(r1) /* Zero the stack frame pointer */
	bl CFUNC(start_secondary)
	b .
/*
 * Reset stack pointer and call start_secondary
 * to continue with online operation when woken up
 * from cede in cpu offline.
 */
_GLOBAL(start_secondary_resume)
	ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */
	li r3,0
	std r3,0(r1) /* Zero the stack frame pointer */
	bl CFUNC(start_secondary)
	b .
#endif

/*
 * This subroutine clobbers r11 and r12
 */
SYM_FUNC_START_LOCAL(enable_64b_mode)
	mfmsr r11 /* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E_64
	oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
	mtmsr r11
#else /* CONFIG_PPC_BOOK3E_64 */
	LOAD_REG_IMMEDIATE(r12, MSR_64BIT)
	or r11,r11,r12
	mtmsrd r11
	isync
#endif
	blr
SYM_FUNC_END(enable_64b_mode)

/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain). It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 *
 * Note: The compiler constructs pointers using offsets from the
 * TOC in -mcmodel=medium mode. After we relocate to 0 but before
 * the MMU is on we need our TOC to be a virtual address otherwise
 * these pointers will be real addresses which may get stored and
 * accessed later with the MMU on. We branch to the virtual address
 * while still in real mode then call relative_toc again to handle
 * this.
 */
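/*
 * Illustration (added): with -mcmodel=medium the compiler typically
 * materialises the address of a global with a TOC-relative pair such as
 *
 *	addis	rX,r2,var@toc@ha
 *	addi	rX,rX,var@toc@l
 *
 * which is why r2 must hold a usable TOC address (virtual once we run
 * from PAGE_OFFSET) before any such code executes.
 */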
_GLOBAL(relative_toc)
#ifdef CONFIG_PPC_KERNEL_PCREL
	tdnei r2,-1
	blr
#else
	mflr r0
	bcl 20,31,$+4
0:	mflr r11
	ld r2,(p_toc - 0b)(r11)
	add r2,r2,r11
	mtlr r0
	blr

	.balign 8
p_toc:	.8byte .TOC. - 0b
#endif

/*
 * This is where the main kernel code starts.
 */
__REF
start_here_multiplatform:
	/* Adjust TOC for moved kernel. Could adjust when moving it instead. */
	bl relative_toc

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
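	/* (Added note) The loop below is just
	 * memset(__bss_start, 0, __bss_stop - __bss_start), with the size
	 * rounded up to a whole number of doublewords. */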
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub r11,r11,r8 /* bss size */
	addi r11,r11,7 /* round up to an even double word */
	srdi. r11,r11,3 /* shift right by 3 */
	beq 4f
	addi r8,r8,-8
	li r0,0
	mtctr r11 /* zero this many doublewords */
3:	stdu r0,8(r8)
	bdnz 3b
4:

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Setup OPAL entry */
	LOAD_REG_ADDR(r11, opal)
	std r28,0(r11);
	std r29,8(r11);
#endif

#ifndef CONFIG_PPC_BOOK3E_64
	mfmsr r6
	ori r6,r6,MSR_RI
	mtmsrd r6 /* RI on */
#endif

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
	clrldi r0,r25,2
	std r0,0(r4)
#endif

	/* set up a stack pointer */
	LOAD_REG_ADDR(r3,init_thread_union)
	LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
	add r1,r3,r1
	li r0,0
	stdu r0,-STACK_FRAME_MIN_SIZE(r1)

	/*
	 * Do very early kernel initializations, including initial hash table
	 * and SLB setup before we turn on relocation.
	 */

#ifdef CONFIG_KASAN
	bl CFUNC(kasan_early_init)
#endif
	/* Restore parameters passed from prom_init/kexec */
	mr r3,r31
	LOAD_REG_ADDR(r12, DOTSYM(early_setup))
	mtctr r12
	bctrl /* also sets r13 and SPRG_PACA */

	LOAD_REG_ADDR(r3, start_here_common)
	ld r4,PACAKMSR(r13)
	mtspr SPRN_SRR0,r3
	mtspr SPRN_SRR1,r4
	RFI_TO_KERNEL
	b . /* prevent speculative execution */

	/* This is where all platforms converge execution */

start_here_common:
	/* relocation is on at this point */
	std r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	LOAD_PACA_TOC()

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li r0,IRQS_DISABLED
	stb r0,PACAIRQSOFTMASK(r13)
	li r0,PACA_IRQ_HARD_DIS
	stb r0,PACAIRQHAPPENED(r13)

	/* Generic kernel entry */
	bl CFUNC(start_kernel)

	/* Not reached */
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
	.previous