GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <[email protected]>
 *		Will Deacon <[email protected]>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/el2_setup.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/stacktrace/frame.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#if (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#endif
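
/*
 * Note: 0x1fffff is 2^21 - 1, so the check above rejects any PAGE_OFFSET
 * that is not 2 MiB aligned; 2 MiB is the level-2 block mapping size
 * with a 4 KiB translation granule.
 */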

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address of the FDT blob.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	efi_signature_nop			// special NOP to identify as PE/COFF executable
	b	primary_entry			// branch to kernel start, magic
	.quad	0				// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
	.long	.Lpe_header_offset		// Offset to the PE header.

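/*
 * Viewed from C (a sketch for illustration; see asm/image.h and
 * Documentation/arch/arm64/booting.rst for the authoritative layout),
 * the 64-byte header emitted above is:
 *
 *	struct arm64_image_header {
 *		__le32 code0;		// efi_signature_nop
 *		__le32 code1;		// b primary_entry
 *		__le64 text_offset;	// image load offset
 *		__le64 image_size;	// effective image size
 *		__le64 flags;		// informative flags
 *		__le64 res2, res3, res4;// reserved
 *		__le32 magic;		// "ARM\x64"
 *		__le32 res5;		// offset to the PE header
 *	};
 */
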
	__EFI_PE_HEADER

	.section ".idmap.text","a"

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 * Register   Scope                                   Purpose
	 * x19        primary_entry() .. start_kernel()       whether we entered with the MMU on
	 * x20        primary_entry() .. __primary_switch()   CPU boot mode
	 * x21        primary_entry() .. start_kernel()       FDT pointer passed at boot in x0
	 */
SYM_CODE_START(primary_entry)
	bl	record_mmu_state
	bl	preserve_boot_args

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	adrp	x0, __pi_init_idmap_pg_dir
	mov	x1, xzr
	bl	__pi_create_init_idmap

	/*
	 * If the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	cbnz	x19, 0f
	dmb	sy
	mov	x1, x0				// end of used region
	adrp	x0, __pi_init_idmap_pg_dir
	adr_l	x2, dcache_inval_poc
	blr	x2
	b	1f

	/*
	 * If we entered with the MMU and caches on, clean the ID mapped part
	 * of the primary boot code to the PoC so we can safely execute it with
	 * the MMU off.
	 */
0:	adrp	x0, __idmap_text_start
	adr_l	x1, __idmap_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

1:	mov	x0, x19
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0

	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
SYM_CODE_END(primary_entry)

	__INIT
SYM_CODE_START_LOCAL(record_mmu_state)
	mrs	x19, CurrentEL
	cmp	x19, #CurrentEL_EL2
	mrs	x19, sctlr_el1
	b.ne	0f
	mrs	x19, sctlr_el2
0:
CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
	csel	x19, xzr, x19, eq		// clear x19 if Z
	ret

	/*
	 * Set the correct endianness early so all memory accesses issued
	 * before init_kernel_el() occur in the correct byte order. Note that
	 * this means the MMU must be disabled, or the active ID map will end
	 * up getting interpreted with the wrong byte order.
	 */
1:	eor	x19, x19, #SCTLR_ELx_EE
	bic	x19, x19, #SCTLR_ELx_M
	b.ne	2f
	pre_disable_mmu_workaround
	msr	sctlr_el2, x19
	b	3f
2:	pre_disable_mmu_workaround
	msr	sctlr_el1, x19
3:	isb
	mov	x19, xzr
	ret
SYM_CODE_END(record_mmu_state)
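
/*
 * A rough C-level sketch of record_mmu_state(), for illustration only
 * (the helpers are hypothetical, not kernel API): x19 is left non-zero
 * only if we entered with the MMU on, the D-cache on and the correct
 * endianness; a wrong-endian entry gets its SCTLR fixed up with the MMU
 * forced off.
 *
 *	u64 sctlr = in_el2() ? read(SCTLR_EL2) : read(SCTLR_EL1);
 *	if (endianness_mismatch(sctlr)) {
 *		sctlr ^= SCTLR_ELx_EE;		// flip the EE bit
 *		sctlr &= ~SCTLR_ELx_M;		// MMU must go off with it
 *		write_back(sctlr);		// msr + isb
 *		return 0;
 *	}
 *	return (sctlr & SCTLR_ELx_C) ? (sctlr & SCTLR_ELx_M) : 0;
 */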

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
SYM_CODE_START_LOCAL(preserve_boot_args)
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	dcache_inval_poc		// tail call
0:	str_l	x19, mmu_enabled_at_boot, x0
	ret
SYM_CODE_END(preserve_boot_args)
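
/*
 * For context: the boot_args[] array recorded above is sanity-checked
 * later from C (see arch/arm64/kernel/setup.c), which warns if x1 .. x3
 * were non-zero at kernel entry, since the boot protocol requires them
 * to be zero.
 */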

/*
 * Initialize CPU registers with task-specific and cpu-specific context.
 *
 * Create a final frame record at task_pt_regs(current)->stackframe, so
 * that the unwinder can identify the final frame record of any task by
 * its location in the task stack. We reserve the entire pt_regs space
 * for consistency with user tasks and kthreads.
 */
	.macro	init_cpu_task tsk, tmp1, tmp2
	msr	sp_el0, \tsk

	ldr	\tmp1, [\tsk, #TSK_STACK]
	add	sp, \tmp1, #THREAD_SIZE
	sub	sp, sp, #PT_REGS_SIZE

	stp	xzr, xzr, [sp, #S_STACKFRAME]
	mov	\tmp1, #FRAME_META_TYPE_FINAL
	str	\tmp1, [sp, #S_STACKFRAME_TYPE]
	add	x29, sp, #S_STACKFRAME

	scs_load_current

	adr_l	\tmp1, __per_cpu_offset
	ldr	w\tmp2, [\tsk, #TSK_TI_CPU]
	ldr	\tmp1, [\tmp1, \tmp2, lsl #3]
	set_this_cpu_offset \tmp1
	.endm
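
/*
 * Summarised as C-like pseudocode (illustration only; 'regs' stands for
 * task_pt_regs(tsk)):
 *
 *	SP_EL0 = tsk;				// 'current' task pointer
 *	SP = tsk->stack + THREAD_SIZE - PT_REGS_SIZE;
 *	regs->stackframe = { 0, 0 };		// final frame record
 *	regs->stackframe.type = FRAME_META_TYPE_FINAL;
 *	x29 = &regs->stackframe;		// frame pointer
 *	this_cpu_offset = __per_cpu_offset[tsk->thread_info.cpu];
 */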

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __pa(KERNEL_START)
 */
SYM_FUNC_START_LOCAL(__primary_switched)
	adr_l	x4, init_task
	init_cpu_task x4, x5, x6

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	adrp	x4, _text			// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x0, x20
	bl	set_cpu_boot_mode_flag

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	bl	kasan_early_init
#endif
	mov	x0, x20
	bl	finalise_el2			// Prefer VHE if possible
	ldp	x29, x30, [sp], #16
	bl	start_kernel
	ASM_BUG()
SYM_FUNC_END(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","a"

/*
 * Starting from EL2 or EL1, configure the CPU to execute at the highest
 * reachable EL supported by the kernel in a chosen default state. If dropping
 * from EL2 to EL1, configure EL2 before configuring EL1.
 *
 * Since we cannot always rely on ERET synchronizing writes to sysregs (e.g. if
 * SCTLR_ELx.EOS is clear), we place an ISB prior to ERET.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x0 if
 * booted in EL1 or EL2 respectively, with the top 32 bits containing
 * potential context flags. These flags are *not* stored in __boot_cpu_mode.
 *
 * x0: whether we are being called from the primary boot path with the MMU on
 */
SYM_FUNC_START(init_kernel_el)
	mrs	x1, CurrentEL
	cmp	x1, #CurrentEL_EL2
	b.eq	init_el2

SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
	mov_q	x0, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, x0
	isb
	mov_q	x0, INIT_PSTATE_EL1
	msr	spsr_el1, x0
	msr	elr_el1, lr
	mov	w0, #BOOT_CPU_MODE_EL1
	eret

SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	msr	elr_el2, lr

	// clean all HYP code to the PoC if we booted at EL2 with the MMU on
	cbz	x0, 0f
	adrp	x0, __hyp_idmap_text_start
	adr_l	x1, __hyp_text_end
	adr_l	x2, dcache_clean_poc
	blr	x2

	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x0
	isb
0:

	init_el2_hcr	HCR_HOST_NVHE_FLAGS
	init_el2_state

	/* Hypervisor stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0
	isb

	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF

	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 2f

	/* Set a sane SCTLR_EL1, the VHE way */
	msr_s	SYS_SCTLR_EL12, x1
	mov	x2, #BOOT_CPU_FLAG_E2H
	b	3f

2:
	msr	sctlr_el1, x1
	mov	x2, xzr
3:
	mov	x0, #INIT_PSTATE_EL1
	msr	spsr_el2, x0

	mov	w0, #BOOT_CPU_MODE_EL2
	orr	x0, x0, x2
	eret
SYM_FUNC_END(init_kernel_el)
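
/*
 * Background note on the E2H split above: when HCR_EL2.E2H is set, the
 * kernel stays at EL2 with VHE, where plain SCTLR_EL1 accesses are
 * redirected to SCTLR_EL2; the SYS_SCTLR_EL12 alias is therefore needed
 * to initialise the real SCTLR_EL1. Without E2H, a direct write to
 * sctlr_el1 suffices, which is all that distinguishes the two paths.
 */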

/*
 * This provides a "holding pen" in which all secondary cores are held
 * until we're ready for them to initialise.
 */
SYM_FUNC_START(secondary_holding_pen)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	mrs	x2, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x2, x2, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x2
	b.eq	secondary_startup
	wfe
	b	pen
SYM_FUNC_END(secondary_holding_pen)
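
/*
 * For context: the pen is opened by the spin-table enable method (see
 * arch/arm64/kernel/smp_spin_table.c), which writes the target core's
 * MPIDR hardware ID to secondary_holding_pen_release and issues an SEV
 * to wake it from WFE.
 */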

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
SYM_FUNC_START(secondary_entry)
	mov	x0, xzr
	bl	init_kernel_el			// w0=cpu_boot_mode
	b	secondary_startup
SYM_FUNC_END(secondary_entry)

SYM_FUNC_START_LOCAL(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mov	x20, x0				// preserve boot mode

#ifdef CONFIG_ARM64_VA_BITS_52
alternative_if ARM64_HAS_VA52
	bl	__cpu_secondary_check52bitva
alternative_else_nop_endif
#endif

	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	adrp	x2, idmap_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
SYM_FUNC_END(secondary_startup)

	.text
SYM_FUNC_START_LOCAL(__secondary_switched)
	mov	x0, x20
	bl	set_cpu_boot_mode_flag

	mov	x0, x20
	bl	finalise_el2

	str_l	xzr, __early_cpu_boot_status, x3
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow

	init_cpu_task x2, x1, x3

#ifdef CONFIG_ARM64_PTR_AUTH
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	bl	secondary_start_kernel
	ASM_BUG()
SYM_FUNC_END(__secondary_switched)

SYM_FUNC_START_LOCAL(__secondary_too_slow)
	wfe
	wfi
	b	__secondary_too_slow
SYM_FUNC_END(__secondary_too_slow)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Save CPU boot mode
	ret
SYM_FUNC_END(set_cpu_boot_mode_flag)
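
/*
 * Equivalent C sketch (illustration only): __boot_cpu_mode is a pair of
 * 32-bit slots, so EL1 and EL2 boots record into different words and a
 * mix of boot modes across CPUs can be detected later:
 *
 *	if (mode == BOOT_CPU_MODE_EL2)
 *		__boot_cpu_mode[1] = mode;
 *	else
 *		__boot_cpu_mode[0] = mode;
 */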

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status
 * with the MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm
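
/*
 * A note on the barrier pair above: the store is performed with the MMU
 * off, so it is non-cacheable, while the observer of
 * __early_cpu_boot_status may be running with caches enabled. The DMB
 * orders the store before the DC IVAC, and the invalidation ensures no
 * stale cached copy of the line can shadow the newly written status.
 */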

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *  x2  = ID map root table address
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
	.section ".idmap.text","a"
SYM_FUNC_START(__enable_mmu)
	mrs	x3, ID_AA64MMFR0_EL1
	ubfx	x3, x3, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN
	b.lt	__no_granule_support
	cmp	x3, #ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX
	b.gt	__no_granule_support
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	load_ttbr1 x1, x1, x3

	set_sctlr_el1	x0

	ret
SYM_FUNC_END(__enable_mmu)
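
/*
 * In C terms (a sketch with hypothetical helpers, for illustration):
 *
 *	u64 tgran = extract(read(ID_AA64MMFR0_EL1),
 *			    ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4);
 *	if (tgran < TGRAN_SUPPORTED_MIN || tgran > TGRAN_SUPPORTED_MAX)
 *		park_cpu();			// __no_granule_support
 *	TTBR0_EL1 = idmap_root;			// identity map
 *	TTBR1_EL1 = kernel_root;		// kernel map
 *	SCTLR_EL1 = x0;				// MMU on, then ISB
 */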

#ifdef CONFIG_ARM64_VA_BITS_52
SYM_FUNC_START(__cpu_secondary_check52bitva)
#ifndef CONFIG_ARM64_LPA2
	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, ID_AA64MMFR2_EL1_VARange_MASK
	cbnz	x0, 2f
#else
	mrs	x0, id_aa64mmfr0_el1
	sbfx	x0, x0, #ID_AA64MMFR0_EL1_TGRAN_SHIFT, 4
	cmp	x0, #ID_AA64MMFR0_EL1_TGRAN_LPA2
	b.ge	2f
#endif

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

2:	ret
SYM_FUNC_END(__cpu_secondary_check52bitva)
#endif

SYM_FUNC_START_LOCAL(__no_granule_support)
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
SYM_FUNC_END(__no_granule_support)

SYM_FUNC_START_LOCAL(__primary_switch)
	adrp	x1, reserved_pg_dir
	adrp	x2, __pi_init_idmap_pg_dir
	bl	__enable_mmu

	adrp	x1, early_init_stack
	mov	sp, x1
	mov	x29, xzr
	mov	x0, x20				// pass the full boot status
	mov	x1, x21				// pass the FDT
	bl	__pi_early_map_kernel		// Map and relocate the kernel

	ldr	x8, =__primary_switched
	adrp	x0, KERNEL_START		// __pa(KERNEL_START)
	br	x8
SYM_FUNC_END(__primary_switch)