GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/kernel/entry-armv.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt ([email protected])
 *  nommu support by Hyok S. Choi ([email protected])
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>
#include <asm/uaccess-asm.h>
#include <asm/kasan_def.h>

#include "entry-header.S"
#include <asm/probes.h>

#ifdef CONFIG_HAVE_LD_DEAD_CODE_DATA_ELIMINATION
#define RELOC_TEXT_NONE .reloc .text, R_ARM_NONE, .
#else
#define RELOC_TEXT_NONE
#endif

/*
 * Interrupt handling.
 */
	.macro	irq_handler, from_user:req
	mov	r1, sp
	ldr_this_cpu r2, irq_stack_ptr, r2, r3
	.if	\from_user == 0
	@
	@ If we took the interrupt while running in the kernel, we may already
	@ be using the IRQ stack, so revert to the original value in that case.
	@
	subs	r3, r2, r1		@ SP above bottom of IRQ stack?
	rsbscs	r3, r3, #THREAD_SIZE	@ ... and below the top?
#ifdef CONFIG_VMAP_STACK
	ldr_va	r3, high_memory, cc	@ End of the linear region
	cmpcc	r3, r1			@ Stack pointer was below it?
#endif
	bcc	0f			@ If not, switch to the IRQ stack
	mov	r0, r1
	bl	generic_handle_arch_irq
	b	1f
0:
	.endif

	mov_l	r0, generic_handle_arch_irq
	bl	call_with_stack
1:
	.endm
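
	@ Note: call_with_stack() (arch/arm/lib/call_with_stack.S) expects
	@ the function to call in r0, its argument in r1 and the replacement
	@ stack pointer in r2, matching the register setup above: the old SP
	@ (the pt_regs pointer) becomes the argument while the per-CPU IRQ
	@ stack becomes the new SP.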

	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr_va	ip, processor, offset=PROCESSOR_PABT_FUNC
	bl_r	ip
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr_va	ip, processor, offset=PROCESSOR_DABT_FUNC
	bl_r	ip
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

	.section .entry.text, "ax", %progbits

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@ "" "" "" ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
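
@ The EABI requires 8-byte stack alignment at call boundaries, so on those
@ configurations SPFIX() emits the instructions that detect a misaligned SP
@ on exception entry and widen the frame by 4 bytes to restore alignment;
@ on everything else it expands to nothing.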

	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
 UNWIND(.fnstart		)
	sub	sp, sp, #(SVC_REGS_SIZE + \stack_hole)
 THUMB(	add	sp, r1		)	@ get SP in a GPR without
 THUMB(	sub	r1, sp, r1	)	@ using a temp register

	.if	\overflow_check
 UNWIND(.save	{r0 - pc}	)
	do_overflow_check (SVC_REGS_SIZE + \stack_hole)
	.endif

#ifdef CONFIG_THUMB2_KERNEL
	tst	r1, #4			@ test stack pointer alignment
	sub	r1, sp, r1		@ restore original R1
	sub	sp, r1			@ restore original SP
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subne	sp, sp, #4	)

 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)	@ No STMIB in Thumb-2

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP		@ here for interlock avoidance
	mov	r6, #-1			@ "" "" "" ""
	add	r2, sp, #(SVC_REGS_SIZE + \stack_hole)
 SPFIX(	addne	r2, r2, #4	)
	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

	get_thread_info tsk
	uaccess_entry tsk, r0, r1, r2, \uaccess

	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endif
	.endm
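
	@ 'tsk' (the current thread_info pointer) and 'why' (cleared to
	@ zero on the non-syscall return paths below) are register aliases
	@ defined with .req in entry-header.S.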

	.align	5
__dabt_svc:
	svc_entry uaccess=0
	mov	r2, sp
	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler from_user=0

#ifdef CONFIG_PREEMPTION
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

	svc_exit r5, irq = 1			@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPTION
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ done if no reschedule is pending
	b	1b				@ otherwise go again
#endif

__und_fault:
	@ Correct the PC so that it points at the instruction which caused
	@ the fault.  If the faulting instruction was ARM, the saved PC
	@ points at the next instruction and we have to subtract 4.
	@ Otherwise it was Thumb, the saved PC points at the second half
	@ of the Thumb instruction, and we have to subtract 2.
	ldr	r2, [r0, #S_PC]
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)
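
	@ For example: an undefined 32-bit ARM instruction at 0x8000 is
	@ reported with the saved PC at 0x8004 (the next instruction), so
	@ r1 == 4 rewinds it to the faulting instruction itself; in Thumb
	@ state the saved PC sits 2 bytes past the start of the instruction,
	@ hence r1 == 2.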

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry MAX_STACK_SIZE
#else
	svc_entry
#endif

	mov	r1, #4				@ PC correction to apply
 THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
 THUMB(	movne	r1, #2			)	@ if so, fix up PC correction
	mov	r0, sp				@ struct pt_regs *regs
	bl	__und_fault

__und_svc_finish:
	get_thread_info tsk
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
__fiq_svc:
	svc_entry trace=0
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_svc)

/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However, in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align	5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	mov	r1, lr			@ Save lr_abt
	mrs	r2, spsr		@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8		@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )
	mov	lr, r1			@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2		@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB(	msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so PT_REGS_SIZE must
 * be a multiple of 8 as well.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

 ATRAP(	mrc	p15, 0, r7, c1, c0, 0)
 ATRAP(	ldr_va	r8, cr_alignment)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@ "" "" "" ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^	)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	.if \uaccess
	uaccess_disable ip
	.endif

	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP(	mcrne	p15, 0, r8, c1, c0, 0)

	reload_current r7, r8

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

	.if	\trace
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit save = 0
	.endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	ldr	r0, =TASK_SIZE
	cmp	r4, r0
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry uaccess=0
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler from_user=1
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry uaccess=0

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
	@ page table was modified by another CPU.
	enable_irq

	tst	r5, #PSR_T_BIT			@ Thumb mode?
	mov	r1, #2				@ set insn size to 2 for Thumb
	bne	0f				@ handle as Thumb undef exception
#ifdef CONFIG_FPE_NWFPE
	adr	r9, ret_from_exception
	bl	call_fpe			@ returns via R9 on success
#endif
	mov	r1, #4				@ set insn size to 4 for ARM
0:	mov	r0, sp
	uaccess_disable ip
	bl	__und_fault
	b	ret_from_exception
 UNWIND(.fnend)
ENDPROC(__und_usr)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

	.align	5
__fiq_usr:
	usr_entry trace=0
	kuser_cmpxchg_check
	mov	r0, sp				@ struct pt_regs *regs
	bl	handle_fiq_as_nmi
	get_thread_info tsk
	restore_user_regs fast = 0, offset = 0
 UNWIND(.fnend		)
ENDPROC(__fiq_usr)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
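/*
 * Viewed from C, this entry point behaves roughly as follows (the actual
 * declaration lives in arch/arm/include/asm/switch_to.h; the parameter
 * names here are illustrative):
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct thread_info *prev_ti,
 *					struct thread_info *next_ti);
 *
 * It returns the previous task once the next task's register state has
 * been loaded.
 */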
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
	ldr	r8, =__stack_chk_guard
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
	ldr	r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
	.else
	ldr	r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
	.endif
#endif
	mov	r7, r2				@ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
    !defined(CONFIG_STACKPROTECTOR_PER_TASK)
	str	r9, [r8]
#endif
	mov	r0, r5
#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
	set_current r7, r8
	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
#else
	mov	r1, r7
	ldmia	r4, {r4 - sl, fp, ip, lr}	@ Load all regs saved previously
#ifdef CONFIG_VMAP_STACK
	@
	@ Do a dummy read from the new stack while running from the old one so
	@ that we can rely on do_translation_fault() to fix up any stale PMD
	@ entries covering the vmalloc region.
	@
	ldr	r2, [ip]
#ifdef CONFIG_KASAN_VMALLOC
	@ Also dummy read from the KASAN shadow memory for the new stack if we
	@ are using KASAN
	mov_l	r2, KASAN_SHADOW_OFFSET
	add	r2, r2, ip, lsr #KASAN_SHADOW_SCALE_SHIFT
	ldr	r2, [r2]
#endif
#endif

	@ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
	@ effectuates the task switch, as that is what causes the observable
	@ values of current and current_thread_info to change. When
	@ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
	@ current_thread_info) is done explicitly, and the update of SP just
	@ switches us to another stack, with few other side effects. In order
	@ to prevent this distinction from causing any inconsistencies, let's
	@ keep the 'set_current' call as close as we can to the update of SP.
	set_current r1, r2
	mov	sp, ip
	ret	lr
#endif
 UNWIND(.fnend		)
ENDPROC(__switch_to)

#ifdef CONFIG_VMAP_STACK
	.text
	.align	2
__bad_stack:
	@
	@ We've just detected an overflow. We need to load the address of this
	@ CPU's overflow stack into the stack pointer register. We have only one
	@ scratch register so let's use a sequence of ADDs including one
	@ involving the PC, and decorate them with PC-relative group
	@ relocations. As these are ARM only, switch to ARM mode first.
	@
	@ We enter here with IP clobbered and its value stashed on the mode
	@ stack.
	@
THUMB(	bx	pc		)
THUMB(	nop			)
THUMB(	.arm			)
	ldr_this_cpu_armv6 ip, overflow_stack_ptr

	str	sp, [ip, #-4]!			@ Preserve original SP value
	mov	sp, ip				@ Switch to overflow stack
	pop	{ip}				@ Original SP in IP

#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	mov	ip, ip				@ mov expected by unwinder
	push	{fp, ip, lr, pc}		@ GCC flavor frame record
#else
	str	ip, [sp, #-8]!			@ store original SP
	push	{fpreg, lr}			@ Clang flavor frame record
#endif
UNWIND( ldr	ip, [r0, #4]	)		@ load exception LR
UNWIND( str	ip, [sp, #12]	)		@ store in the frame record
	ldr	ip, [r0, #12]			@ reload IP

	@ Store the original GPRs to the new stack.
	svc_entry uaccess=0, overflow_check=0

UNWIND( .save	{sp, pc}	)
UNWIND( .save	{fpreg, lr}	)
UNWIND( .setfp	fpreg, sp	)

	ldr	fpreg, [sp, #S_SP]		@ Add our frame record
						@ to the linked list
#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
	ldr	r1, [fp, #4]			@ reload SP at entry
	add	fp, fp, #12
#else
	ldr	r1, [fpreg, #8]
#endif
	str	r1, [sp, #S_SP]			@ store in pt_regs

	@ Stash the regs for handle_bad_stack
	mov	r0, sp

	@ Time to die
	bl	handle_bad_stack
	nop
UNWIND( .fnend		)
ENDPROC(__bad_stack)
#endif

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arch/arm/kernel_user_helpers.rst for formal definitions.
 */
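
/*
 * For reference, user space reaches these helpers through their fixed
 * addresses in the vector page; e.g. the cmpxchg helper below is typically
 * wrapped like this (adapted from the examples in
 * Documentation/arch/arm/kernel_user_helpers.rst):
 *
 *	typedef int (__kernel_cmpxchg_t)(int oldval, int newval,
 *					 volatile int *ptr);
 *	#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * It returns 0 if *ptr was atomically updated from oldval to newval,
 * and non-zero otherwise.
 */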
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	ret	\reg
#endif
	.endm

	.macro	kuser_pad, sym, size
	.if	(. - \sym) & 3
	.rept	4 - (. - \sym) & 3
	.byte	0
	.endr
	.endif
	.rept	(\size - (. - \sym)) / 4
	.word	0xe7fddef1
	.endr
	.endm
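
@ The 0xe7fddef1 filler emitted above lies in the permanently undefined
@ ARM instruction space, so a stray jump into the padding raises an
@ undefined-instruction exception instead of falling through into the
@ next helper.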

#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if not, retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eorseq	r3, r1, r5			@ compare with oldval (2)
2:	stmiaeq	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous
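
	@ The helper's user-space address is materialised above with a
	@ mov/sub pair because its exact value cannot be encoded as a
	@ single ARM immediate (an 8-bit value rotated by an even amount).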

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32

__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg64_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbscs	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
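
@ Each helper occupies one 32-byte slot, so the size of the region shifted
@ right by 5 yields the number of helpers, which doubles as the version
@ number advertised to user space.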

	.globl	__kuser_helper_end
__kuser_helper_end:

#endif

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff1000 so we can use branches in the
 * vectors, rather than ldr instructions.  Note that this code must not
 * exceed a page size.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
vector_bhb_bpiall_\name:
	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
	@ isb not needed due to "movs pc, lr" in the vector stub
	@ which gives a "context synchronisation".
#endif

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@ Save r0, lr_<exception> (parent PC)
	stmia	sp, {r0, lr}		@ save r0, lr

	@ Save spsr_<exception> (parent CPSR)
.Lvec_\name:
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

#ifdef CONFIG_HARDEN_BRANCH_HISTORY
	.subsection 1
	.align	5
vector_bhb_loop8_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@ Save r0, lr_<exception> (parent PC)
	stmia	sp, {r0, lr}

	@ bhb workaround
	mov	r0, #8
3:	W(b)	. + 4
	subs	r0, r0, #1
	bne	3b
	dsb	nsh
	@ isb not needed due to "movs pc, lr" in the vector stub
	@ which gives a "context synchronisation".
	b	.Lvec_\name
ENDPROC(vector_bhb_loop8_\name)
	.previous
#endif

	.align	2
	@ handler addresses follow this label
1:
	.endm
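
@ Each vector_stub invocation must be followed by a 16-entry table of
@ handler addresses, indexed by the low four bits of the interrupted
@ mode taken from spsr: e.g. USR_32 (0b10000) selects entry 0 and
@ SVC_32 (0b10011) selects entry 3, which is why the tables below have
@ real handlers in those slots and __*_invalid almost everywhere else.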

	.section .stubs, "ax", %progbits
	@ These need to remain at the start of the section so that
	@ they are in range of the 'SWI' entries in the vector tables
	@ located 4k down.
.L__vector_swi:
	.word	vector_swi
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
.L__vector_bhb_loop8_swi:
	.word	vector_bhb_loop8_swi
.L__vector_bhb_bpiall_swi:
	.word	vector_bhb_bpiall_swi
#endif

vector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	b	vector_und

/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical (they're not supposed to happen, and won't
 * happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*=============================================================================
 * FIQ "NMI" handler
 *-----------------------------------------------------------------------------
 * Handle a FIQ using the SVC stack, allowing FIQs to act like NMIs on x86
 * systems.  This must be the last vector stub, so let's place it in its own
 * subsection.
 */
	.subsection 2
	vector_stub	fiq, FIQ_MODE, 4

	.long	__fiq_usr			@  0  (USR_26 / USR_32)
	.long	__fiq_svc			@  1  (FIQ_26 / FIQ_32)
	.long	__fiq_svc			@  2  (IRQ_26 / IRQ_32)
	.long	__fiq_svc			@  3  (SVC_26 / SVC_32)
	.long	__fiq_svc			@  4
	.long	__fiq_svc			@  5
	.long	__fiq_svc			@  6
	.long	__fiq_abt			@  7
	.long	__fiq_svc			@  8
	.long	__fiq_svc			@  9
	.long	__fiq_svc			@  a
	.long	__fiq_svc			@  b
	.long	__fiq_svc			@  c
	.long	__fiq_svc			@  d
	.long	__fiq_svc			@  e
	.long	__fiq_svc			@  f

	.globl	vector_fiq

	.section .vectors, "ax", %progbits
	RELOC_TEXT_NONE
	W(b)	vector_rst
	W(b)	vector_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi		)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi		)
	W(ldr)	pc, .
	W(b)	vector_pabt
	W(b)	vector_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_irq
	W(b)	vector_fiq
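
@ The SWI slot above cannot use a plain branch: vector_swi lives in
@ .entry.text, far beyond branch range from the vector page.  Instead,
@ "W(ldr) pc, ." carries a PC-relative load relocation against the
@ .L__vector_swi literal placed at the start of .stubs (mapped 4k above
@ the vectors), so the slot loads vector_swi's absolute address directly
@ into pc.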

#ifdef CONFIG_HARDEN_BRANCH_HISTORY
	.section .vectors.bhb.loop8, "ax", %progbits
	RELOC_TEXT_NONE
	W(b)	vector_rst
	W(b)	vector_bhb_loop8_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
	W(ldr)	pc, .
	W(b)	vector_bhb_loop8_pabt
	W(b)	vector_bhb_loop8_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_loop8_irq
	W(b)	vector_bhb_loop8_fiq

	.section .vectors.bhb.bpiall, "ax", %progbits
	RELOC_TEXT_NONE
	W(b)	vector_rst
	W(b)	vector_bhb_bpiall_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi	)
	W(ldr)	pc, .
	W(b)	vector_bhb_bpiall_pabt
	W(b)	vector_bhb_bpiall_dabt
	W(b)	vector_addrexcptn
	W(b)	vector_bhb_bpiall_irq
	W(b)	vector_bhb_bpiall_fiq
#endif

	.data
	.align	2

	.globl	cr_alignment
cr_alignment:
	.space	4