/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"


        .align  5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        disable_irq                             @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
        asm_trace_hardirqs_on
#endif

        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

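        /*
         * restore_user_regs (entry-header.S) with fast = 1 leaves r0
         * alone: it still holds the syscall return value, which was
         * never written back to the stack on this path.  offset = S_OFF
         * accounts for the two words ({r4, r5}, the fifth and sixth
         * syscall arguments) that vector_swi pushed below the pt_regs
         * frame.
         */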
        restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend          )

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
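        /*
         * The writeback (!) on the str below does double duty: it saves
         * the syscall return value into the pt_regs r0 slot and, because
         * S_R0 + S_OFF equals the size of the {r4, r5} argument push, it
         * also advances sp past those two words so sp points at pt_regs
         * again for the slow path.
         */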
        str     r0, [sp, #S_R0+S_OFF]!          @ returned r0
work_pending:
        tst     r1, #_TIF_NEED_RESCHED
        bne     work_resched
        tst     r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
        tst     r1, #_TIF_SIGPENDING            @ delivering a signal?
        movne   why, #0                         @ prevent further restarts
        bl      do_notify_resume
        b       ret_slow_syscall                @ Check work again

work_resched:
        bl      schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
        disable_irq                             @ disable interrupts
ENTRY(ret_to_user_from_irq)
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
        asm_trace_hardirqs_on
#endif
        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
        bl      schedule_tail
        get_thread_info tsk
        ldr     r1, [tsk, #TI_FLAGS]            @ check for syscall tracing
        mov     why, #1
        tst     r1, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
        beq     ret_slow_syscall
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        bl      syscall_trace
        b       ret_slow_syscall
ENDPROC(ret_from_fork)

        .equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x
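/*
 * calls.S is pulled in twice with two different CALL() definitions: the
 * include above expands every entry to ".equ NR_syscalls,NR_syscalls+1",
 * which merely counts the entries, while the second include (at
 * sys_call_table below) expands the same entries to ".long sys_xxx" and
 * emits the actual table.  For example, an entry such as
 * CALL(sys_ni_syscall) first bumps NR_syscalls by one, and on the second
 * pass emits the word ".long sys_ni_syscall".
 */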

#ifdef CONFIG_FUNCTION_TRACER
/*
 * When compiling with -pg, gcc inserts a call to the mcount routine at the
 * start of every function.  In mcount, apart from the function's address (in
 * lr), we need to get hold of the function's caller's address.
 *
 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
 *
 *	bl	mcount
 *
 * These versions have the limitation that in order for the mcount routine to
 * be able to determine the function's caller's address, an APCS-style frame
 * pointer (which is set up with something like the code below) is required.
 *
 *	mov     ip, sp
 *	push    {fp, ip, lr, pc}
 *	sub     fp, ip, #4
 *
 * With EABI, these frame pointers are not available unless -mapcs-frame is
 * specified, and if building as Thumb-2, not even then.
 *
 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
 * with call sites like:
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * With these compilers, frame pointers are not necessary.
 *
 * mcount can be thought of as a function called in the middle of a subroutine
 * call.  As such, it needs to be transparent for both the caller and the
 * callee: the original lr needs to be restored when leaving mcount, and no
 * registers should be clobbered.  (In the __gnu_mcount_nc implementation, we
 * clobber the ip register.  This is OK because the ARM calling convention
 * allows it to be clobbered in subroutines and doesn't use it to hold
 * parameters.)
 *
 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
 * arch/arm/kernel/ftrace.c).
 */

#ifndef CONFIG_OLD_MCOUNT
#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
#endif
#endif

.macro __mcount suffix
        mcount_enter
        ldr     r0, =ftrace_trace_function
        ldr     r2, [r0]
        adr     r0, .Lftrace_stub
        cmp     r0, r2
        bne     1f

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        ldr     r1, =ftrace_graph_return
        ldr     r2, [r1]
        cmp     r0, r2
        bne     ftrace_graph_caller\suffix

        ldr     r1, =ftrace_graph_entry
        ldr     r2, [r1]
        ldr     r0, =ftrace_graph_entry_stub
        cmp     r0, r2
        bne     ftrace_graph_caller\suffix
#endif

        mcount_exit

1:      mcount_get_lr   r1                      @ lr of instrumented func
        mov     r0, lr                          @ instrumented function
        sub     r0, r0, #MCOUNT_INSN_SIZE
        adr     lr, BSYM(2f)
        mov     pc, r2
2:      mcount_exit
.endm
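/*
 * __mcount dispatch, in words: if ftrace_trace_function still points at
 * ftrace_stub, no tracer is installed and (graph tracing aside) we fall
 * straight through to mcount_exit.  Otherwise we branch to 1:, where the
 * registered tracer is called as tracer(self_addr, parent_lr): r0 is the
 * instrumented function (lr backed up by MCOUNT_INSN_SIZE over the
 * mcount call site) and r1 is that function's own caller.
 */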

.macro __ftrace_caller suffix
        mcount_enter

        mcount_get_lr   r1                      @ lr of instrumented func
        mov     r0, lr                          @ instrumented function
        sub     r0, r0, #MCOUNT_INSN_SIZE

        .globl ftrace_call\suffix
ftrace_call\suffix:
        bl      ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
        mov     r0, r0
#endif

        mcount_exit
.endm

.macro __ftrace_graph_caller
        sub     r0, fp, #4              @ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
        @ called from __ftrace_caller, saved in mcount_enter
        ldr     r1, [sp, #16]           @ instrumented routine (func)
#else
        @ called from __mcount, untouched in lr
        mov     r1, lr                  @ instrumented routine (func)
#endif
        sub     r1, r1, #MCOUNT_INSN_SIZE
        mov     r2, fp                  @ frame pointer
        bl      prepare_ftrace_return
        mcount_exit
.endm
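/*
 * prepare_ftrace_return(&parent, self_addr, fp) is the generic function
 * graph hook: it swaps the instrumented routine's saved return address
 * ("parent") for return_to_handler, so that when the routine returns we
 * regain control below, record the exit, and only then jump to the real
 * caller.
 */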

#ifdef CONFIG_OLD_MCOUNT
/*
 * mcount
 */

.macro mcount_enter
        stmdb   sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
        ldr     \reg, [fp, #-4]
.endm

.macro mcount_exit
        ldr     lr, [fp, #-4]
        ldmia   sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
        stmdb   sp!, {lr}
        ldr     lr, [fp, #-4]
        ldmia   sp!, {pc}
#else
        __mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
        __ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
        __ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
        stmdb   sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
        ldr     \reg, [sp, #20]
.endm

.macro mcount_exit
        ldmia   sp!, {r0-r3, ip, lr}
        mov     pc, ip
.endm
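/*
 * Stack layout inside __gnu_mcount_nc: the call site has already done
 * "push {lr}", and mcount_enter pushes {r0-r3, lr} on top, so the frame
 * is (low to high) r0 r1 r2 r3 lr' lr, where lr' is the return address
 * into the instrumented function and lr (at sp + 20) is that function's
 * original return address -- hence mcount_get_lr loads [sp, #20].
 * mcount_exit pops all six words, returning via ip (lr') while restoring
 * the original lr, which is why ip gets clobbered here.
 */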

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
        mov     ip, lr
        ldmia   sp!, {lr}
        mov     pc, ip
#else
        __mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
        __ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        __ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .globl return_to_handler
return_to_handler:
        stmdb   sp!, {r0-r3}
        mov     r0, fp                  @ frame pointer
        bl      ftrace_return_to_handler
        mov     lr, r0                  @ r0 has real ret addr
        ldmia   sp!, {r0-r3}
        mov     pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
        mov     pc, lr
ENDPROC(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

        /* If we're optimising for StrongARM the resulting code won't
           run on an ARM7 and we can save a couple of instructions.
                                                                --pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
        ldmia   sp, {r0 - lr}^                  @ Get calling r0 - lr
        mov     r0, r0
        add     sp, sp, #S_FRAME_SIZE
        subs    pc, lr, #4
#else
#define A710(code...)
#endif

        .align  5
ENTRY(vector_swi)
        sub     sp, sp, #S_FRAME_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
 ARM(   add     r8, sp, #S_PC           )
 ARM(   stmdb   r8, {sp, lr}^           )       @ Calling sp, lr
 THUMB( mov     r8, sp                  )
 THUMB( store_user_sp_lr r8, r10, S_SP  )       @ calling sp, lr
        mrs     r8, spsr                        @ called from non-FIQ mode, so ok.
        str     lr, [sp, #S_PC]                 @ Save calling PC
        str     r8, [sp, #S_PSR]                @ Save CPSR
        str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
        zero_fp

        /*
         * Get the system call number.
         */

#if defined(CONFIG_OABI_COMPAT)

        /*
         * If we have CONFIG_OABI_COMPAT then we need to look at the swi
         * value to determine if it is an EABI or an old ABI call.
         */
#ifdef CONFIG_ARM_THUMB
        tst     r8, #PSR_T_BIT
        movne   r10, #0                         @ no thumb OABI emulation
        ldreq   r10, [lr, #-4]                  @ get SWI instruction
#else
        ldr     r10, [lr, #-4]                  @ get SWI instruction
  A710( and     ip, r10, #0x0f000000            @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
        rev     r10, r10                        @ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

/*
 * Pure EABI user space always puts the syscall number into scno (r7).
 */
  A710( ldr     ip, [lr, #-4]                   @ get SWI instruction   )
  A710( and     ip, ip, #0x0f000000             @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )

#elif defined(CONFIG_ARM_THUMB)

        /* Legacy ABI only, possibly thumb mode. */
        tst     r8, #PSR_T_BIT                  @ this is SPSR from save_user_regs
        addne   scno, r7, #__NR_SYSCALL_BASE    @ put OS number in
        ldreq   scno, [lr, #-4]

#else

        /* Legacy ABI only. */
        ldr     scno, [lr, #-4]                 @ get SWI instruction
  A710( and     ip, scno, #0x0f000000           @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
        ldr     ip, __cr_alignment
        ldr     ip, [ip]
        mcr     p15, 0, ip, c1, c0              @ update control register
#endif
        enable_irq

        get_thread_info tsk
        adr     tbl, sys_call_table             @ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
        /*
         * If the swi argument is zero, this is an EABI call and we do nothing.
         *
         * If this is an old ABI call, get the syscall number into scno and
         * get the old ABI syscall table address.
         */
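        /*
         * The encodings being told apart here: an ARM SWI instruction is
         * 0xef000000 | imm24.  EABI user space issues "swi 0x0" with the
         * syscall number in r7, so clearing the top byte of the fetched
         * instruction with bics leaves zero.  OABI encodes the number in
         * the immediate as __NR_OABI_SYSCALL_BASE + NR, so a non-zero
         * result is an old ABI call and the eor strips the base back off.
         */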
        bics    r10, r10, #0xff000000
        eorne   scno, r10, #__NR_OABI_SYSCALL_BASE
        ldrne   tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
        bic     scno, scno, #0xff000000         @ mask off SWI op-code
        eor     scno, scno, #__NR_SYSCALL_BASE  @ check OS number
#endif

        ldr     r10, [tsk, #TI_FLAGS]           @ check for syscall tracing
        stmdb   sp!, {r4, r5}                   @ push fifth and sixth args

#ifdef CONFIG_SECCOMP
        tst     r10, #_TIF_SECCOMP
        beq     1f
        mov     r0, scno
        bl      __secure_computing
        add     r0, sp, #S_R0 + S_OFF           @ pointer to regs
        ldmia   r0, {r0 - r3}                   @ have to reload r0 - r3
1:
#endif

        tst     r10, #_TIF_SYSCALL_TRACE        @ are we tracing syscalls?
        bne     __sys_trace

        cmp     scno, #NR_syscalls              @ check upper syscall limit
        adr     lr, BSYM(ret_fast_syscall)      @ return address
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine

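        /*
         * We only get here if scno was at or above NR_syscalls.  Numbers
         * at or above __ARM_NR_BASE are ARM-private calls (cacheflush
         * and friends) handled by arm_syscall(); anything else lands in
         * sys_ni_syscall.  Either way "why" is cleared first so the
         * signal code won't try to restart a nonexistent syscall.
         */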
        add     r1, sp, #S_OFF
2:      mov     why, #0                         @ no longer a real syscall
        cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall
        b       sys_ni_syscall                  @ not private func
ENDPROC(vector_swi)

/*
 * This is the really slow path.  We're going to be doing
 * context switches, and waiting for our parent to respond.
 */
__sys_trace:
        mov     r2, scno
        add     r1, sp, #S_OFF
        mov     r0, #0                          @ trace entry [IP = 0]
        bl      syscall_trace

        adr     lr, BSYM(__sys_trace_return)    @ return address
        mov     scno, r0                        @ syscall number (possibly new)
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        cmp     scno, #NR_syscalls              @ check upper syscall limit
        ldmccia r1, {r0 - r3}                   @ have to reload r0 - r3
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
        b       2b

__sys_trace_return:
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
        mov     r2, scno
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        bl      syscall_trace
        b       ret_slow_syscall

        .align  5
#ifdef CONFIG_ALIGNMENT_TRAP
        .type   __cr_alignment, #object
__cr_alignment:
        .word   cr_alignment
#endif
        .ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

        .type   sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
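@ sys_syscall implements the old "indirect" syscall: the number arrives in
@ r0 and the real arguments follow in r1-r6, so after range-checking (the
@ first cmp also rejects a recursive sys_syscall) we slide r1-r4 down into
@ r0-r3, spill r5/r6 into the two stacked argument slots, and dispatch
@ through the table as usual.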
sys_syscall:
        bic     scno, r0, #__NR_OABI_SYSCALL_BASE
        cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE
        cmpne   scno, #NR_syscalls              @ check range
        stmloia sp, {r5, r6}                    @ shuffle args
        movlo   r0, r1
        movlo   r1, r2
        movlo   r2, r3
        movlo   r3, r4
        ldrlo   pc, [tbl, scno, lsl #2]
        b       sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
        add     r0, sp, #S_OFF
        b       sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
        add     r0, sp, #S_OFF
        b       sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
        add     r3, sp, #S_OFF
        b       sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
        add     ip, sp, #S_OFF
        str     ip, [sp, #4]
        b       sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        mov     why, #0                         @ prevent syscall restart handling
        b       sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        mov     why, #0                         @ prevent syscall restart handling
        b       sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
        ldr     r2, [sp, #S_OFF + S_SP]
        b       do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

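/*
 * The legacy (OABI) struct statfs64 is 84 bytes; under EABI alignment
 * rules the same structure pads out to 88, which is the size user space
 * passes in.  sys_statfs64/sys_fstatfs64 sanity-check that size argument
 * against their own 84-byte layout, so quietly translate 88 back to 84.
 */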
sys_statfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the
 * requested offset because it is not page-aligned, we return -EINVAL.
 */
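/*
 * Worked example: with 8K pages (PAGE_SHIFT == 13) the offset must be an
 * even number of 4K units, so PGOFF_MASK catches the low bit; an odd
 * off_4k cannot be represented in whole pages and fails with -EINVAL,
 * while an even one is shifted right by PAGE_SHIFT - 12 = 1 to convert
 * 4K units into 8K page units before being stored as the stacked pgoff
 * argument.
 */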
sys_mmap2:
#if PAGE_SHIFT > 12
        tst     r5, #PGOFF_MASK
        moveq   r5, r5, lsr #PAGE_SHIFT - 12
        streq   r5, [sp, #4]
        beq     sys_mmap_pgoff
        mov     r0, #-EINVAL
        mov     pc, lr
#else
        str     r5, [sp, #4]
        b       sys_mmap_pgoff
#endif
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
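/*
 * The differences all stem from 64-bit arguments: EABI requires them in
 * an even/odd register pair (or 8-byte aligned on the stack), while OABI
 * packed them into whichever registers came next.  sys_oabi_pread64, for
 * instance, receives the loff_t in {r3, r4} and spills it to the two
 * stacked argument words where the EABI-built sys_pread64 expects it,
 * while the truncate64 wrappers shift the length from {r1, r2} up into
 * the aligned {r2, r3} pair.
 */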

sys_oabi_pread64:
        stmia   sp, {r3, r4}
        b       sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
        stmia   sp, {r3, r4}
        b       sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
        str     r3, [sp]
        mov     r3, r2
        mov     r2, r1
        b       sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall

        .type   sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif
