/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/page.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

#include "entry-header.S"

saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
saved_pc	.req	r9
#define TRACE(x...) x
#else
saved_pc	.req	lr
#define TRACE(x...)
#endif

	.section .entry.text,"ax",%progbits
	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	bne	fast_work_pending

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled.  As we will need to call out to some C functions,
 * we save r0 first to avoid needing to save registers around each C function
 * call.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	str	scno, [tsk, #TI_ABI_SYSCALL]	@ make sure tracers see update
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)
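
/*
 * A note on the "movs r1, r1, lsl #16" tests above and below: the
 * thread_info flag bits that need work before returning to user space
 * (signal pending, reschedule, syscall tracing and friends - see the
 * _TIF_* definitions in <asm/thread_info.h>) all live in the low
 * half-word of TI_FLAGS.  Shifting the flags left by 16 throws away the
 * unrelated high bits and sets Z only when no work bit is set, so one
 * instruction does the job of a tst against a 16-bit mask that could
 * not be encoded as a single ARM immediate anyway.
 */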

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace			@ enable interrupts
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	movs	r1, r1, lsl #16
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	ct_user_enter save = 0

#ifdef CONFIG_KSTACK_ERASE
	bl	stackleak_erase_on_task_stack
#endif
	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
ENTRY(vector_bhb_loop8_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mov	r8, #8
1:	b	2f
2:	subs	r8, r8, #1
	bne	1b
	dsb	nsh
	isb
	b	3f
ENDPROC(vector_bhb_loop8_swi)

	.align	5
ENTRY(vector_bhb_bpiall_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
	isb
	b	3f
ENDPROC(vector_bhb_bpiall_swi)
#endif
	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
3:
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	reload_current r10, ip
	zero_fp
	alignment_trap r10, ip, cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif
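
	/*
	 * For reference, a rough sketch of how user space gets here; the
	 * exact instruction sequences vary by libc and are illustrative
	 * only (__NR_xyz stands for any syscall number):
	 *
	 *   EABI:	mov	r7, #__NR_xyz	@ number in r7 (scno)
	 *		swi	#0		@ swi immediate is ignored
	 *
	 *   old ABI:	swi	#(__NR_OABI_SYSCALL_BASE + __NR_xyz)
	 *					@ number encoded in the swi
	 *
	 * which is why the OABI paths above have to fetch the swi
	 * instruction back from [saved_pc, #-4] to recover the number,
	 * while EABI already has it in scno.
	 */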

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl
	get_thread_info tsk

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	strne	r10, [tsk, #TI_ABI_SYSCALL]
	streq	scno, [tsk, #TI_ABI_SYSCALL]
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	str	scno, [tsk, #TI_ABI_SYSCALL]
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#else
	str	scno, [tsk, #TI_ABI_SYSCALL]
#endif
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking.)
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	invoke_syscall tbl, scno, r10, __ret_fast_syscall

	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
	.ltorg

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm
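
/*
 * To illustrate the three macros above (table and entry names below
 * are made up):
 *
 *	syscall_table_start foo_call_table
 *	syscall 0, sys_a
 *	syscall 2, sys_b		@ entry 1 is backfilled
 *	syscall_table_end foo_call_table
 *
 * assembles to an __NR_syscalls-entry table of pointers: sys_a,
 * sys_ni_syscall, sys_b, then sys_ni_syscall padding up to
 * __NR_syscalls.  A duplicated or out-of-order entry trips the .error
 * in the "syscall" macro.
 */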

#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
#define __SYSCALL(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
	syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
#ifdef CONFIG_CPU_SPECTRE
	movhs	scno, #0
	csdb
#endif
	stmialo	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0				@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif