.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup
.text
ENTRY(__sys_fork)
SAVE_SWITCH_STACK
jbsr sys_fork
lea %sp@(24),%sp
rts
ENTRY(__sys_clone)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_clone
lea %sp@(28),%sp
rts
ENTRY(__sys_vfork)
SAVE_SWITCH_STACK
jbsr sys_vfork
lea %sp@(24),%sp
rts
ENTRY(__sys_clone3)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_clone3
lea %sp@(28),%sp
rts
ENTRY(sys_sigreturn)
SAVE_SWITCH_STACK
movel %sp,%a1 | switch_stack pointer
lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
lea %sp@(-84),%sp | leave a gap
movel %a1,%sp@-
movel %a0,%sp@-
jbsr do_sigreturn
jra 1f | shared with rt_sigreturn()
ENTRY(sys_rt_sigreturn)
SAVE_SWITCH_STACK
movel %sp,%a1 | switch_stack pointer
lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
lea %sp@(-84),%sp | leave a gap
movel %a1,%sp@-
movel %a0,%sp@-
| stack contents:
| [original pt_regs address] [original switch_stack address]
| [gap] [switch_stack] [pt_regs] [exception frame]
jbsr do_rt_sigreturn
1:
| stack contents now:
| [original pt_regs address] [original switch_stack address]
| [unused part of the gap] [moved switch_stack] [moved pt_regs]
| [replacement exception frame]
| return value of do_{rt_,}sigreturn() points to moved switch_stack.
movel %d0,%sp | discard the leftover junk
RESTORE_SWITCH_STACK
| stack contents now is just [syscall return address] [pt_regs] [frame]
| return pt_regs.d0
movel %sp@(PT_OFF_D0+4),%d0
rts
ENTRY(buserr)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
jbsr buserr_c
addql #4,%sp
jra ret_from_exception
ENTRY(trap)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
jbsr trap_c
addql #4,%sp
jra ret_from_exception
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
movel %d1,%sp@-
jsr schedule_tail
addql #4,%sp
jra ret_from_exception
ENTRY(ret_from_kernel_thread)
| a3 contains the kernel thread payload, d7 - its argument
movel %d1,%sp@-
jsr schedule_tail
movel %d7,(%sp)
jsr %a3@
addql #4,%sp
jra ret_from_exception
.globl dbginterrupt
ENTRY(dbginterrupt)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@-
jsr dbginterrupt_c
addql #4,%sp
jra ret_from_exception
ENTRY(reschedule)
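| save top of frame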
pea %sp@
jbsr set_esp0
addql #4,%sp
pea ret_from_exception
jmp schedule
ENTRY(ret_from_user_signal)
moveq #__NR_sigreturn,%d0
trap #0
ENTRY(ret_from_user_rt_signal)
movel #__NR_rt_sigreturn,%d0
trap #0
do_trace_entry:
movel #-ENOSYS,%sp@(PT_OFF_D0) | needed for strace
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace_enter
RESTORE_SWITCH_STACK
addql #4,%sp
addql #1,%d0 | optimization for cmpil #-1,%d0
jeq ret_from_syscall
movel %sp@(PT_OFF_ORIG_D0),%d0
cmpl #NR_syscalls,%d0
jcs syscall
jra ret_from_syscall
badsys:
movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_syscall
do_trace_exit:
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace_leave
RESTORE_SWITCH_STACK
addql #4,%sp
jra .Lret_from_exception
ENTRY(system_call)
SAVE_ALL_SYS
GET_CURRENT(%d1)
movel %d1,%a1
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
| syscall trace?
tstb %a1@(TINFO_FLAGS+2)
jmi do_trace_entry
| seccomp filter active?
btst #5,%a1@(TINFO_FLAGS+2)
bnes do_trace_entry
cmpl #NR_syscalls,%d0
jcc badsys
syscall:
jbsr @(sys_call_table,%d0:l:4)@(0)
movel %d0,%sp@(PT_OFF_D0) | save the return value
ret_from_syscall:
|oriw #0x0700,%sr
movel %curptr@(TASK_STACK),%a1
movew %a1@(TINFO_FLAGS+2),%d0
jne syscall_exit_work
1: RESTORE_ALL
syscall_exit_work:
btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1b | if so, skip resched, signals
lslw #1,%d0
jcs do_trace_exit
jmi do_delayed_trace
lslw #8,%d0
jne do_signal_return
pea resume_userspace
jra schedule
ENTRY(ret_from_exception)
.Lret_from_exception:
btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1f | if so, skip resched, signals
| only allow interrupts when we are really the last one on the
| kernel stack, otherwise stack overflow can occur during
| heavy interrupt load
andw #ALLOWINT,%sr
resume_userspace:
movel %curptr@(TASK_STACK),%a1
moveb %a1@(TINFO_FLAGS+3),%d0
jne exit_work
1: RESTORE_ALL
exit_work:
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
lslb #1,%d0
jne do_signal_return
pea resume_userspace
jra schedule
do_signal_return:
|andw #ALLOWINT,%sr
subql #4,%sp | dummy return address
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
bsrl do_notify_resume
addql #4,%sp
RESTORE_SWITCH_STACK
addql #4,%sp
jbra resume_userspace
do_delayed_trace:
bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
pea 1 | send SIGTRAP
movel %curptr,%sp@-
pea LSIGTRAP
jbsr send_sig
addql #8,%sp
addql #4,%sp
jbra resume_userspace
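| This is the main interrupt handler for autovector interrupts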
ENTRY(auto_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
subw #VEC_SPUR,%d0
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
auto_irqhandler_fixup = . + 2
jsr do_IRQ | process the IRQ
addql #8,%sp | pop parameters off stack
jra ret_from_exception
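| Handler for user defined interrupt vectors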
ENTRY(user_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
subw #VEC_USER,%d0
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
jsr do_IRQ | process the IRQ
addql #8,%sp | pop parameters off stack
jra ret_from_exception
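| Handler for uninitialized and spurious interrupts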
ENTRY(bad_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@-
jsr handle_badint
addql #4,%sp
jra ret_from_exception
resume:
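| Beware - when entering resume, prev (the current task) is in %a0,
| next (the new task) is in %a1, so don't change these registers
| until their contents are no longer needed.
| save sr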
movew %sr,%a0@(TASK_THREAD+THREAD_SR)
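| save fs (sfc,%dfc) - may be pointing to kernel memory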
movec %sfc,%d0
movew %d0,%a0@(TASK_THREAD+THREAD_FC)
movec %usp,%d0
movel %d0,%a0@(TASK_THREAD+THREAD_USP)
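| save non-scratch registers on stack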
SAVE_SWITCH_STACK
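| save current kernel stack pointer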
movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
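| save floating point context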
tstl m68k_fputype
jeq 3f
fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
btst #3,m68k_cputype+3
beqs 1f
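| the 060 FPU keeps status in bits 15-8 of the first longword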
tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
jeq 3f
jra 2f
1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
jeq 3f
2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
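| return the previous task in %d1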
movel %curptr,%d1
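| switch to the new task (%a1 contains the new task)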
movel %a1,%curptr
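| restore floating point context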
tstl m68k_fputype
jeq 4f
btst #3,m68k_cputype+3
beqs 1f
tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
jeq 3f
jra 2f
1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
jeq 3f
2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
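| restore the kernel stack pointer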
movel %a1@(TASK_THREAD+THREAD_KSP),%sp
RESTORE_SWITCH_STACK
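| restore user stack pointer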
movel %a1@(TASK_THREAD+THREAD_USP),%a0
movel %a0,%usp
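| restore fs (sfc,%dfc)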
movew %a1@(TASK_THREAD+THREAD_FC),%a0
movec %a0,%sfc
movec %a0,%dfc
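| restore status register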
movew %a1@(TASK_THREAD+THREAD_SR),%d0
oriw #0x0700,%d0
movew %d0,%sr
rts