GitHub Repository: torvalds/linux
Path: blob/master/arch/m68k/coldfire/entry.S
/* SPDX-License-Identifier: GPL-2.0-or-later
 *
 * entry.S -- interrupt and exception processing for ColdFire
 *
 * Copyright (C) 1999-2007, Greg Ungerer ([email protected])
 * Copyright (C) 1998 D. Jeff Dionne <[email protected]>,
 *                    Kenneth Albanowski <[email protected]>,
 * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
 * Copyright (C) 2004-2006 Macq Electronique SA. (www.macqel.com)
 *
 * Based on:
 *
 * linux/arch/m68k/kernel/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 * ColdFire support by Greg Ungerer ([email protected])
 * 5307 fixes by David W. Miller
 * linux 2.4 support David McCullough <[email protected]>
 * Bug, speed and maintainability fixes by Philippe De Muyter <[email protected]>
 */

#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

#ifdef CONFIG_COLDFIRE_SW_A7
/*
 * Define software copies of the supervisor and user stack pointers.
 */
.bss
sw_ksp:
	.long	0
sw_usp:
	.long	0
#endif /* CONFIG_COLDFIRE_SW_A7 */
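
/*
 * A rough sketch of how these are used (assuming the usual single-A7
 * ColdFire entry sequence; the real work is in the SAVE_ALL_* and
 * RDUSP/WRUSP macros from <asm/entry.h>):
 *
 *	sw_usp = a7;		on kernel entry, bank the user stack
 *	a7     = sw_ksp;	pointer and switch to the kernel one
 *
 * Parts with dual hardware A7 registers skip this and use %usp
 * directly.
 */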

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl sys_call_table
.globl inthandler

enosys:
	mov.l	#sys_ni_syscall,%d3
	bra	1f

ENTRY(system_call)
	SAVE_ALL_SYS
	move	#0x2000,%sr		/* enable intrs again */
	GET_CURRENT(%d2)

	cmpl	#NR_syscalls,%d0
	jcc	enosys
	lea	sys_call_table,%a0
	lsll	#2,%d0			/* movel %a0@(%d0:l:4),%d3 */
	movel	%a0@(%d0),%d3
	jeq	enosys
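
/*
 * Roughly, in C (illustrative only, not generated code):
 *
 *	if (nr >= NR_syscalls || (handler = sys_call_table[nr]) == 0)
 *		handler = sys_ni_syscall;
 *
 * %d0 carries the syscall number, the table holds one long word per
 * entry (hence the lsll #2), and jcc takes the branch on an unsigned
 * nr >= NR_syscalls.
 */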

1:
	movel	%sp,%d2			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d2	/* at start of kernel stack */
	movel	%d2,%a0
	movel	%a0@,%a1		/* save top of frame */
	movel	%sp,%a1@(TASK_THREAD+THREAD_ESP0)
	btst	#(TIF_SYSCALL_TRACE%8),%a0@(TINFO_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	bnes	1f
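
/*
 * The mask above works because kernel stacks are THREAD_SIZE aligned;
 * roughly, in C (field names assumed from the asm-offsets symbols):
 *
 *	struct thread_info *ti = (void *)(sp & ~(THREAD_SIZE - 1));
 *	ti->task->thread.esp0 = sp;
 *
 * The btst addresses the big-endian byte of thread_info->flags that
 * holds TIF_SYSCALL_TRACE and tests bit TIF_SYSCALL_TRACE%8 within it.
 */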

	movel	%d3,%a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	jra	ret_from_exception
1:
	movel	#-ENOSYS,%d2		/* strace needs -ENOSYS in PT_OFF_D0 */
	movel	%d2,PT_OFF_D0(%sp)	/* on syscall entry */
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_enter
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	addql	#1,%d0
	jeq	ret_from_exception
	movel	%d3,%a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_OFF_D0)	/* save the return value */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace_leave
	RESTORE_SWITCH_STACK
	addql	#4,%sp
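
/*
 * The traced path parks -ENOSYS in the saved %d0 so a task stopped at
 * syscall entry reads a sane return value, and (assuming the usual
 * m68k syscall_trace_enter contract) a result of -1, caught by the
 * addql #1/jeq pair above, suppresses the syscall entirely.
 */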

ret_from_exception:
	move	#0x2700,%sr		/* disable intrs */
	btst	#5,%sp@(PT_OFF_SR)	/* check if returning to kernel */
	jeq	Luser_return		/* if not, handle resched, signals */
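
/*
 * Bit 5 of the byte at PT_OFF_SR is the supervisor (S) bit, bit 13 of
 * the 16-bit status register; roughly, in C:
 *
 *	if (!(regs->sr & 0x2000))
 *		goto Luser_return;
 */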

#ifdef CONFIG_PREEMPTION
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
	movel	%d1,%a0
	movel	%a0@(TINFO_FLAGS),%d1	/* get thread_info->flags */
	andl	#(1<<TIF_NEED_RESCHED),%d1
	jeq	Lkernel_return

	movel	%a0@(TINFO_PREEMPT),%d1
	cmpl	#0,%d1
	jne	Lkernel_return

	pea	Lkernel_return
	jmp	preempt_schedule_irq	/* preempt the kernel */
#endif
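
/*
 * The gate above, roughly in C (assuming TINFO_PREEMPT is
 * thread_info->preempt_count):
 *
 *	if ((ti->flags & (1 << TIF_NEED_RESCHED)) && ti->preempt_count == 0)
 *		preempt_schedule_irq();
 *
 * The pea/jmp pair is a hand-rolled call: Lkernel_return is pushed as
 * the return address, so the return from preempt_schedule_irq lands
 * there.
 */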

Lkernel_return:
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	rte
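
/*
 * Unwind order above (assuming the pt_regs layout set up by the
 * SAVE_ALL_* macros): eight saved registers (32 bytes), then %d0,
 * then the original %d0, then a stack-adjust count that is added to
 * %sp before rte consumes the hardware exception frame.
 */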

Luser_return:
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
	movel	%d1,%a0
	moveb	%a0@(TINFO_FLAGS+3),%d1	/* thread_info->flags (low 8 bits) */
	jne	Lwork_to_do		/* still work to do */
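
/*
 * On big-endian m68k, byte 3 of the 32-bit flags word is its least
 * significant byte, where the work bits (resched, signal pending,
 * notify resume) are assumed to live; moveb sets the condition codes,
 * so jne fires if any of them is set.
 */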

Lreturn:
	RESTORE_USER

Lwork_to_do:
	movel	%a0@(TINFO_FLAGS),%d1	/* get thread_info->flags */
	move	#0x2000,%sr		/* enable intrs again */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

Lsignal_return:
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jsr	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jmp	Luser_return
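
/*
 * The pea passes the address just past the switch-stack save area,
 * i.e. the saved pt_regs, so the call is roughly
 *
 *	do_notify_resume(regs);
 *
 * (assuming the m68k one-argument prototype), and the dummy return
 * address keeps the frame at the offsets SAVE_SWITCH_STACK expects.
 */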

/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). Calls up to high level code to do all the work.
 */
ENTRY(inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d2)

	movew	%sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
	andl	#0x03fc,%d0		/* mask out vector only */

	movel	%sp,%sp@-		/* push regs arg */
	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%sp@-		/* push vector number */
	jbsr	do_IRQ			/* call high level irq handler */
	lea	%sp@(8),%sp		/* pop args off stack */

	bra	ret_from_exception
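
/*
 * Vector math above: bits 9:2 of the ColdFire format/vector word hold
 * the vector number, so roughly, in C:
 *
 *	vec = (formatvec & 0x03fc) >> 2;
 *	do_IRQ(vec, regs);
 *
 * with regs pushed first and the vector on top, matching C argument
 * order.
 */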

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
ENTRY(resume)
	movew	%sr,%d1			/* save current status */
	movew	%d1,%a0@(TASK_THREAD+THREAD_SR)
	movel	%a0,%d1			/* get prev thread in d1 */
	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
	RDUSP				/* movel %usp,%a3 */
	movel	%a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
#ifdef CONFIG_MMU
	movel	%a1,%a2			/* set new current */
#endif
	movel	%a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
	WRUSP				/* movel %a3,%usp */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
	movew	%a1@(TASK_THREAD+THREAD_SR),%d7	/* restore new status */
	movew	%d7,%sr
	RESTORE_SWITCH_STACK
	rts
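
/*
 * A rough C view of the switch (field names assumed from
 * asm-offsets): the callee-saved registers are parked by
 * SAVE_SWITCH_STACK, then
 *
 *	prev->thread.sr  = sr;   prev->thread.ksp = sp;
 *	prev->thread.usp = usp;  usp = next->thread.usp;
 *	sp  = next->thread.ksp;  sr  = next->thread.sr;
 *
 * so the rts returns on next's kernel stack, at the point where next
 * last entered resume.
 */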