/* Source: arch/sparc/kernel/etrap_32.S (torvalds/linux, master branch). */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * etrap.S: Sparc trap window preparation for entry into the
 *          Linux kernel.
 *
 * Copyright (C) 1995 David S. Miller ([email protected])
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/contregs.h>
#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/winmacro.h>
#include <asm/asmmacro.h>
#include <asm/thread_info.h>

/* Registers to not touch at all.  These aliases name the trap-time
 * local registers (%l0-%l7) and %g6 by role.
 */
#define t_psr      l0 /* Set by caller */
#define t_pc       l1 /* Set by caller */
#define t_npc      l2 /* Set by caller */
#define t_wim      l3 /* Set by caller */
#define t_twinmask l4 /* Set at beginning of this entry routine. */
#define t_kstack   l5 /* Set right before pt_regs frame is built */
#define t_retpc    l6 /* If you change this, change winmacro.h header file */
#define t_systable l7 /* Never touch this, could be the syscall table ptr. */
#define curptr     g6 /* Set after pt_regs frame is built */

	.text
	.align	4
/* SEVEN WINDOW PATCH INSTRUCTIONS.
 * The code below assumes 8 register windows; on 7-window Sparcs the
 * boot code patches these sites (shift by 6 instead of 7, mask with
 * 0x7f instead of 0xff) in place.
 */
	.globl	tsetup_7win_patch1, tsetup_7win_patch2
	.globl	tsetup_7win_patch3, tsetup_7win_patch4
	.globl	tsetup_7win_patch5, tsetup_7win_patch6
tsetup_7win_patch1:	sll	%t_wim, 0x6, %t_wim
tsetup_7win_patch2:	and	%g2, 0x7f, %g2
tsetup_7win_patch3:	and	%g2, 0x7f, %g2
tsetup_7win_patch4:	and	%g1, 0x7f, %g1
tsetup_7win_patch5:	sll	%t_wim, 0x6, %t_wim
tsetup_7win_patch6:	and	%g2, 0x7f, %g2
/* END OF PATCH INSTRUCTIONS */
/* At trap time, interrupts and all generic traps do the
 * following:
 *
 *	rd	%psr, %l0
 *	b	some_handler
 *	rd	%wim, %l3
 *	nop
 *
 * Then 'some_handler' if it needs a trap frame (ie. it has
 * to call c-code and the trap cannot be handled in-window)
 * then it does the SAVE_ALL macro in entry.S which does
 *
 *	sethi	%hi(trap_setup), %l4
 *	jmpl	%l4 + %lo(trap_setup), %l6
 *	nop
 */

/*  2  3  4   window number
 *  -------
 *  O  T  S   mnemonic
 *
 * O == Current window before trap
 * T == Window entered when trap occurred
 * S == Window we will need to save if (1<<T) == %wim
 *
 * Before execution gets here, it must be guaranteed that
 * %l0 contains trap time %psr, %l1 and %l2 contain the
 * trap pc and npc, and %l3 contains the trap time %wim.
 */
.globl trap_setup, tsetup_patch1, tsetup_patch2
76
.globl tsetup_patch3, tsetup_patch4
77
.globl tsetup_patch5, tsetup_patch6
78
trap_setup:
79
/* Calculate mask of trap window. See if from user
80
* or kernel and branch conditionally.
81
*/
82
mov 1, %t_twinmask
83
andcc %t_psr, PSR_PS, %g0 ! fromsupv_p = (psr & PSR_PS)
84
be trap_setup_from_user ! nope, from user mode
85
sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
86
87
/* From kernel, allocate more kernel stack and
88
* build a pt_regs trap frame.
89
*/
90
sub %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack
91
STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
92
93
/* See if we are in the trap window. */
94
andcc %t_twinmask, %t_wim, %g0
95
bne trap_setup_kernel_spill ! in trap window, clean up
96
nop
97
98
/* Trap from kernel with a window available.
99
* Just do it...
100
*/
101
jmpl %t_retpc + 0x8, %g0 ! return to caller
102
mov %t_kstack, %sp ! jump onto new stack
103
104
trap_setup_kernel_spill:
105
ld [%curptr + TI_UWINMASK], %g1
106
orcc %g0, %g1, %g0
107
bne trap_setup_user_spill ! there are some user windows, yuck
108
/* Spill from kernel, but only kernel windows, adjust
109
* %wim and go.
110
*/
111
srl %t_wim, 0x1, %g2 ! begin computation of new %wim
112
tsetup_patch1:
113
sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
114
or %t_wim, %g2, %g2
115
tsetup_patch2:
116
and %g2, 0xff, %g2 ! patched on 7 window Sparcs
117
118
save %g0, %g0, %g0
119
120
/* Set new %wim value */
121
wr %g2, 0x0, %wim
122
123
/* Save the kernel window onto the corresponding stack. */
124
STORE_WINDOW(sp)
125
126
restore %g0, %g0, %g0
127
128
jmpl %t_retpc + 0x8, %g0 ! return to caller
129
mov %t_kstack, %sp ! and onto new kernel stack
130
131
#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
132
133
trap_setup_from_user:
	/* We can't use %curptr yet. */
	LOAD_CURRENT(t_kstack, t_twinmask)

	/* Point %t_kstack at the pt_regs slot at the top of the
	 * kernel stack for this task.
	 */
	sethi	%hi(STACK_OFFSET), %t_twinmask
	or	%t_twinmask, %lo(STACK_OFFSET), %t_twinmask
	add	%t_kstack, %t_twinmask, %t_kstack

	/* Recompute the trap-window mask (LOAD_CURRENT clobbered it). */
	mov	1, %t_twinmask
	sll	%t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)

	/* Build pt_regs frame. */
	STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)

#if 0
	/* If we're sure every task_struct is THREAD_SIZE aligned,
	   we can speed this up. */
	sethi	%hi(STACK_OFFSET), %curptr
	or	%curptr, %lo(STACK_OFFSET), %curptr
	sub	%t_kstack, %curptr, %curptr
#else
	/* Derive the thread_info pointer by masking the kernel
	 * stack address down to its THREAD_SIZE boundary.
	 */
	sethi	%hi(~(THREAD_SIZE - 1)), %curptr
	and	%t_kstack, %curptr, %curptr
#endif

	/* Clear current_thread_info->w_saved */
	st	%g0, [%curptr + TI_W_SAVED]

	/* See if we are in the trap window. */
	andcc	%t_twinmask, %t_wim, %g0
	bne	trap_setup_user_spill		! yep we are
	 orn	%g0, %t_twinmask, %g1		! (delay slot) negate trap win mask into %g1

	/* Trap from user, but not into the invalid window.
	 * Calculate new umask.  The way this works is,
	 * any window from the %wim at trap time until
	 * the window right before the one we are in now,
	 * is a user window.  A diagram:
	 *
	 *  7 6 5 4 3 2 1 0    window number
	 *  ---------------
	 *    I L T            mnemonic
	 *
	 * Window 'I' is the invalid window in our example,
	 * window 'L' is the window the user was in when
	 * the trap occurred, window T is the trap window
	 * we are in now.  So therefore, windows 5, 4 and
	 * 3 are user windows.  The following sequence
	 * computes the user winmask to represent this.
	 */
	subcc	%t_wim, %t_twinmask, %g2
	bneg,a	1f				! annulled: sub only runs if taken
	 sub	%g2, 0x1, %g2			! wrap-around borrow correction
1:
	andn	%g2, %t_twinmask, %g2
tsetup_patch3:
	and	%g2, 0xff, %g2			! patched on 7win Sparcs
	st	%g2, [%curptr + TI_UWINMASK]	! store new umask

	jmpl	%t_retpc + 0x8, %g0		! return to caller
	 mov	%t_kstack, %sp			! (delay slot) and onto kernel stack
trap_setup_user_spill:
	/* A spill occurred from either kernel or user mode
	 * and there exist some user windows to deal with.
	 * A mask of the currently valid user windows
	 * is in %g1 upon entry to here.
	 */

tsetup_patch4:
	and	%g1, 0xff, %g1			! patched on 7win Sparcs, mask
	srl	%t_wim, 0x1, %g2		! compute new %wim
tsetup_patch5:
	sll	%t_wim, 0x7, %t_wim		! patched on 7win Sparcs
	or	%t_wim, %g2, %g2		! %g2 is new %wim
tsetup_patch6:
	and	%g2, 0xff, %g2			! patched on 7win Sparcs
	andn	%g1, %g2, %g1			! clear this bit in %g1
	st	%g1, [%curptr + TI_UWINMASK]

	save	%g0, %g0, %g0			! enter the window to be spilled

	wr	%g2, 0x0, %wim

	/* Call MMU-architecture dependent stack checking
	 * routine.
	 */
	b	tsetup_srmmu_stackchk
	 andcc	%sp, 0x7, %g0			! (delay slot) callee tests this cc
/* Architecture specific stack checking routines.  When either
 * of these routines are called, the globals are free to use
 * as they have been safely stashed on the new kernel stack
 * pointer.  Thus the definition below for simplicity.
 */
#define glob_tmp	g1

	.globl	tsetup_srmmu_stackchk
tsetup_srmmu_stackchk:
	/* Check results of callers andcc %sp, 0x7, %g0:
	 * a misaligned user stack pointer is bad by definition.
	 */
	bne	trap_setup_user_stack_is_bolixed
	 sethi	%hi(PAGE_OFFSET), %glob_tmp	! (delay slot) harmless either way

	/* Reject user stacks that point into kernel space. */
	cmp	%glob_tmp, %sp
	bleu,a	1f				! annulled: lda only runs if taken
LEON_PI( lda	[%g0] ASI_LEON_MMUREGS, %glob_tmp)	! read MMU control
SUN_PI_( lda	[%g0] ASI_M_MMUREGS, %glob_tmp)		! read MMU control

trap_setup_user_stack_is_bolixed:
	/* From user/kernel into invalid window w/bad user
	 * stack.  Save bad user stack, and return to caller.
	 */
	SAVE_BOLIXED_USER_STACK(curptr, g3)
	restore	%g0, %g0, %g0

	jmpl	%t_retpc + 0x8, %g0
	 mov	%t_kstack, %sp			! (delay slot)

1:
	/* Clear the fault status and turn on the no_fault bit
	 * so the window store below cannot trap.
	 */
	or	%glob_tmp, 0x2, %glob_tmp		! or in no_fault bit
LEON_PI(sta	%glob_tmp, [%g0] ASI_LEON_MMUREGS)	! set it
SUN_PI_(sta	%glob_tmp, [%g0] ASI_M_MMUREGS)		! set it

	/* Dump the registers and cross fingers. */
	STORE_WINDOW(sp)

	/* Clear the no_fault bit and check the status. */
	andn	%glob_tmp, 0x2, %glob_tmp
LEON_PI(sta	%glob_tmp, [%g0] ASI_LEON_MMUREGS)
SUN_PI_(sta	%glob_tmp, [%g0] ASI_M_MMUREGS)

	/* Read (and discard) the fault address register to clear it. */
	mov	AC_M_SFAR, %glob_tmp
LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %g0)
SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %g0)

	mov	AC_M_SFSR, %glob_tmp
LEON_PI(lda	[%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
SUN_PI_(lda	[%glob_tmp] ASI_M_MMUREGS, %glob_tmp)	! save away status of winstore

	andcc	%glob_tmp, 0x2, %g0		! did we fault?
	bne	trap_setup_user_stack_is_bolixed ! failure
	 nop

	restore	%g0, %g0, %g0

	jmpl	%t_retpc + 0x8, %g0
	 mov	%t_kstack, %sp			! (delay slot)