GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/s390/kernel/entry.S
/*
 * arch/s390/kernel/entry.S
 * S390 low-level entry points.
 *
 * Copyright (C) IBM Corp. 1999,2006
 * Author(s): Martin Schwidefsky ([email protected]),
 *            Hartmut Penner ([email protected]),
 *            Denis Joseph Barrow ([email protected],[email protected]),
 *            Heiko Carstens <[email protected]>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS = STACK_FRAME_OVERHEAD
SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
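
# The offsets above place a pt_regs image directly behind the usual
# STACK_FRAME_OVERHEAD save area at %r15: syscall args (SP_ARGS), the
# program-status word (SP_PSW), gprs 0-15 at 4-byte spacing (SP_R0..SP_R15),
# the original gpr 2 (SP_ORIG_R2), the instruction-length code (SP_ILC) and
# the svc number (SP_SVCNR). The __PT_* values come from <asm/asm-offsets.h>
# included above.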

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
                _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
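
# Note: the _TIF_SYSCALL bits are shifted right by 8 so that they fit the
# one-byte mask of the tm instructions used below, which test the second
# byte of the 32-bit flags word (tm __TI_flags+2), while the _TIF_WORK_*
# masks are tested against the low-order byte (tm __TI_flags+3).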

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT

#define BASED(name) name-system_call(%r13)
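
# BASED() gives %r13-relative access to the literal pool: the entry macros
# load the address of system_call into %r13, so for example
#       l %r1,BASED(.Lschedule)
# expands to
#       l %r1,.Lschedule-system_call(%r13)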

#ifdef CONFIG_TRACE_IRQFLAGS
        .macro TRACE_IRQS_ON
        basr %r2,%r0
        l %r1,BASED(.Ltrace_irq_on_caller)
        basr %r14,%r1
        .endm

        .macro TRACE_IRQS_OFF
        basr %r2,%r0
        l %r1,BASED(.Ltrace_irq_off_caller)
        basr %r14,%r1
        .endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#endif

#ifdef CONFIG_LOCKDEP
        .macro LOCKDEP_SYS_EXIT
        tm SP_PSW+1(%r15),0x01 # returning to user ?
        jz 0f
        l %r1,BASED(.Llockdep_sys_exit)
        basr %r14,%r1
0:
        .endm
#else
#define LOCKDEP_SYS_EXIT
#endif

/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */

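# The UPDATE_VTIME macro below accumulates a cpu-timer delta into a 64-bit
# lowcore counter, roughly *lc_sum += *lc_from - *lc_to, using the 32-bit
# register pair %r10/%r11; the bc 3 / bc 12 branches propagate the
# borrow/carry between the two 32-bit halves.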
        .macro UPDATE_VTIME lc_from,lc_to,lc_sum
        lm %r10,%r11,\lc_from
        sl %r10,\lc_to
        sl %r11,\lc_to+4
        bc 3,BASED(0f)
        sl %r10,BASED(.Lc_1)
0:      al %r10,\lc_sum
        al %r11,\lc_sum+4
        bc 12,BASED(1f)
        al %r10,BASED(.Lc_1)
1:      stm %r10,%r11,\lc_sum
        .endm

        .macro SAVE_ALL_SVC psworg,savearea
        stm %r12,%r15,\savearea
        l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
        l %r15,__LC_KERNEL_STACK # problem state -> load ksp
        s %r15,BASED(.Lc_spsize) # make room for registers & psw
        .endm

        .macro SAVE_ALL_BASE savearea
        stm %r12,%r15,\savearea
        l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
        .endm

        .macro SAVE_ALL_PGM psworg,savearea
        tm \psworg+1,0x01 # test problem state bit
#ifdef CONFIG_CHECK_STACK
        bnz BASED(1f)
        tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
        bnz BASED(2f)
        la %r12,\psworg
        b BASED(stack_overflow)
#else
        bz BASED(2f)
#endif
1:      l %r15,__LC_KERNEL_STACK # problem state -> load ksp
2:      s %r15,BASED(.Lc_spsize) # make room for registers & psw
        .endm

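# SAVE_ALL_ASYNC below switches to the async stack unless %r15 already
# points into it: the old stack pointer is subtracted from __LC_ASYNC_STACK
# and the difference is shifted right by STACK_SHIFT, which yields zero
# exactly when the interrupted code was already running on the async stack.
# If the old PSW address lies inside the critical section, cleanup_critical
# is called first so the interrupted sequence is left in a consistent state.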
        .macro SAVE_ALL_ASYNC psworg,savearea
        stm %r12,%r15,\savearea
        l %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
        la %r12,\psworg
        tm \psworg+1,0x01 # test problem state bit
        bnz BASED(1f) # from user -> load async stack
        clc \psworg+4(4),BASED(.Lcritical_end)
        bhe BASED(0f)
        clc \psworg+4(4),BASED(.Lcritical_start)
        bl BASED(0f)
        l %r14,BASED(.Lcleanup_critical)
        basr %r14,%r14
        tm 1(%r12),0x01 # retest problem state after cleanup
        bnz BASED(1f)
0:      l %r14,__LC_ASYNC_STACK # are we already on the async stack ?
        slr %r14,%r15
        sra %r14,STACK_SHIFT
#ifdef CONFIG_CHECK_STACK
        bnz BASED(1f)
        tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
        bnz BASED(2f)
        b BASED(stack_overflow)
#else
        bz BASED(2f)
#endif
1:      l %r15,__LC_ASYNC_STACK
2:      s %r15,BASED(.Lc_spsize) # make room for registers & psw
        .endm

        .macro CREATE_STACK_FRAME savearea
        xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
        st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
        mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
        stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
        .endm

        .macro RESTORE_ALL psworg,sync
        mvc \psworg(8),SP_PSW(%r15) # move user PSW to lowcore
        .if !\sync
        ni \psworg+1,0xfd # clear wait state bit
        .endif
        lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
        stpt __LC_EXIT_TIMER
        lpsw \psworg # back to caller
        .endm

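# REENABLE_IRQS below copies the first system-mask byte of the saved PSW
# into the scratch slot of the stack frame, clears what appears to be the
# PER mask bit (0x40) and loads the result with ssm, i.e. it restores the
# interrupted context's interrupt enablement.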
        .macro REENABLE_IRQS
        mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
        ni __SF_EMPTY(%r15),0xbf
        ssm __SF_EMPTY(%r15)
        .endm

        .section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
        .globl __switch_to
__switch_to:
        basr %r1,0
0:      l %r4,__THREAD_info(%r2) # get thread_info of prev
        l %r5,__THREAD_info(%r3) # get thread_info of next
        tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
        bz 1f-0b(%r1)
        ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
        oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next
1:      stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
        st %r15,__THREAD_ksp(%r2) # store kernel stack of prev
        l %r15,__THREAD_ksp(%r3) # load kernel stack of next
        lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
        lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
        st %r3,__LC_CURRENT # store task struct of next
        mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
        st %r5,__LC_THREAD_INFO # store thread info of next
        ahi %r5,STACK_SIZE # end of kernel stack of next
        st %r5,__LC_KERNEL_STACK # store end of kernel stack
        br %r14

__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

        .globl system_call
system_call:
        stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
        SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_SAVE_AREA
        mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
        mvc SP_ILC(4,%r15),__LC_SVC_ILC
        l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
sysc_vtime:
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
        mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
sysc_do_svc:
        xr %r7,%r7
        icm %r7,3,SP_SVCNR(%r15) # load svc number and test for svc 0
        bnz BASED(sysc_nr_ok) # svc number > 0
# svc 0: system call number in %r1
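# (the svc instruction encodes the call number as a single byte, so system
# call numbers above 255 are issued as "svc 0" with the real number passed
# in %r1 and stored back into SP_SVCNR below)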
        cl %r1,BASED(.Lnr_syscalls)
        bnl BASED(sysc_nr_ok)
        sth %r1,SP_SVCNR(%r15)
        lr %r7,%r1 # copy svc number to %r7
sysc_nr_ok:
        sll %r7,2 # svc number *4
        l %r10,BASED(.Lsysc_table)
        tm __TI_flags+2(%r12),_TIF_SYSCALL
        mvc SP_ARGS(4,%r15),SP_R7(%r15)
        l %r8,0(%r7,%r10) # get system call addr.
        bnz BASED(sysc_tracesys)
        basr %r14,%r8 # call sys_xxxx
        st %r2,SP_R2(%r15) # store return value (change R2 on stack)

sysc_return:
        LOCKDEP_SYS_EXIT
sysc_tif:
        tm __TI_flags+3(%r12),_TIF_WORK_SVC
        bnz BASED(sysc_work) # there is work to do (signals etc.)
sysc_restore:
        RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:

#
# There is work to do, but first we need to check if we return to userspace.
#
sysc_work:
        tm SP_PSW+1(%r15),0x01 # returning to user ?
        bno BASED(sysc_restore)

#
# One of the work bits is on. Find out which one.
#
sysc_work_tif:
        tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
        bo BASED(sysc_mcck_pending)
        tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
        bo BASED(sysc_reschedule)
        tm __TI_flags+3(%r12),_TIF_SIGPENDING
        bo BASED(sysc_sigpending)
        tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
        bo BASED(sysc_notify_resume)
        tm __TI_flags+3(%r12),_TIF_RESTART_SVC
        bo BASED(sysc_restart)
        tm __TI_flags+3(%r12),_TIF_PER_TRAP
        bo BASED(sysc_singlestep)
        b BASED(sysc_return) # beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
        l %r1,BASED(.Lschedule)
        la %r14,BASED(sysc_return)
        br %r1 # call scheduler

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
        l %r1,BASED(.Ls390_handle_mcck)
        la %r14,BASED(sysc_return)
        br %r1 # TIF bit will be cleared by handler

#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
        ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
        la %r2,SP_PTREGS(%r15) # load pt_regs
        l %r1,BASED(.Ldo_signal)
        basr %r14,%r1 # call do_signal
        tm __TI_flags+3(%r12),_TIF_RESTART_SVC
        bo BASED(sysc_restart)
        tm __TI_flags+3(%r12),_TIF_PER_TRAP
        bo BASED(sysc_singlestep)
        b BASED(sysc_return)

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
        la %r2,SP_PTREGS(%r15) # load pt_regs
        l %r1,BASED(.Ldo_notify_resume)
        la %r14,BASED(sysc_return)
        br %r1 # call do_notify_resume


#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
        ni __TI_flags+3(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
        l %r7,SP_R2(%r15) # load new svc number
        mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
        lm %r2,%r6,SP_R2(%r15) # load svc arguments
        sth %r7,SP_SVCNR(%r15)
        b BASED(sysc_nr_ok) # restart svc

#
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
        ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
        xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
        la %r2,SP_PTREGS(%r15) # address of register-save area
        l %r1,BASED(.Lhandle_per) # load adr. of per handler
        la %r14,BASED(sysc_return) # load adr. of system return
        br %r1 # branch to do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
sysc_tracesys:
        l %r1,BASED(.Ltrace_entry)
        la %r2,SP_PTREGS(%r15) # load pt_regs
        la %r3,0
        xr %r0,%r0
        icm %r0,3,SP_SVCNR(%r15)
        st %r0,SP_R2(%r15)
        basr %r14,%r1
        cl %r2,BASED(.Lnr_syscalls)
        bnl BASED(sysc_tracenogo)
        lr %r7,%r2
        sll %r7,2 # svc number *4
        l %r8,0(%r7,%r10)
sysc_tracego:
        lm %r3,%r6,SP_R3(%r15)
        mvc SP_ARGS(4,%r15),SP_R7(%r15)
        l %r2,SP_ORIG_R2(%r15)
        basr %r14,%r8 # call sys_xxx
        st %r2,SP_R2(%r15) # store return value
sysc_tracenogo:
        tm __TI_flags+2(%r12),_TIF_SYSCALL
        bz BASED(sysc_return)
        l %r1,BASED(.Ltrace_exit)
        la %r2,SP_PTREGS(%r15) # load pt_regs
        la %r14,BASED(sysc_return)
        br %r1

#
# a new process exits the kernel with ret_from_fork
#
        .globl ret_from_fork
ret_from_fork:
        l %r13,__LC_SVC_NEW_PSW+4
        l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
        tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
        bo BASED(0f)
        st %r15,SP_R15(%r15) # store stack pointer for new kthread
0:      l %r1,BASED(.Lschedtail)
        basr %r14,%r1
        TRACE_IRQS_ON
        stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
        b BASED(sysc_tracenogo)

#
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
        .globl kernel_execve
kernel_execve:
        stm %r12,%r15,48(%r15)
        lr %r14,%r15
        l %r13,__LC_SVC_NEW_PSW+4
        s %r15,BASED(.Lc_spsize)
        st %r14,__SF_BACKCHAIN(%r15)
        la %r12,SP_PTREGS(%r15)
        xc 0(__PT_SIZE,%r12),0(%r12)
        l %r1,BASED(.Ldo_execve)
        lr %r5,%r12
        basr %r14,%r1
        ltr %r2,%r2
        be BASED(0f)
        a %r15,BASED(.Lc_spsize)
        lm %r12,%r15,48(%r15)
        br %r14
# execve succeeded.
0:      stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
        l %r15,__LC_KERNEL_STACK # load ksp
        s %r15,BASED(.Lc_spsize) # make room for registers & psw
        mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
        l %r12,__LC_THREAD_INFO
        xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
        stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
        l %r1,BASED(.Lexecve_tail)
        basr %r14,%r1
        b BASED(sysc_return)

/*
 * Program check handler routine
 */

        .globl pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
        stpt __LC_SYNC_ENTER_TIMER
        SAVE_ALL_BASE __LC_SAVE_AREA
        tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
        bnz BASED(pgm_per) # got per exception -> special case
        SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_SAVE_AREA
        xc SP_ILC(4,%r15),SP_ILC(%r15)
        mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
        l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
        tm SP_PSW+1(%r15),0x01 # interrupting from user ?
        bz BASED(pgm_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
        l %r3,__LC_PGM_ILC # load program interruption code
        l %r4,__LC_TRANS_EXC_CODE
        REENABLE_IRQS
        la %r8,0x7f
        nr %r8,%r3
        sll %r8,2
        l %r1,BASED(.Ljump_table)
        l %r1,0(%r8,%r1) # load address of handler routine
        la %r2,SP_PTREGS(%r15) # address of register-save area
        basr %r14,%r1 # branch to interrupt-handler
pgm_exit:
        b BASED(sysc_return)

#
# handle per exception
#
pgm_per:
        tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
        bnz BASED(pgm_per_std) # ok, normal per event from user space
# ok, it's one of the special cases, now we need to find out which one
        clc __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
        be BASED(pgm_svcper)
# no interesting special case, ignore PER event
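# (lpsw 0x28 reloads the program check old PSW, i.e. __LC_PGM_OLD_PSW in
# low core, so the interrupted context simply resumes)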
        lm %r12,%r15,__LC_SAVE_AREA
        lpsw 0x28

#
# Normal per exception
#
pgm_per_std:
        SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_SAVE_AREA
        mvc SP_PSW(8,%r15),__LC_PGM_OLD_PSW
        l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
        tm SP_PSW+1(%r15),0x01 # interrupting from user ?
        bz BASED(pgm_no_vtime2)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
        l %r1,__TI_task(%r12)
        tm SP_PSW+1(%r15),0x01 # kernel per event ?
        bz BASED(kernel_per)
        mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
        mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS
        mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
        oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
        l %r3,__LC_PGM_ILC # load program interruption code
        l %r4,__LC_TRANS_EXC_CODE
        REENABLE_IRQS
        la %r8,0x7f
        nr %r8,%r3 # clear per-event-bit and ilc
        be BASED(pgm_exit2) # only per or per+check ?
        sll %r8,2
        l %r1,BASED(.Ljump_table)
        l %r1,0(%r8,%r1) # load address of handler routine
        la %r2,SP_PTREGS(%r15) # address of register-save area
        basr %r14,%r1 # branch to interrupt-handler
pgm_exit2:
        b BASED(sysc_return)

#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
        SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_SAVE_AREA
        mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
        mvc SP_ILC(4,%r15),__LC_SVC_ILC
        l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
        l %r8,__TI_task(%r12)
        mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
        mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS
        mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
        oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
        stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
        lm %r2,%r6,SP_R2(%r15) # load svc arguments
        b BASED(sysc_do_svc)

#
# per was called from kernel, must be kprobes
#
kernel_per:
        REENABLE_IRQS
        xc SP_SVCNR(2,%r15),SP_SVCNR(%r15)
        la %r2,SP_PTREGS(%r15) # address of register-save area
        l %r1,BASED(.Lhandle_per) # load adr. of per handler
        basr %r14,%r1 # branch to do_single_step
        b BASED(pgm_exit)

/*
 * IO interrupt handler routine
 */

        .globl io_int_handler
io_int_handler:
        stck __LC_INT_CLOCK
        stpt __LC_ASYNC_ENTER_TIMER
        SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
        CREATE_STACK_FRAME __LC_SAVE_AREA+16
        mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
        l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
        tm SP_PSW+1(%r15),0x01 # interrupting from user ?
        bz BASED(io_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
        TRACE_IRQS_OFF
        l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
        la %r2,SP_PTREGS(%r15) # address of register-save area
        basr %r14,%r1 # branch to standard irq handler
io_return:
        LOCKDEP_SYS_EXIT
        TRACE_IRQS_ON
io_tif:
        tm __TI_flags+3(%r12),_TIF_WORK_INT
        bnz BASED(io_work) # there is work to do (signals etc.)
io_restore:
        RESTORE_ALL __LC_RETURN_PSW,0
io_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK_INT work
# 2) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
        tm SP_PSW+1(%r15),0x01 # returning to user ?
        bo BASED(io_work_user) # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
        icm %r0,15,__TI_precount(%r12)
        bnz BASED(io_restore) # preemption disabled
        tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
        bno BASED(io_restore)
# switch to kernel stack
        l %r1,SP_R15(%r15)
        s %r1,BASED(.Lc_spsize)
        mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lr %r15,%r1
# TRACE_IRQS_ON already done at io_return, call
# TRACE_IRQS_OFF to keep things symmetrical
        TRACE_IRQS_OFF
        l %r1,BASED(.Lpreempt_schedule_irq)
        basr %r14,%r1 # call preempt_schedule_irq
        b BASED(io_return)
#else
        b BASED(io_restore)
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
io_work_user:
        l %r1,__LC_KERNEL_STACK
        s %r1,BASED(.Lc_spsize)
        mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lr %r15,%r1

#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
#
io_work_tif:
        tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
        bo BASED(io_mcck_pending)
        tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
        bo BASED(io_reschedule)
        tm __TI_flags+3(%r12),_TIF_SIGPENDING
        bo BASED(io_sigpending)
        tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
        bo BASED(io_notify_resume)
        b BASED(io_return) # beware of critical section cleanup

#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
# TRACE_IRQS_ON already done at io_return
        l %r1,BASED(.Ls390_handle_mcck)
        basr %r14,%r1 # TIF bit will be cleared by handler
        TRACE_IRQS_OFF
        b BASED(io_return)

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
# TRACE_IRQS_ON already done at io_return
        l %r1,BASED(.Lschedule)
        stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
        basr %r14,%r1 # call scheduler
        stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
        TRACE_IRQS_OFF
        b BASED(io_return)

#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
# TRACE_IRQS_ON already done at io_return
        stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
        la %r2,SP_PTREGS(%r15) # load pt_regs
        l %r1,BASED(.Ldo_signal)
        basr %r14,%r1 # call do_signal
        stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
        TRACE_IRQS_OFF
        b BASED(io_return)

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
io_notify_resume:
# TRACE_IRQS_ON already done at io_return
        stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
        la %r2,SP_PTREGS(%r15) # load pt_regs
        l %r1,BASED(.Ldo_notify_resume)
        basr %r14,%r1 # call do_notify_resume
        stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
        TRACE_IRQS_OFF
        b BASED(io_return)

/*
 * External interrupt handler routine
 */

        .globl ext_int_handler
ext_int_handler:
        stck __LC_INT_CLOCK
        stpt __LC_ASYNC_ENTER_TIMER
        SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
        CREATE_STACK_FRAME __LC_SAVE_AREA+16
        mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
        l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
        tm SP_PSW+1(%r15),0x01 # interrupting from user ?
        bz BASED(ext_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
        TRACE_IRQS_OFF
        la %r2,SP_PTREGS(%r15) # address of register-save area
        l %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
        l %r4,__LC_EXT_PARAMS # get external parameters
        l %r1,BASED(.Ldo_extint)
        basr %r14,%r1
        b BASED(io_return)

__critical_end:

/*
 * Machine check handler routines
 */

        .globl mcck_int_handler
mcck_int_handler:
        stck __LC_MCCK_CLOCK
        spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
        lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
        SAVE_ALL_BASE __LC_SAVE_AREA+32
        la %r12,__LC_MCK_OLD_PSW
        tm __LC_MCCK_CODE,0x80 # system damage?
        bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
        mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
        tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
        bo BASED(1f)
        la %r14,__LC_SYNC_ENTER_TIMER
        clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
        bl BASED(0f)
        la %r14,__LC_ASYNC_ENTER_TIMER
0:      clc 0(8,%r14),__LC_EXIT_TIMER
        bl BASED(0f)
        la %r14,__LC_EXIT_TIMER
0:      clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
        bl BASED(0f)
        la %r14,__LC_LAST_UPDATE_TIMER
0:      spt 0(%r14)
        mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
1:      tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
        bno BASED(mcck_int_main) # no -> skip cleanup critical
        tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
        bnz BASED(mcck_int_main) # from user -> load async stack
        clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
        bhe BASED(mcck_int_main)
        clc __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
        bl BASED(mcck_int_main)
        l %r14,BASED(.Lcleanup_critical)
        basr %r14,%r14
mcck_int_main:
        l %r14,__LC_PANIC_STACK # are we already on the panic stack?
        slr %r14,%r15
        sra %r14,PAGE_SHIFT
        be BASED(0f)
        l %r15,__LC_PANIC_STACK # load panic stack
0:      s %r15,BASED(.Lc_spsize) # make room for registers & psw
        CREATE_STACK_FRAME __LC_SAVE_AREA+32
        mvc SP_PSW(8,%r15),0(%r12)
        l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
        tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
        bno BASED(mcck_no_vtime) # no -> skip cleanup critical
        tm SP_PSW+1(%r15),0x01 # interrupting from user ?
        bz BASED(mcck_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
mcck_no_vtime:
        la %r2,SP_PTREGS(%r15) # load pt_regs
        l %r1,BASED(.Ls390_mcck)
        basr %r14,%r1 # call machine check handler
        tm SP_PSW+1(%r15),0x01 # returning to user ?
        bno BASED(mcck_return)
        l %r1,__LC_KERNEL_STACK # switch to kernel stack
        s %r1,BASED(.Lc_spsize)
        mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lr %r15,%r1
        stosm __SF_EMPTY(%r15),0x04 # turn dat on
        tm __TI_flags+3(%r12),_TIF_MCCK_PENDING
        bno BASED(mcck_return)
        TRACE_IRQS_OFF
        l %r1,BASED(.Ls390_handle_mcck)
        basr %r14,%r1 # call machine check handler
        TRACE_IRQS_ON
mcck_return:
        mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
        ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
        tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
        bno BASED(0f)
        lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
        stpt __LC_EXIT_TIMER
        lpsw __LC_RETURN_MCCK_PSW # back to caller
0:      lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
        lpsw __LC_RETURN_MCCK_PSW # back to caller

        RESTORE_ALL __LC_RETURN_MCCK_PSW,0

/*
 * Restart interruption handler, kick starter for additional CPUs
 */
#ifdef CONFIG_SMP
        __CPUINIT
        .globl restart_int_handler
restart_int_handler:
        basr %r1,0
restart_base:
        spt restart_vtime-restart_base(%r1)
        stck __LC_LAST_UPDATE_CLOCK
        mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
        mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
        l %r15,__LC_SAVE_AREA+60 # load ksp
        lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
        lam %a0,%a15,__LC_AREGS_SAVE_AREA
        lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone
        l %r1,__LC_THREAD_INFO
        mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
        mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
        xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
        stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
        basr %r14,0
        l %r14,restart_addr-.(%r14)
        basr %r14,%r14 # branch to start_secondary
restart_addr:
        .long start_secondary
        .align 8
restart_vtime:
        .long 0x7fffffff,0xffffffff
        .previous
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
        .globl restart_int_handler
restart_int_handler:
        basr %r1,0
restart_base:
        lpsw restart_crash-restart_base(%r1)
        .align 8
restart_crash:
        .long 0x000a0000,0x00000000
restart_go:
#endif

        .section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
        l %r15,__LC_PANIC_STACK # change to panic stack
        sl %r15,BASED(.Lc_spsize)
        mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
        stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
        la %r1,__LC_SAVE_AREA
        ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ?
        be BASED(0f)
        ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ?
        be BASED(0f)
        la %r1,__LC_SAVE_AREA+16
0:      mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack
        xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
        l %r1,BASED(1f) # branch to kernel_stack_overflow
        la %r2,SP_PTREGS(%r15) # load pt_regs
        br %r1
1:      .long kernel_stack_overflow
#endif

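# Each cleanup table below holds the start and end address of one critical
# code range (the + 0x80000000 sets the 31-bit addressing-mode bit so the
# entries compare equal to PSW addresses). cleanup_critical checks the
# interrupted PSW address at 4(%r12) against these ranges and, on a match,
# runs the corresponding cleanup routine so that a machine check or async
# interrupt never sees a half-finished entry/exit sequence.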
cleanup_table_system_call:
        .long system_call + 0x80000000, sysc_do_svc + 0x80000000
cleanup_table_sysc_tif:
        .long sysc_tif + 0x80000000, sysc_restore + 0x80000000
cleanup_table_sysc_restore:
        .long sysc_restore + 0x80000000, sysc_done + 0x80000000
cleanup_table_io_tif:
        .long io_tif + 0x80000000, io_restore + 0x80000000
cleanup_table_io_restore:
        .long io_restore + 0x80000000, io_done + 0x80000000

cleanup_critical:
        clc 4(4,%r12),BASED(cleanup_table_system_call)
        bl BASED(0f)
        clc 4(4,%r12),BASED(cleanup_table_system_call+4)
        bl BASED(cleanup_system_call)
0:
        clc 4(4,%r12),BASED(cleanup_table_sysc_tif)
        bl BASED(0f)
        clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4)
        bl BASED(cleanup_sysc_tif)
0:
        clc 4(4,%r12),BASED(cleanup_table_sysc_restore)
        bl BASED(0f)
        clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4)
        bl BASED(cleanup_sysc_restore)
0:
        clc 4(4,%r12),BASED(cleanup_table_io_tif)
        bl BASED(0f)
        clc 4(4,%r12),BASED(cleanup_table_io_tif+4)
        bl BASED(cleanup_io_tif)
0:
        clc 4(4,%r12),BASED(cleanup_table_io_restore)
        bl BASED(0f)
        clc 4(4,%r12),BASED(cleanup_table_io_restore+4)
        bl BASED(cleanup_io_restore)
0:
        br %r14

cleanup_system_call:
        mvc __LC_RETURN_PSW(8),0(%r12)
        clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
        bh BASED(0f)
        mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
        c %r12,BASED(.Lmck_old_psw)
        be BASED(0f)
        mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:      c %r12,BASED(.Lmck_old_psw)
        la %r12,__LC_SAVE_AREA+32
        be BASED(0f)
        la %r12,__LC_SAVE_AREA+16
0:      clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
        bhe BASED(cleanup_vtime)
        clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
        bh BASED(0f)
        mvc __LC_SAVE_AREA(16),0(%r12)
0:      st %r13,4(%r12)
        l %r15,__LC_KERNEL_STACK # problem state -> load ksp
        s %r15,BASED(.Lc_spsize) # make room for registers & psw
        st %r15,12(%r12)
        CREATE_STACK_FRAME __LC_SAVE_AREA
        mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW
        mvc SP_ILC(4,%r15),__LC_SVC_ILC
        mvc 0(4,%r12),__LC_THREAD_INFO
cleanup_vtime:
        clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
        bhe BASED(cleanup_stime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
        clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
        bh BASED(cleanup_update)
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
        mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
        mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
        la %r12,__LC_RETURN_PSW
        br %r14
cleanup_system_call_insn:
        .long sysc_saveall + 0x80000000
        .long system_call + 0x80000000
        .long sysc_vtime + 0x80000000
        .long sysc_stime + 0x80000000
        .long sysc_update + 0x80000000

cleanup_sysc_tif:
        mvc __LC_RETURN_PSW(4),0(%r12)
        mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
        la %r12,__LC_RETURN_PSW
        br %r14

cleanup_sysc_restore:
        clc 4(4,%r12),BASED(cleanup_sysc_restore_insn)
        be BASED(2f)
        mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
        c %r12,BASED(.Lmck_old_psw)
        be BASED(0f)
        mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:      clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
        be BASED(2f)
        mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
        c %r12,BASED(.Lmck_old_psw)
        la %r12,__LC_SAVE_AREA+32
        be BASED(1f)
        la %r12,__LC_SAVE_AREA+16
1:      mvc 0(16,%r12),SP_R12(%r15)
        lm %r0,%r11,SP_R0(%r15)
        l %r15,SP_R15(%r15)
2:      la %r12,__LC_RETURN_PSW
        br %r14
cleanup_sysc_restore_insn:
        .long sysc_done - 4 + 0x80000000
        .long sysc_done - 8 + 0x80000000

cleanup_io_tif:
        mvc __LC_RETURN_PSW(4),0(%r12)
        mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
        la %r12,__LC_RETURN_PSW
        br %r14

cleanup_io_restore:
        clc 4(4,%r12),BASED(cleanup_io_restore_insn)
        be BASED(1f)
        mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
        clc 4(4,%r12),BASED(cleanup_io_restore_insn+4)
        be BASED(1f)
        mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
        mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
        lm %r0,%r11,SP_R0(%r15)
        l %r15,SP_R15(%r15)
1:      la %r12,__LC_RETURN_PSW
        br %r14
cleanup_io_restore_insn:
        .long io_done - 4 + 0x80000000
        .long io_done - 8 + 0x80000000

/*
 * Integer constants
 */
        .align 4
.Lc_spsize: .long SP_SIZE
.Lc_overhead: .long STACK_FRAME_OVERHEAD
.Lnr_syscalls: .long NR_syscalls
.L0x018: .short 0x018
.L0x020: .short 0x020
.L0x028: .short 0x028
.L0x030: .short 0x030
.L0x038: .short 0x038
.Lc_1: .long 1

/*
 * Symbol constants
 */
.Ls390_mcck: .long s390_do_machine_check
.Ls390_handle_mcck:
        .long s390_handle_mcck
.Lmck_old_psw: .long __LC_MCK_OLD_PSW
.Ldo_IRQ: .long do_IRQ
.Ldo_extint: .long do_extint
.Ldo_signal: .long do_signal
.Ldo_notify_resume:
        .long do_notify_resume
.Lhandle_per: .long do_per_trap
.Ldo_execve: .long do_execve
.Lexecve_tail: .long execve_tail
.Ljump_table: .long pgm_check_table
.Lschedule: .long schedule
#ifdef CONFIG_PREEMPT
.Lpreempt_schedule_irq:
        .long preempt_schedule_irq
#endif
.Ltrace_entry: .long do_syscall_trace_enter
.Ltrace_exit: .long do_syscall_trace_exit
.Lschedtail: .long schedule_tail
.Lsysc_table: .long sys_call_table
#ifdef CONFIG_TRACE_IRQFLAGS
.Ltrace_irq_on_caller:
        .long trace_hardirqs_on_caller
.Ltrace_irq_off_caller:
        .long trace_hardirqs_off_caller
#endif
#ifdef CONFIG_LOCKDEP
.Llockdep_sys_exit:
        .long lockdep_sys_exit
#endif
.Lcritical_start:
        .long __critical_start + 0x80000000
.Lcritical_end:
        .long __critical_end + 0x80000000
.Lcleanup_critical:
        .long cleanup_critical

        .section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esa
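# For this 31-bit build each SYSCALL(esa,esame,emu) line in syscalls.S
# expands to a single ".long esa", so the include below emits one table
# entry per system call number.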
        .globl sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL