GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/s390/kernel/entry64.S
/*
 * arch/s390/kernel/entry64.S
 * S390 low-level entry points.
 *
 * Copyright (C) IBM Corp. 1999,2010
 * Author(s): Martin Schwidefsky ([email protected]),
 * Hartmut Penner ([email protected]),
 * Denis Joseph Barrow ([email protected],[email protected]),
 * Heiko Carstens <[email protected]>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS = STACK_FRAME_OVERHEAD
SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64
SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72
SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80
SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88
SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96
SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104
SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112
SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE = 1 << STACK_SHIFT
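
/*
 * A minimal C-level sketch of the register save area that the SP_* offsets
 * above describe, assuming the __PT_* offsets follow the usual pt_regs
 * layout of this kernel; struct entry_frame and its field names are
 * illustrative only:
 *
 *	struct entry_frame {
 *		unsigned char frame[STACK_FRAME_OVERHEAD]; // standard call frame (backchain etc.)
 *		unsigned long args;        // SP_ARGS
 *		unsigned long psw[2];      // SP_PSW, 16-byte PSW
 *		unsigned long gprs[16];    // SP_R0 .. SP_R15
 *		unsigned long orig_gpr2;   // SP_ORIG_R2
 *		unsigned short ilc;        // SP_ILC
 *		unsigned short svcnr;      // SP_SVCNR
 *	};
 *
 * SP_SIZE is the size of one such frame; each entry path below reserves it
 * with "aghi %r15,-SP_SIZE".
 */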

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP )
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
_TIF_MCCK_PENDING)
_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
_TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
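
/*
 * Note on the >>8 shifts above: the entry code tests these bits with
 * "tm __TI_flags+6(%r12),_TIF_SYSCALL" (see below), i.e. against byte 6 of
 * the 8-byte thread_info flags field rather than against its low byte, so
 * the flag constants are shifted down by one byte to fit the 8-bit TM mask.
 * (This reading is inferred from the tm instructions in this file.)
 */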

#define BASED(name) name-system_call(%r13)

.macro HANDLE_SIE_INTERCEPT
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
lg %r3,__LC_SIE_HOOK
ltgr %r3,%r3
jz 0f
basr %r14,%r3
0:
#endif
.endm

#ifdef CONFIG_TRACE_IRQFLAGS
.macro TRACE_IRQS_ON
basr %r2,%r0
brasl %r14,trace_hardirqs_on_caller
.endm

.macro TRACE_IRQS_OFF
basr %r2,%r0
brasl %r14,trace_hardirqs_off_caller
.endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#endif

#ifdef CONFIG_LOCKDEP
.macro LOCKDEP_SYS_EXIT
tm SP_PSW+1(%r15),0x01 # returning to user ?
jz 0f
brasl %r14,lockdep_sys_exit
0:
.endm
#else
#define LOCKDEP_SYS_EXIT
#endif

.macro UPDATE_VTIME lc_from,lc_to,lc_sum
lg %r10,\lc_from
slg %r10,\lc_to
alg %r10,\lc_sum
stg %r10,\lc_sum
.endm
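
/*
 * UPDATE_VTIME accumulates the CPU time between two lowcore timer snapshots
 * into a running total; roughly, in C (the operands are the macro
 * parameters):
 *
 *	*lc_sum += *lc_from - *lc_to;
 *
 * e.g. the first use below accumulates into __LC_USER_TIMER, the second
 * into __LC_SYSTEM_TIMER.
 */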

/*
 * Register usage in interrupt handlers:
 * R9 - pointer to current task structure
 * R13 - pointer to literal pool
 * R14 - return register for function calls
 * R15 - kernel stack pointer
 */

.macro SAVE_ALL_SVC psworg,savearea
stmg %r11,%r15,\savearea
lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
aghi %r15,-SP_SIZE # make room for registers & psw
lg %r11,__LC_LAST_BREAK
.endm

.macro SAVE_ALL_PGM psworg,savearea
stmg %r11,%r15,\savearea
tm \psworg+1,0x01 # test problem state bit
#ifdef CONFIG_CHECK_STACK
jnz 1f
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
jnz 2f
la %r12,\psworg
j stack_overflow
#else
jz 2f
#endif
1: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
2: aghi %r15,-SP_SIZE # make room for registers & psw
larl %r13,system_call
lg %r11,__LC_LAST_BREAK
.endm

.macro SAVE_ALL_ASYNC psworg,savearea
stmg %r11,%r15,\savearea
larl %r13,system_call
lg %r11,__LC_LAST_BREAK
la %r12,\psworg
tm \psworg+1,0x01 # test problem state bit
jnz 1f # from user -> load kernel stack
clc \psworg+8(8),BASED(.Lcritical_end)
jhe 0f
clc \psworg+8(8),BASED(.Lcritical_start)
jl 0f
brasl %r14,cleanup_critical
tm 1(%r12),0x01 # retest problem state after cleanup
jnz 1f
0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
#ifdef CONFIG_CHECK_STACK
jnz 1f
tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
jnz 2f
j stack_overflow
#else
jz 2f
#endif
1: lg %r15,__LC_ASYNC_STACK # load async stack
2: aghi %r15,-SP_SIZE # make room for registers & psw
.endm

.macro CREATE_STACK_FRAME savearea
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
.endm

.macro RESTORE_ALL psworg,sync
mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore
.if !\sync
ni \psworg+1,0xfd # clear wait state bit
.endif
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r13,SP_R0(%r15) # load gprs 0-13 of user
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r14,%r15,SP_R14(%r15) # load gprs 14-15 of user
lpswe \psworg # back to caller
.endm

.macro LAST_BREAK
srag %r10,%r11,23
jz 0f
stg %r11,__TI_last_break(%r12)
0:
.endm

.macro REENABLE_IRQS
mvc __SF_EMPTY(1,%r15),SP_PSW(%r15)
ni __SF_EMPTY(%r15),0xbf
ssm __SF_EMPTY(%r15)
.endm

.section .kprobes.text, "ax"

/*
 * Scheduler resume function, called by switch_to
 * gpr2 = (task_struct *) prev
 * gpr3 = (task_struct *) next
 * Returns:
 * gpr2 = prev
 */
.globl __switch_to
__switch_to:
lg %r4,__THREAD_info(%r2) # get thread_info of prev
lg %r5,__THREAD_info(%r3) # get thread_info of next
tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
jz 0f
ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next
0: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev
lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
stg %r3,__LC_CURRENT # store task struct of next
mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
stg %r5,__LC_THREAD_INFO # store thread info of next
aghi %r5,STACK_SIZE # end of kernel stack of next
stg %r5,__LC_KERNEL_STACK # store end of kernel stack
br %r14
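
/*
 * In outline, __switch_to above:
 *  1) moves a pending _TIF_MCCK_PENDING flag from prev to next, so the
 *     machine-check work is picked up by whichever task runs next,
 *  2) saves %r6-%r15 of prev in its stack frame and stores the current
 *     kernel stack pointer in prev's thread structure,
 *  3) loads next's kernel stack pointer, puts next's pid into control
 *     register 4 and restores next's saved %r6-%r15,
 *  4) updates the lowcore copies of current, the current pid, thread_info
 *     and the kernel stack end, then returns through %r14 in next's context.
 */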

__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

.globl system_call
system_call:
stpt __LC_SYNC_ENTER_TIMER
sysc_saveall:
SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
mvc SP_ILC(4,%r15),__LC_SVC_ILC
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
sysc_vtime:
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
sysc_do_svc:
llgh %r7,SP_SVCNR(%r15)
slag %r7,%r7,2 # shift and test for svc 0
jnz sysc_nr_ok
# svc 0: system call number in %r1
llgfr %r1,%r1 # clear high word in r1
cghi %r1,NR_syscalls
jnl sysc_nr_ok
sth %r1,SP_SVCNR(%r15)
slag %r7,%r1,2 # shift and test for svc 0
sysc_nr_ok:
larl %r10,sys_call_table
#ifdef CONFIG_COMPAT
tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ?
jno sysc_noemu
larl %r10,sys_call_table_emu # use 31 bit emulation system calls
sysc_noemu:
#endif
tm __TI_flags+6(%r12),_TIF_SYSCALL
mvc SP_ARGS(8,%r15),SP_R7(%r15)
lgf %r8,0(%r7,%r10) # load address of system call routine
jnz sysc_tracesys
basr %r14,%r8 # call sys_xxxx
stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
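
/*
 * The dispatch above reads roughly as the following C, assuming
 * sys_call_table consists of 32-bit (.long) entries holding entry points
 * that live in the low 2 GB, and that %r7 already contains nr * 4
 * (handler/nr/regs/a1..a5 are illustrative names):
 *
 *	long (*handler)(long, long, long, long, long) =
 *		(void *)(long)((s32 *)sys_call_table)[nr];  // lgf %r8,0(%r7,%r10)
 *	regs->gprs[2] = handler(a1, a2, a3, a4, a5);        // basr %r14,%r8 + stg %r2
 */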

sysc_return:
LOCKDEP_SYS_EXIT
sysc_tif:
tm __TI_flags+7(%r12),_TIF_WORK_SVC
jnz sysc_work # there is work to do (signals etc.)
sysc_restore:
RESTORE_ALL __LC_RETURN_PSW,1
sysc_done:

#
# There is work to do, but first we need to check if we return to userspace.
#
sysc_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
jno sysc_restore

#
# One of the work bits is on. Find out which one.
#
sysc_work_tif:
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jo sysc_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo sysc_reschedule
tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo sysc_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume
tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart
tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep
j sysc_return # beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
larl %r14,sysc_return
jg schedule # return point is sysc_return

#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
larl %r14,sysc_return
jg s390_handle_mcck # TIF bit will be cleared by handler

#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_signal # call do_signal
tm __TI_flags+7(%r12),_TIF_RESTART_SVC
jo sysc_restart
tm __TI_flags+7(%r12),_TIF_PER_TRAP
jo sysc_singlestep
j sysc_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
sysc_notify_resume:
la %r2,SP_PTREGS(%r15) # load pt_regs
larl %r14,sysc_return
jg do_notify_resume # call do_notify_resume

#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
lg %r7,SP_R2(%r15) # load new svc number
mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
lmg %r2,%r6,SP_R2(%r15) # load svc arguments
sth %r7,SP_SVCNR(%r15)
slag %r7,%r7,2
j sysc_nr_ok # restart svc

#
# _TIF_PER_TRAP is set, call do_per_trap
#
sysc_singlestep:
ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area
larl %r14,sysc_return # load adr. of system return
jg do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
sysc_tracesys:
la %r2,SP_PTREGS(%r15) # load pt_regs
la %r3,0
llgh %r0,SP_SVCNR(%r15)
stg %r0,SP_R2(%r15)
brasl %r14,do_syscall_trace_enter
lghi %r0,NR_syscalls
clgr %r0,%r2
jnh sysc_tracenogo
sllg %r7,%r2,2 # svc number *4
lgf %r8,0(%r7,%r10)
sysc_tracego:
lmg %r3,%r6,SP_R3(%r15)
mvc SP_ARGS(8,%r15),SP_R7(%r15)
lg %r2,SP_ORIG_R2(%r15)
basr %r14,%r8 # call sys_xxx
stg %r2,SP_R2(%r15) # store return value
sysc_tracenogo:
tm __TI_flags+6(%r12),_TIF_SYSCALL
jz sysc_return
la %r2,SP_PTREGS(%r15) # load pt_regs
larl %r14,sysc_return # return point is sysc_return
jg do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
.globl ret_from_fork
ret_from_fork:
lg %r13,__LC_SVC_NEW_PSW+8
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
jo 0f
stg %r15,SP_R15(%r15) # store stack pointer for new kthread
0: brasl %r14,schedule_tail
TRACE_IRQS_ON
stosm 24(%r15),0x03 # reenable interrupts
j sysc_tracenogo

#
# kernel_execve function needs to deal with pt_regs that is not
# at the usual place
#
.globl kernel_execve
kernel_execve:
stmg %r12,%r15,96(%r15)
lgr %r14,%r15
aghi %r15,-SP_SIZE
stg %r14,__SF_BACKCHAIN(%r15)
la %r12,SP_PTREGS(%r15)
xc 0(__PT_SIZE,%r12),0(%r12)
lgr %r5,%r12
brasl %r14,do_execve
ltgfr %r2,%r2
je 0f
aghi %r15,SP_SIZE
lmg %r12,%r15,96(%r15)
br %r14
# execve succeeded.
0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
lg %r15,__LC_KERNEL_STACK # load ksp
aghi %r15,-SP_SIZE # make room for registers & psw
lg %r13,__LC_SVC_NEW_PSW+8
mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
lg %r12,__LC_THREAD_INFO
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
brasl %r14,execve_tail
j sysc_return

/*
 * Program check handler routine
 */

.globl pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
stpt __LC_SYNC_ENTER_TIMER
tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
jnz pgm_per # got per exception -> special case
SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SAVE_AREA
xc SP_ILC(4,%r15),SP_ILC(%r15)
mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
pgm_no_vtime:
HANDLE_SIE_INTERCEPT
stg %r11,SP_ARGS(%r15)
lgf %r3,__LC_PGM_ILC # load program interruption code
lg %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS
lghi %r8,0x7f
ngr %r8,%r3
sll %r8,3
larl %r1,pgm_check_table
lg %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to interrupt-handler
pgm_exit:
j sysc_return

#
# handle per exception
#
pgm_per:
tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on
jnz pgm_per_std # ok, normal per event from user space
# ok it's one of the special cases, now we need to find out which one
clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
je pgm_svcper
# no interesting special case, ignore PER event
lpswe __LC_PGM_OLD_PSW

#
# Normal per exception
#
pgm_per_std:
SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz pgm_no_vtime2
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
pgm_no_vtime2:
HANDLE_SIE_INTERCEPT
lg %r1,__TI_task(%r12)
tm SP_PSW+1(%r15),0x01 # kernel per event ?
jz kernel_per
mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE
mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID
oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
lgf %r3,__LC_PGM_ILC # load program interruption code
lg %r4,__LC_TRANS_EXC_CODE
REENABLE_IRQS
lghi %r8,0x7f
ngr %r8,%r3 # clear per-event-bit and ilc
je pgm_exit2
sll %r8,3
larl %r1,pgm_check_table
lg %r1,0(%r8,%r1) # load address of handler routine
la %r2,SP_PTREGS(%r15) # address of register-save area
basr %r14,%r1 # branch to interrupt-handler
pgm_exit2:
j sysc_return

#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
mvc SP_ILC(4,%r15),__LC_SVC_ILC
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
LAST_BREAK
lg %r8,__TI_task(%r12)
mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE
mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS
mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID
oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
lmg %r2,%r6,SP_R2(%r15) # load svc arguments
j sysc_do_svc

#
# per was called from kernel, must be kprobes
#
kernel_per:
REENABLE_IRQS
xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_per_trap
j pgm_exit

/*
 * IO interrupt handler routine
 */
.globl io_int_handler
io_int_handler:
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
CREATE_STACK_FRAME __LC_SAVE_AREA+40
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz io_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
LAST_BREAK
io_no_vtime:
HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
la %r2,SP_PTREGS(%r15) # address of register-save area
brasl %r14,do_IRQ # call standard irq handler
io_return:
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
io_tif:
tm __TI_flags+7(%r12),_TIF_WORK_INT
jnz io_work # there is work to do (signals etc.)
io_restore:
RESTORE_ALL __LC_RETURN_PSW,0
io_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK_INT work
# 2) if we return to kernel code and kvm is enabled check if we need to
# modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
# the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
jo io_work_user # yes -> do resched & signal
#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
jnz io_restore # preemption is disabled
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jno io_restore
# switch to kernel stack
lg %r1,SP_R15(%r15)
aghi %r1,-SP_SIZE
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1
# TRACE_IRQS_ON already done at io_return, call
# TRACE_IRQS_OFF to keep things symmetrical
TRACE_IRQS_OFF
brasl %r14,preempt_schedule_irq
j io_return
#else
j io_restore
#endif
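
/*
 * The CONFIG_PREEMPT branch above corresponds roughly to this C, assuming
 * preempt_count() reads the thread_info preemption counter (__TI_precount)
 * and need_resched() tests _TIF_NEED_RESCHED:
 *
 *	if (!preempt_count() && need_resched()) {
 *		// switch to the task's kernel stack, then:
 *		preempt_schedule_irq();
 *	}
 */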

#
# Need to do work before returning to userspace, switch to kernel stack
#
io_work_user:
lg %r1,__LC_KERNEL_STACK
aghi %r1,-SP_SIZE
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1

#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
#
io_work_tif:
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jo io_mcck_pending
tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
jo io_reschedule
tm __TI_flags+7(%r12),_TIF_SIGPENDING
jo io_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo io_notify_resume
j io_return # beware of critical section cleanup

#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
# TRACE_IRQS_ON already done at io_return
brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
TRACE_IRQS_OFF
j io_return

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
# TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
brasl %r14,schedule # call scheduler
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j io_return

#
# _TIF_SIGPENDING is set, call do_signal
#
io_sigpending:
# TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_signal # call do_signal
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j io_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
io_notify_resume:
# TRACE_IRQS_ON already done at io_return
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,do_notify_resume # call do_notify_resume
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
j io_return

/*
 * External interrupt handler routine
 */
.globl ext_int_handler
ext_int_handler:
stck __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
CREATE_STACK_FRAME __LC_SAVE_AREA+40
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz ext_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
LAST_BREAK
ext_no_vtime:
HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
lghi %r1,4096
la %r2,SP_PTREGS(%r15) # address of register-save area
llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code
llgf %r4,__LC_EXT_PARAMS # get external parameter
lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter
brasl %r14,do_extint
j io_return

__critical_end:

/*
 * Machine check handler routines
 */
.globl mcck_int_handler
mcck_int_handler:
stck __LC_MCCK_CLOCK
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) # revalidate gprs
stmg %r11,%r15,__LC_SAVE_AREA+80
larl %r13,system_call
lg %r11,__LC_LAST_BREAK
la %r12,__LC_MCK_OLD_PSW
tm __LC_MCCK_CODE,0x80 # system damage?
jo mcck_int_main # yes -> rest of mcck code invalid
la %r14,4095
mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
jo 1f
la %r14,__LC_SYNC_ENTER_TIMER
clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
jl 0f
la %r14,__LC_ASYNC_ENTER_TIMER
0: clc 0(8,%r14),__LC_EXIT_TIMER
jl 0f
la %r14,__LC_EXIT_TIMER
0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER
jl 0f
la %r14,__LC_LAST_UPDATE_TIMER
0: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
jno mcck_int_main # no -> skip cleanup critical
tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
jnz mcck_int_main # from user -> load kernel stack
clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end)
jhe mcck_int_main
clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start)
jl mcck_int_main
brasl %r14,cleanup_critical
mcck_int_main:
lg %r14,__LC_PANIC_STACK # are we already on the panic stack?
slgr %r14,%r15
srag %r14,%r14,PAGE_SHIFT
jz 0f
lg %r15,__LC_PANIC_STACK # load panic stack
0: aghi %r15,-SP_SIZE # make room for registers & psw
CREATE_STACK_FRAME __LC_SAVE_AREA+80
mvc SP_PSW(16,%r15),0(%r12)
lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
jno mcck_no_vtime # no -> no timer update
tm SP_PSW+1(%r15),0x01 # interrupting from user ?
jz mcck_no_vtime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
LAST_BREAK
mcck_no_vtime:
la %r2,SP_PTREGS(%r15) # load pt_regs
brasl %r14,s390_do_machine_check
tm SP_PSW+1(%r15),0x01 # returning to user ?
jno mcck_return
lg %r1,__LC_KERNEL_STACK # switch to kernel stack
aghi %r1,-SP_SIZE
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lgr %r15,%r1
stosm __SF_EMPTY(%r15),0x04 # turn dat on
tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
jno mcck_return
HANDLE_SIE_INTERCEPT
TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
TRACE_IRQS_ON
mcck_return:
mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
stpt __LC_EXIT_TIMER
0: lpswe __LC_RETURN_MCCK_PSW # back to caller
mcck_done:

/*
 * Restart interruption handler, kick starter for additional CPUs
 */
#ifdef CONFIG_SMP
__CPUINIT
.globl restart_int_handler
restart_int_handler:
basr %r1,0
restart_base:
spt restart_vtime-restart_base(%r1)
stck __LC_LAST_UPDATE_CLOCK
mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1)
mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1)
lg %r15,__LC_SAVE_AREA+120 # load ksp
lghi %r10,__LC_CREGS_SAVE_AREA
lctlg %c0,%c15,0(%r10) # get new ctl regs
lghi %r10,__LC_AREGS_SAVE_AREA
lam %a0,%a15,0(%r10)
lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
lg %r1,__LC_THREAD_INFO
mvc __LC_USER_TIMER(8),__TI_user_timer(%r1)
mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
brasl %r14,start_secondary
.align 8
restart_vtime:
.long 0x7fffffff,0xffffffff
.previous
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
.globl restart_int_handler
restart_int_handler:
basr %r1,0
restart_base:
lpswe restart_crash-restart_base(%r1)
.align 8
restart_crash:
.long 0x000a0000,0x00000000,0x00000000,0x00000000
restart_go:
#endif

.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
lg %r15,__LC_PANIC_STACK # change to panic stack
aghi %r15,-SP_SIZE
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
la %r1,__LC_SAVE_AREA
chi %r12,__LC_SVC_OLD_PSW
je 0f
chi %r12,__LC_PGM_OLD_PSW
je 0f
la %r1,__LC_SAVE_AREA+40
0: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack
mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
la %r2,SP_PTREGS(%r15) # load pt_regs
jg kernel_stack_overflow
#endif

cleanup_table_system_call:
.quad system_call, sysc_do_svc
cleanup_table_sysc_tif:
.quad sysc_tif, sysc_restore
cleanup_table_sysc_restore:
.quad sysc_restore, sysc_done
cleanup_table_io_tif:
.quad io_tif, io_restore
cleanup_table_io_restore:
.quad io_restore, io_done
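
/*
 * Each cleanup_table_* entry above is a [start, end) pair of code addresses.
 * cleanup_critical below compares the interrupted instruction address
 * against these ranges and, when it falls inside one, fixes up the partially
 * completed entry or exit sequence so it can safely be resumed. In rough C
 * terms (addr is an illustrative name):
 *
 *	if (addr >= system_call && addr < sysc_do_svc)
 *		cleanup_system_call();
 *	else if (addr >= sysc_tif && addr < sysc_restore)
 *		cleanup_sysc_tif();
 *	// ... and so on for the remaining ranges
 */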

cleanup_critical:
clc 8(8,%r12),BASED(cleanup_table_system_call)
jl 0f
clc 8(8,%r12),BASED(cleanup_table_system_call+8)
jl cleanup_system_call
0:
clc 8(8,%r12),BASED(cleanup_table_sysc_tif)
jl 0f
clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8)
jl cleanup_sysc_tif
0:
clc 8(8,%r12),BASED(cleanup_table_sysc_restore)
jl 0f
clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8)
jl cleanup_sysc_restore
0:
clc 8(8,%r12),BASED(cleanup_table_io_tif)
jl 0f
clc 8(8,%r12),BASED(cleanup_table_io_tif+8)
jl cleanup_io_tif
0:
clc 8(8,%r12),BASED(cleanup_table_io_restore)
jl 0f
clc 8(8,%r12),BASED(cleanup_table_io_restore+8)
jl cleanup_io_restore
0:
br %r14

cleanup_system_call:
mvc __LC_RETURN_PSW(16),0(%r12)
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
jh 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
cghi %r12,__LC_MCK_OLD_PSW
je 0f
mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: cghi %r12,__LC_MCK_OLD_PSW
la %r12,__LC_SAVE_AREA+80
je 0f
la %r12,__LC_SAVE_AREA+40
0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
jhe cleanup_vtime
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
jh 0f
mvc __LC_SAVE_AREA(40),0(%r12)
0: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
aghi %r15,-SP_SIZE # make room for registers & psw
stg %r15,32(%r12)
stg %r11,0(%r12)
CREATE_STACK_FRAME __LC_SAVE_AREA
mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
mvc SP_ILC(4,%r15),__LC_SVC_ILC
mvc 8(8,%r12),__LC_THREAD_INFO
cleanup_vtime:
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
jhe cleanup_stime
UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32)
jh cleanup_update
UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
srag %r12,%r11,23
lg %r12,__LC_THREAD_INFO
jz 0f
stg %r11,__TI_last_break(%r12)
0: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
la %r12,__LC_RETURN_PSW
br %r14
cleanup_system_call_insn:
.quad sysc_saveall
.quad system_call
.quad sysc_vtime
.quad sysc_stime
.quad sysc_update

cleanup_sysc_tif:
mvc __LC_RETURN_PSW(8),0(%r12)
mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
la %r12,__LC_RETURN_PSW
br %r14

cleanup_sysc_restore:
clc 8(8,%r12),BASED(cleanup_sysc_restore_insn)
je 2f
clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
jhe 0f
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
cghi %r12,__LC_MCK_OLD_PSW
je 0f
mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW
la %r12,__LC_SAVE_AREA+80
je 1f
la %r12,__LC_SAVE_AREA+40
1: mvc 0(40,%r12),SP_R11(%r15)
lmg %r0,%r10,SP_R0(%r15)
lg %r15,SP_R15(%r15)
2: la %r12,__LC_RETURN_PSW
br %r14
cleanup_sysc_restore_insn:
.quad sysc_done - 4
.quad sysc_done - 16

cleanup_io_tif:
mvc __LC_RETURN_PSW(8),0(%r12)
mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
la %r12,__LC_RETURN_PSW
br %r14

cleanup_io_restore:
clc 8(8,%r12),BASED(cleanup_io_restore_insn)
je 1f
clc 8(8,%r12),BASED(cleanup_io_restore_insn+8)
jhe 0f
mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
mvc __LC_SAVE_AREA+80(40),SP_R11(%r15)
lmg %r0,%r10,SP_R0(%r15)
lg %r15,SP_R15(%r15)
1: la %r12,__LC_RETURN_PSW
br %r14
cleanup_io_restore_insn:
.quad io_done - 4
.quad io_done - 16

/*
 * Integer constants
 */
.align 4
.Lcritical_start:
.quad __critical_start
.Lcritical_end:
.quad __critical_end

.section .rodata, "a"
#define SYSCALL(esa,esame,emu) .long esame
.globl sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL
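
/*
 * syscalls.S is a list of SYSCALL(esa,esame,emu) lines, one per system call.
 * With the #define above each line expands to ".long esame", i.e. a 4-byte
 * table entry holding the 64-bit entry point; the CONFIG_COMPAT table below
 * re-includes the same file with the macro redefined to emit the "emu"
 * (31-bit emulation) entry point instead. A hypothetical entry would expand
 * as:
 *
 *	SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)  ->  .long sys_exit
 */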

#ifdef CONFIG_COMPAT

#define SYSCALL(esa,esame,emu) .long emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
#endif