/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/alpha/kernel/entry.S
 *
 * Kernel entry-points.
 */

#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/pal.h>
#include <asm/errno.h>
#include <asm/unistd.h>

	.text
	.set noat
	.cfi_sections .debug_frame

.macro CFI_START_OSF_FRAME func
19
.align 4
20
.globl \func
21
.type \func,@function
22
\func:
23
.cfi_startproc simple
24
.cfi_return_column 64
25
.cfi_def_cfa $sp, 48
26
.cfi_rel_offset 64, 8
27
.cfi_rel_offset $gp, 16
28
.cfi_rel_offset $16, 24
29
.cfi_rel_offset $17, 32
30
.cfi_rel_offset $18, 40
31
.endm
32
33
.macro CFI_END_OSF_FRAME func
34
.cfi_endproc
35
.size \func, . - \func
36
.endm
37
/*
 * This defines the normal kernel pt-regs layout.
 *
 * regs 9-15 preserved by C code
 * regs 16-18 saved by PAL-code
 * regs 29-30 saved and set up by PAL-code
 * JRP - Save regs 16-18 in a special area of the stack, so that
 * the palcode-provided values are available to the signal handler.
 */

.macro SAVE_ALL
49
subq $sp, SP_OFF, $sp
50
.cfi_adjust_cfa_offset SP_OFF
51
stq $0, 0($sp)
52
stq $1, 8($sp)
53
stq $2, 16($sp)
54
stq $3, 24($sp)
55
stq $4, 32($sp)
56
stq $28, 144($sp)
57
.cfi_rel_offset $0, 0
58
.cfi_rel_offset $1, 8
59
.cfi_rel_offset $2, 16
60
.cfi_rel_offset $3, 24
61
.cfi_rel_offset $4, 32
62
.cfi_rel_offset $28, 144
63
lda $2, alpha_mv
64
stq $5, 40($sp)
65
stq $6, 48($sp)
66
stq $7, 56($sp)
67
stq $8, 64($sp)
68
stq $19, 72($sp)
69
stq $20, 80($sp)
70
stq $21, 88($sp)
71
ldq $2, HAE_CACHE($2)
72
stq $22, 96($sp)
73
stq $23, 104($sp)
74
stq $24, 112($sp)
75
stq $25, 120($sp)
76
stq $26, 128($sp)
77
stq $27, 136($sp)
78
stq $2, 152($sp)
79
stq $16, 160($sp)
80
stq $17, 168($sp)
81
stq $18, 176($sp)
82
.cfi_rel_offset $5, 40
83
.cfi_rel_offset $6, 48
84
.cfi_rel_offset $7, 56
85
.cfi_rel_offset $8, 64
86
.cfi_rel_offset $19, 72
87
.cfi_rel_offset $20, 80
88
.cfi_rel_offset $21, 88
89
.cfi_rel_offset $22, 96
90
.cfi_rel_offset $23, 104
91
.cfi_rel_offset $24, 112
92
.cfi_rel_offset $25, 120
93
.cfi_rel_offset $26, 128
94
.cfi_rel_offset $27, 136
95
.endm
96
97
.macro RESTORE_ALL
98
lda $19, alpha_mv
99
ldq $0, 0($sp)
100
ldq $1, 8($sp)
101
ldq $2, 16($sp)
102
ldq $3, 24($sp)
103
ldq $21, 152($sp)
104
ldq $20, HAE_CACHE($19)
105
ldq $4, 32($sp)
106
ldq $5, 40($sp)
107
ldq $6, 48($sp)
108
ldq $7, 56($sp)
109
subq $20, $21, $20
110
ldq $8, 64($sp)
111
beq $20, 99f
112
ldq $20, HAE_REG($19)
113
stq $21, HAE_CACHE($19)
114
stq $21, 0($20)
115
99: ldq $19, 72($sp)
116
ldq $20, 80($sp)
117
ldq $21, 88($sp)
118
ldq $22, 96($sp)
119
ldq $23, 104($sp)
120
ldq $24, 112($sp)
121
ldq $25, 120($sp)
122
ldq $26, 128($sp)
123
ldq $27, 136($sp)
124
ldq $28, 144($sp)
125
addq $sp, SP_OFF, $sp
126
.cfi_restore $0
127
.cfi_restore $1
128
.cfi_restore $2
129
.cfi_restore $3
130
.cfi_restore $4
131
.cfi_restore $5
132
.cfi_restore $6
133
.cfi_restore $7
134
.cfi_restore $8
135
.cfi_restore $19
136
.cfi_restore $20
137
.cfi_restore $21
138
.cfi_restore $22
139
.cfi_restore $23
140
.cfi_restore $24
141
.cfi_restore $25
142
.cfi_restore $26
143
.cfi_restore $27
144
.cfi_restore $28
145
.cfi_adjust_cfa_offset -SP_OFF
146
.endm
147
148
.macro DO_SWITCH_STACK
149
bsr $1, do_switch_stack
150
.cfi_adjust_cfa_offset SWITCH_STACK_SIZE
151
.cfi_rel_offset $9, 0
152
.cfi_rel_offset $10, 8
153
.cfi_rel_offset $11, 16
154
.cfi_rel_offset $12, 24
155
.cfi_rel_offset $13, 32
156
.cfi_rel_offset $14, 40
157
.cfi_rel_offset $15, 48
158
.endm
159
160
.macro UNDO_SWITCH_STACK
161
bsr $1, undo_switch_stack
162
.cfi_restore $9
163
.cfi_restore $10
164
.cfi_restore $11
165
.cfi_restore $12
166
.cfi_restore $13
167
.cfi_restore $14
168
.cfi_restore $15
169
.cfi_adjust_cfa_offset -SWITCH_STACK_SIZE
170
.endm
171
172
/*
 * Non-syscall kernel entry points.
 */

CFI_START_OSF_FRAME entInt
177
SAVE_ALL
178
lda $8, 0x3fff
179
lda $26, ret_from_sys_call
180
bic $sp, $8, $8
181
mov $sp, $19
182
jsr $31, do_entInt
183
CFI_END_OSF_FRAME entInt
184
185
CFI_START_OSF_FRAME entArith
186
SAVE_ALL
187
lda $8, 0x3fff
188
lda $26, ret_from_sys_call
189
bic $sp, $8, $8
190
mov $sp, $18
191
jsr $31, do_entArith
192
CFI_END_OSF_FRAME entArith
193
194
CFI_START_OSF_FRAME entMM
195
SAVE_ALL
196
/* save $9 - $15 so the inline exception code can manipulate them. */
197
subq $sp, 64, $sp
198
.cfi_adjust_cfa_offset 64
199
stq $9, 0($sp)
200
stq $10, 8($sp)
201
stq $11, 16($sp)
202
stq $12, 24($sp)
203
stq $13, 32($sp)
204
stq $14, 40($sp)
205
stq $15, 48($sp)
206
.cfi_rel_offset $9, 0
207
.cfi_rel_offset $10, 8
208
.cfi_rel_offset $11, 16
209
.cfi_rel_offset $12, 24
210
.cfi_rel_offset $13, 32
211
.cfi_rel_offset $14, 40
212
.cfi_rel_offset $15, 48
213
addq $sp, 64, $19
214
/* handle the fault */
215
lda $8, 0x3fff
216
bic $sp, $8, $8
217
jsr $26, do_page_fault
218
/* reload the registers after the exception code played. */
219
ldq $9, 0($sp)
220
ldq $10, 8($sp)
221
ldq $11, 16($sp)
222
ldq $12, 24($sp)
223
ldq $13, 32($sp)
224
ldq $14, 40($sp)
225
ldq $15, 48($sp)
226
addq $sp, 64, $sp
227
.cfi_restore $9
228
.cfi_restore $10
229
.cfi_restore $11
230
.cfi_restore $12
231
.cfi_restore $13
232
.cfi_restore $14
233
.cfi_restore $15
234
.cfi_adjust_cfa_offset -64
235
/* finish up the syscall as normal. */
236
br ret_from_sys_call
237
CFI_END_OSF_FRAME entMM
238
239
CFI_START_OSF_FRAME entIF
240
SAVE_ALL
241
lda $8, 0x3fff
242
lda $26, ret_from_sys_call
243
bic $sp, $8, $8
244
mov $sp, $17
245
jsr $31, do_entIF
246
CFI_END_OSF_FRAME entIF
247
248
CFI_START_OSF_FRAME entUna
249
lda $sp, -256($sp)
250
.cfi_adjust_cfa_offset 256
251
stq $0, 0($sp)
252
.cfi_rel_offset $0, 0
253
.cfi_remember_state
254
ldq $0, 256($sp) /* get PS */
255
stq $1, 8($sp)
256
stq $2, 16($sp)
257
stq $3, 24($sp)
258
and $0, 8, $0 /* user mode? */
259
stq $4, 32($sp)
260
bne $0, entUnaUser /* yup -> do user-level unaligned fault */
261
stq $5, 40($sp)
262
stq $6, 48($sp)
263
stq $7, 56($sp)
264
stq $8, 64($sp)
265
stq $9, 72($sp)
266
stq $10, 80($sp)
267
stq $11, 88($sp)
268
stq $12, 96($sp)
269
stq $13, 104($sp)
270
stq $14, 112($sp)
271
stq $15, 120($sp)
272
/* 16-18 PAL-saved */
273
stq $19, 152($sp)
274
stq $20, 160($sp)
275
stq $21, 168($sp)
276
stq $22, 176($sp)
277
stq $23, 184($sp)
278
stq $24, 192($sp)
279
stq $25, 200($sp)
280
stq $26, 208($sp)
281
stq $27, 216($sp)
282
stq $28, 224($sp)
283
mov $sp, $19
284
stq $gp, 232($sp)
285
.cfi_rel_offset $1, 1*8
286
.cfi_rel_offset $2, 2*8
287
.cfi_rel_offset $3, 3*8
288
.cfi_rel_offset $4, 4*8
289
.cfi_rel_offset $5, 5*8
290
.cfi_rel_offset $6, 6*8
291
.cfi_rel_offset $7, 7*8
292
.cfi_rel_offset $8, 8*8
293
.cfi_rel_offset $9, 9*8
294
.cfi_rel_offset $10, 10*8
295
.cfi_rel_offset $11, 11*8
296
.cfi_rel_offset $12, 12*8
297
.cfi_rel_offset $13, 13*8
298
.cfi_rel_offset $14, 14*8
299
.cfi_rel_offset $15, 15*8
300
.cfi_rel_offset $19, 19*8
301
.cfi_rel_offset $20, 20*8
302
.cfi_rel_offset $21, 21*8
303
.cfi_rel_offset $22, 22*8
304
.cfi_rel_offset $23, 23*8
305
.cfi_rel_offset $24, 24*8
306
.cfi_rel_offset $25, 25*8
307
.cfi_rel_offset $26, 26*8
308
.cfi_rel_offset $27, 27*8
309
.cfi_rel_offset $28, 28*8
310
.cfi_rel_offset $29, 29*8
311
lda $8, 0x3fff
312
stq $31, 248($sp)
313
bic $sp, $8, $8
314
jsr $26, do_entUna
315
ldq $0, 0($sp)
316
ldq $1, 8($sp)
317
ldq $2, 16($sp)
318
ldq $3, 24($sp)
319
ldq $4, 32($sp)
320
ldq $5, 40($sp)
321
ldq $6, 48($sp)
322
ldq $7, 56($sp)
323
ldq $8, 64($sp)
324
ldq $9, 72($sp)
325
ldq $10, 80($sp)
326
ldq $11, 88($sp)
327
ldq $12, 96($sp)
328
ldq $13, 104($sp)
329
ldq $14, 112($sp)
330
ldq $15, 120($sp)
331
/* 16-18 PAL-saved */
332
ldq $19, 152($sp)
333
ldq $20, 160($sp)
334
ldq $21, 168($sp)
335
ldq $22, 176($sp)
336
ldq $23, 184($sp)
337
ldq $24, 192($sp)
338
ldq $25, 200($sp)
339
ldq $26, 208($sp)
340
ldq $27, 216($sp)
341
ldq $28, 224($sp)
342
ldq $gp, 232($sp)
343
lda $sp, 256($sp)
344
.cfi_restore $1
345
.cfi_restore $2
346
.cfi_restore $3
347
.cfi_restore $4
348
.cfi_restore $5
349
.cfi_restore $6
350
.cfi_restore $7
351
.cfi_restore $8
352
.cfi_restore $9
353
.cfi_restore $10
354
.cfi_restore $11
355
.cfi_restore $12
356
.cfi_restore $13
357
.cfi_restore $14
358
.cfi_restore $15
359
.cfi_restore $19
360
.cfi_restore $20
361
.cfi_restore $21
362
.cfi_restore $22
363
.cfi_restore $23
364
.cfi_restore $24
365
.cfi_restore $25
366
.cfi_restore $26
367
.cfi_restore $27
368
.cfi_restore $28
369
.cfi_restore $29
370
.cfi_adjust_cfa_offset -256
371
call_pal PAL_rti
372
373
.align 4
374
entUnaUser:
375
.cfi_restore_state
376
ldq $0, 0($sp) /* restore original $0 */
377
lda $sp, 256($sp) /* pop entUna's stack frame */
378
.cfi_restore $0
379
.cfi_adjust_cfa_offset -256
380
SAVE_ALL /* setup normal kernel stack */
381
lda $sp, -64($sp)
382
.cfi_adjust_cfa_offset 64
383
stq $9, 0($sp)
384
stq $10, 8($sp)
385
stq $11, 16($sp)
386
stq $12, 24($sp)
387
stq $13, 32($sp)
388
stq $14, 40($sp)
389
stq $15, 48($sp)
390
.cfi_rel_offset $9, 0
391
.cfi_rel_offset $10, 8
392
.cfi_rel_offset $11, 16
393
.cfi_rel_offset $12, 24
394
.cfi_rel_offset $13, 32
395
.cfi_rel_offset $14, 40
396
.cfi_rel_offset $15, 48
397
lda $8, 0x3fff
398
addq $sp, 64, $19
399
bic $sp, $8, $8
400
jsr $26, do_entUnaUser
401
ldq $9, 0($sp)
402
ldq $10, 8($sp)
403
ldq $11, 16($sp)
404
ldq $12, 24($sp)
405
ldq $13, 32($sp)
406
ldq $14, 40($sp)
407
ldq $15, 48($sp)
408
lda $sp, 64($sp)
409
.cfi_restore $9
410
.cfi_restore $10
411
.cfi_restore $11
412
.cfi_restore $12
413
.cfi_restore $13
414
.cfi_restore $14
415
.cfi_restore $15
416
.cfi_adjust_cfa_offset -64
417
br ret_from_sys_call
418
CFI_END_OSF_FRAME entUna
419
420
CFI_START_OSF_FRAME entDbg
421
SAVE_ALL
422
lda $8, 0x3fff
423
lda $26, ret_from_sys_call
424
bic $sp, $8, $8
425
mov $sp, $16
426
jsr $31, do_entDbg
427
CFI_END_OSF_FRAME entDbg
428
429
/*
 * The system call entry point is special.  Most importantly, it looks
 * like a function call to userspace as far as clobbered registers.  We
 * do preserve the argument registers (for syscall restarts) and $26
 * (for leaf syscall functions).
 *
 * So much for theory.  We don't take advantage of this yet.
 *
 * Note that a0-a2 are not saved by PALcode as with the other entry points.
 */

.align 4
441
.globl entSys
442
.type entSys, @function
443
.cfi_startproc simple
444
.cfi_return_column 64
445
.cfi_def_cfa $sp, 48
446
.cfi_rel_offset 64, 8
447
.cfi_rel_offset $gp, 16
448
entSys:
449
SAVE_ALL
450
lda $8, 0x3fff
451
bic $sp, $8, $8
452
lda $4, NR_syscalls($31)
453
stq $16, SP_OFF+24($sp)
454
lda $5, sys_call_table
455
lda $27, sys_ni_syscall
456
cmpult $0, $4, $4
457
ldl $3, TI_FLAGS($8)
458
stq $17, SP_OFF+32($sp)
459
s8addq $0, $5, $5
460
stq $18, SP_OFF+40($sp)
461
.cfi_rel_offset $16, SP_OFF+24
462
.cfi_rel_offset $17, SP_OFF+32
463
.cfi_rel_offset $18, SP_OFF+40
464
#ifdef CONFIG_AUDITSYSCALL
465
lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
466
and $3, $6, $3
467
bne $3, strace
468
#else
469
blbs $3, strace /* check for SYSCALL_TRACE in disguise */
470
#endif
471
beq $4, 1f
472
ldq $27, 0($5)
473
1: jsr $26, ($27), sys_ni_syscall
474
ldgp $gp, 0($26)
475
blt $0, $syscall_error /* the call failed */
476
$ret_success:
477
stq $0, 0($sp)
478
stq $31, 72($sp) /* a3=0 => no error */
479
480
.align 4
481
.globl ret_from_sys_call
482
ret_from_sys_call:
483
cmovne $26, 0, $18 /* $18 = 0 => non-restartable */
484
ldq $0, SP_OFF($sp)
485
and $0, 8, $0
486
beq $0, ret_to_kernel
487
ret_to_user:
488
/* Make sure need_resched and sigpending don't change between
489
sampling and the rti. */
490
lda $16, 7
491
call_pal PAL_swpipl
492
ldl $17, TI_FLAGS($8)
493
and $17, _TIF_WORK_MASK, $2
494
bne $2, work_pending
495
restore_all:
496
ldl $2, TI_STATUS($8)
497
and $2, TS_SAVED_FP | TS_RESTORE_FP, $3
498
bne $3, restore_fpu
499
restore_other:
500
.cfi_remember_state
501
RESTORE_ALL
502
call_pal PAL_rti
503
504
ret_to_kernel:
505
.cfi_restore_state
506
lda $16, 7
507
call_pal PAL_swpipl
508
br restore_other
509
510
.align 3
511
$syscall_error:
512
/*
513
* Some system calls (e.g., ptrace) can return arbitrary
514
* values which might normally be mistaken as error numbers.
515
* Those functions must zero $0 (v0) directly in the stack
516
* frame to indicate that a negative return value wasn't an
517
* error number..
518
*/
519
ldq $18, 0($sp) /* old syscall nr (zero if success) */
520
beq $18, $ret_success
521
522
ldq $19, 72($sp) /* .. and this a3 */
523
subq $31, $0, $0 /* with error in v0 */
524
addq $31, 1, $1 /* set a3 for errno return */
525
stq $0, 0($sp)
526
mov $31, $26 /* tell "ret_from_sys_call" we can restart */
527
stq $1, 72($sp) /* a3 for return */
528
br ret_from_sys_call
529
530
/*
 * Do all cleanup when returning from all interrupts and system calls.
 *
 * Arguments:
 *       $8: current.
 *      $17: TI_FLAGS.
 *      $18: The old syscall number, or zero if this is not a return
 *           from a syscall that errored and is possibly restartable.
 *      $19: The old a3 value
 */

.align 4
542
.type work_pending, @function
543
work_pending:
544
and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2
545
bne $2, $work_notifysig
546
547
$work_resched:
548
/*
549
* We can get here only if we returned from syscall without SIGPENDING
550
* or got through work_notifysig already. Either case means no syscall
551
* restarts for us, so let $18 and $19 burn.
552
*/
553
jsr $26, schedule
554
mov 0, $18
555
br ret_to_user
556
557
$work_notifysig:
558
mov $sp, $16
559
DO_SWITCH_STACK
560
jsr $26, do_work_pending
561
UNDO_SWITCH_STACK
562
br restore_all
563
564
/*
 * PTRACE syscall handler
 */

.align 4
569
.type strace, @function
570
strace:
571
/* set up signal stack, call syscall_trace */
572
// NB: if anyone adds preemption, this block will need to be protected
573
ldl $1, TI_STATUS($8)
574
and $1, TS_SAVED_FP, $3
575
or $1, TS_SAVED_FP, $2
576
bne $3, 1f
577
stl $2, TI_STATUS($8)
578
bsr $26, __save_fpu
579
1:
580
DO_SWITCH_STACK
581
jsr $26, syscall_trace_enter /* returns the syscall number */
582
UNDO_SWITCH_STACK
583
584
/* get the arguments back.. */
585
ldq $16, SP_OFF+24($sp)
586
ldq $17, SP_OFF+32($sp)
587
ldq $18, SP_OFF+40($sp)
588
ldq $19, 72($sp)
589
ldq $20, 80($sp)
590
ldq $21, 88($sp)
591
592
/* get the system call pointer.. */
593
lda $1, NR_syscalls($31)
594
lda $2, sys_call_table
595
lda $27, sys_ni_syscall
596
cmpult $0, $1, $1
597
s8addq $0, $2, $2
598
beq $1, 1f
599
ldq $27, 0($2)
600
1: jsr $26, ($27), sys_gettimeofday
601
ret_from_straced:
602
ldgp $gp, 0($26)
603
604
/* check return.. */
605
blt $0, $strace_error /* the call failed */
606
$strace_success:
607
stq $31, 72($sp) /* a3=0 => no error */
608
stq $0, 0($sp) /* save return value */
609
610
DO_SWITCH_STACK
611
jsr $26, syscall_trace_leave
612
UNDO_SWITCH_STACK
613
br $31, ret_from_sys_call
614
615
.align 3
616
$strace_error:
617
ldq $18, 0($sp) /* old syscall nr (zero if success) */
618
beq $18, $strace_success
619
ldq $19, 72($sp) /* .. and this a3 */
620
621
subq $31, $0, $0 /* with error in v0 */
622
addq $31, 1, $1 /* set a3 for errno return */
623
stq $0, 0($sp)
624
stq $1, 72($sp) /* a3 for return */
625
626
DO_SWITCH_STACK
627
mov $18, $9 /* save old syscall number */
628
mov $19, $10 /* save old a3 */
629
jsr $26, syscall_trace_leave
630
mov $9, $18
631
mov $10, $19
632
UNDO_SWITCH_STACK
633
634
mov $31, $26 /* tell "ret_from_sys_call" we can restart */
635
br ret_from_sys_call
636
CFI_END_OSF_FRAME entSys
637
638
/*
 * Save and restore the switch stack -- aka the balance of the user context.
 */

.align 4
643
.type do_switch_stack, @function
644
.cfi_startproc simple
645
.cfi_return_column 64
646
.cfi_def_cfa $sp, 0
647
.cfi_register 64, $1
648
do_switch_stack:
649
lda $sp, -SWITCH_STACK_SIZE($sp)
650
.cfi_adjust_cfa_offset SWITCH_STACK_SIZE
651
stq $9, 0($sp)
652
stq $10, 8($sp)
653
stq $11, 16($sp)
654
stq $12, 24($sp)
655
stq $13, 32($sp)
656
stq $14, 40($sp)
657
stq $15, 48($sp)
658
stq $26, 56($sp)
659
ret $31, ($1), 1
660
.cfi_endproc
661
.size do_switch_stack, .-do_switch_stack
662
663
.align 4
664
.type undo_switch_stack, @function
665
.cfi_startproc simple
666
.cfi_def_cfa $sp, 0
667
.cfi_register 64, $1
668
undo_switch_stack:
669
ldq $9, 0($sp)
670
ldq $10, 8($sp)
671
ldq $11, 16($sp)
672
ldq $12, 24($sp)
673
ldq $13, 32($sp)
674
ldq $14, 40($sp)
675
ldq $15, 48($sp)
676
ldq $26, 56($sp)
677
lda $sp, SWITCH_STACK_SIZE($sp)
678
ret $31, ($1), 1
679
.cfi_endproc
680
.size undo_switch_stack, .-undo_switch_stack
681
682
#define FR(n) n * 8 + TI_FP($8)
683
.align 4
684
.globl __save_fpu
685
.type __save_fpu, @function
686
__save_fpu:
687
#define V(n) stt $f##n, FR(n)
688
V( 0); V( 1); V( 2); V( 3)
689
V( 4); V( 5); V( 6); V( 7)
690
V( 8); V( 9); V(10); V(11)
691
V(12); V(13); V(14); V(15)
692
V(16); V(17); V(18); V(19)
693
V(20); V(21); V(22); V(23)
694
V(24); V(25); V(26); V(27)
695
mf_fpcr $f0 # get fpcr
696
V(28); V(29); V(30)
697
stt $f0, FR(31) # save fpcr in slot of $f31
698
ldt $f0, FR(0) # don't let "__save_fpu" change fp state.
699
ret
700
#undef V
701
.size __save_fpu, .-__save_fpu
702
703
.align 4
704
restore_fpu:
705
and $3, TS_RESTORE_FP, $3
706
bic $2, TS_SAVED_FP | TS_RESTORE_FP, $2
707
beq $3, 1f
708
#define V(n) ldt $f##n, FR(n)
709
ldt $f30, FR(31) # get saved fpcr
710
V( 0); V( 1); V( 2); V( 3)
711
mt_fpcr $f30 # install saved fpcr
712
V( 4); V( 5); V( 6); V( 7)
713
V( 8); V( 9); V(10); V(11)
714
V(12); V(13); V(14); V(15)
715
V(16); V(17); V(18); V(19)
716
V(20); V(21); V(22); V(23)
717
V(24); V(25); V(26); V(27)
718
V(28); V(29); V(30)
719
1: stl $2, TI_STATUS($8)
720
br restore_other
721
#undef V
722
723
724
/*
 * The meat of the context switch code.
 */

.align 4
728
.globl alpha_switch_to
729
.type alpha_switch_to, @function
730
.cfi_startproc
731
alpha_switch_to:
732
DO_SWITCH_STACK
733
ldl $1, TI_STATUS($8)
734
and $1, TS_RESTORE_FP, $3
735
bne $3, 1f
736
or $1, TS_RESTORE_FP | TS_SAVED_FP, $2
737
and $1, TS_SAVED_FP, $3
738
stl $2, TI_STATUS($8)
739
bne $3, 1f
740
bsr $26, __save_fpu
741
1:
742
call_pal PAL_swpctx
743
lda $8, 0x3fff
744
UNDO_SWITCH_STACK
745
bic $sp, $8, $8
746
mov $17, $0
747
ret
748
.cfi_endproc
749
.size alpha_switch_to, .-alpha_switch_to
750
751
/*
 * New processes begin life here.
 */

.globl ret_from_fork
756
.align 4
757
.ent ret_from_fork
758
ret_from_fork:
759
lda $26, ret_to_user
760
mov $17, $16
761
jmp $31, schedule_tail
762
.end ret_from_fork
763
764
/*
 * ... and new kernel threads - here
 */

.align 4
768
.globl ret_from_kernel_thread
769
.ent ret_from_kernel_thread
770
ret_from_kernel_thread:
771
mov $17, $16
772
jsr $26, schedule_tail
773
mov $9, $27
774
mov $10, $16
775
jsr $26, ($9)
776
br $31, ret_to_user
777
.end ret_from_kernel_thread
778
779

/*
 * Special system calls.  Most of these are special in that they either
 * have to play switch_stack games.
 */

.macro fork_like name
786
.align 4
787
.globl alpha_\name
788
.ent alpha_\name
789
alpha_\name:
790
.prologue 0
791
bsr $1, do_switch_stack
792
// NB: if anyone adds preemption, this block will need to be protected
793
ldl $1, TI_STATUS($8)
794
and $1, TS_SAVED_FP, $3
795
or $1, TS_SAVED_FP, $2
796
bne $3, 1f
797
stl $2, TI_STATUS($8)
798
bsr $26, __save_fpu
799
1:
800
jsr $26, sys_\name
801
ldq $26, 56($sp)
802
lda $sp, SWITCH_STACK_SIZE($sp)
803
ret
804
.end alpha_\name
805
.endm
806
807
fork_like fork
808
fork_like vfork
809
fork_like clone
810
fork_like clone3
811
812
.macro sigreturn_like name
813
.align 4
814
.globl sys_\name
815
.ent sys_\name
816
sys_\name:
817
.prologue 0
818
lda $9, ret_from_straced
819
cmpult $26, $9, $9
820
lda $sp, -SWITCH_STACK_SIZE($sp)
821
jsr $26, do_\name
822
bne $9, 1f
823
jsr $26, syscall_trace_leave
824
1: br $1, undo_switch_stack
825
br ret_from_sys_call
826
.end sys_\name
827
.endm
828
829
sigreturn_like sigreturn
830
sigreturn_like rt_sigreturn
831
832
.align 4
833
.globl alpha_syscall_zero
834
.ent alpha_syscall_zero
835
alpha_syscall_zero:
836
.prologue 0
837
/* Special because it needs to do something opposite to
838
force_successful_syscall_return(). We use the saved
839
syscall number for that, zero meaning "not an error".
840
That works nicely, but for real syscall 0 we need to
841
make sure that this logics doesn't get confused.
842
Store a non-zero there - -ENOSYS we need in register
843
for our return value will do just fine.
844
*/
845
lda $0, -ENOSYS
846
unop
847
stq $0, 0($sp)
848
ret
849
.end alpha_syscall_zero
850
851