GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/powerpc/aim/trap_subr64.S
/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTICE: This is not a standalone file. To use it, #include it in
 * your port's locore.S, like so:
 *
 *	#include <powerpc/aim/trap_subr.S>
 */

/* Locate the per-CPU data structure */
#define GET_CPUINFO(r)	\
	mfsprg0	r
#define GET_TOCBASE(r)	\
	lis	r,DMAP_BASE_ADDRESS@highesta;	/* To real-mode alias/dmap */ \
	sldi	r,r,32;						\
	ori	r,r,TRAP_TOCBASE;	/* Magic address for TOC */	\
	ld	r,0(r)
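
/*
 * Editor's illustration (not part of the upstream file): expanded for a
 * concrete register, GET_TOCBASE(%r2) is assumed to read the kernel TOC
 * pointer through the real-mode/direct-map alias of the TRAP_TOCBASE slot,
 * so it works before translation is enabled:
 *
 *	lis	%r2,DMAP_BASE_ADDRESS@highesta
 *	sldi	%r2,%r2,32
 *	ori	%r2,%r2,TRAP_TOCBASE
 *	ld	%r2,0(%r2)
 */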

/*
 * Restore SRs for a pmap
 *
 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
 */

/*
 * User SRs are loaded through a pointer to the current pmap.
 * PCPU already in %r3
 */
restore_usersrs:
	ld	%r28,PC_USERSLB(%r3)
	cmpdi	%r28, 0			/* If user SLB pointer NULL, exit */
	beqlr

	li	%r29, 0			/* Set the counter to zero */

	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	ld	%r31, 0(%r28)		/* Load SLB entry pointer */
	cmpdi	%r31, 0			/* If NULL, stop */
	beqlr

	ld	%r30, 0(%r31)		/* Load SLBV */
	ld	%r31, 8(%r31)		/* Load SLBE */
	or	%r31, %r31, %r29	/* Set SLBE slot */
	slbmte	%r30, %r31		/* Install SLB entry */

	addi	%r28, %r28, 8		/* Advance pointer */
	addi	%r29, %r29, 1
	b	1b			/* Repeat */
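
/*
 * Editor's note, an assumption for illustration only: the loop above is
 * consistent with PC_USERSLB pointing at a NULL-terminated array of
 * pointers, each referring to a 16-byte SLB entry pair, roughly:
 *
 *	struct slb { uint64_t slbv; uint64_t slbe; };
 *	struct slb **userslb;		-- userslb[i] == NULL ends the walk
 *
 * Hardware SLB slot i then receives slbv and (slbe | i).
 */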

/*
 * Kernel SRs are loaded directly from the PCPU fields
 * PCPU in %r1
 */
restore_kernsrs:
	lwz	%r29, PC_FLAGS(%r1)
	mtcr	%r29
	btlr	0
	addi	%r28,%r1,PC_KERNSLB
	ld	%r29,16(%r28)		/* One past USER_SLB_SLOT */
	cmpdi	%r29,0
	beqlr				/* If first kernel entry is invalid,
					 * SLBs not in use, so exit early */

	/* Otherwise, set up SLBs */
	li	%r29, 0			/* Set the counter to zero */

	slbia
	slbmfee	%r31,%r29
	clrrdi	%r31,%r31,28
	slbie	%r31
1:	cmpdi	%r29, USER_SLB_SLOT	/* Skip the user slot */
	beq-	2f

	ld	%r31, 8(%r28)		/* Load SLBE */
	cmpdi	%r31, 0			/* If SLBE is not valid, stop */
	beqlr
	ld	%r30, 0(%r28)		/* Load SLBV */
	slbmte	%r30, %r31		/* Install SLB entry */

2:	addi	%r28, %r28, 16		/* Advance pointer */
	addi	%r29, %r29, 1
	cmpdi	%r29, 64		/* Repeat if we are not at the end */
	blt	1b
	blr
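
/*
 * Editor's note (assumption, for illustration): unlike the user path above,
 * the kernel SLB cache at PC_KERNSLB appears to be a flat array of 64
 * 16-byte { slbv, slbe } pairs scanned in place, one per hardware slot;
 * USER_SLB_SLOT is skipped and left free for the user segment mapping.
 */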

/*
 * FRAME_SETUP assumes:
 *	SPRG1		SP (1)
 *	SPRG3		trap type
 *	savearea	r27-r31,DAR,DSISR   (DAR & DSISR only for DSI traps)
 *	r28		LR
 *	r29		CR
 *	r30		scratch
 *	r31		scratch
 *	r1		kernel stack
 *	SRR0/1		as at start of trap
 *
 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse
 * in any real-mode fault handler, including those handling double faults.
 */
#define	FRAME_SETUP(savearea)						\
/* Have to enable translation to allow access of kernel stack: */	\
	GET_CPUINFO(%r31);						\
	mfsrr0	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR0)(%r31);	/* save SRR0 */	\
	mfsrr1	%r30;							\
	std	%r30,(savearea+CPUSAVE_SRR1)(%r31);	/* save SRR1 */	\
	mfsprg1	%r31;			/* get saved SP (clears SPRG1) */ \
	mfmsr	%r30;							\
	ori	%r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */	\
	mtmsr	%r30;			/* stack can now be accessed */	\
	isync;								\
	stdu	%r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
	std	%r0, FRAME_0+48(%r1);	/* save r0 in the trapframe */	\
	std	%r31,FRAME_1+48(%r1);	/* save SP   "  " */		\
	std	%r2, FRAME_2+48(%r1);	/* save r2   "  " */		\
	std	%r28,FRAME_LR+48(%r1);	/* save LR   "  " */		\
	std	%r29,FRAME_CR+48(%r1);	/* save CR   "  " */		\
	GET_CPUINFO(%r2);						\
	ld	%r27,(savearea+CPUSAVE_R27)(%r2);  /* get saved r27 */	\
	ld	%r28,(savearea+CPUSAVE_R28)(%r2);  /* get saved r28 */	\
	ld	%r29,(savearea+CPUSAVE_R29)(%r2);  /* get saved r29 */	\
	ld	%r30,(savearea+CPUSAVE_R30)(%r2);  /* get saved r30 */	\
	ld	%r31,(savearea+CPUSAVE_R31)(%r2);  /* get saved r31 */	\
	std	%r3, FRAME_3+48(%r1);	/* save r3-r31 */		\
	std	%r4, FRAME_4+48(%r1);					\
	std	%r5, FRAME_5+48(%r1);					\
	std	%r6, FRAME_6+48(%r1);					\
	std	%r7, FRAME_7+48(%r1);					\
	std	%r8, FRAME_8+48(%r1);					\
	std	%r9, FRAME_9+48(%r1);					\
	std	%r10, FRAME_10+48(%r1);					\
	std	%r11, FRAME_11+48(%r1);					\
	std	%r12, FRAME_12+48(%r1);					\
	std	%r13, FRAME_13+48(%r1);					\
	std	%r14, FRAME_14+48(%r1);					\
	std	%r15, FRAME_15+48(%r1);					\
	std	%r16, FRAME_16+48(%r1);					\
	std	%r17, FRAME_17+48(%r1);					\
	std	%r18, FRAME_18+48(%r1);					\
	std	%r19, FRAME_19+48(%r1);					\
	std	%r20, FRAME_20+48(%r1);					\
	std	%r21, FRAME_21+48(%r1);					\
	std	%r22, FRAME_22+48(%r1);					\
	std	%r23, FRAME_23+48(%r1);					\
	std	%r24, FRAME_24+48(%r1);					\
	std	%r25, FRAME_25+48(%r1);					\
	std	%r26, FRAME_26+48(%r1);					\
	std	%r27, FRAME_27+48(%r1);					\
	std	%r28, FRAME_28+48(%r1);					\
	std	%r29, FRAME_29+48(%r1);					\
	std	%r30, FRAME_30+48(%r1);					\
	std	%r31, FRAME_31+48(%r1);					\
	ld	%r28,(savearea+CPUSAVE_AIM_DAR)(%r2);  /* saved DAR */	\
	ld	%r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
	ld	%r30,(savearea+CPUSAVE_SRR0)(%r2);	/* saved SRR0 */ \
	ld	%r31,(savearea+CPUSAVE_SRR1)(%r2);	/* saved SRR1 */ \
	mfxer	%r3;							\
	mfctr	%r4;							\
	mfsprg3	%r5;							\
	std	%r3, FRAME_XER+48(1);	/* save xer/ctr/exc */		\
	std	%r4, FRAME_CTR+48(1);					\
	std	%r5, FRAME_EXC+48(1);					\
	std	%r28,FRAME_AIM_DAR+48(1);				\
	std	%r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */	\
	std	%r30,FRAME_SRR0+48(1);					\
	std	%r31,FRAME_SRR1+48(1);					\
	ld	%r13,PC_CURTHREAD(%r2)	/* set kernel curthread */
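
/*
 * Editor's note (assumptions, for orientation only): the stdu above drops
 * the stack by FRAMELEN plus 288 bytes, 288 presumably being the 64-bit
 * PowerPC ELF ABI red zone below the old SP that must stay intact.  The
 * FRAME_*+48 offsets then place the trapframe just above a minimal 48-byte
 * frame header, which is why the C dispatcher later receives the trapframe
 * pointer as %r1 + 48 (see the "addi %r3,%r1,48" before the
 * powerpc_interrupt call).
 */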

#define	FRAME_LEAVE(savearea)						\
/* Disable exceptions: */						\
	mfmsr	%r2;							\
	andi.	%r2,%r2,~PSL_EE@l;					\
	mtmsr	%r2;							\
	isync;								\
/* Now restore regs: */							\
	ld	%r2,FRAME_SRR0+48(%r1);					\
	ld	%r3,FRAME_SRR1+48(%r1);					\
	ld	%r4,FRAME_CTR+48(%r1);					\
	ld	%r5,FRAME_XER+48(%r1);					\
	ld	%r6,FRAME_LR+48(%r1);					\
	GET_CPUINFO(%r7);						\
	std	%r2,(savearea+CPUSAVE_SRR0)(%r7);	/* save SRR0 */	\
	std	%r3,(savearea+CPUSAVE_SRR1)(%r7);	/* save SRR1 */	\
	ld	%r7,FRAME_CR+48(%r1);					\
	mtctr	%r4;							\
	mtxer	%r5;							\
	mtlr	%r6;							\
	mtsprg2	%r7;			/* save cr */			\
	ld	%r31,FRAME_31+48(%r1);	/* restore r0-31 */		\
	ld	%r30,FRAME_30+48(%r1);					\
	ld	%r29,FRAME_29+48(%r1);					\
	ld	%r28,FRAME_28+48(%r1);					\
	ld	%r27,FRAME_27+48(%r1);					\
	ld	%r26,FRAME_26+48(%r1);					\
	ld	%r25,FRAME_25+48(%r1);					\
	ld	%r24,FRAME_24+48(%r1);					\
	ld	%r23,FRAME_23+48(%r1);					\
	ld	%r22,FRAME_22+48(%r1);					\
	ld	%r21,FRAME_21+48(%r1);					\
	ld	%r20,FRAME_20+48(%r1);					\
	ld	%r19,FRAME_19+48(%r1);					\
	ld	%r18,FRAME_18+48(%r1);					\
	ld	%r17,FRAME_17+48(%r1);					\
	ld	%r16,FRAME_16+48(%r1);					\
	ld	%r15,FRAME_15+48(%r1);					\
	ld	%r14,FRAME_14+48(%r1);					\
	ld	%r13,FRAME_13+48(%r1);					\
	ld	%r12,FRAME_12+48(%r1);					\
	ld	%r11,FRAME_11+48(%r1);					\
	ld	%r10,FRAME_10+48(%r1);					\
	ld	%r9, FRAME_9+48(%r1);					\
	ld	%r8, FRAME_8+48(%r1);					\
	ld	%r7, FRAME_7+48(%r1);					\
	ld	%r6, FRAME_6+48(%r1);					\
	ld	%r5, FRAME_5+48(%r1);					\
	ld	%r4, FRAME_4+48(%r1);					\
	ld	%r3, FRAME_3+48(%r1);					\
	ld	%r2, FRAME_2+48(%r1);					\
	ld	%r0, FRAME_0+48(%r1);					\
	ld	%r1, FRAME_1+48(%r1);					\
/* Can't touch %r1 from here on */					\
	mtsprg3	%r3;			/* save r3 */			\
/* Disable translation, machine check and recoverability: */		\
	mfmsr	%r3;							\
	andi.	%r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l;		\
	mtmsr	%r3;							\
	isync;								\
/* Decide whether we return to user mode: */				\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);			\
	mtcr	%r3;							\
	bf	17,1f;			/* branch if PSL_PR is false */	\
/* Restore user SRs */							\
	GET_CPUINFO(%r3);						\
	std	%r27,(savearea+CPUSAVE_R27)(%r3);			\
	lwz	%r27,PC_FLAGS(%r3);					\
	mtcr	%r27;							\
	bt	0, 0f;			/* Check to skip restoring SRs. */ \
	std	%r28,(savearea+CPUSAVE_R28)(%r3);			\
	std	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	std	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	std	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	mflr	%r27;			/* preserve LR */		\
	bl	restore_usersrs;	/* uses r28-r31 */		\
	mtlr	%r27;							\
	ld	%r31,(savearea+CPUSAVE_R31)(%r3);			\
	ld	%r30,(savearea+CPUSAVE_R30)(%r3);			\
	ld	%r29,(savearea+CPUSAVE_R29)(%r3);			\
	ld	%r28,(savearea+CPUSAVE_R28)(%r3);			\
0:									\
	ld	%r27,(savearea+CPUSAVE_R27)(%r3);			\
1:	mfsprg2	%r3;			/* restore cr */		\
	mtcr	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR0)(%r3);	/* restore srr0 */ \
	mtsrr0	%r3;							\
	GET_CPUINFO(%r3);						\
	ld	%r3,(savearea+CPUSAVE_SRR1)(%r3);	/* restore srr1 */ \
	mtsrr1	%r3;							\
	mfsprg3	%r3			/* restore r3 */

#ifdef KDTRACE_HOOKS
	.data
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr, @object
	.size	dtrace_invop_calltrap_addr, 8
dtrace_invop_calltrap_addr:
	.word	0
	.word	0

	.text
#endif

/*
 * Processor reset exception handler. These are typically
 * the first instructions the processor executes after a
 * software reset. We do this in two bits so that we are
 * not still hanging around in the trap handling region
 * once the MMU is turned on.
 */
	.globl	CNAME(rstcode), CNAME(rstcodeend), CNAME(cpu_reset_handler)
	.globl	CNAME(cpu_wakeup_handler)
	.p2align 3
CNAME(rstcode):
#ifdef __LITTLE_ENDIAN__
	/*
	 * XXX This shouldn't be necessary.
	 *
	 * According to the ISA documentation, LE should be set from HILE
	 * or the LPCR ILE bit automatically. However, the entry into this
	 * vector from OPAL_START_CPU does not honor this correctly.
	 *
	 * We should be able to define an alternate entry for opal's
	 * start_kernel_secondary asm code to branch to.
	 */
	RETURN_TO_NATIVE_ENDIAN
#endif
	/*
	 * Check whether this is a software reset or the processor is
	 * waking up from power-saving mode.  It is a software reset
	 * when SRR1 bits 46:47 are 0b00.
	 */
	/* 0x00 */
	ld	%r2,TRAP_GENTRAP(0)	/* Real-mode &generictrap */
	mfsrr1	%r9			/* Load SRR1 into r9 */
	andis.	%r9,%r9,0x3		/* Logic AND with 46:47 bits */

	beq	2f			/* Branch if software reset */
	/* 0x10 */
	/* Reset was wakeup */
	addi	%r9,%r2,(cpu_wakeup_handler-generictrap)
	b	1f			/* Was power save, do the wakeup */

	/* Reset was software reset */
	/* Explicitly set MSR[SF] */
2:	mfmsr	%r9
	li	%r8,1
	/* 0x20 */
	insrdi	%r9,%r8,1,0
	mtmsrd	%r9
	isync

	addi	%r9,%r2,(cpu_reset_handler-generictrap)

	/* 0x30 */
1:	mtlr	%r9
	blr	/* Branch to either cpu_reset_handler
		 * or cpu_wakeup_handler.
		 */
CNAME(rstcodeend):

cpu_reset_handler:
	GET_TOCBASE(%r2)

	addis	%r1,%r2,TOC_REF(tmpstk)@ha
	ld	%r1,TOC_REF(tmpstk)@l(%r1)	/* get new SP */
	addi	%r1,%r1,(TMPSTKSZ-48)

	bl	CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
	nop
	lis	%r3,1@l
	bl	CNAME(pmap_cpu_bootstrap)	/* Turn on virtual memory */
	nop
	bl	CNAME(cpudep_ap_bootstrap)	/* Set up PCPU and stack */
	nop
	mr	%r1,%r3				/* Use new stack */
	bl	CNAME(cpudep_ap_setup)
	nop
	GET_CPUINFO(%r5)
	ld	%r3,(PC_RESTORE)(%r5)
	cmpldi	%cr0,%r3,0
	beq	%cr0,2f
	nop
	li	%r4,1
	bl	CNAME(longjmp)
	nop
2:
#ifdef SMP
	bl	CNAME(machdep_ap_bootstrap)	/* And away! */
	nop
#endif

	/* Should not be reached */
9:
	b	9b

cpu_wakeup_handler:
	GET_TOCBASE(%r2)

	/* Check for a false wakeup caused by a badly set SRR1 (e.g. by OPAL) */
	addis	%r3,%r2,TOC_REF(can_wakeup)@ha
	ld	%r3,TOC_REF(can_wakeup)@l(%r3)
	ld	%r3,0(%r3)
	cmpdi	%r3,0
	beq	cpu_reset_handler

	/* Turn on MMU after return from interrupt */
	mfsrr1	%r3
	ori	%r3,%r3,(PSL_IR | PSL_DR)
	mtsrr1	%r3

	/* Turn on MMU (needed to access PCB) */
	mfmsr	%r3
	ori	%r3,%r3,(PSL_IR | PSL_DR)
	mtmsr	%r3
	isync

	mfsprg0	%r3

	ld	%r3,PC_CURTHREAD(%r3)	/* Get current thread */
	ld	%r3,TD_PCB(%r3)		/* Get PCB of current thread */
	ld	%r12,PCB_CONTEXT(%r3)	/* Load the non-volatile GP regs. */
	ld	%r13,PCB_CONTEXT+1*8(%r3)
	ld	%r14,PCB_CONTEXT+2*8(%r3)
	ld	%r15,PCB_CONTEXT+3*8(%r3)
	ld	%r16,PCB_CONTEXT+4*8(%r3)
	ld	%r17,PCB_CONTEXT+5*8(%r3)
	ld	%r18,PCB_CONTEXT+6*8(%r3)
	ld	%r19,PCB_CONTEXT+7*8(%r3)
	ld	%r20,PCB_CONTEXT+8*8(%r3)
	ld	%r21,PCB_CONTEXT+9*8(%r3)
	ld	%r22,PCB_CONTEXT+10*8(%r3)
	ld	%r23,PCB_CONTEXT+11*8(%r3)
	ld	%r24,PCB_CONTEXT+12*8(%r3)
	ld	%r25,PCB_CONTEXT+13*8(%r3)
	ld	%r26,PCB_CONTEXT+14*8(%r3)
	ld	%r27,PCB_CONTEXT+15*8(%r3)
	ld	%r28,PCB_CONTEXT+16*8(%r3)
	ld	%r29,PCB_CONTEXT+17*8(%r3)
	ld	%r30,PCB_CONTEXT+18*8(%r3)
	ld	%r31,PCB_CONTEXT+19*8(%r3)
	ld	%r5,PCB_CR(%r3)		/* Load the condition register */
	mtcr	%r5
	ld	%r5,PCB_LR(%r3)		/* Load the link register */
	mtsrr0	%r5
	ld	%r1,PCB_SP(%r3)		/* Load the stack pointer */
	ld	%r2,PCB_TOC(%r3)	/* Load the TOC pointer */

	rfid

/*
 * This code gets copied to all the trap vectors
 * (except ISI/DSI, ALI, and the interrupts). Has to fit in 8 instructions!
 */

	.globl	CNAME(trapcode),CNAME(trapcodeend)
	.p2align 3
CNAME(trapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2	%r1			/* And then in SPRG2 */
	ld	%r1,TRAP_ENTRY(0)
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
CNAME(trapcodeend):
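
/*
 * Editor's worked example (illustration only): generictrap recovers the
 * vector address from the LR value the blrl above leaves behind.  The blrl
 * sits at offset 0x18 of the copied stub, so for a vector at, say, 0x980 the
 * saved LR is 0x99c; generictrap then computes (LR - 4) & (0xff00 | 0xe0) =
 * 0x998 & 0xffe0 = 0x980 and stores that in SPRG3 as the trap type.  The
 * extra 0xe0 in the mask (the "li %r1, 0xe0" above) preserves the low bits
 * needed for vectors that are not aligned to 0x100, such as 0x980.
 */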

/* Same thing for traps setting HSRR0/HSRR1 */
	.globl	CNAME(hypertrapcode),CNAME(hypertrapcodeend)
	.p2align 3
CNAME(hypertrapcode):
	mtsprg1	%r1			/* save SP */
	mflr	%r1			/* Save the old LR in r1 */
	mtsprg2	%r1			/* And then in SPRG2 */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(generichypertrap-generictrap)
	mtlr	%r1
	li	%r1, 0xe0		/* How to get the vector from LR */
	blrl				/* Branch to generichypertrap */
CNAME(hypertrapcodeend):

/*
 * For SLB misses: do special things for the kernel
 *
 * Note: SPRG1 is always safe to overwrite any time the MMU was on, which is
 * the only time this can be called.
 */
	.globl	CNAME(slbtrap),CNAME(slbtrapend)
	.p2align 3
CNAME(slbtrap):
	/* 0x00 */
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r2,(PC_SLBSAVE+16)(%r1)	/* save r2 */
	mfcr	%r2
	/* 0x10 */
	std	%r2,(PC_SLBSAVE+104)(%r1)	/* save CR */
	mfsrr1	%r2			/* test kernel mode */
	mtcr	%r2
	bf	17,2f			/* branch if PSL_PR is false */
	/* 0x20 */
	/* User mode */
	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2			/* restore CR */
	ld	%r2,(PC_SLBSAVE+16)(%r1)	/* restore r2 */
	mflr	%r1
	/* 0x30 */
	mtsprg2	%r1			/* save LR in SPRG2 */
	ld	%r1,TRAP_ENTRY(0)	/* real-mode &generictrap */
	mtlr	%r1
	li	%r1, 0x80		/* How to get the vector from LR */
	/* 0x40 */
	blrl				/* Branch to generictrap */
2:	mflr	%r2			/* Save the old LR in r2 */
	/* Kernel mode */
	ld	%r1,TRAP_GENTRAP(0)	/* Real-mode &generictrap */
	addi	%r1,%r1,(kern_slbtrap-generictrap)
	/* 0x50 */
	mtlr	%r1
	GET_CPUINFO(%r1)
	blrl				/* Branch to kern_slbtrap */
	/* must fit in 128 bytes! */
CNAME(slbtrapend):
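
/*
 * Editor's note (assumption, for illustration): the 128-byte limit above
 * matches the spacing of the segment-fault vectors (data SLB miss at 0x380,
 * instruction SLB miss at 0x480, with the next vector 0x80 bytes later).
 * Going by the 0xNN offset markers, the stub ends near offset 0x5c,
 * comfortably inside that budget.
 */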

/*
 * On entry:
 * SPRG1: SP
 * r1: pcpu
 * r2: LR
 * LR: branch address in trap region
 */
kern_slbtrap:
	std	%r2,(PC_SLBSAVE+136)(%r1)	/* old LR */
	std	%r3,(PC_SLBSAVE+24)(%r1)	/* save R3 */

	/* Check if this needs to be handled as a regular trap (userseg miss) */
	mflr	%r2
	andi.	%r2,%r2,0xff80
	cmpwi	%r2,EXC_DSE
	bne	1f
	mfdar	%r2
	b	2f
1:	mfsrr0	%r2
2:	/* r2 now contains the fault address */
	lis	%r3,SEGMENT_MASK@highesta
	ori	%r3,%r3,SEGMENT_MASK@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,SEGMENT_MASK@ha
	ori	%r3,%r3,SEGMENT_MASK@l
	and	%r2,%r2,%r3		/* R2 = segment base address */
	lis	%r3,USER_ADDR@highesta
	ori	%r3,%r3,USER_ADDR@highera
	sldi	%r3,%r3,32
	oris	%r3,%r3,USER_ADDR@ha
	ori	%r3,%r3,USER_ADDR@l
	cmpd	%r2,%r3			/* Compare fault base to USER_ADDR */
	bne	3f

	/* User seg miss, handle as a regular trap */
	ld	%r2,(PC_SLBSAVE+104)(%r1)	/* Restore CR */
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+16)(%r1)	/* Restore R2,R3 */
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	ld	%r1,(PC_SLBSAVE+136)(%r1)	/* Save the old LR in r1 */
	mtsprg2	%r1			/* And then in SPRG2 */
	li	%r1, 0x80		/* How to get the vector from LR */
	b	generictrap		/* Retain old LR using b */

3:	/* Real kernel SLB miss */
	std	%r0,(PC_SLBSAVE+0)(%r1)	/* free all volatile regs */
	mfsprg1	%r2			/* Old R1 */
	std	%r2,(PC_SLBSAVE+8)(%r1)
	/* R2,R3 already saved */
	std	%r4,(PC_SLBSAVE+32)(%r1)
	std	%r5,(PC_SLBSAVE+40)(%r1)
	std	%r6,(PC_SLBSAVE+48)(%r1)
	std	%r7,(PC_SLBSAVE+56)(%r1)
	std	%r8,(PC_SLBSAVE+64)(%r1)
	std	%r9,(PC_SLBSAVE+72)(%r1)
	std	%r10,(PC_SLBSAVE+80)(%r1)
	std	%r11,(PC_SLBSAVE+88)(%r1)
	std	%r12,(PC_SLBSAVE+96)(%r1)
	/* CR already saved */
	mfxer	%r2			/* save XER */
	std	%r2,(PC_SLBSAVE+112)(%r1)
	mflr	%r2			/* save LR (SP already saved) */
	std	%r2,(PC_SLBSAVE+120)(%r1)
	mfctr	%r2			/* save CTR */
	std	%r2,(PC_SLBSAVE+128)(%r1)

	/* Call handler */
	addi	%r1,%r1,PC_SLBSTACK-48+1024
	li	%r2,~15
	and	%r1,%r1,%r2
	GET_TOCBASE(%r2)
	mflr	%r3
	andi.	%r3,%r3,0xff80
	mfdar	%r4
	mfsrr0	%r5
	bl	handle_kernel_slb_spill
	nop

	/* Save r28-31, restore r4-r12 */
	GET_CPUINFO(%r1)
	ld	%r4,(PC_SLBSAVE+32)(%r1)
	ld	%r5,(PC_SLBSAVE+40)(%r1)
	ld	%r6,(PC_SLBSAVE+48)(%r1)
	ld	%r7,(PC_SLBSAVE+56)(%r1)
	ld	%r8,(PC_SLBSAVE+64)(%r1)
	ld	%r9,(PC_SLBSAVE+72)(%r1)
	ld	%r10,(PC_SLBSAVE+80)(%r1)
	ld	%r11,(PC_SLBSAVE+88)(%r1)
	ld	%r12,(PC_SLBSAVE+96)(%r1)
	std	%r28,(PC_SLBSAVE+64)(%r1)
	std	%r29,(PC_SLBSAVE+72)(%r1)
	std	%r30,(PC_SLBSAVE+80)(%r1)
	std	%r31,(PC_SLBSAVE+88)(%r1)

	/* Restore kernel mapping */
	bl	restore_kernsrs

	/* Restore remaining registers */
	ld	%r28,(PC_SLBSAVE+64)(%r1)
	ld	%r29,(PC_SLBSAVE+72)(%r1)
	ld	%r30,(PC_SLBSAVE+80)(%r1)
	ld	%r31,(PC_SLBSAVE+88)(%r1)

	ld	%r2,(PC_SLBSAVE+104)(%r1)
	mtcr	%r2
	ld	%r2,(PC_SLBSAVE+112)(%r1)
	mtxer	%r2
	ld	%r2,(PC_SLBSAVE+120)(%r1)
	mtlr	%r2
	ld	%r2,(PC_SLBSAVE+128)(%r1)
	mtctr	%r2
	ld	%r2,(PC_SLBSAVE+136)(%r1)
	mtlr	%r2

	/* Restore r0-r3 */
	ld	%r0,(PC_SLBSAVE+0)(%r1)
	ld	%r2,(PC_SLBSAVE+16)(%r1)
	ld	%r3,(PC_SLBSAVE+24)(%r1)
	mfsprg1	%r1

	/* Back to whatever we were doing */
	rfid

/*
 * For ALI: has to save DSISR and DAR
 */
	.globl	CNAME(alitrap),CNAME(aliend)
CNAME(alitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mflr	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	ld	%r31,TRAP_GENTRAP(0)
	addi	%r31,%r31,(s_trap - generictrap)
	mtlr	%r31

	/* Put our exception vector in SPRG3 */
	li	%r31, EXC_ALI
	mtsprg3	%r31

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31
	blrl				/* Branch to s_trap */
CNAME(aliend):

/*
 * Similar to the above for DSI
 * Has to handle standard pagetable spills
 */
	.globl	CNAME(dsitrap),CNAME(dsiend)
	.p2align 3
CNAME(dsitrap):
	mtsprg1	%r1			/* save SP */
	GET_CPUINFO(%r1)
	std	%r27,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	mfcr	%r29			/* save CR */
	mfxer	%r30			/* save XER */
	mtsprg2	%r30			/* in SPRG2 */
	mfsrr1	%r31			/* test kernel mode */
	mtcr	%r31
	mflr	%r28			/* save LR (SP already saved) */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(disitrap-generictrap)
	mtlr	%r1
	blrl				/* Branch to disitrap */
CNAME(dsiend):

/*
 * Preamble code for DSI/ISI traps
 */
disitrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	GET_CPUINFO(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	mfdsisr	%r31
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)

#ifdef KDB
	/* Try to detect a kernel stack overflow */
	mfsrr1	%r31
	mtcr	%r31
	bt	17,realtrap		/* branch if user mode */
	mfsprg1	%r31			/* get old SP */
	clrrdi	%r31,%r31,12		/* Round SP down to nearest page */
	sub.	%r30,%r31,%r30		/* SP - DAR */
	bge	1f
	neg	%r30,%r30		/* modulo value */
1:	cmpldi	%cr0,%r30,4096		/* is DAR within a page of SP? */
	bge	%cr0,realtrap		/* no, too far away. */

	/* Now convert this DSI into a DDB trap. */
	GET_CPUINFO(%r1)
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)	/* get DAR */
	std	%r30,(PC_DBSAVE +CPUSAVE_AIM_DAR)(%r1)	/* save DAR */
	ld	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
	std	%r30,(PC_DBSAVE +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)	/* get r27 */
	std	%r31,(PC_DBSAVE +CPUSAVE_R27)(%r1)	/* save r27 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)	/* get r28 */
	std	%r30,(PC_DBSAVE +CPUSAVE_R28)(%r1)	/* save r28 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)	/* get r29 */
	std	%r31,(PC_DBSAVE +CPUSAVE_R29)(%r1)	/* save r29 */
	ld	%r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)	/* get r30 */
	std	%r30,(PC_DBSAVE +CPUSAVE_R30)(%r1)	/* save r30 */
	ld	%r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)	/* get r31 */
	std	%r31,(PC_DBSAVE +CPUSAVE_R31)(%r1)	/* save r31 */
	b	dbtrap
#endif

/* XXX need stack probe here */
realtrap:
/* Test whether we already had PR set */
	mfsrr1	%r1
	mtcr	%r1
	mfsprg1	%r1			/* restore SP (might have been
					   overwritten) */
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	ld	%r1,PC_CURPCB(%r1)
	b	s_trap

/*
 * generictrap does some standard setup for trap handling to minimize
 * the code that need be installed in the actual vectors. It expects
 * the following conditions.
 *
 * R1 - Trap vector = LR & (0xff00 | R1)
 * SPRG1 - Original R1 contents
 * SPRG2 - Original LR
 */

generichypertrap:
	mtsprg3	%r1
	mfspr	%r1, SPR_HSRR0
	mtsrr0	%r1
	mfspr	%r1, SPR_HSRR1
	mtsrr1	%r1
	mfsprg3	%r1
	.globl	CNAME(generictrap)
generictrap:
	/* Save R1 for computing the exception vector */
	mtsprg3	%r1

	/* Save interesting registers */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)	/* free r27-r31 */
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mfdar	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
	mfdsisr	%r30
	std	%r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
	mfsprg1	%r1			/* restore SP, in case of branch */
	mfsprg2	%r28			/* save LR */
	mfcr	%r29			/* save CR */

	/* Compute the exception vector from the link register */
	mfsprg3	%r31
	ori	%r31,%r31,0xff00
	mflr	%r30
	addi	%r30,%r30,-4		/* The branch instruction, not the next */
	and	%r30,%r30,%r31
	mtsprg3	%r30

	/* Test whether we already had PR set */
	mfsrr1	%r31
	mtcr	%r31

s_trap:
	bf	17,k_trap		/* branch if PSL_PR is false */
	GET_CPUINFO(%r1)
u_trap:
	mr	%r27,%r28		/* Save LR, r29 */
	mtsprg2	%r29
	bl	restore_kernsrs		/* enable kernel mapping */
	mfsprg2	%r29
	mr	%r28,%r27
	ld	%r1,PC_CURPCB(%r1)

/*
 * Now the common trap catching code.
 */
k_trap:
	FRAME_SETUP(PC_TEMPSAVE)
	/* Call C interrupt dispatcher: */
trapagain:
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(powerpc_interrupt)
	nop

	.globl	CNAME(trapexit)		/* backtrace code sentinel */
CNAME(trapexit):
	/* Disable interrupts: */
	mfmsr	%r3
	andi.	%r3,%r3,~PSL_EE@l
	mtmsr	%r3
	isync
	/* Test AST pending: */
	ld	%r5,FRAME_SRR1+48(%r1)
	mtcr	%r5
	bf	17,1f			/* branch if PSL_PR is false */

	GET_CPUINFO(%r3)		/* get per-CPU pointer */
	lwz	%r4,TD_AST(%r13)	/* get thread ast value */
	cmpwi	%r4,0
	beq	1f
	mfmsr	%r3			/* re-enable interrupts */
	ori	%r3,%r3,PSL_EE@l
	mtmsr	%r3
	isync
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(ast)
	nop
	.globl	CNAME(asttrapexit)	/* backtrace code sentinel #2 */
CNAME(asttrapexit):
	b	trapexit		/* test ast ret value ? */
1:
	FRAME_LEAVE(PC_TEMPSAVE)
	rfid

#if defined(KDB)
/*
 * Deliberate entry to dbtrap
 */
ASENTRY_NOPROF(breakpoint)
	mtsprg1	%r1
	mfmsr	%r3
	mtsrr1	%r3
	andi.	%r3,%r3,~(PSL_EE|PSL_ME)@l
	mtmsr	%r3			/* disable interrupts */
	isync
	GET_CPUINFO(%r3)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
	std	%r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
	mflr	%r28
	li	%r29,EXC_BPT
	mtlr	%r29
	mfcr	%r29
	mtsrr0	%r28

/*
 * Now the kdb trap catching code.
 */
dbtrap:
	/* Write the trap vector to SPRG3 by computing LR & 0xff00 */
	mflr	%r1
	andi.	%r1,%r1,0xff00
	mtsprg3	%r1

	GET_TOCBASE(%r1)		/* get new SP */
	addis	%r1,%r1,TOC_REF(trapstk)@ha
	ld	%r1,TOC_REF(trapstk)@l(%r1)
	addi	%r1,%r1,(TRAPSTKSZ-48)

	FRAME_SETUP(PC_DBSAVE)
	/* Call C trap code: */
	GET_TOCBASE(%r2)
	addi	%r3,%r1,48
	bl	CNAME(db_trap_glue)
	nop
	or.	%r3,%r3,%r3
	bne	dbleave
	/* This wasn't for KDB, so switch to real trap: */
	ld	%r3,FRAME_EXC+48(%r1)	/* save exception */
	GET_CPUINFO(%r4)
	std	%r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
	FRAME_LEAVE(PC_DBSAVE)
	mtsprg1	%r1			/* prepare for entrance to realtrap */
	GET_CPUINFO(%r1)
	std	%r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
	std	%r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
	std	%r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
	std	%r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
	std	%r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
	mflr	%r28
	mfcr	%r29
	ld	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
	mtsprg3	%r31			/* SPRG3 was clobbered by FRAME_LEAVE */
	mfsprg1	%r1
	b	realtrap
dbleave:
	FRAME_LEAVE(PC_DBSAVE)
	rfid
ASEND(breakpoint)

/*
 * In case of KDB we want a separate trap catcher for it
 */
	.globl	CNAME(dblow),CNAME(dbend)
	.p2align 3
CNAME(dblow):
	mtsprg1	%r1			/* save SP */
	mtsprg2	%r29			/* save r29 */
	mfcr	%r29			/* save CR in r29 */
	mfsrr1	%r1
	mtcr	%r1
	bf	17,1f			/* branch if privileged */

	/* Unprivileged case */
	mtcr	%r29			/* put the condition register back */
	mfsprg2	%r29			/* ... and r29 */
	mflr	%r1			/* save LR */
	mtsprg2	%r1			/* And then in SPRG2 */

	ld	%r1, TRAP_ENTRY(0)	/* Get branch address */
	mtlr	%r1
	li	%r1, 0			/* How to get the vector from LR */
	blrl				/* Branch to generictrap */
	/* No fallthrough */
1:
	GET_CPUINFO(%r1)
	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
	mfsprg2	%r28				/* r29 holds cr... */
	std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
	mflr	%r28			/* save LR */
	ld	%r1,TRAP_GENTRAP(0)
	addi	%r1,%r1,(dbtrap-generictrap)
	mtlr	%r1
	blrl				/* Branch to dbtrap */
CNAME(dbend):
#endif /* KDB */