/*
 * Web-scraped capture of: awilliam/linux-vfio,
 * blob/master/arch/powerpc/kernel/exceptions-64s.S
 *
 * NOTE: the bare numeric lines interleaved throughout this capture are
 * the web viewer's own line numbers, not part of the original source.
 */
1
/*
2
* This file contains the 64-bit "server" PowerPC variant
3
* of the low level exception handling including exception
4
* vectors, exception return, part of the slb and stab
5
* handling and other fixed offset specific things.
6
*
7
* This file is meant to be #included from head_64.S due to
8
* position dependent assembly.
9
*
10
* Most of this originates from head_64.S and thus has the same
11
* copyright history.
12
*
13
*/
14
15
#include <asm/exception-64s.h>
16
#include <asm/ptrace.h>
17
18
/*
19
* We layout physical memory as follows:
20
* 0x0000 - 0x00ff : Secondary processor spin code
21
* 0x0100 - 0x2fff : pSeries Interrupt prologs
22
* 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
23
* 0x6000 - 0x6fff : Initial (CPU0) segment table
24
* 0x7000 - 0x7fff : FWNMI data area
25
* 0x8000 - : Early init and support code
26
*/
27
28
/*
29
* This is the start of the interrupt handlers for pSeries
30
* This code runs with relocation off.
31
* Code from here to __end_interrupts gets copied down to real
32
* address 0x100 when we are running a relocatable kernel.
33
* Therefore any relative branches in this section must only
34
* branch to labels in this section.
35
*/
36
. = 0x100
37
.globl __start_interrupts
38
__start_interrupts:
39
40
.globl system_reset_pSeries;
41
system_reset_pSeries:
42
HMT_MEDIUM;
43
DO_KVM 0x100;
44
SET_SCRATCH0(r13)
45
#ifdef CONFIG_PPC_P7_NAP
46
BEGIN_FTR_SECTION
47
/* Running native on arch 2.06 or later, check if we are
48
* waking up from nap. We only handle no state loss and
49
* supervisor state loss. We do -not- handle hypervisor
50
* state loss at this time.
51
*/
52
mfspr r13,SPRN_SRR1
53
rlwinm r13,r13,47-31,30,31
54
cmpwi cr0,r13,1
55
bne 1f
56
b .power7_wakeup_noloss
57
1: cmpwi cr0,r13,2
58
bne 1f
59
b .power7_wakeup_loss
60
/* Total loss of HV state is fatal, we could try to use the
61
* PIR to locate a PACA, then use an emergency stack etc...
62
* but for now, let's just stay stuck here
63
*/
64
1: cmpwi cr0,r13,3
65
beq .
66
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206)
67
#endif /* CONFIG_PPC_P7_NAP */
68
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
69
70
. = 0x200
71
_machine_check_pSeries:
72
HMT_MEDIUM
73
DO_KVM 0x200
74
SET_SCRATCH0(r13)
75
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
76
77
. = 0x300
78
.globl data_access_pSeries
79
data_access_pSeries:
80
HMT_MEDIUM
81
DO_KVM 0x300
82
SET_SCRATCH0(r13)
83
BEGIN_FTR_SECTION
84
GET_PACA(r13)
85
std r9,PACA_EXSLB+EX_R9(r13)
86
std r10,PACA_EXSLB+EX_R10(r13)
87
mfspr r10,SPRN_DAR
88
mfspr r9,SPRN_DSISR
89
srdi r10,r10,60
90
rlwimi r10,r9,16,0x20
91
mfcr r9
92
cmpwi r10,0x2c
93
beq do_stab_bolted_pSeries
94
ld r10,PACA_EXSLB+EX_R10(r13)
95
std r11,PACA_EXGEN+EX_R11(r13)
96
ld r11,PACA_EXSLB+EX_R9(r13)
97
std r12,PACA_EXGEN+EX_R12(r13)
98
GET_SCRATCH0(r12)
99
std r10,PACA_EXGEN+EX_R10(r13)
100
std r11,PACA_EXGEN+EX_R9(r13)
101
std r12,PACA_EXGEN+EX_R13(r13)
102
EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
103
FTR_SECTION_ELSE
104
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD)
105
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
106
107
. = 0x380
108
.globl data_access_slb_pSeries
109
data_access_slb_pSeries:
110
HMT_MEDIUM
111
DO_KVM 0x380
112
SET_SCRATCH0(r13)
113
GET_PACA(r13)
114
std r3,PACA_EXSLB+EX_R3(r13)
115
mfspr r3,SPRN_DAR
116
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
117
mfcr r9
118
#ifdef __DISABLED__
119
/* Keep that around for when we re-implement dynamic VSIDs */
120
cmpdi r3,0
121
bge slb_miss_user_pseries
122
#endif /* __DISABLED__ */
123
std r10,PACA_EXSLB+EX_R10(r13)
124
std r11,PACA_EXSLB+EX_R11(r13)
125
std r12,PACA_EXSLB+EX_R12(r13)
126
GET_SCRATCH0(r10)
127
std r10,PACA_EXSLB+EX_R13(r13)
128
mfspr r12,SPRN_SRR1 /* and SRR1 */
129
#ifndef CONFIG_RELOCATABLE
130
b .slb_miss_realmode
131
#else
132
/*
133
* We can't just use a direct branch to .slb_miss_realmode
134
* because the distance from here to there depends on where
135
* the kernel ends up being put.
136
*/
137
mfctr r11
138
ld r10,PACAKBASE(r13)
139
LOAD_HANDLER(r10, .slb_miss_realmode)
140
mtctr r10
141
bctr
142
#endif
143
144
STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
145
146
. = 0x480
147
.globl instruction_access_slb_pSeries
148
instruction_access_slb_pSeries:
149
HMT_MEDIUM
150
DO_KVM 0x480
151
SET_SCRATCH0(r13)
152
GET_PACA(r13)
153
std r3,PACA_EXSLB+EX_R3(r13)
154
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
155
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
156
mfcr r9
157
#ifdef __DISABLED__
158
/* Keep that around for when we re-implement dynamic VSIDs */
159
cmpdi r3,0
160
bge slb_miss_user_pseries
161
#endif /* __DISABLED__ */
162
std r10,PACA_EXSLB+EX_R10(r13)
163
std r11,PACA_EXSLB+EX_R11(r13)
164
std r12,PACA_EXSLB+EX_R12(r13)
165
GET_SCRATCH0(r10)
166
std r10,PACA_EXSLB+EX_R13(r13)
167
mfspr r12,SPRN_SRR1 /* and SRR1 */
168
#ifndef CONFIG_RELOCATABLE
169
b .slb_miss_realmode
170
#else
171
mfctr r11
172
ld r10,PACAKBASE(r13)
173
LOAD_HANDLER(r10, .slb_miss_realmode)
174
mtctr r10
175
bctr
176
#endif
177
178
/* We open code these as we can't have a ". = x" (even with
179
* x = "." within a feature section
180
*/
181
. = 0x500;
182
.globl hardware_interrupt_pSeries;
183
.globl hardware_interrupt_hv;
184
hardware_interrupt_pSeries:
185
hardware_interrupt_hv:
186
BEGIN_FTR_SECTION
187
_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD)
188
FTR_SECTION_ELSE
189
_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV)
190
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
191
192
STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
193
STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
194
STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
195
196
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
197
MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer)
198
199
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
200
STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
201
202
. = 0xc00
203
.globl system_call_pSeries
204
system_call_pSeries:
205
HMT_MEDIUM
206
DO_KVM 0xc00
207
BEGIN_FTR_SECTION
208
cmpdi r0,0x1ebe
209
beq- 1f
210
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
211
mr r9,r13
212
GET_PACA(r13)
213
mfspr r11,SPRN_SRR0
214
mfspr r12,SPRN_SRR1
215
ld r10,PACAKBASE(r13)
216
LOAD_HANDLER(r10, system_call_entry)
217
mtspr SPRN_SRR0,r10
218
ld r10,PACAKMSR(r13)
219
mtspr SPRN_SRR1,r10
220
rfid
221
b . /* prevent speculative execution */
222
223
/* Fast LE/BE switch system call */
224
1: mfspr r12,SPRN_SRR1
225
xori r12,r12,MSR_LE
226
mtspr SPRN_SRR1,r12
227
rfid /* return to userspace */
228
b .
229
230
STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
231
232
/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
233
* out of line to handle them
234
*/
235
. = 0xe00
236
b h_data_storage_hv
237
. = 0xe20
238
b h_instr_storage_hv
239
. = 0xe40
240
b emulation_assist_hv
241
. = 0xe50
242
b hmi_exception_hv
243
. = 0xe60
244
b hmi_exception_hv
245
246
/* We need to deal with the Altivec unavailable exception
247
* here which is at 0xf20, thus in the middle of the
248
* prolog code of the PerformanceMonitor one. A little
249
* trickery is thus necessary
250
*/
251
performance_monitor_pSeries_1:
252
. = 0xf00
253
b performance_monitor_pSeries
254
255
altivec_unavailable_pSeries_1:
256
. = 0xf20
257
b altivec_unavailable_pSeries
258
259
vsx_unavailable_pSeries_1:
260
. = 0xf40
261
b vsx_unavailable_pSeries
262
263
#ifdef CONFIG_CBE_RAS
264
STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
265
#endif /* CONFIG_CBE_RAS */
266
STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
267
#ifdef CONFIG_CBE_RAS
268
STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
269
#endif /* CONFIG_CBE_RAS */
270
STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
271
#ifdef CONFIG_CBE_RAS
272
STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
273
#endif /* CONFIG_CBE_RAS */
274
275
. = 0x3000
276
277
/*** Out of line interrupts support ***/
278
279
/* moved from 0xe00 */
280
STD_EXCEPTION_HV(., 0xe00, h_data_storage)
281
STD_EXCEPTION_HV(., 0xe20, h_instr_storage)
282
STD_EXCEPTION_HV(., 0xe40, emulation_assist)
283
STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */
284
285
/* moved from 0xf00 */
286
STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
287
STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
288
STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
289
290
/*
291
* An interrupt came in while soft-disabled; clear EE in SRR1,
292
* clear paca->hard_enabled and return.
293
*/
294
/*
 * An interrupt arrived while soft-disabled (see the block comment in
 * the file above): note that hard interrupts are now off, restore the
 * registers the exception prolog stashed in paca->exgen, clear MSR_EE
 * in the saved SRR1 and rfid back -- effectively dropping the
 * interrupt until it is re-enabled and replayed.
 * On entry: r13 = PACA, r9 = saved CR, original r9/r10 in paca->exgen,
 * original r13 in SPRG scratch.
 */
masked_interrupt:
295
stb r10,PACAHARDIRQEN(r13) /* NOTE(review): assumes r10 == 0 on this path (clears hard_enabled) -- confirm in masking prolog */
296
mtcrf 0x80,r9 /* restore CR0 field from saved CR */
297
ld r9,PACA_EXGEN+EX_R9(r13)
298
mfspr r10,SPRN_SRR1
299
rldicl r10,r10,48,1 /* clear MSR_EE */
300
rotldi r10,r10,16 /* rotate MSR bits back into place */
301
mtspr SPRN_SRR1,r10
302
ld r10,PACA_EXGEN+EX_R10(r13)
303
GET_SCRATCH0(r13) /* recover the interrupted context's r13 */
304
rfid
305
b . /* prevent speculative execution past rfid */
306
307
/*
 * Hypervisor-level twin of masked_interrupt: identical sequence, but
 * for exceptions delivered through the HSRR pair, so it edits HSRR1
 * and returns with hrfid instead of SRR1/rfid.
 */
masked_Hinterrupt:
308
stb r10,PACAHARDIRQEN(r13) /* NOTE(review): assumes r10 == 0 on this path -- confirm in masking prolog */
309
mtcrf 0x80,r9 /* restore CR0 field from saved CR */
310
ld r9,PACA_EXGEN+EX_R9(r13)
311
mfspr r10,SPRN_HSRR1
312
rldicl r10,r10,48,1 /* clear MSR_EE */
313
rotldi r10,r10,16 /* rotate MSR bits back into place */
314
mtspr SPRN_HSRR1,r10
315
ld r10,PACA_EXGEN+EX_R10(r13)
316
GET_SCRATCH0(r13) /* recover the interrupted context's r13 */
317
hrfid
318
b . /* prevent speculative execution past hrfid */
319
320
.align 7
321
/*
 * Tail of the 0x300 fast path, taken when data_access_pSeries decided
 * (cmpwi r10,0x2c) that the fault is a bolted segment-table miss:
 * finish saving r11/r12 and the original r13 into paca->exslb (r9/r10
 * were already saved there by the caller), then enter the common
 * prolog targeting .do_stab_bolted.
 */
do_stab_bolted_pSeries:
322
std r11,PACA_EXSLB+EX_R11(r13)
323
std r12,PACA_EXSLB+EX_R12(r13)
324
GET_SCRATCH0(r10) /* original r13 was parked in scratch */
325
std r10,PACA_EXSLB+EX_R13(r13)
326
EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
327
328
#ifdef CONFIG_PPC_PSERIES
329
/*
330
* Vectors for the FWNMI option. Share common code.
331
*/
332
.globl system_reset_fwnmi
333
.align 7
334
system_reset_fwnmi:
335
HMT_MEDIUM
336
SET_SCRATCH0(r13) /* save r13 */
337
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD)
338
339
.globl machine_check_fwnmi
340
.align 7
341
machine_check_fwnmi:
342
HMT_MEDIUM
343
SET_SCRATCH0(r13) /* save r13 */
344
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD)
345
346
#endif /* CONFIG_PPC_PSERIES */
347
348
#ifdef __DISABLED__
349
/*
350
* This is used for when the SLB miss handler has to go virtual,
351
* which doesn't happen for now anymore but will once we re-implement
352
* dynamic VSIDs for shared page tables
353
*/
354
slb_miss_user_pseries:
355
std r10,PACA_EXGEN+EX_R10(r13)
356
std r11,PACA_EXGEN+EX_R11(r13)
357
std r12,PACA_EXGEN+EX_R12(r13)
358
GET_SCRATCH0(r10)
359
ld r11,PACA_EXSLB+EX_R9(r13)
360
ld r12,PACA_EXSLB+EX_R3(r13)
361
std r10,PACA_EXGEN+EX_R13(r13)
362
std r11,PACA_EXGEN+EX_R9(r13)
363
std r12,PACA_EXGEN+EX_R3(r13)
364
clrrdi r12,r13,32
365
mfmsr r10
366
mfspr r11,SRR0 /* save SRR0 */
367
ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
368
ori r10,r10,MSR_IR|MSR_DR|MSR_RI
369
mtspr SRR0,r12
370
mfspr r12,SRR1 /* and SRR1 */
371
mtspr SRR1,r10
372
rfid
373
b . /* prevent spec. execution */
374
#endif /* __DISABLED__ */
375
376
/* KVM's trampoline code needs to be close to the interrupt handlers */
377
378
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
379
#include "../kvm/book3s_rmhandlers.S"
380
#endif
381
382
.align 7
383
.globl __end_interrupts
384
__end_interrupts:
385
386
/*
387
* Code from here down to __end_handlers is invoked from the
388
* exception prologs above. Because the prologs assemble the
389
* addresses of these handlers using the LOAD_HANDLER macro,
390
* which uses an addi instruction, these handlers must be in
391
* the first 32k of the kernel image.
392
*/
393
394
/*** Common interrupt handlers ***/
395
396
STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
397
398
/*
399
* Machine check is different because we use a different
400
* save area: PACA_EXMC instead of PACA_EXGEN.
401
*/
402
.align 7
403
.globl machine_check_common
404
/*
 * Common machine-check (0x200) handler: build the exception frame from
 * the PACA_EXMC save area (not EXGEN -- see the comment above), leave
 * any nap state, disable interrupts, then call the C
 * machine_check_exception() with r3 = pt_regs.
 */
machine_check_common:
405
EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
406
FINISH_NAP
407
DISABLE_INTS
408
bl .save_nvgprs
409
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
410
bl .machine_check_exception
411
b .ret_from_except
412
413
STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
414
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
415
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
416
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
417
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
418
STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
419
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
420
STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
421
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
422
#ifdef CONFIG_ALTIVEC
423
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
424
#else
425
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
426
#endif
427
#ifdef CONFIG_CBE_RAS
428
STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
429
STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
430
STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
431
#endif /* CONFIG_CBE_RAS */
432
433
.align 7
434
system_call_entry:
435
b system_call_common
436
437
/*
438
* Here we have detected that the kernel stack pointer is bad.
439
* R9 contains the saved CR, r13 points to the paca,
440
* r10 contains the (bad) kernel stack pointer,
441
* r11 and r12 contain the saved SRR0 and SRR1.
442
* We switch to using an emergency stack, save the registers there,
443
* and call kernel_bad_stack(), which panics.
444
*/
445
bad_stack:
446
ld r1,PACAEMERGSP(r13)
447
subi r1,r1,64+INT_FRAME_SIZE
448
std r9,_CCR(r1)
449
std r10,GPR1(r1)
450
std r11,_NIP(r1)
451
std r12,_MSR(r1)
452
mfspr r11,SPRN_DAR
453
mfspr r12,SPRN_DSISR
454
std r11,_DAR(r1)
455
std r12,_DSISR(r1)
456
mflr r10
457
mfctr r11
458
mfxer r12
459
std r10,_LINK(r1)
460
std r11,_CTR(r1)
461
std r12,_XER(r1)
462
SAVE_GPR(0,r1)
463
SAVE_GPR(2,r1)
464
ld r10,EX_R3(r3)
465
std r10,GPR3(r1)
466
SAVE_GPR(4,r1)
467
SAVE_4GPRS(5,r1)
468
ld r9,EX_R9(r3)
469
ld r10,EX_R10(r3)
470
SAVE_2GPRS(9,r1)
471
ld r9,EX_R11(r3)
472
ld r10,EX_R12(r3)
473
ld r11,EX_R13(r3)
474
std r9,GPR11(r1)
475
std r10,GPR12(r1)
476
std r11,GPR13(r1)
477
BEGIN_FTR_SECTION
478
ld r10,EX_CFAR(r3)
479
std r10,ORIG_GPR3(r1)
480
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
481
SAVE_8GPRS(14,r1)
482
SAVE_10GPRS(22,r1)
483
lhz r12,PACA_TRAP_SAVE(r13)
484
std r12,_TRAP(r1)
485
addi r11,r1,INT_FRAME_SIZE
486
std r11,0(r1)
487
li r12,0
488
std r12,0(r11)
489
ld r2,PACATOC(r13)
490
ld r11,exception_marker@toc(r2)
491
std r12,RESULT(r1)
492
std r11,STACK_FRAME_OVERHEAD-16(r1)
493
1: addi r3,r1,STACK_FRAME_OVERHEAD
494
bl .kernel_bad_stack
495
b 1b
496
497
/*
498
* Here r13 points to the paca, r9 contains the saved CR,
499
* SRR0 and SRR1 are saved in r11 and r12,
500
* r9 - r13 are saved in paca->exgen.
501
*/
502
.align 7
503
.globl data_access_common
504
/*
 * Common 0x300 (data storage) handler. DAR/DSISR are read first and
 * parked in paca->exgen (presumably so the common prolog cannot
 * clobber them -- confirm against EXCEPTION_PROLOG_COMMON), then
 * r3/r4/r5 are loaded as the arguments for .do_hash_page: faulting
 * address, DSISR and trap number.
 * On entry (see file comment above): r13 = PACA, r9 = saved CR,
 * r11/r12 = saved SRR0/SRR1, r9-r13 saved in paca->exgen.
 */
data_access_common:
505
mfspr r10,SPRN_DAR
506
std r10,PACA_EXGEN+EX_DAR(r13)
507
mfspr r10,SPRN_DSISR
508
stw r10,PACA_EXGEN+EX_DSISR(r13)
509
EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
510
ld r3,PACA_EXGEN+EX_DAR(r13) /* r3 = faulting address */
511
lwz r4,PACA_EXGEN+EX_DSISR(r13) /* r4 = DSISR */
512
li r5,0x300 /* r5 = trap number */
513
b .do_hash_page /* Try to handle as hpte fault */
514
515
.align 7
516
.globl h_data_storage_common
517
/*
 * Hypervisor data storage (0xe00) common handler: capture HDAR/HDSISR
 * into the EXGEN save area, build the frame, then report through
 * unknown_exception -- no real handling is implemented here.
 */
h_data_storage_common:
518
mfspr r10,SPRN_HDAR
519
std r10,PACA_EXGEN+EX_DAR(r13)
520
mfspr r10,SPRN_HDSISR
521
stw r10,PACA_EXGEN+EX_DSISR(r13)
522
EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
523
bl .save_nvgprs
524
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
525
bl .unknown_exception
526
b .ret_from_except
527
528
.align 7
529
.globl instruction_access_common
530
/*
 * Common 0x400 (instruction storage) handler: for an ISI the faulting
 * address is the interrupted NIP, and the fault-cause bits live in the
 * saved SRR1 (r12); they are masked into r4 to stand in for a DSISR
 * when .do_hash_page is invoked.
 */
instruction_access_common:
531
EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
532
ld r3,_NIP(r1) /* r3 = faulting address (saved NIP) */
533
andis. r4,r12,0x5820 /* ISI status bits from saved SRR1 -- TODO confirm mask against ISA */
534
li r5,0x400 /* r5 = trap number */
535
b .do_hash_page /* Try to handle as hpte fault */
536
537
STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception)
538
539
/*
540
* Here is the common SLB miss user that is used when going to virtual
541
* mode for SLB misses, that is currently not used
542
*/
543
#ifdef __DISABLED__
544
.align 7
545
.globl slb_miss_user_common
546
slb_miss_user_common:
547
mflr r10
548
std r3,PACA_EXGEN+EX_DAR(r13)
549
stw r9,PACA_EXGEN+EX_CCR(r13)
550
std r10,PACA_EXGEN+EX_LR(r13)
551
std r11,PACA_EXGEN+EX_SRR0(r13)
552
bl .slb_allocate_user
553
554
ld r10,PACA_EXGEN+EX_LR(r13)
555
ld r3,PACA_EXGEN+EX_R3(r13)
556
lwz r9,PACA_EXGEN+EX_CCR(r13)
557
ld r11,PACA_EXGEN+EX_SRR0(r13)
558
mtlr r10
559
beq- slb_miss_fault
560
561
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
562
beq- unrecov_user_slb
563
mfmsr r10
564
565
.machine push
566
.machine "power4"
567
mtcrf 0x80,r9
568
.machine pop
569
570
clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
571
mtmsrd r10,1
572
573
mtspr SRR0,r11
574
mtspr SRR1,r12
575
576
ld r9,PACA_EXGEN+EX_R9(r13)
577
ld r10,PACA_EXGEN+EX_R10(r13)
578
ld r11,PACA_EXGEN+EX_R11(r13)
579
ld r12,PACA_EXGEN+EX_R12(r13)
580
ld r13,PACA_EXGEN+EX_R13(r13)
581
rfid
582
b .
583
584
slb_miss_fault:
585
EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
586
ld r4,PACA_EXGEN+EX_DAR(r13)
587
li r5,0
588
std r4,_DAR(r1)
589
std r5,_DSISR(r1)
590
b handle_page_fault
591
592
unrecov_user_slb:
593
EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
594
DISABLE_INTS
595
bl .save_nvgprs
596
1: addi r3,r1,STACK_FRAME_OVERHEAD
597
bl .unrecoverable_exception
598
b 1b
599
600
#endif /* __DISABLED__ */
601
602
603
/*
604
* r13 points to the PACA, r9 contains the saved CR,
605
* r12 contain the saved SRR1, SRR0 is still ready for return
606
* r3 has the faulting address
607
* r9 - r13 are saved in paca->exslb.
608
* r3 is saved in paca->slb_r3
609
* We assume we aren't going to take any exceptions during this procedure.
610
*/
611
_GLOBAL(slb_miss_realmode)
612
mflr r10
613
#ifdef CONFIG_RELOCATABLE
614
mtctr r11
615
#endif
616
617
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
618
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
619
620
bl .slb_allocate_realmode
621
622
/* All done -- return from exception. */
623
624
ld r10,PACA_EXSLB+EX_LR(r13)
625
ld r3,PACA_EXSLB+EX_R3(r13)
626
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
627
#ifdef CONFIG_PPC_ISERIES
628
BEGIN_FW_FTR_SECTION
629
ld r11,PACALPPACAPTR(r13)
630
ld r11,LPPACASRR0(r11) /* get SRR0 value */
631
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
632
#endif /* CONFIG_PPC_ISERIES */
633
634
mtlr r10
635
636
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
637
beq- 2f
638
639
.machine push
640
.machine "power4"
641
mtcrf 0x80,r9
642
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
643
.machine pop
644
645
#ifdef CONFIG_PPC_ISERIES
646
BEGIN_FW_FTR_SECTION
647
mtspr SPRN_SRR0,r11
648
mtspr SPRN_SRR1,r12
649
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
650
#endif /* CONFIG_PPC_ISERIES */
651
ld r9,PACA_EXSLB+EX_R9(r13)
652
ld r10,PACA_EXSLB+EX_R10(r13)
653
ld r11,PACA_EXSLB+EX_R11(r13)
654
ld r12,PACA_EXSLB+EX_R12(r13)
655
ld r13,PACA_EXSLB+EX_R13(r13)
656
rfid
657
b . /* prevent speculative execution */
658
659
2:
660
#ifdef CONFIG_PPC_ISERIES
661
BEGIN_FW_FTR_SECTION
662
b unrecov_slb
663
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
664
#endif /* CONFIG_PPC_ISERIES */
665
mfspr r11,SPRN_SRR0
666
ld r10,PACAKBASE(r13)
667
LOAD_HANDLER(r10,unrecov_slb)
668
mtspr SPRN_SRR0,r10
669
ld r10,PACAKMSR(r13)
670
mtspr SPRN_SRR1,r10
671
rfid
672
b .
673
674
/*
 * Unrecoverable SLB miss (taken when MSR_RI was found clear): build a
 * frame from the EXSLB save area with software trap number 0x4100,
 * disable interrupts, then report and spin -- .unrecoverable_exception
 * is not expected to return.
 */
unrecov_slb:
675
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
676
DISABLE_INTS
677
bl .save_nvgprs
678
1: addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
679
bl .unrecoverable_exception
680
b 1b /* loop forever should it return */
681
682
.align 7
683
.globl hardware_interrupt_common
684
.globl hardware_interrupt_entry
685
/*
 * Common external-interrupt (0x500) handler.
 * hardware_interrupt_entry is a secondary entry point that skips the
 * prolog and FINISH_NAP -- callers elsewhere presumably enter it with
 * the exception frame already built (confirm at call sites).
 * Interrupts stay disabled; on CPUs with CPU_FTR_CTRL the runlatch is
 * turned on first, then .do_IRQ runs with r3 = pt_regs.
 */
hardware_interrupt_common:
686
EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
687
FINISH_NAP
688
hardware_interrupt_entry:
689
DISABLE_INTS
690
BEGIN_FTR_SECTION
691
bl .ppc64_runlatch_on /* only on CPUs with CPU_FTR_CTRL */
692
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
693
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
694
bl .do_IRQ
695
b .ret_from_except_lite
696
697
#ifdef CONFIG_PPC_970_NAP
698
/*
 * POWER4-era nap fixup: clear the flag bits in r10 from the
 * thread-info local-flags word (r9, stored back via r11 = thread_info)
 * and overwrite the saved NIP with the saved LR, so that returning
 * from the exception makes the idle task act as if it had executed a
 * blr out of its nap loop.
 * NOTE(review): r9/r10/r11 are assumed set up by the caller -- confirm.
 */
power4_fixup_nap:
699
andc r9,r9,r10 /* r9 &= ~r10 : clear the nap flag(s) */
700
std r9,TI_LOCAL_FLAGS(r11)
701
ld r10,_LINK(r1) /* make idle task do the */
702
std r10,_NIP(r1) /* equivalent of a blr */
703
blr
704
#endif
705
706
.align 7
707
.globl alignment_common
708
/*
 * Common 0x600 (alignment) handler: capture DAR/DSISR before the
 * prolog, build the frame, record both in pt_regs, then call the C
 * alignment_exception() with interrupts enabled.
 */
alignment_common:
709
mfspr r10,SPRN_DAR
710
std r10,PACA_EXGEN+EX_DAR(r13)
711
mfspr r10,SPRN_DSISR
712
stw r10,PACA_EXGEN+EX_DSISR(r13)
713
EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
714
ld r3,PACA_EXGEN+EX_DAR(r13)
715
lwz r4,PACA_EXGEN+EX_DSISR(r13)
716
std r3,_DAR(r1) /* stash DAR/DSISR in the exception frame */
717
std r4,_DSISR(r1)
718
bl .save_nvgprs
719
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
720
ENABLE_INTS
721
bl .alignment_exception
722
b .ret_from_except
723
724
.align 7
725
.globl program_check_common
726
/*
 * Common 0x700 (program check) handler: build the frame, save the
 * non-volatile GPRs, then call the C program_check_exception() with
 * interrupts enabled.
 */
program_check_common:
727
EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
728
bl .save_nvgprs
729
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
730
ENABLE_INTS
731
bl .program_check_exception
732
b .ret_from_except
733
734
.align 7
735
.globl fp_unavailable_common
736
/*
 * Common 0x800 (FP unavailable) handler. When the fault came from user
 * context (the bne presumably tests a user/kernel CR bit left by the
 * prolog -- confirm in EXCEPTION_PROLOG_COMMON) the FP state is simply
 * loaded and we return via the fast path. FP use inside the kernel is
 * a bug: it is reported and then trapped with BUG_OPCODE.
 */
fp_unavailable_common:
737
EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
738
bne 1f /* if from user, just load it up */
739
bl .save_nvgprs
740
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
741
ENABLE_INTS
742
bl .kernel_fp_unavailable_exception
743
BUG_OPCODE /* not expected to get here */
744
1: bl .load_up_fpu
745
b fast_exception_return
746
747
.align 7
748
.globl altivec_unavailable_common
749
altivec_unavailable_common:
750
EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
751
#ifdef CONFIG_ALTIVEC
752
BEGIN_FTR_SECTION
753
beq 1f
754
bl .load_up_altivec
755
b fast_exception_return
756
1:
757
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
758
#endif
759
bl .save_nvgprs
760
addi r3,r1,STACK_FRAME_OVERHEAD
761
ENABLE_INTS
762
bl .altivec_unavailable_exception
763
b .ret_from_except
764
765
.align 7
766
.globl vsx_unavailable_common
767
vsx_unavailable_common:
768
EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
769
#ifdef CONFIG_VSX
770
BEGIN_FTR_SECTION
771
bne .load_up_vsx
772
1:
773
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
774
#endif
775
bl .save_nvgprs
776
addi r3,r1,STACK_FRAME_OVERHEAD
777
ENABLE_INTS
778
bl .vsx_unavailable_exception
779
b .ret_from_except
780
781
.align 7
782
.globl __end_handlers
783
__end_handlers:
784
785
/*
786
* Return from an exception with minimal checks.
787
* The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
788
* If interrupts have been enabled, or anything has been
789
* done that might have changed the scheduling status of
790
* any task or sent any task a signal, you should use
791
* ret_from_except or ret_from_except_lite instead of this.
792
*/
793
fast_exc_return_irq: /* restores irq state too */
794
ld r3,SOFTE(r1)
795
TRACE_AND_RESTORE_IRQ(r3);
796
ld r12,_MSR(r1)
797
rldicl r4,r12,49,63 /* get MSR_EE to LSB */
798
stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
799
b 1f
800
801
.globl fast_exception_return
802
fast_exception_return:
803
ld r12,_MSR(r1)
804
1: ld r11,_NIP(r1)
805
andi. r3,r12,MSR_RI /* check if RI is set */
806
beq- unrecov_fer
807
808
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
809
andi. r3,r12,MSR_PR
810
beq 2f
811
ACCOUNT_CPU_USER_EXIT(r3, r4)
812
2:
813
#endif
814
815
ld r3,_CCR(r1)
816
ld r4,_LINK(r1)
817
ld r5,_CTR(r1)
818
ld r6,_XER(r1)
819
mtcr r3
820
mtlr r4
821
mtctr r5
822
mtxer r6
823
REST_GPR(0, r1)
824
REST_8GPRS(2, r1)
825
826
mfmsr r10
827
rldicl r10,r10,48,1 /* clear EE */
828
rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
829
mtmsrd r10,1
830
831
mtspr SPRN_SRR1,r12
832
mtspr SPRN_SRR0,r11
833
REST_4GPRS(10, r1)
834
ld r1,GPR1(r1)
835
rfid
836
b . /* prevent speculative execution */
837
838
/*
 * Unrecoverable state detected by fast_exception_return (saved MSR had
 * RI clear): report via .unrecoverable_exception and spin forever.
 */
unrecov_fer:
839
bl .save_nvgprs
840
1: addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
841
bl .unrecoverable_exception
842
b 1b /* loop forever should it return */
843
844
845
/*
846
* Hash table stuff
847
*/
848
.align 7
849
_STATIC(do_hash_page)
850
std r3,_DAR(r1)
851
std r4,_DSISR(r1)
852
853
andis. r0,r4,0xa410 /* weird error? */
854
bne- handle_page_fault /* if not, try to insert a HPTE */
855
andis. r0,r4,DSISR_DABRMATCH@h
856
bne- handle_dabr_fault
857
858
BEGIN_FTR_SECTION
859
andis. r0,r4,0x0020 /* Is it a segment table fault? */
860
bne- do_ste_alloc /* If so handle it */
861
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
862
863
clrrdi r11,r1,THREAD_SHIFT
864
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
865
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
866
bne 77f /* then don't call hash_page now */
867
868
/*
869
* On iSeries, we soft-disable interrupts here, then
870
* hard-enable interrupts so that the hash_page code can spin on
871
* the hash_table_lock without problems on a shared processor.
872
*/
873
DISABLE_INTS
874
875
/*
876
* Currently, trace_hardirqs_off() will be called by DISABLE_INTS
877
* and will clobber volatile registers when irq tracing is enabled
878
* so we need to reload them. It may be possible to be smarter here
879
* and move the irq tracing elsewhere but let's keep it simple for
880
* now
881
*/
882
#ifdef CONFIG_TRACE_IRQFLAGS
883
ld r3,_DAR(r1)
884
ld r4,_DSISR(r1)
885
ld r5,_TRAP(r1)
886
ld r12,_MSR(r1)
887
clrrdi r5,r5,4
888
#endif /* CONFIG_TRACE_IRQFLAGS */
889
/*
890
* We need to set the _PAGE_USER bit if MSR_PR is set or if we are
891
* accessing a userspace segment (even from the kernel). We assume
892
* kernel addresses always have the high bit set.
893
*/
894
rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
895
rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
896
orc r0,r12,r0 /* MSR_PR | ~high_bit */
897
rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
898
ori r4,r4,1 /* add _PAGE_PRESENT */
899
rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
900
901
/*
902
* r3 contains the faulting address
903
* r4 contains the required access permissions
904
* r5 contains the trap number
905
*
906
* at return r3 = 0 for success
907
*/
908
bl .hash_page /* build HPTE if possible */
909
cmpdi r3,0 /* see if hash_page succeeded */
910
911
BEGIN_FW_FTR_SECTION
912
/*
913
* If we had interrupts soft-enabled at the point where the
914
* DSI/ISI occurred, and an interrupt came in during hash_page,
915
* handle it now.
916
* We jump to ret_from_except_lite rather than fast_exception_return
917
* because ret_from_except_lite will check for and handle pending
918
* interrupts if necessary.
919
*/
920
beq 13f
921
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
922
923
BEGIN_FW_FTR_SECTION
924
/*
925
* Here we have interrupts hard-disabled, so it is sufficient
926
* to restore paca->{soft,hard}_enable and get out.
927
*/
928
beq fast_exc_return_irq /* Return from exception on success */
929
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
930
931
/* For a hash failure, we don't bother re-enabling interrupts */
932
ble- 12f
933
934
/*
935
* hash_page couldn't handle it, set soft interrupt enable back
936
* to what it was before the trap. Note that .arch_local_irq_restore
937
* handles any interrupts pending at this point.
938
*/
939
ld r3,SOFTE(r1)
940
TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
941
bl .arch_local_irq_restore
942
b 11f
943
944
/* We have a data breakpoint exception - handle it */
945
/*
 * Data address breakpoint (DABR) match detected by do_hash_page: pass
 * pt_regs plus the saved DAR/DSISR to the C .do_dabr handler.
 */
handle_dabr_fault:
946
bl .save_nvgprs
947
ld r4,_DAR(r1) /* r4 = faulting address */
948
ld r5,_DSISR(r1) /* r5 = DSISR */
949
addi r3,r1,STACK_FRAME_OVERHEAD /* r3 = pt_regs */
950
bl .do_dabr
951
b .ret_from_except_lite
952
953
/* Here we have a page fault that hash_page can't handle. */
954
handle_page_fault:
955
ENABLE_INTS
956
11: ld r4,_DAR(r1)
957
ld r5,_DSISR(r1)
958
addi r3,r1,STACK_FRAME_OVERHEAD
959
bl .do_page_fault
960
cmpdi r3,0
961
beq+ 13f
962
bl .save_nvgprs
963
mr r5,r3
964
addi r3,r1,STACK_FRAME_OVERHEAD
965
lwz r4,_DAR(r1)
966
bl .bad_page_fault
967
b .ret_from_except
968
969
13: b .ret_from_except_lite
970
971
/* We have a page fault that hash_page could handle but HV refused
972
* the PTE insertion
973
*/
974
12: bl .save_nvgprs
975
mr r5,r3
976
addi r3,r1,STACK_FRAME_OVERHEAD
977
ld r4,_DAR(r1)
978
bl .low_hash_fault
979
b .ret_from_except
980
981
/*
982
* We come here as a result of a DSI at a point where we don't want
983
* to call hash_page, such as when we are accessing memory (possibly
984
* user memory) inside a PMU interrupt that occurred while interrupts
985
* were soft-disabled. We want to invoke the exception handler for
986
* the access, or panic if there isn't a handler.
987
*/
988
77: bl .save_nvgprs
989
mr r4,r3
990
addi r3,r1,STACK_FRAME_OVERHEAD
991
li r5,SIGSEGV
992
bl .bad_page_fault
993
b .ret_from_except
994
995
/* here we have a segment miss */
996
/*
 * Segment-table miss: ask the C helper to insert a stab entry; r3 == 0
 * means success (fast return), otherwise fall back to the generic
 * page-fault path.
 */
do_ste_alloc:
997
bl .ste_allocate /* try to insert stab entry */
998
cmpdi r3,0 /* r3 == 0 => entry inserted */
999
bne- handle_page_fault
1000
b fast_exception_return
1001
1002
/*
1003
* r13 points to the PACA, r9 contains the saved CR,
1004
* r11 and r12 contain the saved SRR0 and SRR1.
1005
* r9 - r13 are saved in paca->exslb.
1006
* We assume we aren't going to take any exceptions during this procedure.
1007
* We assume (DAR >> 60) == 0xc.
1008
*/
1009
.align 7
1010
_GLOBAL(do_stab_bolted)
1011
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
1012
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
1013
1014
/* Hash to the primary group */
1015
ld r10,PACASTABVIRT(r13)
1016
mfspr r11,SPRN_DAR
1017
srdi r11,r11,28
1018
rldimi r10,r11,7,52 /* r10 = first ste of the group */
1019
1020
/* Calculate VSID */
1021
/* This is a kernel address, so protovsid = ESID */
1022
ASM_VSID_SCRAMBLE(r11, r9, 256M)
1023
rldic r9,r11,12,16 /* r9 = vsid << 12 */
1024
1025
/* Search the primary group for a free entry */
1026
1: ld r11,0(r10) /* Test valid bit of the current ste */
1027
andi. r11,r11,0x80
1028
beq 2f
1029
addi r10,r10,16
1030
andi. r11,r10,0x70
1031
bne 1b
1032
1033
/* Stick for only searching the primary group for now. */
1034
/* At least for now, we use a very simple random castout scheme */
1035
/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
1036
mftb r11
1037
rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
1038
ori r11,r11,0x10
1039
1040
/* r10 currently points to an ste one past the group of interest */
1041
/* make it point to the randomly selected entry */
1042
subi r10,r10,128
1043
or r10,r10,r11 /* r10 is the entry to invalidate */
1044
1045
isync /* mark the entry invalid */
1046
ld r11,0(r10)
1047
rldicl r11,r11,56,1 /* clear the valid bit */
1048
rotldi r11,r11,8
1049
std r11,0(r10)
1050
sync
1051
1052
clrrdi r11,r11,28 /* Get the esid part of the ste */
1053
slbie r11
1054
1055
2: std r9,8(r10) /* Store the vsid part of the ste */
1056
eieio
1057
1058
mfspr r11,SPRN_DAR /* Get the new esid */
1059
clrrdi r11,r11,28 /* Permits a full 32b of ESID */
1060
ori r11,r11,0x90 /* Turn on valid and kp */
1061
std r11,0(r10) /* Put new entry back into the stab */
1062
1063
sync
1064
1065
/* All done -- return from exception. */
1066
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
1067
ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
1068
1069
andi. r10,r12,MSR_RI
1070
beq- unrecov_slb
1071
1072
mtcrf 0x80,r9 /* restore CR */
1073
1074
mfmsr r10
1075
clrrdi r10,r10,2
1076
mtmsrd r10,1
1077
1078
mtspr SPRN_SRR0,r11
1079
mtspr SPRN_SRR1,r12
1080
ld r9,PACA_EXSLB+EX_R9(r13)
1081
ld r10,PACA_EXSLB+EX_R10(r13)
1082
ld r11,PACA_EXSLB+EX_R11(r13)
1083
ld r12,PACA_EXSLB+EX_R12(r13)
1084
ld r13,PACA_EXSLB+EX_R13(r13)
1085
rfid
1086
b . /* prevent speculative execution */
1087
1088
#ifdef CONFIG_PPC_PSERIES
1089
/*
1090
* Data area reserved for FWNMI option.
1091
* This address (0x7000) is fixed by the RPA.
1092
*/
1093
.= 0x7000
1094
.globl fwnmi_data_area
1095
fwnmi_data_area:
1096
#endif /* CONFIG_PPC_PSERIES */
1097
1098
/* iSeries does not use the FWNMI stuff, so it is safe to put
1099
* this here, even if we later allow kernels that will boot on
1100
* both pSeries and iSeries */
1101
#ifdef CONFIG_PPC_ISERIES
1102
. = LPARMAP_PHYS
1103
.globl xLparMap
1104
xLparMap:
1105
.quad HvEsidsToMap /* xNumberEsids */
1106
.quad HvRangesToMap /* xNumberRanges */
1107
.quad STAB0_PAGE /* xSegmentTableOffs */
1108
.zero 40 /* xRsvd */
1109
/* xEsids (HvEsidsToMap entries of 2 quads) */
1110
.quad PAGE_OFFSET_ESID /* xKernelEsid */
1111
.quad PAGE_OFFSET_VSID /* xKernelVsid */
1112
.quad VMALLOC_START_ESID /* xKernelEsid */
1113
.quad VMALLOC_START_VSID /* xKernelVsid */
1114
/* xRanges (HvRangesToMap entries of 3 quads) */
1115
.quad HvPagesToMap /* xPages */
1116
.quad 0 /* xOffset */
1117
.quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
1118
1119
#endif /* CONFIG_PPC_ISERIES */
1120
1121
#ifdef CONFIG_PPC_PSERIES
1122
. = 0x8000
1123
#endif /* CONFIG_PPC_PSERIES */
1124
1125
/*
1126
* Space for CPU0's segment table.
1127
*
1128
* On iSeries, the hypervisor must fill in at least one entry before
1129
* we get control (with relocate on). The address is given to the hv
1130
* as a page number (see xLparMap above), so this must be at a
1131
* fixed address (the linker can't compute (u64)&initial_stab >>
1132
* PAGE_SHIFT).
1133
*/
1134
. = STAB0_OFFSET /* 0x8000 */
1135
.globl initial_stab
1136
initial_stab:
1137
.space 4096
1138
1139