GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/kernel/exceptions-64s.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 */

#include <linux/linkage.h>
#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use)
 * EXC_COMMON - After switching to virtual, relocated mode.
 */

#define EXC_REAL_BEGIN(name, start, size) \
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size) \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size) \
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size) \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name) \
	USE_TEXT_SECTION(); \
	.balign IFETCH_ALIGN_BYTES; \
	.global name; \
	_ASM_NOKPROBE_SYMBOL(name); \
	DEFINE_FIXED_SYMBOL(name, text); \
name:

#define TRAMP_REAL_BEGIN(name) \
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name) \
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size) \
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size) \
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of a label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the
 * low part of the label. This requires that the label be within 64KB of
 * kernelbase, and that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label) \
	ld	reg,PACAKBASE(r13);	/* get high part of &label */ \
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label, section) \
	ld	reg,PACAKBASE(r13); \
	ori	reg,reg,(ABS_ADDR(label, section))@l

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label, section) \
	ld	reg,PACAKBASE(r13); \
	ori	reg,reg,(ABS_ADDR(label, section))@l; \
	addis	reg,reg,(ABS_ADDR(label, section))@h
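
/*
 * For illustration (hypothetical label and offset, not from this file):
 * assuming a label foo_common that the linker placed 0x8120 bytes above the
 * 64K-aligned kernelbase, LOAD_HANDLER(r10, foo_common) expands to roughly:
 *
 *	ld	r10,PACAKBASE(r13)	# r10 = 64K-aligned kernel base
 *	ori	r10,r10,0x8120		# or in the low 16 bits of &foo_common
 *
 * Because ori supplies only the low 16 bits, the target must sit within 64KB
 * of kernelbase; __LOAD_FAR_HANDLER adds an addis to lift that restriction.
 */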

/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define ICFAR		.L_ICFAR_\name\()	/* Uses CFAR */
#define ICFAR_IF_HVMODE	.L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */
#define IMSR_R12	.L_IMSR_R12_\name\()	/* Assumes MSR saved to r12 */

#define INT_DEFINE_BEGIN(n) \
.macro int_define_ ## n name

#define INT_DEFINE_END(n) \
.endm ; \
int_define_ ## n n ; \
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef ICFAR
		ICFAR=1
	.endif
	.ifndef ICFAR_IF_HVMODE
		ICFAR_IF_HVMODE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
	.ifndef IMSR_R12
		IMSR_R12=0
	.endif
.endm
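
/*
 * For example, the interrupt definitions later in this file follow this
 * pattern (IVEC is mandatory; everything else falls back to the defaults
 * set in do_define_int above), e.g. the system reset definition:
 *
 *	INT_DEFINE_BEGIN(system_reset)
 *		IVEC=0x100
 *		IAREA=PACA_EXNMI
 *		IVIRT=0
 *		ISTACK=0
 *		IKVM_REAL=1
 *	INT_DEFINE_END(system_reset)
 */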

/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1", switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle.
 */

.macro KVMTEST name handler
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	li	r10,(IVEC + 0x2)
	FTR_SECTION_ELSE
	li	r10,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	li	r10,(IVEC + 0x2)
	.else
	li	r10,(IVEC)
	.endif
	bne	\handler
#endif
.endm
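
/*
 * Illustrative reading of the trap numbering above: the 0x500 external
 * interrupt is taken with HSRRs when the kernel runs in HV mode, so a guest
 * exit in that configuration records trap number 0x502, while the SRR
 * flavour of the same interrupt records 0x500.
 */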

/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */

.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 */
	.if ICFAR
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.elseif ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(69)
	mfspr	r10,SPRN_CFAR
  END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
FTR_SECTION_ELSE
  BEGIN_FTR_SECTION_NESTED(69)
	li	r10,0
  END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.endif
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	.if ICFAR || ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.endif
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)		/* save r11 - r12 */
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm

/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt, text)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name kvm_interrupt
1:
	.endif
	.endif /* IVIRT */
.endm
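
/*
 * Sketch of the resulting control flow (illustrative, for an interrupt
 * "foo" that has both entry points): GEN_INT_ENTRY branches to one of
 *
 *	foo_common_real:	# real-mode entry: optional KVM test,
 *				# mtmsrd turns the MMU on, falls through to
 *	foo_common_virt:	# virt-mode entry: optional KVM test
 *
 * after which __GEN_COMMON_BODY below builds the register save frame.
 */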

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif
.endm

.macro __GEN_COMMON_BODY name
	.if IMASK
		.if ! ISTACK
		.error "No support for masked interrupt to use custom stack"
		.endif

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	3f

		/*
		 * Kernel code running below __end_soft_masked may be
		 * implicitly soft-masked if it is within the regions
		 * in the soft mask table.
		 */
		LOAD_HANDLER(r10, __end_soft_masked)
		cmpld	r11,r10
		bge+	1f

		/* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
		mtctr	r12
		stw	r9,PACA_EXGEN+EX_CCR(r13)
		SEARCH_SOFT_MASK_TABLE
		cmpdi	r12,0
		mfctr	r12		/* Restore r12 to SRR1 */
		lwz	r9,PACA_EXGEN+EX_CCR(r13)
		beq	1f		/* Not in soft-mask table */
		li	r10,IMASK
		b	2f		/* In soft-mask table, always mask */

		/* Test the soft mask state against our interrupt's bit */
1:		lbz	r10,PACAIRQSOFTMASK(r13)
2:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
3:	mr	r10,r1			/* Save r1			*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace	*/
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	std	r11,_NIP(r1)		/* save SRR0 in stackframe	*/
	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/
	SANITIZE_GPR(0)

	/* Mark our [H]SRRs valid for return */
	li	r10,1
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	stb	r10,PACAHSRR_VALID(r13)
	FTR_SECTION_ELSE
	stb	r10,PACASRR_VALID(r13)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	stb	r10,PACAHSRR_VALID(r13)
	.else
	stb	r10,PACASRR_VALID(r13)
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode		*/
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca		*/
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif

	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe	*/
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe	*/
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
	.if !IMSR_R12
	SANITIZE_GPRS(9, 12)
	.else
	SANITIZE_GPRS(9, 11)
	.endif

	SAVE_NVGPRS(r1)
	SANITIZE_NVGPRS()

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	.if ICFAR || ICFAR_IF_HVMODE
	ld	r10,IAREA+EX_CFAR(r13)
	.else
	li	r10,0
	.endif
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	SAVE_GPRS(2, 8, r1)		/* save r2 - r8 in stackframe	*/
	SANITIZE_GPRS(2, 8)
	mflr	r9			/* Get LR, later save to stack	*/
	LOAD_PACA_TOC()			/* get kernel TOC into r2	*/
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe	*/
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number		*/
	li	r10,0
	LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
	std	r10,RESULT(r1)		/* clear regs->result		*/
	std	r11,STACK_INT_FRAME_MARKER(r1)	/* mark the frame	*/
.endm
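
/*
 * Summary of what __GEN_COMMON_BODY leaves in the register frame (derived
 * from the stores above): CR, NIP (SRR0), MSR (SRR1), the stack chain
 * pointer, GPR0-GPR13, the non-volatile GPRs, optionally DAR/DSISR, CFAR
 * (stashed in ORIG_GPR3), CTR, LR, XER, the soft-mask state in SOFTE, the
 * trap number, a cleared result, and the STACK_FRAME_REGS_MARKER.
 */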

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___restart_table)
	LOAD_REG_ADDR(r10, __stop___restart_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	ld	r12,16(r9)
	b	303f
301:
	addi	r9,r9,24
	b	300b
302:
	li	r12,0
303:
.endm

.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	LOAD_PACA_TOC()
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	li	r12,1
	b	303f
301:
	addi	r9,r9,16
	b	300b
302:
	li	r12,0
303:
.endm
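
/*
 * The loops above imply the table entry layouts. A sketch, expressed as C
 * structs purely for illustration (the actual definitions live elsewhere
 * in the tree):
 *
 *	struct restart_entry {		// 24-byte stride
 *		u64 start, end;		// NIP range to match
 *		u64 fixup;		// r12 = fixup address on a hit
 *	};
 *
 *	struct soft_mask_entry {	// 16-byte stride
 *		u64 start, end;		// range that is implicitly masked
 *	};				// r12 = 1 on a hit, 0 otherwise
 *
 * Both searches compare the interrupted NIP in r11 against half-open
 * [start, end) ranges.
 */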

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	li	r10,0
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	stb	r10,PACAHSRR_VALID(r13)
	.else
	mtspr	SPRN_SRR1,r9
	stb	r10,PACASRR_VALID(r13)
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	SANITIZE_RESTORE_NVGPRS()
	REST_GPRS(2, 13, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm

/*
 * EARLY_BOOT_FIXUP - Fix real-mode interrupt with wrong endian in early boot.
 *
 * There's a short window during boot where although the kernel is running
 * little endian, any exceptions will cause the CPU to switch back to big
 * endian. For example a WARN() boils down to a trap instruction, which will
 * cause a program check, and we end up here but with the CPU in big endian
 * mode. The first instruction of the program check handler (in GEN_INT_ENTRY
 * below) is an mtsprg, which when executed in the wrong endian is an lhzu with
 * a ~3GB displacement from r3. The content of r3 is random, so that is a load
 * from some random location, and depending on the system can easily lead to a
 * checkstop, or an infinitely recursive page fault.
 *
 * So to handle that case we have a trampoline here that can detect we are in
 * the wrong endian and flip us back to the correct endian. We can't flip
 * MSR[LE] using mtmsr, so we have to use rfid. That requires backing up SRR0/1
 * as well as a GPR. To do that we use SPRG0/2/3, as SPRG1 is already used for
 * the paca. SPRG3 is user readable, but this trampoline is only active very
 * early in boot, and SPRG3 will be reinitialised in vdso_getcpu_init() before
 * userspace starts.
 */
.macro EARLY_BOOT_FIXUP
BEGIN_FTR_SECTION
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	tdi	0,0,0x48	// Trap never, or in reverse endian: b . + 8
	b	2f		// Skip trampoline if endian is correct
	.long	0xa643707d	// mtsprg	0, r11	Backup r11
	.long	0xa6027a7d	// mfsrr0	r11
	.long	0xa643727d	// mtsprg	2, r11	Backup SRR0 in SPRG2
	.long	0xa6027b7d	// mfsrr1	r11
	.long	0xa643737d	// mtsprg	3, r11	Backup SRR1 in SPRG3
	.long	0xa600607d	// mfmsr	r11
	.long	0x01006b69	// xori		r11, r11, 1	Invert MSR[LE]
	.long	0xa6037b7d	// mtsrr1	r11
	/*
	 * This is 'li r11,1f' where 1f is the absolute address of that
	 * label, byteswapped into the SI field of the instruction.
	 */
	.long	0x00006039 | \
		((ABS_ADDR(1f, real_vectors) & 0x00ff) << 24) | \
		((ABS_ADDR(1f, real_vectors) & 0xff00) << 8)
	.long	0xa6037a7d	// mtsrr0	r11
	.long	0x2400004c	// rfid
1:
	mfsprg	r11, 3
	mtsrr1	r11		// Restore SRR1
	mfsprg	r11, 2
	mtsrr0	r11		// Restore SRR0
	mfsprg	r11, 0		// Restore r11
2:
#endif
	/*
	 * A program check could hit at any time, and pseries cannot block
	 * MSR[ME] in early boot. So check if there is anything useful in r13
	 * yet, and spin forever if not.
	 */
	mtsprg	0, r11
	mfcr	r11
	cmpdi	r13, 0
	beq	.
	mtcr	r11
	mfsprg	r11, 0
END_FTR_SECTION(0, 1) // nop out after boot
.endm
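
/*
 * To see how the trampoline works, take its first instruction as a worked
 * example (the arithmetic can be checked by hand): "tdi 0,0,0x48" encodes
 * as 0x08000048; read with the opposite byte order it becomes 0x48000008,
 * which is "b . + 8". So a correct-endian CPU traps never, falls through,
 * and takes the "b 2f" around the trampoline, while a wrong-endian CPU
 * branches over the "b 2f" and into the byteswapped .long sequence that
 * flips MSR[LE] via rfid.
 */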

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,	0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries.
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before leaving the soft-masked text below __end_soft_masked, at least one
 * of the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * KVM:
 * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM
 * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/arch/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1 /* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines)
	mtctr	r10
	bctr
#endif
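
/*
 * For reference, a userspace invocation of SCV 0 looks roughly like the
 * sketch below (see syscall64-abi.rst for the authoritative convention;
 * the syscall number shown is illustrative):
 *
 *	li	r0,4		# syscall number in r0 (e.g., write)
 *	# arguments in r3..r8
 *	scv	0
 *	# result in r3; errors come back as negative errno values
 */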


/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - As crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	ISTACK=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	__LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines)
	mtctr	r12
	bctr
#endif
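
/*
 * A note on the wake-state test (a reading of the code above; see the ISA
 * for the authoritative definition): "rlwinm. r5,r3,47-31,30,31" extracts
 * SRR1 bits 46:47 into the low bits of r5. Zero means the interrupt did
 * not wake the thread from a power-saving state; 1 means no state was
 * lost; 2 and above indicate progressively more state loss, which is why
 * the idle wake path returns directly for r5 < 2 and otherwise goes
 * through idle_return_gpr_loss.
 */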

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi. When the interrupt entry wrapper later
	 * enables MSR_RI, then SLB or MCE will be able to recover, but a
	 * nested NMI will notice in_nmi and not recover because of the use
	 * of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset

	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(system_reset_exception)

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL


/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area;
 * the difference is that re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
 *   but it has different priorities). Check to see if the CPU was in power
 *   save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	ISTACK=0
	IDAR=1
	IDSISR=1
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	EARLY_BOOT_FIXUP
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif

#define MACHINE_CHECK_HANDLER_WINDUP \
	/* Clear MSR_RI before setting SRR0 and SRR1. */ \
	li	r9,0; \
	mtmsrd	r9,1;		/* Clear MSR_RI */ \
	/* Decrement paca->in_mce now RI is clear. */ \
	lhz	r12,PACA_IN_MCE(r13); \
	subi	r12,r12,1; \
	sth	r12,PACA_IN_MCE(r13); \
	EXCEPTION_RESTORE_REGS
EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * nested MCEs to 4 levels to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	addi	r3,r1,STACK_INT_FRAME_REGS
BEGIN_FTR_SECTION
	bl	CFUNC(machine_check_early_boot)
END_FTR_SECTION(0, 1) // nop out after boot
	bl	CFUNC(machine_check_early)
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvm_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	mce_deliver		/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */

	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an unrecoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */

	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	CFUNC(machine_check_queue_event)
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL

mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(machine_check_exception_async)
	b	interrupt_return_srr

#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	CFUNC(machine_check_queue_event)

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif

EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there is a chance that we might get hit by
	 * another MCE during the panic path, leaving us in an unstable state
	 * with no way out. Hence, turn the ME bit off while going down, so
	 * that when another MCE is hit during the panic path, the system
	 * will checkstop and the hypervisor will get restarted cleanly by
	 * the SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	CFUNC(disable_machine_check)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)

	/*
	 * Invoke machine_check_exception to print MCE event and panic.
	 * This is the NMI version of the handler because we are called from
	 * the early handler which is a true NMI.
	 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(machine_check_exception)

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(unrecoverable_exception)
	b	.

/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_fault, which attempts to fill the HPT from an entry in the
 *   Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page table entries.
 *
 *   If no entry is found the Linux page fault handler is invoked (by
 *   do_hash_fault). Linux page faults can happen in kernel mode due to user
 *   copy operations of course.
 *
 *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
 *   MMU context, which may cause a DSI in the host, which must go to the
 *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
 *   always be used regardless of AIL setting.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DSISR(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	1f
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	CFUNC(do_hash_fault)
MMU_FTR_SECTION_ELSE
	bl	CFUNC(do_page_fault)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	CFUNC(do_page_fault)
#endif
	b	interrupt_return_srr

1:	bl	CFUNC(do_break)
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values.
	 */
	HANDLER_RESTORE_NVGPRS()
	b	interrupt_return_srr


/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault missing SLB
 * entry for HPT, or an address outside RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 *   KVM: Same as 0x300, DSLB must test for KVM guest.
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IDAR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(do_slb_fault)
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(do_bad_segment_interrupt)
	b	interrupt_return_srr


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	addi	r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	bl	CFUNC(do_hash_fault)
MMU_FTR_SECTION_ELSE
	bl	CFUNC(do_page_fault)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	bl	CFUNC(do_page_fault)
#endif
	b	interrupt_return_srr


/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(do_slb_fault)
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
	li	r3,-EFAULT
#endif
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(do_bad_segment_interrupt)
	b	interrupt_return_srr


/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers; guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
 *
 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
 * external interrupts are delivered as Hypervisor Virtualization Interrupts
 * rather than External Interrupts.
 *
 * Handling:
 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead,
 * because registers at the time of the interrupt are not so important as it is
 * asynchronous.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 *
 * CFAR is not required because this is an asynchronous interrupt that in
 * general won't have much bearing on the state of the CPU, with the possible
 * exception of crash/debug IPIs, but those are generally moving to use SRESET
 * IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case
 * it may be exiting the guest and need CFAR to be saved.
 */
INT_DEFINE_BEGIN(hardware_interrupt)
	IVEC=0x500
	IHSRR_IF_HVMODE=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
	ICFAR=0
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ICFAR_IF_HVMODE=1
#endif
INT_DEFINE_END(hardware_interrupt)

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
	GEN_COMMON hardware_interrupt
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(do_IRQ)
BEGIN_FTR_SECTION
	b	interrupt_return_hsrr
FTR_SECTION_ELSE
	b	interrupt_return_srr
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)


/**
 * Interrupt 0x600 - Alignment Interrupt
 * This is a synchronous interrupt in response to a data alignment fault.
 */
INT_DEFINE_BEGIN(alignment)
	IVEC=0x600
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(alignment)

EXC_REAL_BEGIN(alignment, 0x600, 0x100)
	GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
	GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
	GEN_COMMON alignment
	addi	r3,r1,STACK_INT_FRAME_REGS
	bl	CFUNC(alignment_exception)
	HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
	b	interrupt_return_srr

/**
* Interrupt 0x700 - Program Interrupt (program check).
* This is a synchronous interrupt in response to various instruction faults:
* traps, privilege errors, TM errors, floating point exceptions.
*
* Handling:
* This interrupt may use the "emergency stack" in some cases when being taken
* from kernel context, which complicates handling.
*/
INT_DEFINE_BEGIN(program_check)
IVEC=0x700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(program_check)

EXC_REAL_BEGIN(program_check, 0x700, 0x100)
EARLY_BOOT_FIXUP
GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
EXC_COMMON_BEGIN(program_check_common)
__GEN_COMMON_ENTRY program_check

/*
* It's possible to receive a TM Bad Thing type program check with
* userspace register values (in particular r1), but with SRR1 reporting
* that we came from the kernel. Normally that would confuse the bad
* stack logic, and we would report a bad kernel stack pointer. Instead
* we switch to the emergency stack if we're taking a TM Bad Thing from
* the kernel.
*/

andi. r10,r12,MSR_PR
bne .Lnormal_stack /* If userspace, go normal path */

andis. r10,r12,(SRR1_PROGTM)@h
bne .Lemergency_stack /* If TM, emergency */

cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
blt .Lnormal_stack /* normal path if not */

/* Use the emergency stack */
.Lemergency_stack:
andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
/* 3 in EXCEPTION_PROLOG_COMMON */
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
__ISTACK(program_check)=0
__GEN_COMMON_BODY program_check
b .Ldo_program_check

.Lnormal_stack:
__ISTACK(program_check)=1
__GEN_COMMON_BODY program_check

.Ldo_program_check:
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(program_check_exception)
HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_srr

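/*
* Rough C equivalent of the stack selection above (an illustrative
* sketch; the helper names are descriptive, not real symbols):
*
*    if (srr1 & MSR_PR)
*        use_normal_stack();             // came from userspace
*    else if (srr1 & SRR1_PROGTM)
*        use_emergency_stack();          // TM Bad Thing: r1 may be a user SP
*    else if ((long)r1 >= -(long)INT_FRAME_SIZE)
*        use_emergency_stack();          // r1 doesn't look like a kernel SP
*    else
*        use_normal_stack();
*/
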
/*
* Interrupt 0x800 - Floating-Point Unavailable Interrupt.
* This is a synchronous interrupt in response to executing an fp instruction
* with MSR[FP]=0.
*
* Handling:
* This will load FP registers and enable the FP bit if coming from userspace,
* otherwise report a bad kernel use of FP.
*/
INT_DEFINE_BEGIN(fp_unavailable)
IVEC=0x800
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
IMSR_R12=1
INT_DEFINE_END(fp_unavailable)

EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
GEN_INT_ENTRY fp_unavailable, virt=0
EXC_REAL_END(fp_unavailable, 0x800, 0x100)
EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
GEN_INT_ENTRY fp_unavailable, virt=1
EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
GEN_COMMON fp_unavailable
bne 1f /* if from user, just load it up */
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(kernel_fp_unavailable_exception)
0: trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
/*
* Test if the 2 TM state bits are zero. If non-zero (i.e., userspace
* was in a transaction), go do TM stuff.
*/
rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
bne- 2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
bl CFUNC(load_up_fpu)
b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(fp_unavailable_tm)
b interrupt_return_srr
#endif

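/*
* Note on the TM test above (illustrative): MSR[TS] is a 2-bit field, and
* "rldicl. r0,r12,(64-MSR_TS_LG),(64-2)" rotates the MSR image in r12
* right by MSR_TS_LG and masks off all but the low 2 bits. In C terms:
*
*    if ((msr >> MSR_TS_LG) & 0x3)    // 0 means not in a transaction
*        goto tm_path;
*/
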
/**
* Interrupt 0x900 - Decrementer Interrupt.
* This is an asynchronous interrupt in response to a decrementer exception
* (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
* MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
* local_irq_disable()).
*
* Handling:
* This calls into the Linux timer handler. NVGPRs are not saved (see 0x500).
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
* in the interrupted context.
* If PPC_WATCHDOG is configured, the soft masked handler will actually set
* things back up to run soft_nmi_interrupt as a regular interrupt handler
* on the emergency stack.
*
* CFAR is not required because this is asynchronous (see hardware_interrupt).
* A watchdog interrupt may like to have CFAR, but usually the interesting
* branch is long gone by that point (e.g., infinite loop).
*/
INT_DEFINE_BEGIN(decrementer)
IVEC=0x900
IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
ICFAR=0
INT_DEFINE_END(decrementer)

EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
GEN_INT_ENTRY decrementer, virt=0
EXC_REAL_END(decrementer, 0x900, 0x80)
EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
GEN_INT_ENTRY decrementer, virt=1
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(timer_interrupt)
b interrupt_return_srr

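/*
* For reference, the "bump the decrementer" in the soft-masked case is
* done by MASKED_INTERRUPT near the end of this file, which defers the
* exception by reloading DEC with the largest positive 32-bit value:
*
*    LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
*    mtspr SPRN_DEC,r9
*/
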
/**
* Interrupt 0x980 - Hypervisor Decrementer Interrupt.
* This is an asynchronous interrupt, similar to 0x900 but for the HDEC
* register.
*
* Handling:
* Linux does not use this outside of KVM, where it is used to maintain a host
* timer while the guest is given control of DEC. It should normally be caught
* by the KVM test and routed there.
*/
INT_DEFINE_BEGIN(hdecrementer)
IVEC=0x980
IHSRR=1
ISTACK=0
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(hdecrementer)

EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
GEN_INT_ENTRY hdecrementer, virt=0
EXC_REAL_END(hdecrementer, 0x980, 0x80)
EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
GEN_INT_ENTRY hdecrementer, virt=1
EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
EXC_COMMON_BEGIN(hdecrementer_common)
__GEN_COMMON_ENTRY hdecrementer
/*
* Hypervisor decrementer interrupts not caught by the KVM test
* shouldn't occur but are sometimes left pending on exit from a KVM
* guest. We don't need to do anything to clear them, as they are
* edge-triggered.
*
* Be careful to avoid touching the kernel stack.
*/
li r10,0
stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_CTR(r13)
mtctr r10
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
HRFI_TO_KERNEL

/**
* Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
* This is an asynchronous interrupt in response to a msgsndp doorbell.
* It is maskable in hardware by clearing MSR[EE], and soft-maskable with
* IRQS_DISABLED mask (i.e., local_irq_disable()).
*
* Handling:
* Guests may use this for IPIs between threads in a core if the
* hypervisor supports it. NVGPRs are not saved (see 0x500).
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, leaving MSR[EE] enabled in the interrupted context because the
* doorbells are edge triggered.
*
* CFAR is not required, similarly to hardware_interrupt.
*/
INT_DEFINE_BEGIN(doorbell_super)
IVEC=0xa00
IMASK=IRQS_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
ICFAR=0
INT_DEFINE_END(doorbell_super)

EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
GEN_INT_ENTRY doorbell_super, virt=0
EXC_REAL_END(doorbell_super, 0xa00, 0x100)
EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
GEN_INT_ENTRY doorbell_super, virt=1
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
GEN_COMMON doorbell_super
addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_DOORBELL
bl CFUNC(doorbell_exception)
#else
bl CFUNC(unknown_async_exception)
#endif
b interrupt_return_srr


EXC_REAL_NONE(0xb00, 0x100)
EXC_VIRT_NONE(0x4b00, 0x100)

/**
* Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
* This is a synchronous interrupt invoked with the "sc" instruction. The
* system call is invoked with "sc 0" and does not alter the HV bit, so it
* is directed to the currently running OS. The hypercall is invoked with
* "sc 1" and it sets HV=1, so it elevates to hypervisor.
*
* In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
* 0x4c00 virtual mode.
*
* Handling:
* If the KVM test fires then it was due to a hypercall and is accordingly
* routed to KVM. Otherwise this executes a normal Linux system call.
*
* Call convention:
*
* The syscall and hypercall register conventions are documented in
* Documentation/arch/powerpc/syscall64-abi.rst and
* Documentation/arch/powerpc/papr_hcalls.rst respectively.
*
* The intersection of volatile registers that don't contain possible
* inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
* without saving, though xer is not a good idea to use, as hardware may
* interpret some bits so it may be costly to change them.
*/
INT_DEFINE_BEGIN(system_call)
IVEC=0xc00
IKVM_REAL=1
IKVM_VIRT=1
ICFAR=0
INT_DEFINE_END(system_call)

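/*
* Illustrative userspace invocation of "sc 0" following the convention in
* syscall64-abi.rst (a sketch; __NR_getpid stands in for any syscall
* number and none of this is used by this file):
*
*    li   r0,__NR_getpid    // syscall number in r0
*    sc   0                 // arguments, if any, in r3..r8
*    bns+ 1f                // cr0.SO clear: success, result in r3
*    neg  r3,r3             // cr0.SO set: r3 held +errno, make it -errno
* 1:
*/
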
.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* There is a little bit of juggling to get syscall and hcall
* working well. Save r13 in ctr to avoid using SPRG scratch
* register.
*
* Userspace syscalls have already saved the PPR, hcalls must save
* it before setting HMT_MEDIUM.
*/
mtctr r13
GET_PACA(r13)
std r10,PACA_EXGEN+EX_R10(r13)
INTERRUPT_TO_KERNEL
KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
mfctr r9
#else
mr r9,r13
GET_PACA(r13)
INTERRUPT_TO_KERNEL
#endif

/* We reach here with PACA in r13, r13 in r9. */
mfspr r11,SPRN_SRR0
mfspr r12,SPRN_SRR1

HMT_MEDIUM

.if ! \virt
__LOAD_HANDLER(r10, system_call_common_real, real_vectors)
mtctr r10
bctr
.else
#ifdef CONFIG_RELOCATABLE
__LOAD_HANDLER(r10, system_call_common, virt_vectors)
mtctr r10
bctr
#else
b system_call_common
#endif
.endif
.endm

EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
SYSTEM_CALL 0
EXC_REAL_END(system_call, 0xc00, 0x100)
EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
SYSTEM_CALL 1
EXC_VIRT_END(system_call, 0x4c00, 0x100)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvm_hcall)
std r9,PACA_EXGEN+EX_R9(r13)
std r11,PACA_EXGEN+EX_R11(r13)
std r12,PACA_EXGEN+EX_R12(r13)
mfcr r9
mfctr r10
std r10,PACA_EXGEN+EX_R13(r13)
li r10,0
std r10,PACA_EXGEN+EX_CFAR(r13)
std r10,PACA_EXGEN+EX_CTR(r13)
/*
* Save the PPR (on systems that support it) before changing to
* HMT_MEDIUM. That allows the KVM code to save that value into the
* guest state (it is the guest's PPR value).
*/
BEGIN_FTR_SECTION
mfspr r10,SPRN_PPR
std r10,PACA_EXGEN+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

HMT_MEDIUM

#ifdef CONFIG_RELOCATABLE
/*
* Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
* outside the head section.
*/
__LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
mtctr r10
bctr
#else
b kvmppc_hcall
#endif
#endif

/**
* Interrupt 0xd00 - Trace Interrupt.
* This is a synchronous interrupt in response to instruction step or
* breakpoint faults.
*/
INT_DEFINE_BEGIN(single_step)
IVEC=0xd00
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(single_step)

EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
GEN_INT_ENTRY single_step, virt=0
EXC_REAL_END(single_step, 0xd00, 0x100)
EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
GEN_INT_ENTRY single_step, virt=1
EXC_VIRT_END(single_step, 0x4d00, 0x100)
EXC_COMMON_BEGIN(single_step_common)
GEN_COMMON single_step
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(single_step_exception)
b interrupt_return_srr

/**
* Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
* This is a synchronous interrupt in response to an MMU fault caused by a
* guest data access.
*
* Handling:
* This should always get routed to KVM. In radix MMU mode, this is caused
* by a guest nested radix access that can't be performed due to the
* partition scope page table. In hash mode, this can be caused by guests
* running with translation disabled (virtual real mode) or with VPM enabled.
* KVM will update the page table structures or disallow the access.
*/
INT_DEFINE_BEGIN(h_data_storage)
IVEC=0xe00
IHSRR=1
IDAR=1
IDSISR=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(h_data_storage)

EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
GEN_INT_ENTRY h_data_storage, virt=0, ool=1
EXC_REAL_END(h_data_storage, 0xe00, 0x20)
EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
GEN_INT_ENTRY h_data_storage, virt=1, ool=1
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
EXC_COMMON_BEGIN(h_data_storage_common)
GEN_COMMON h_data_storage
addi r3,r1,STACK_INT_FRAME_REGS
BEGIN_MMU_FTR_SECTION
bl CFUNC(do_bad_page_fault_segv)
MMU_FTR_SECTION_ELSE
bl CFUNC(unknown_exception)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
b interrupt_return_hsrr

/**
* Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
* This is a synchronous interrupt in response to an MMU fault caused by a
* guest instruction fetch, similar to HDSI.
*/
INT_DEFINE_BEGIN(h_instr_storage)
IVEC=0xe20
IHSRR=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(h_instr_storage)

EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
EXC_COMMON_BEGIN(h_instr_storage_common)
GEN_COMMON h_instr_storage
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(unknown_exception)
b interrupt_return_hsrr

/**
* Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
*/
INT_DEFINE_BEGIN(emulation_assist)
IVEC=0xe40
IHSRR=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(emulation_assist)

EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
GEN_INT_ENTRY emulation_assist, virt=0, ool=1
EXC_REAL_END(emulation_assist, 0xe40, 0x20)
EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
GEN_INT_ENTRY emulation_assist, virt=1, ool=1
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
EXC_COMMON_BEGIN(emulation_assist_common)
GEN_COMMON emulation_assist
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(emulation_assist_interrupt)
HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_hsrr

/**
* Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
* This is an asynchronous interrupt caused by a Hypervisor Maintenance
* Exception. It is always taken in real mode but uses HSRR registers
* unlike SRESET and MCE.
*
* It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
* with IRQS_DISABLED mask (i.e., local_irq_disable()).
*
* Handling:
* This is a special case: it is handled similarly to machine checks, with an
* initial real mode handler that is not soft-masked, which attempts to fix
* the problem, followed by a regular handler which is soft-maskable and
* reports the problem.
*
* The emergency stack is used for the early real mode handler.
*
* XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
* either use soft-masking for the MCE, or use irq_work for the HMI.
*
* KVM:
* Unlike MCE, this calls into KVM without calling the real mode handler
* first.
*/
INT_DEFINE_BEGIN(hmi_exception_early)
IVEC=0xe60
IHSRR=1
IREALMODE_COMMON=1
ISTACK=0
IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
IKVM_REAL=1
INT_DEFINE_END(hmi_exception_early)

INT_DEFINE_BEGIN(hmi_exception)
IVEC=0xe60
IHSRR=1
IMASK=IRQS_DISABLED
IKVM_REAL=1
INT_DEFINE_END(hmi_exception)

EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)

EXC_COMMON_BEGIN(hmi_exception_early_common)
__GEN_REALMODE_COMMON_ENTRY hmi_exception_early

mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */

__GEN_COMMON_BODY hmi_exception_early

addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(hmi_exception_realmode)
cmpdi cr0,r3,0
bne 1f

EXCEPTION_RESTORE_REGS hsrr=1
HRFI_TO_USER_OR_KERNEL

1:
/*
* Go to virtual mode and pull the HMI event information from
* firmware.
*/
EXCEPTION_RESTORE_REGS hsrr=1
GEN_INT_ENTRY hmi_exception, virt=0

EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(handle_hmi_exception)
b interrupt_return_hsrr

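/*
* Flow of the early HMI handler above, as rough C (illustrative; the
* helper names are descriptive, not real symbols):
*
*    if (hmi_exception_realmode(regs) == 0)
*        return_to_interrupted_context();  // EXCEPTION_RESTORE_REGS + hrfid
*    else
*        reenter_regular_handler();        // GEN_INT_ENTRY hmi_exception
*/
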
/**
* Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
* This is an asynchronous interrupt in response to a msgsnd doorbell.
* Similar to the 0xa00 doorbell but for host rather than guest.
*
* CFAR is not required (similar to doorbell_super), unless KVM HV
* is enabled, in which case it may be a guest exit. Most PowerNV kernels
* include KVM support, so it would be nice if this could be dynamically
* patched out when KVM is not running any guests.
*/
INT_DEFINE_BEGIN(h_doorbell)
IVEC=0xe80
IHSRR=1
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ICFAR=0
#endif
INT_DEFINE_END(h_doorbell)

EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
GEN_INT_ENTRY h_doorbell, virt=0, ool=1
EXC_REAL_END(h_doorbell, 0xe80, 0x20)
EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
GEN_INT_ENTRY h_doorbell, virt=1, ool=1
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
GEN_COMMON h_doorbell
addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_DOORBELL
bl CFUNC(doorbell_exception)
#else
bl CFUNC(unknown_async_exception)
#endif
b interrupt_return_hsrr

/**
* Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
* This is an asynchronous interrupt in response to an "external exception".
* Similar to 0x500 but for host only.
*
* Like h_doorbell, CFAR is only required for KVM HV because this can be
* a guest exit.
*/
INT_DEFINE_BEGIN(h_virt_irq)
IVEC=0xea0
IHSRR=1
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
ICFAR=0
#endif
INT_DEFINE_END(h_virt_irq)

EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(do_IRQ)
b interrupt_return_hsrr


EXC_REAL_NONE(0xec0, 0x20)
EXC_VIRT_NONE(0x4ec0, 0x20)
EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)

/*
* Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
* This is an asynchronous interrupt in response to a PMU exception.
* It is maskable in hardware by clearing MSR[EE], and soft-maskable with
* IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
*
* Handling:
* This calls into the perf subsystem.
*
* Like the watchdog soft-nmi, it appears as an NMI interrupt to Linux, in
* that it runs under local_irq_disable. However it may be soft-masked in
* powerpc-specific code.
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, and clear MSR[EE] in the interrupted context.
*
* CFAR is not used by perf interrupts so not required.
*/
INT_DEFINE_BEGIN(performance_monitor)
IVEC=0xf00
IMASK=IRQS_PMI_DISABLED
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
ICFAR=0
INT_DEFINE_END(performance_monitor)

EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
GEN_INT_ENTRY performance_monitor, virt=0, ool=1
EXC_REAL_END(performance_monitor, 0xf00, 0x20)
EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
GEN_INT_ENTRY performance_monitor, virt=1, ool=1
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
addi r3,r1,STACK_INT_FRAME_REGS
lbz r4,PACAIRQSOFTMASK(r13)
cmpdi r4,IRQS_ENABLED
bne 1f
bl CFUNC(performance_monitor_exception_async)
b interrupt_return_srr
1:
bl CFUNC(performance_monitor_exception_nmi)
/* Clear MSR_RI before setting SRR0 and SRR1. */
li r9,0
mtmsrd r9,1

kuap_kernel_restore r9, r10

EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL

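/*
* The dispatch above, as rough C (an illustrative sketch; the C-level
* field spelling is assumed from the PACAIRQSOFTMASK offset used above):
*
*    if (local_paca->irq_soft_mask == IRQS_ENABLED)
*        performance_monitor_exception_async(regs);  // ordinary interrupt
*    else
*        performance_monitor_exception_nmi(regs);    // NMI-style, exits via rfid
*/
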
/**
* Interrupt 0xf20 - Vector Unavailable Interrupt.
* This is a synchronous interrupt in response to
* executing a vector (or altivec) instruction with MSR[VEC]=0.
* Similar to FP unavailable.
*/
INT_DEFINE_BEGIN(altivec_unavailable)
IVEC=0xf20
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
IMSR_R12=1
INT_DEFINE_END(altivec_unavailable)

EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
GEN_COMMON altivec_unavailable
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
beq 1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION_NESTED(69)
/*
* Test if the 2 TM state bits are zero. If non-zero (i.e., userspace
* was in a transaction), go do TM stuff.
*/
rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
bne- 2f
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
bl CFUNC(load_up_altivec)
b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(altivec_unavailable_tm)
b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(altivec_unavailable_exception)
b interrupt_return_srr

/**
* Interrupt 0xf40 - VSX Unavailable Interrupt.
* This is a synchronous interrupt in response to
* executing a VSX instruction with MSR[VSX]=0.
* Similar to FP unavailable.
*/
INT_DEFINE_BEGIN(vsx_unavailable)
IVEC=0xf40
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
IMSR_R12=1
INT_DEFINE_END(vsx_unavailable)

EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
EXC_COMMON_BEGIN(vsx_unavailable_common)
GEN_COMMON vsx_unavailable
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
beq 1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION_NESTED(69)
/*
* Test if the 2 TM state bits are zero. If non-zero (i.e., userspace
* was in a transaction), go do TM stuff.
*/
rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
bne- 2f
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
b load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(vsx_unavailable_tm)
b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(vsx_unavailable_exception)
b interrupt_return_srr

/**
* Interrupt 0xf60 - Facility Unavailable Interrupt.
* This is a synchronous interrupt in response to
* executing an instruction without access to the facility that can be
* resolved by the OS (e.g., FSCR, MSR).
* Similar to FP unavailable.
*/
INT_DEFINE_BEGIN(facility_unavailable)
IVEC=0xf60
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(facility_unavailable)

EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
EXC_COMMON_BEGIN(facility_unavailable_common)
GEN_COMMON facility_unavailable
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(facility_unavailable_exception)
HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_srr

/**
* Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
* This is a synchronous interrupt in response to
* executing an instruction without access to the facility that can only
* be resolved in HV mode (e.g., HFSCR).
* Similar to FP unavailable.
*/
INT_DEFINE_BEGIN(h_facility_unavailable)
IVEC=0xf80
IHSRR=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(h_facility_unavailable)

EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
EXC_COMMON_BEGIN(h_facility_unavailable_common)
GEN_COMMON h_facility_unavailable
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(facility_unavailable_exception)
/* XXX Shouldn't be necessary in practice */
HANDLER_RESTORE_NVGPRS()
b interrupt_return_hsrr


EXC_REAL_NONE(0xfa0, 0x20)
EXC_VIRT_NONE(0x4fa0, 0x20)
EXC_REAL_NONE(0xfc0, 0x20)
EXC_VIRT_NONE(0x4fc0, 0x20)
EXC_REAL_NONE(0xfe0, 0x20)
EXC_VIRT_NONE(0x4fe0, 0x20)

EXC_REAL_NONE(0x1000, 0x100)
EXC_VIRT_NONE(0x5000, 0x100)
EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)

/**
* Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
* This interrupt was removed from the ISA before v2.01, which is the
* earliest 64-bit BookS ISA supported. However, the G5 / 970 implements
* it as a non-architected feature available through the support
* processor interface.
*/
INT_DEFINE_BEGIN(instruction_breakpoint)
IVEC=0x1300
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_breakpoint)

EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
GEN_INT_ENTRY instruction_breakpoint, virt=0
EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
GEN_INT_ENTRY instruction_breakpoint, virt=1
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
EXC_COMMON_BEGIN(instruction_breakpoint_common)
GEN_COMMON instruction_breakpoint
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(instruction_breakpoint_exception)
b interrupt_return_srr

EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)

/**
* Interrupt 0x1500 - Soft Patch Interrupt
*
* Handling:
* This is an implementation-specific interrupt which can be used for a
* range of exceptions.
*
* This interrupt handler is unique in that it runs the denormal assist
* code even for guests (and even in guest context) without going to KVM,
* for speed. POWER9 does not raise denorm exceptions, so this special case
* could be phased out in future to reduce special cases.
*/
INT_DEFINE_BEGIN(denorm_exception)
IVEC=0x1500
IHSRR=1
IBRANCH_TO_COMMON=0
IKVM_REAL=1
INT_DEFINE_END(denorm_exception)

EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
GEN_INT_ENTRY denorm_exception, virt=0
#ifdef CONFIG_PPC_DENORMALISATION
andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
bne+ denorm_assist
#endif
GEN_BRANCH_TO_COMMON denorm_exception, virt=0
EXC_REAL_END(denorm_exception, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
GEN_INT_ENTRY denorm_exception, virt=1
andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
bne+ denorm_assist
GEN_BRANCH_TO_COMMON denorm_exception, virt=1
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
* To denormalise we need to move a copy of the register to itself.
* For POWER6 do that here for all FP regs.
*/
mfmsr r10
ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
xori r10,r10,(MSR_FE0|MSR_FE1)
mtmsrd r10
sync

.Lreg=0
.rept 32
fmr .Lreg,.Lreg
.Lreg=.Lreg+1
.endr

FTR_SECTION_ELSE
/*
* To denormalise we need to move a copy of the register to itself.
* For POWER7 do that here for the first 32 VSX registers only.
*/
mfmsr r10
oris r10,r10,MSR_VSX@h
mtmsrd r10
sync

.Lreg=0
.rept 32
XVCPSGNDP(.Lreg,.Lreg,.Lreg)
.Lreg=.Lreg+1
.endr

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
b denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
* To denormalise we need to move a copy of the register to itself.
* For POWER8 we need to do that for all 64 VSX registers.
*/
.Lreg=32
.rept 32
XVCPSGNDP(.Lreg,.Lreg,.Lreg)
.Lreg=.Lreg+1
.endr

denorm_done:
mfspr r11,SPRN_HSRR0
subi r11,r11,4
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_PPR(r13)
mtspr SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_CFAR(r13)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
li r10,0
stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
HRFI_TO_UNKNOWN
b .
#endif

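/*
* Note on denorm_done above (an inference from the code, stated here for
* clarity): HSRR0 is wound back by one instruction ("subi r11,r11,4") so
* that HRFI_TO_UNKNOWN re-executes the instruction that trapped, now that
* the affected registers have been rewritten in place to denormalise them.
*/
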
EXC_COMMON_BEGIN(denorm_exception_common)
GEN_COMMON denorm_exception
addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(unknown_exception)
b interrupt_return_hsrr


EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)

INT_DEFINE_BEGIN(altivec_assist)
IVEC=0x1700
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
INT_DEFINE_END(altivec_assist)

EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
GEN_INT_ENTRY altivec_assist, virt=0
EXC_REAL_END(altivec_assist, 0x1700, 0x100)
EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
GEN_INT_ENTRY altivec_assist, virt=1
EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
EXC_COMMON_BEGIN(altivec_assist_common)
GEN_COMMON altivec_assist
addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_ALTIVEC
bl CFUNC(altivec_assist_exception)
HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
#else
bl CFUNC(unknown_exception)
#endif
b interrupt_return_srr


EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)

#ifdef CONFIG_PPC_WATCHDOG

INT_DEFINE_BEGIN(soft_nmi)
IVEC=0x900
ISTACK=0
ICFAR=0
INT_DEFINE_END(soft_nmi)

/*
* Branch to soft_nmi_interrupt using the emergency stack. The emergency
* stack is one that is usable by maskable interrupts so long as MSR_EE
* remains off. It is used for recovery when something has corrupted the
* normal kernel stack, for example. The "soft NMI" must not use the process
* stack because we want irq disabled sections to avoid touching the stack
* at all (other than PMU interrupts), so use the emergency stack for this,
* and run it entirely with interrupts hard disabled.
*/
EXC_COMMON_BEGIN(soft_nmi_common)
mr r10,r1
ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
__GEN_COMMON_BODY soft_nmi

addi r3,r1,STACK_INT_FRAME_REGS
bl CFUNC(soft_nmi_interrupt)

/* Clear MSR_RI before setting SRR0 and SRR1. */
li r9,0
mtmsrd r9,1

kuap_kernel_restore r9, r10

EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL

#endif /* CONFIG_PPC_WATCHDOG */

/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
* - If it was a decrementer interrupt, we bump the dec to max and return.
* - If it was a doorbell we return immediately since doorbells are edge
*   triggered and won't automatically refire.
* - If it was an HMI we return immediately since we handled it in realmode
*   and it won't refire.
* - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
* This is called with r10 containing the value to OR to the paca field.
*/
.macro MASKED_INTERRUPT hsrr=0
.if \hsrr
masked_Hinterrupt:
.else
masked_interrupt:
.endif
stw r9,PACA_EXGEN+EX_CCR(r13)
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/*
* Ensure there was no previous MUST_HARD_MASK interrupt or
* HARD_DIS setting. If this does fire, the interrupt is still
* masked and MSR[EE] will be cleared on return, so no need to
* panic, but somebody probably enabled MSR[EE] under
* PACA_IRQ_HARD_DIS, mtmsr(mfmsr() | MSR_x) being a common
* cause.
*/
lbz r9,PACAIRQHAPPENED(r13)
andi. r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS)
0: tdnei r9,0
EMIT_WARN_ENTRY 0b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
#endif
lbz r9,PACAIRQHAPPENED(r13)
or r9,r9,r10
stb r9,PACAIRQHAPPENED(r13)

.if ! \hsrr
cmpwi r10,PACA_IRQ_DEC
bne 1f
LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
mtspr SPRN_DEC,r9
#ifdef CONFIG_PPC_WATCHDOG
lwz r9,PACA_EXGEN+EX_CCR(r13)
b soft_nmi_common
#else
b 2f
#endif
.endif

1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
beq 2f
xori r12,r12,MSR_EE /* clear MSR_EE */
.if \hsrr
mtspr SPRN_HSRR1,r12
.else
mtspr SPRN_SRR1,r12
.endif
ori r9,r9,PACA_IRQ_HARD_DIS
stb r9,PACAIRQHAPPENED(r13)
2: /* done */
li r9,0
.if \hsrr
stb r9,PACAHSRR_VALID(r13)
.else
stb r9,PACASRR_VALID(r13)
.endif

SEARCH_RESTART_TABLE
cmpdi r12,0
beq 3f
.if \hsrr
mtspr SPRN_HSRR0,r12
.else
mtspr SPRN_SRR0,r12
.endif
3:

ld r9,PACA_EXGEN+EX_CTR(r13)
mtctr r9
lwz r9,PACA_EXGEN+EX_CCR(r13)
mtcrf 0x80,r9
std r1,PACAR1(r13)
ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
/* May return to masked low address where r13 is not set up */
.if \hsrr
HRFI_TO_KERNEL
.else
RFI_TO_KERNEL
.endif
b .
.endm

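/*
* Rough C equivalent of the masked path above (illustrative only):
*
*    paca->irq_happened |= reason;          // reason arrives in r10
*    if (reason == PACA_IRQ_DEC) {
*        set_dec(0x7fffffff);               // defer; MSR[EE] stays set
*        // (with PPC_WATCHDOG, this case instead runs soft_nmi_common)
*    } else if (reason & PACA_IRQ_MUST_HARD_MASK) {
*        regs->msr &= ~MSR_EE;              // return with EE hard-disabled
*        paca->irq_happened |= PACA_IRQ_HARD_DIS;
*    }
*    // else: doorbell/HMI are edge-triggered, nothing more to do
*/
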
TRAMP_REAL_BEGIN(stf_barrier_fallback)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
sync
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ori 31,31,0
.rept 14
b 1f
1:
.endr
blr

/* Clobbers r10, r11, ctr */
.macro L1D_DISPLACEMENT_FLUSH
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SIZE(r13)
srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
mtctr r11
DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

/* order ld/st prior to dcbt stop all streams with flushing */
sync

/*
* The load addresses are at staggered offsets within cachelines,
* which suits some pipelines better (on others it should not
* hurt).
*/
1:
ld r11,(0x80 + 8)*0(r10)
ld r11,(0x80 + 8)*1(r10)
ld r11,(0x80 + 8)*2(r10)
ld r11,(0x80 + 8)*3(r10)
ld r11,(0x80 + 8)*4(r10)
ld r11,(0x80 + 8)*5(r10)
ld r11,(0x80 + 8)*6(r10)
ld r11,(0x80 + 8)*7(r10)
addi r10,r10,0x80*8
bdnz 1b
.endm

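/*
* Worked example for the loop count above (illustrative): shifting right
* by (7 + 3) divides the flush size by 128-byte cachelines times the 8x
* unroll, so a 64KB displacement area gives 65536 >> 10 = 64 iterations,
* each pass touching 8 lines (8 * 0x80 = 1KB, at staggered offsets).
*/
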
TRAMP_REAL_BEGIN(entry_flush_fallback)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
mfctr r9
L1D_DISPLACEMENT_FLUSH
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
blr

/*
* The SCV entry flush happens with interrupts enabled, so it must disable
* them to prevent EXRFI from being clobbered by NMIs (e.g., soft_nmi_common).
* r10 (containing LR) does not need to be preserved here because scv entry
* puts 0 in the pt_regs; CTR can be clobbered for the same reason.
*/
TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
li r10,0
mtmsrd r10,1
lbz r10,PACAIRQHAPPENED(r13)
ori r10,r10,PACA_IRQ_HARD_DIS
stb r10,PACAIRQHAPPENED(r13)
std r11,PACA_EXRFI+EX_R11(r13)
L1D_DISPLACEMENT_FLUSH
ld r11,PACA_EXRFI+EX_R11(r13)
li r10,MSR_RI
mtmsrd r10,1
blr

TRAMP_REAL_BEGIN(rfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
std r1,PACA_EXRFI+EX_R12(r13)
ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
mfctr r9
L1D_DISPLACEMENT_FLUSH
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
rfid

TRAMP_REAL_BEGIN(hrfi_flush_fallback)
SET_SCRATCH0(r13);
GET_PACA(r13);
std r1,PACA_EXRFI+EX_R12(r13)
ld r1,PACAKSAVE(r13)
std r9,PACA_EXRFI+EX_R9(r13)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
mfctr r9
L1D_DISPLACEMENT_FLUSH
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
ld r11,PACA_EXRFI+EX_R11(r13)
ld r1,PACA_EXRFI+EX_R12(r13)
GET_SCRATCH0(r13);
hrfid

TRAMP_REAL_BEGIN(rfscv_flush_fallback)
/* system call volatile */
mr r7,r13
GET_PACA(r13);
mr r8,r1
ld r1,PACAKSAVE(r13)
mfctr r9
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SIZE(r13)
srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
mtctr r11
DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */

/* order ld/st prior to dcbt stop all streams with flushing */
sync

/*
* The load addresses are at staggered offsets within cachelines,
* which suits some pipelines better (on others it should not
* hurt).
*/
1:
ld r11,(0x80 + 8)*0(r10)
ld r11,(0x80 + 8)*1(r10)
ld r11,(0x80 + 8)*2(r10)
ld r11,(0x80 + 8)*3(r10)
ld r11,(0x80 + 8)*4(r10)
ld r11,(0x80 + 8)*5(r10)
ld r11,(0x80 + 8)*6(r10)
ld r11,(0x80 + 8)*7(r10)
addi r10,r10,0x80*8
bdnz 1b

mtctr r9
li r9,0
li r10,0
li r11,0
mr r1,r8
mr r13,r7
RFSCV

USE_TEXT_SECTION()

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
kvm_interrupt:
/*
* The conditional branch in KVMTEST can't reach all the way,
* make a stub.
*/
b kvmppc_interrupt
#endif

_GLOBAL(do_uaccess_flush)
UACCESS_FLUSH_FIXUP_SECTION
nop
nop
nop
blr
L1D_DISPLACEMENT_FLUSH
blr
_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
EXPORT_SYMBOL(do_uaccess_flush)


MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1

USE_FIXED_SECTION(virt_trampolines)
/*
* All code below __end_soft_masked is treated as soft-masked. If
* any code runs here with MSR[EE]=1, it must then cope with a pending
* soft interrupt being raised (i.e., by ensuring it is replayed).
*
* The __end_interrupts marker must be past the out-of-line (OOL)
* handlers, so that they are copied to real address 0x100 when running
* a relocatable kernel. This ensures they can be reached from the short
* trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
* directly, without using LOAD_HANDLER().
*/
.align 7
.globl __end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines)

CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
CLOSE_FIXED_SECTION(virt_trampolines);

USE_TEXT_SECTION()

/* MSR[RI] should be clear because this uses SRR[01] */
_GLOBAL(enable_machine_check)
mflr r0
bcl 20,31,$+4
0: mflr r3
addi r3,r3,(1f - 0b)
mtspr SPRN_SRR0,r3
mfmsr r3
ori r3,r3,MSR_ME
mtspr SPRN_SRR1,r3
RFI_TO_KERNEL
1: mtlr r0
blr

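/*
* Note: "bcl 20,31,$+4" above is the standard branch-and-link to the next
* instruction, so the following "mflr r3" picks up the current address and
* "addi r3,r3,(1f - 0b)" turns it into the absolute address of the 1:
* label without needing a relocation. disable_machine_check below uses
* the same trick.
*/
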
/* MSR[RI] should be clear because this uses SRR[01] */
SYM_FUNC_START_LOCAL(disable_machine_check)
mflr r0
bcl 20,31,$+4
0: mflr r3
addi r3,r3,(1f - 0b)
mtspr SPRN_SRR0,r3
mfmsr r3
li r4,MSR_ME
andc r3,r3,r4
mtspr SPRN_SRR1,r3
RFI_TO_KERNEL
1: mtlr r0
blr
SYM_FUNC_END(disable_machine_check)