Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/powerpc/kernel/exceptions-64e.S
10817 views
1
/*
2
* Boot code and exception vectors for Book3E processors
3
*
4
* Copyright (C) 2007 Ben. Herrenschmidt ([email protected]), IBM Corp.
5
*
6
* This program is free software; you can redistribute it and/or
7
* modify it under the terms of the GNU General Public License
8
* as published by the Free Software Foundation; either version
9
* 2 of the License, or (at your option) any later version.
10
*/
11
12
#include <linux/threads.h>
13
#include <asm/reg.h>
14
#include <asm/page.h>
15
#include <asm/ppc_asm.h>
16
#include <asm/asm-offsets.h>
17
#include <asm/cputable.h>
18
#include <asm/setup.h>
19
#include <asm/thread_info.h>
20
#include <asm/reg_a2.h>
21
#include <asm/exception-64e.h>
22
#include <asm/bug.h>
23
#include <asm/irqflags.h>
24
#include <asm/ptrace.h>
25
#include <asm/ppc-opcode.h>
26
#include <asm/mmu.h>
27
28
/* XXX This will ultimately add space for a special exception save
 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
 * when taking special interrupts. For now we don't support that,
 * special interrupts from within a non-standard level will probably
 * blow you up
 */
#define SPECIAL_EXC_FRAME_SIZE	INT_FRAME_SIZE

/* Exception prolog code for all exceptions.
 *
 * n        = exception/trap number, used only to name the per-vector
 *            bad-stack trampoline (exc_<n>_bad_stack)
 * type     = GEN, CRIT, DBG or MC; token-pasted to select the PACA
 *            save area (PACA_EX##type), the save/restore SRR pair
 *            (SPRN_##type##_SRR0/1) and the stack setup macro
 *            (type##_SET_KSTACK)
 * addition = extra per-exception save code, one of the
 *            PROLOG_ADDITION_* variants below
 *
 * Entry state: r13 = interrupted context's r13. It is parked in the
 * type-specific scratch SPRG and replaced by the PACA pointer; r10,
 * r11, old r1 and CR are stashed in the PACA exception save area.
 * Exit state: r1 = exception stack frame, r10 = saved SRR0,
 * r11 = saved SRR1, cr0 = (SRR1 & MSR_PR) for "came from user" tests.
 */
#define EXCEPTION_PROLOG(n, type, addition)				    \
	mtspr	SPRN_SPRG_##type##_SCRATCH,r13;	/* get spare registers */   \
	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */			    \
	std	r10,PACA_EX##type+EX_R10(r13);				    \
	std	r11,PACA_EX##type+EX_R11(r13);				    \
	mfcr	r10;			/* save CR */			    \
	addition;			/* additional code for that exc. */ \
	std	r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */  \
	stw	r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
	mfspr	r11,SPRN_##type##_SRR1;/* what are we coming from */	    \
	type##_SET_KSTACK;		/* get special stack if necessary */\
	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
	beq	1f;			/* branch around if supervisor */   \
	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
1:	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */

/* Exception type-specific macros */

/* GEN: normal exceptions run on the current kernel stack and use
 * the architected SRR0/SRR1 pair.
 */
#define	GEN_SET_KSTACK							    \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */
#define	SPRN_GEN_SRR0	SPRN_SRR0
#define	SPRN_GEN_SRR1	SPRN_SRR1

/* CRIT: critical exceptions get a dedicated per-CPU stack from the
 * PACA and use the CSRR0/CSRR1 pair.
 */
#define CRIT_SET_KSTACK						            \
	ld	r1,PACA_CRIT_STACK(r13);				    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE;
#define	SPRN_CRIT_SRR0	SPRN_CSRR0
#define	SPRN_CRIT_SRR1	SPRN_CSRR1

/* DBG: debug exceptions get a dedicated per-CPU stack and use the
 * DSRR0/DSRR1 pair.
 */
#define DBG_SET_KSTACK						            \
	ld	r1,PACA_DBG_STACK(r13);					    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE;
#define	SPRN_DBG_SRR0	SPRN_DSRR0
#define	SPRN_DBG_SRR1	SPRN_DSRR1

/* MC: machine checks get a dedicated per-CPU stack and use the
 * MCSRR0/MCSRR1 pair.
 */
#define MC_SET_KSTACK						            \
	ld	r1,PACA_MC_STACK(r13);					    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE;
#define	SPRN_MC_SRR0	SPRN_MCSRR0
#define	SPRN_MC_SRR1	SPRN_MCSRR1

/* Convenience wrappers: pick the exception level and append the
 * matching suffix to the "addition" macro name.
 */
#define NORMAL_EXCEPTION_PROLOG(n, addition)				    \
	EXCEPTION_PROLOG(n, GEN, addition##_GEN)

#define CRIT_EXCEPTION_PROLOG(n, addition)				    \
	EXCEPTION_PROLOG(n, CRIT, addition##_CRIT)

#define DBG_EXCEPTION_PROLOG(n, addition)				    \
	EXCEPTION_PROLOG(n, DBG, addition##_DBG)

#define MC_EXCEPTION_PROLOG(n, addition)				    \
	EXCEPTION_PROLOG(n, MC, addition##_MC)


/* Variants of the "addition" argument for the prolog
 */

/* No extra work for this exception level */
#define PROLOG_ADDITION_NONE_GEN
#define PROLOG_ADDITION_NONE_CRIT
#define PROLOG_ADDITION_NONE_DBG
#define PROLOG_ADDITION_NONE_MC

/* Maskable interrupts: bail out to masked_interrupt_book3e when the
 * kernel has interrupts soft-disabled (PACASOFTIRQEN == 0).
 */
#define PROLOG_ADDITION_MASKABLE_GEN					    \
	lbz	r11,PACASOFTIRQEN(r13);	/* are irqs soft-disabled ? */	    \
	cmpwi	cr0,r11,0;		/* yes -> go out of line */	    \
	beq	masked_interrupt_book3e;

/* Handlers that need r14/r15 as scratch stash them in the PACA;
 * they are restored by the handler before calling into C.
 */
#define PROLOG_ADDITION_2REGS_GEN					    \
	std	r14,PACA_EXGEN+EX_R14(r13);				    \
	std	r15,PACA_EXGEN+EX_R15(r13)

#define PROLOG_ADDITION_1REG_GEN					    \
	std	r14,PACA_EXGEN+EX_R14(r13);

#define PROLOG_ADDITION_2REGS_CRIT					    \
	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
	std	r15,PACA_EXCRIT+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_DBG					    \
	std	r14,PACA_EXDBG+EX_R14(r13);				    \
	std	r15,PACA_EXDBG+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_MC					    \
	std	r14,PACA_EXMC+EX_R14(r13);				    \
	std	r15,PACA_EXMC+EX_R15(r13)
122
123
/* Core exception code for all exceptions except TLB misses.
 * XXX: Needs to make SPRN_SPRG_GEN depend on exception type
 *
 * Runs after EXCEPTION_PROLOG with r1 = exception frame, r10 = SRR0,
 * r11 = SRR1. Builds a full pt_regs frame: saves volatile GPRs,
 * recovers r10/r11/r1/CR (and r13 from the scratch SPRG) out of the
 * PACA save area "excf", records LR/CTR/XER/softe/trap number, marks
 * the frame with exception_marker, then runs the "ints" policy macro.
 * Trap is stored as (n)+1 to indicate only partial (volatile) regs
 * are saved; .save_nvgprs clears that bit later if needed.
 */
#define EXCEPTION_COMMON(n, excf, ints)					    \
	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
	std	r10,_NIP(r1);		/* save SRR0 to stackframe */	    \
	std	r11,_MSR(r1);		/* save SRR1 to stackframe */	    \
	ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */	    \
	ld	r3,excf+EX_R10(r13);	/* get back r10 */		    \
	ld	r4,excf+EX_R11(r13);	/* get back r11 */		    \
	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 */		    \
	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	    \
	mflr	r6;			/* save LR in stackframe */	    \
	mfctr	r7;			/* save CTR in stackframe */	    \
	mfspr	r8,SPRN_XER;		/* save XER in stackframe */	    \
	ld	r9,excf+EX_R1(r13);	/* load orig r1 back from PACA */   \
	lwz	r10,excf+EX_CR(r13);	/* load orig CR back from PACA */   \
	lbz	r11,PACASOFTIRQEN(r13);	/* get current IRQ softe */	    \
	ld	r12,exception_marker@toc(r2);				    \
	li	r0,0;							    \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	std	r6,_LINK(r1);						    \
	std	r7,_CTR(r1);						    \
	std	r8,_XER(r1);						    \
	li	r3,(n)+1;		/* indicate partial regs in trap */ \
	std	r9,0(r1);		/* store stack frame back link */   \
	std	r10,_CCR(r1);		/* store orig CR in stackframe */   \
	std	r9,GPR1(r1);		/* store stack frame back link */   \
	std	r11,SOFTE(r1);		/* and save it to stackframe */	    \
	std	r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	    \
	std	r3,_TRAP(r1);		/* set trap number */		    \
	std	r0,RESULT(r1);		/* clear regs->result */	    \
	ints;

/* Variants for the "ints" argument: interrupt-state policy applied at
 * the end of EXCEPTION_COMMON. r0 is 0 at that point, which the
 * INTS_DISABLE_* variants rely on for the stb stores.
 */
#define INTS_KEEP
#define	INTS_DISABLE_SOFT						    \
	stb	r0,PACASOFTIRQEN(r13);	/* mark interrupts soft-disabled */ \
	TRACE_DISABLE_INTS;
#define	INTS_DISABLE_HARD						    \
	stb	r0,PACAHARDIRQEN(r13); /* and hard disabled */
#define	INTS_DISABLE_ALL						    \
	INTS_DISABLE_SOFT						    \
	INTS_DISABLE_HARD

/* This is called by exceptions that used INTS_KEEP (that is did not
 * clear either the soft or hard IRQ indicators in the PACA). This will
 * restore MSR:EE to its previous value
 *
 * XXX In the long run, we may want to open-code it in order to separate the
 * load from the wrtee, thus limiting the latency caused by the dependency
 * but at this point, I'll favor code clarity until we have a near to final
 * implementation
 */
#define INTS_RESTORE_HARD						    \
	ld	r11,_MSR(r1);						    \
	wrtee	r11;
187
188
/* XXX FIXME: Restore r14/r15 when necessary */

/* Out-of-line landing pad for a bad kernel stack pointer detected in
 * EXCEPTION_PROLOG: record the trap number in the PACA and fall into
 * the common bad_stack_book3e handler.
 */
#define BAD_STACK_TRAMPOLINE(n)						    \
exc_##n##_bad_stack:							    \
	li	r1,(n);			/* get exception number */	    \
	sth	r1,PACA_TRAP_SAVE(r13);	/* store trap */		    \
	b	bad_stack_book3e;	/* bad stack error */

/* WARNING: If you change the layout of this stub, make sure you check
 * the debug exception handler which handles single stepping
 * into exceptions from userspace, and the MM code in
 * arch/powerpc/mm/tlb_nohash.c which patches the branch here
 * and would need to be updated if that branch is moved
 */
#define	EXCEPTION_STUB(loc, label)					    \
	. = interrupt_base_book3e + loc;				    \
	nop;	/* To make debug interrupts happy */			    \
	b	exc_##label##_book3e;

/* Interrupt acknowledge variants: clear the timer status bit that
 * raised the interrupt (written to TSR) or do nothing. "r" is a
 * caller-provided scratch register.
 */
#define ACK_NONE(r)
#define ACK_DEC(r)							    \
	lis	r,TSR_DIS@h;						    \
	mtspr	SPRN_TSR,r
#define ACK_FIT(r)							    \
	lis	r,TSR_FIS@h;						    \
	mtspr	SPRN_TSR,r

/* Used by asynchronous interrupt that may happen in the idle loop.
 *
 * This check if the thread was in the idle loop, and if yes, returns
 * to the caller rather than the PC. This is to avoid a race if
 * interrupts happen before the wait instruction.
 *
 * r1 = exception frame; thread_info is found by masking r1 down to
 * the THREAD_SHIFT boundary. If _TLF_NAPPING is set, the saved NIP is
 * replaced with the saved LR and the flag is cleared.
 */
#define CHECK_NAPPING()							    \
	clrrdi	r11,r1,THREAD_SHIFT;					    \
	ld	r10,TI_LOCAL_FLAGS(r11);				    \
	andi.	r9,r10,_TLF_NAPPING;					    \
	beq+	1f;							    \
	ld	r8,_LINK(r1);						    \
	rlwinm	r7,r10,0,~_TLF_NAPPING;					    \
	std	r8,_NIP(r1);						    \
	std	r7,TI_LOCAL_FLAGS(r11);					    \
1:


/* Full body for a normal-level maskable interrupt: prolog with the
 * soft-disable check, full frame, disable interrupts, ack the source,
 * handle the napping race, then call the C handler "hdlr".
 */
#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack)			    \
	START_EXCEPTION(label);						    \
	NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE)	    \
	EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE_ALL)		    \
	ack(r8);							    \
	CHECK_NAPPING();						    \
	addi	r3,r1,STACK_FRAME_OVERHEAD;				    \
	bl	hdlr;							    \
	b	.ret_from_except_lite;
241
242
/* This value is used to mark exception frames on the stack. */
	.section	".toc","aw"
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER


/*
 * And here we have the exception vectors !
 *
 * Each stub is 0x20 bytes; the offset passed to EXCEPTION_STUB is the
 * stub's location relative to interrupt_base_book3e (which is what the
 * hardware IVORs are programmed with), while the hex comment on the
 * right is the classic trap number used for the frame.
 */

	.text
	.balign	0x1000
	.globl interrupt_base_book3e
interrupt_base_book3e:					/* fake trap */
	EXCEPTION_STUB(0x000, machine_check)		/* 0x0200 */
	EXCEPTION_STUB(0x020, critical_input)		/* 0x0580 */
	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
	EXCEPTION_STUB(0x0a0, external_input)		/* 0x0500 */
	EXCEPTION_STUB(0x0c0, alignment)		/* 0x0600 */
	EXCEPTION_STUB(0x0e0, program)			/* 0x0700 */
	EXCEPTION_STUB(0x100, fp_unavailable)		/* 0x0800 */
	EXCEPTION_STUB(0x120, system_call)		/* 0x0c00 */
	EXCEPTION_STUB(0x140, ap_unavailable)		/* 0x0f20 */
	EXCEPTION_STUB(0x160, decrementer)		/* 0x0900 */
	EXCEPTION_STUB(0x180, fixed_interval)		/* 0x0980 */
	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
	EXCEPTION_STUB(0x1c0, data_tlb_miss)
	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
	EXCEPTION_STUB(0x260, perfmon)
	EXCEPTION_STUB(0x280, doorbell)
	EXCEPTION_STUB(0x2a0, doorbell_crit)
	EXCEPTION_STUB(0x2c0, guest_doorbell)
	EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
	EXCEPTION_STUB(0x300, hypercall)
	EXCEPTION_STUB(0x320, ehpriv)

	.globl interrupt_end_book3e
interrupt_end_book3e:
282
283
/* Critical Input Interrupt
 * NOTE(review): the real handler body is commented out (`//` lines are
 * disabled placeholder code); for now we simply hang with `b .`.
 */
	START_EXCEPTION(critical_input);
	CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE)
//	EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE_ALL)
//	bl	special_reg_save_crit
//	CHECK_NAPPING();
//	addi	r3,r1,STACK_FRAME_OVERHEAD
//	bl	.critical_exception
//	b	ret_from_crit_except
	b	.

/* Machine Check Interrupt
 * NOTE(review): handler body not implemented yet, hangs after prolog.
 */
	START_EXCEPTION(machine_check);
	CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE)
//	EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE_ALL)
//	bl	special_reg_save_mc
//	addi	r3,r1,STACK_FRAME_OVERHEAD
//	CHECK_NAPPING();
//	bl	.machine_check_exception
//	b	ret_from_mc_except
	b	.

/* Data Storage Interrupt
 * Stashes fault address (DEAR) in r14 and error syndrome (ESR) in r15
 * for storage_fault_common; interrupts are kept as they were.
 */
	START_EXCEPTION(data_storage)
	NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_KEEP)
	b	storage_fault_common

/* Instruction Storage Interrupt
 * Fault address is the interrupted PC (r10 = SRR0 from the prolog);
 * no error code, so r15 = 0.
 */
	START_EXCEPTION(instruction_storage);
	NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS)
	li	r15,0
	mr	r14,r10
	EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_KEEP)
	b	storage_fault_common

/* External Input Interrupt */
	MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE)

/* Alignment */
	START_EXCEPTION(alignment);
	NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP)
	b	alignment_more	/* no room, go out of line */

/* Program Interrupt
 * ESR is passed to C code via regs->dsisr; r14 is restored from the
 * PACA before calling out.
 */
	START_EXCEPTION(program);
	NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG)
	mfspr	r14,SPRN_ESR
	EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE_SOFT)
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)
	bl	.save_nvgprs
	INTS_RESTORE_HARD
	bl	.program_check_exception
	b	.ret_from_except

/* Floating Point Unavailable Interrupt
 * From user: just load the FP state up and return fast. From kernel:
 * FP use is a bug, report it (BUG_OPCODE traps if the call returns).
 */
	START_EXCEPTION(fp_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
	bne	1f			/* if from user, just load it up */
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	INTS_RESTORE_HARD
	bl	.kernel_fp_unavailable_exception
	BUG_OPCODE
1:	ld	r12,_MSR(r1)
	bl	.load_up_fpu
	b	fast_exception_return

/* Decrementer Interrupt */
	MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC)

/* Fixed Interval Timer Interrupt */
	MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT)

/* Watchdog Timer Interrupt
 * NOTE(review): handler body not implemented yet, hangs after prolog.
 */
	START_EXCEPTION(watchdog);
	CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE)
//	EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE_ALL)
//	bl	special_reg_save_crit
//	CHECK_NAPPING();
//	addi	r3,r1,STACK_FRAME_OVERHEAD
//	bl	.unknown_exception
//	b	ret_from_crit_except
	b	.

/* System Call Interrupt
 * Minimal register shuffle to match what the shared 64-bit
 * system_call_common entry (entry_64.S) expects on arrival.
 */
	START_EXCEPTION(system_call)
	mr	r9,r13			/* keep a copy of userland r13 */
	mfspr	r11,SPRN_SRR0		/* get return address */
	mfspr	r12,SPRN_SRR1		/* get previous MSR */
	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
	b	system_call_common

/* Auxiliary Processor Unavailable Interrupt */
	START_EXCEPTION(ap_unavailable);
	NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.save_nvgprs
	INTS_RESTORE_HARD
	bl	.unknown_exception
	b	.ret_from_except
394
395
/* Debug exception as a critical interrupt */
	START_EXCEPTION(debug_crit);
	CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the CSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,DBSR_IC@h
	beq+	1f

	/* Did the single-step event hit inside the exception-entry stub
	 * window [interrupt_base_book3e, interrupt_end_book3e)?
	 * r10 = CSRR0 from the prolog.
	 */
	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
	LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,DBSR_IC@h		/* clear the IC event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the CSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_CSRR1,r11
	/* Undo the whole prolog: restore everything stashed in the
	 * PACA critical save area and return straight to the
	 * interrupted exception entry with rfci.
	 */
	lwz	r10,PACA_EXCRIT+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXCRIT+EX_R1(r13)
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXCRIT+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_CRIT_SCRATCH
	rfci

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite save properly an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception: move the parked r13 from the CRIT scratch
	 * SPRG to the GEN one so EXCEPTION_COMMON finds it there.
	 */
	mfspr	r15,SPRN_SPRG_CRIT_SCRATCH
	mtspr	SPRN_SPRG_GEN_SCRATCH,r15
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE_ALL)
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	bl	.save_nvgprs
	bl	.DebugException
	b	.ret_from_except

kernel_dbg_exc:
	b	.			/* NYI */
459
460
/* Debug exception as a debug interrupt — same logic as debug_crit
 * above, but using the DBG level (DSRR0/DSRR1, PACA_EXDBG, rfdi).
 */
	START_EXCEPTION(debug_debug);
	DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically). We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the DSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,DBSR_IC@h
	beq+	1f

	/* Hit inside the exception-entry stub window? r10 = DSRR0. */
	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
	LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,DBSR_IC@h		/* clear the IC event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the DSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_DSRR1,r11
	/* Undo the prolog and return to the interrupted entry code */
	lwz	r10,PACA_EXDBG+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXDBG+EX_R1(r13)
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXDBG+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXDBG+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_DBG_SCRATCH
	rfdi

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite save properly an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 */
	mfspr	r15,SPRN_SPRG_DBG_SCRATCH
	mtspr	SPRN_SPRG_GEN_SCRATCH,r15
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL)
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	bl	.save_nvgprs
	bl	.DebugException
	b	.ret_from_except
521
522
/* Performance Monitor Interrupt */
	MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE)

/* Doorbell interrupt */
	MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE)

/* Doorbell critical Interrupt
 * NOTE(review): handler body not implemented yet, hangs after prolog.
 */
	START_EXCEPTION(doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2080, PROLOG_ADDITION_NONE)
//	EXCEPTION_COMMON(0x2080, PACA_EXCRIT, INTS_DISABLE_ALL)
//	bl	special_reg_save_crit
//	CHECK_NAPPING();
//	addi	r3,r1,STACK_FRAME_OVERHEAD
//	bl	.doorbell_critical_exception
//	b	ret_from_crit_except
	b	.

/* Guest doorbells, hypercall and ehpriv: no real handlers yet, all
 * routed to .unknown_exception.
 */
	MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE)
	MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE)
	MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE)
	MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE)
542
543
544
/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 *
 * Reached from PROLOG_ADDITION_MASKABLE_GEN with r10 = saved CR,
 * r11 = 0 (the PACASOFTIRQEN value that got us here), and r10/r11/r13
 * still stashed in PACA_EXGEN / the GEN scratch SPRG.
 */
masked_interrupt_book3e:
	mtcr	r10
	stb	r11,PACAHARDIRQEN(r13)	/* r11 == 0: mark hard-disabled */
	mfspr	r10,SPRN_SRR1
	rldicl	r11,r10,48,1		/* clear MSR_EE */
	rotldi	r10,r11,16
	mtspr	SPRN_SRR1,r10
	ld	r10,PACA_EXGEN+EX_R10(r13);	/* restore registers */
	ld	r11,PACA_EXGEN+EX_R11(r13);
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH;
	rfi
	b	.			/* not reached; fence after rfi */
560
561
/*
 * This is called from 0x300 and 0x400 handlers after the prologs with
 * r14 and r15 containing the fault address and error code, with the
 * original values stashed away in the PACA
 */
storage_fault_common:
	std	r14,_DAR(r1)
	std	r15,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r14			/* arg2 = fault address */
	mr	r5,r15			/* arg3 = error code */
	ld	r14,PACA_EXGEN+EX_R14(r13)	/* restore clobbered NV regs */
	ld	r15,PACA_EXGEN+EX_R15(r13)
	INTS_RESTORE_HARD
	bl	.do_page_fault
	cmpdi	r3,0			/* 0 = fault handled */
	bne-	1f
	b	.ret_from_except_lite
1:	bl	.save_nvgprs		/* unhandled: report via bad_page_fault */
	mr	r5,r3			/* arg3 = do_page_fault return (sig) */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	.bad_page_fault
	b	.ret_from_except

/*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
 * continues here. On entry r14 = DEAR, r15 = ESR (see the alignment
 * handler above).
 */
alignment_more:
	std	r14,_DAR(r1)
	std	r15,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	bl	.save_nvgprs
	INTS_RESTORE_HARD
	bl	.alignment_exception
	b	.ret_from_except
600
601
/*
 * We branch here from entry_64.S for the last stage of the exception
 * return code path. MSR:EE is expected to be off at that point
 */
_GLOBAL(exception_return_book3e)
	b	1f

/* This is the return from load_up_fpu fast path which could do with
 * less GPR restores in fact, but for now we have a single return path
 *
 * Restores the full pt_regs frame at r1 and returns to the context
 * described by _NIP/_MSR via rfi. The final r1/r13 juggling goes
 * through the PACA_EXGEN save area and the GEN scratch SPRG because
 * the frame becomes unusable once r1 is reloaded.
 */
	.globl fast_exception_return
fast_exception_return:
	wrteei	0			/* make sure EE is off */
1:	mr	r0,r13			/* keep PACA pointer in r0 for later */
	ld	r10,_MSR(r1)
	REST_4GPRS(2, r1)
	andi.	r6,r10,MSR_PR		/* returning to userspace? (sets cr0) */
	REST_2GPRS(6, r1)
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r10, r11)
	ld	r0,GPR13(r1)		/* user return: restore real r13 */

1:	stdcx.	r0,0,r1			/* to clear the reservation */

	ld	r8,_CCR(r1)
	ld	r9,_LINK(r1)
	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtcr	r8
	mtlr	r9
	mtctr	r10
	mtxer	r11
	REST_2GPRS(8, r1)
	ld	r10,GPR10(r1)
	ld	r11,GPR11(r1)
	ld	r12,GPR12(r1)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r0	/* park outgoing r13 value */

	/* r13 is still the PACA here; stage r10/r11 through its save
	 * area so they survive the r1 switch below.
	 */
	std	r10,PACA_EXGEN+EX_R10(r13);
	std	r11,PACA_EXGEN+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	ld	r0,GPR0(r1)
	ld	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi
651
652
/*
 * Trampolines used when spotting a bad kernel stack pointer in
 * the exception entry code.
 *
 * TODO: move some bits like SRR0 read to trampoline, pass PACA
 * index around, etc... to handle crit & mcheck
 *
 * One instantiation per trap number referenced by an
 * EXCEPTION_PROLOG(n, ...) above — the prolog branches to
 * exc_<n>_bad_stack by name.
 */
	BAD_STACK_TRAMPOLINE(0x000)
	BAD_STACK_TRAMPOLINE(0x100)
	BAD_STACK_TRAMPOLINE(0x200)
	BAD_STACK_TRAMPOLINE(0x260)
	BAD_STACK_TRAMPOLINE(0x2c0)
	BAD_STACK_TRAMPOLINE(0x2e0)
	BAD_STACK_TRAMPOLINE(0x300)
	BAD_STACK_TRAMPOLINE(0x310)
	BAD_STACK_TRAMPOLINE(0x320)
	BAD_STACK_TRAMPOLINE(0x400)
	BAD_STACK_TRAMPOLINE(0x500)
	BAD_STACK_TRAMPOLINE(0x600)
	BAD_STACK_TRAMPOLINE(0x700)
	BAD_STACK_TRAMPOLINE(0x800)
	BAD_STACK_TRAMPOLINE(0x900)
	BAD_STACK_TRAMPOLINE(0x980)
	BAD_STACK_TRAMPOLINE(0x9f0)
	BAD_STACK_TRAMPOLINE(0xa00)
	BAD_STACK_TRAMPOLINE(0xb00)
	BAD_STACK_TRAMPOLINE(0xc00)
	BAD_STACK_TRAMPOLINE(0xd00)
	BAD_STACK_TRAMPOLINE(0xe00)
	BAD_STACK_TRAMPOLINE(0xf00)
	BAD_STACK_TRAMPOLINE(0xf20)
	BAD_STACK_TRAMPOLINE(0x2070)
	BAD_STACK_TRAMPOLINE(0x2080)
685
686
/* Common handler for a corrupt kernel stack pointer detected at
 * exception entry: switch to the per-CPU emergency stack, build a
 * best-effort pt_regs frame from the PACA save area, and call
 * kernel_bad_stack() (which does not return).
 */
	.globl	bad_stack_book3e
bad_stack_book3e:
	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
	mfspr	r10,SPRN_SRR0;		/* read SRR0 before touching stack */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r10,_NIP(r1)
	std	r11,_MSR(r1)
	ld	r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
	lwz	r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
	std	r10,GPR1(r1)
	std	r11,_CCR(r1)
	mfspr	r10,SPRN_DEAR
	mfspr	r11,SPRN_ESR
	std	r10,_DAR(r1)
	std	r11,_DSISR(r1)
	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
	ld	r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */		    \
	ld	r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */		    \
	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_10GPRS(14,r1)		/* full NV regs: r14-r23 */
	SAVE_8GPRS(24,r1)		/* and r24-r31 */
	lhz	r12,PACA_TRAP_SAVE(r13)	/* trap number saved by trampoline */
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)		/* back-chain to a terminator frame */
	li	r12,0
	std	r12,0(r11)		/* NULL-terminate the chain */
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.kernel_bad_stack
	b	1b			/* should not return; loop if it does */
732
733
/*
 * Setup the initial TLB for a core. This current implementation
 * assume that whatever we are running off will not conflict with
 * the new mapping at PAGE_OFFSET.
 *
 * Probes TLB arrays 0-3 for one with IPROT support, then either takes
 * the hardware-entry-select (HES) fast path (have_hes) or performs the
 * classic dance: pin the current entry, invalidate everything else,
 * bounce through a temporary mapping in the other address space, and
 * finally install a 1GB IPROT kernel mapping at PAGE_OFFSET.
 */
_GLOBAL(initial_tlb_book3e)

	/* Look for the first TLB with IPROT set */
	mfspr	r4,SPRN_TLB0CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(0)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB1CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(1)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB2CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(2)@h
	bne	found_iprot

	lis	r3,MAS0_TLBSEL(3)@h
	mfspr	r4,SPRN_TLB3CFG
	/* fall through */

found_iprot:
	andi.	r5,r4,TLBnCFG_HES	/* hardware entry select supported? */
	bne	have_hes

	mflr	r8			/* save LR */
/* 1. Find the index of the entry we're executing in
 *
 * r3 = MAS0_TLBSEL (for the iprot array)
 * r4 = SPRN_TLBnCFG
 */
	bl	invstr			/* Find our address */
invstr:	mflr	r6			/* Make it accessible */
	mfmsr	r7
	rlwinm	r5,r7,27,31,31		/* extract MSR[IS] */
	mfspr	r7,SPRN_PID
	slwi	r7,r7,16
	or	r7,r7,r5
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6			/* search MSR[IS], SPID=PID */

	mfspr	r3,SPRN_MAS0
	rlwinm	r5,r3,16,20,31		/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1		/* Insure IPROT set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r4 = SPRN_TLBnCFG
 * r5 = ESEL of entry we are running in
 */
	andi.	r4,r4,TLBnCFG_N_ENTRY	/* Extract # entries */
	li	r6,0			/* Set Entry counter to 0 */
1:	mr	r7,r3			/* Set MAS0(TLBSEL) */
	rlwimi	r7,r6,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31		/* Clear MAS1 Valid and IPROT */
	cmpw	r5,r6
	beq	skpinv			/* Dont update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1			/* Increment */
	cmpw	r6,r4			/* Are we done? */
	bne	1b			/* If not, repeat */

	/* Invalidate all TLBs */
	PPC_TLBILX_ALL(0,0)
	sync
	isync

/* 3. Setup a temp mapping and jump to it
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r5 = ESEL of entry we are running in
 */
	andi.	r7,r5,0x1	/* Find an entry not used and is non-zero */
	addi	r7,r7,0x1
	mr	r4,r3		/* Set MAS0(TLBSEL) = 1 */
	mtspr	SPRN_MAS0,r4
	tlbre

	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
	mtspr	SPRN_MAS0,r4

	mfspr	r7,SPRN_MAS1
	xori	r6,r7,MAS1_TS	/* Setup TMP mapping in the other Address space */
	mtspr	SPRN_MAS1,r6

	tlbwe

	/* Switch address spaces through an rfi into the temp mapping */
	mfmsr	r6
	xori	r6,r6,MSR_IS
	mtspr	SPRN_SRR1,r6
	bl	1f		/* Find our address */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	mtspr	SPRN_SRR0,r6
	rfi
2:

/* 4. Clear out PIDs & Search info
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	li	r6,0
	mtspr	SPRN_MAS6,r6
	mtspr	SPRN_PID,r6

/* 5. Invalidate mapping we started in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	mtspr	SPRN_MAS0,r3
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r6
	tlbwe

	/* Invalidate TLB1 */
	PPC_TLBILX_ALL(0,0)
	sync
	isync

/* The mapping only needs to be cache-coherent on SMP */
#ifdef CONFIG_SMP
#define M_IF_SMP	MAS2_M
#else
#define M_IF_SMP	0
#endif

/* 6. Setup KERNELBASE mapping in TLB[0]
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	rlwinm	r3,r3,0,16,3	/* clear ESEL */
	mtspr	SPRN_MAS0,r3
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r6

	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_SMP)
	mtspr	SPRN_MAS2,r6

	rlwinm	r5,r5,0,0,25	/* keep RPN, clear low permission bits */
	ori	r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS3,r5
	li	r5,-1
	rlwinm	r5,r5,0,0,25

	tlbwe

/* 7. Jump to KERNELBASE mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 */
	/* Now we branch the new virtual address mapped by this entry */
	LOAD_REG_IMMEDIATE(r6,2f)
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi			/* start execution out of TLB1[0] entry */
2:

/* 8. Clear out the temp mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 */
	mtspr	SPRN_MAS0,r4
	tlbre
	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,2,0	/* clear IPROT */
	mtspr	SPRN_MAS1,r5
	tlbwe

	/* Invalidate TLB1 */
	PPC_TLBILX_ALL(0,0)
	sync
	isync

	/* We translate LR and return */
	tovirt(r8,r8)
	mtlr	r8
	blr
937
938
have_hes:
	/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
	 * kernel linear mapping. We also set MAS8 once for all here though
	 * that will have to be made dependent on whether we are running under
	 * a hypervisor I suppose.
	 */

	/* BEWARE, MAGIC
	 * This code is called as an ordinary function on the boot CPU. But to
	 * avoid duplication, this code is also used in SCOM bringup of
	 * secondary CPUs. We read the code between the initial_tlb_code_start
	 * and initial_tlb_code_end labels one instruction at a time and RAM it
	 * into the new core via SCOM. That doesn't process branches, so there
	 * must be none between those two labels. It also means if this code
	 * ever takes any parameters, the SCOM code must also be updated to
	 * provide them.
	 */
	.globl a2_tlbinit_code_start
a2_tlbinit_code_start:

	ori	r11,r3,MAS0_WQ_ALLWAYS
	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
	mtspr	SPRN_MAS0,r11
	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
	ori	r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
	mtspr	SPRN_MAS1,r3
	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
	mtspr	SPRN_MAS2,r3
	li	r3,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS7_MAS3,r3
	li	r3,0
	mtspr	SPRN_MAS8,r3

	/* Write the TLB entry */
	tlbwe

	.globl a2_tlbinit_after_linear_map
a2_tlbinit_after_linear_map:

	/* Now we branch the new virtual address mapped by this entry */
	LOAD_REG_IMMEDIATE(r3,1f)
	mtctr	r3
	bctr

1:	/* We are now running at PAGE_OFFSET, clean the TLB of everything
	 * else (including IPROTed things left by firmware)
	 * r4 = TLBnCFG
	 * r3 = current address (more or less)
	 */

	li	r5,0
	mtspr	SPRN_MAS6,r5
	tlbsx	0,r3		/* locate our own entry so we skip it below */

	rlwinm	r9,r4,0,TLBnCFG_N_ENTRY	/* r9 = total # entries */
	rlwinm	r10,r4,8,0xff		/* # of ways field */
	addi	r10,r10,-1	/* Get inner loop mask */

	li	r3,1			/* entry-offset counter, starts at 1
					 * so our own entry (offset 0) is never
					 * rewritten */

	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,(~(MAS1_VALID|MAS1_IPROT)) /* template: invalid entry */

	mfspr	r6,SPRN_MAS2
	rldicr	r6,r6,0,51	/* Extract EPN */

	mfspr	r7,SPRN_MAS0
	rlwinm	r7,r7,0,0xffff0fff	/* Clear HES and WQ */

	rlwinm	r8,r7,16,0xfff	/* Extract ESEL */

	/* Walk every entry except our own, writing it invalid */
2:	add	r4,r3,r8
	and	r4,r4,r10

	rlwimi	r7,r4,16,MAS0_ESEL_MASK

	mtspr	SPRN_MAS0,r7
	mtspr	SPRN_MAS1,r5
	mtspr	SPRN_MAS2,r6
	tlbwe

	addi	r3,r3,1
	and.	r4,r3,r10

	bne	3f
	addis	r6,r6,(1<<30)@h	/* wrapped a way: bump EPN to next set */
3:
	cmpw	r3,r9		/* all entries visited? */
	blt	2b

	.globl a2_tlbinit_after_iprot_flush
a2_tlbinit_after_iprot_flush:

#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
	/* Now establish early debug mappings if applicable */
	/* Restore the MAS0 we used for linear mapping load */
	mtspr	SPRN_MAS0,r11

	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
	ori	r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
	mtspr	SPRN_MAS1,r3
	LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
	mtspr	SPRN_MAS2,r3
	LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
	mtspr	SPRN_MAS7_MAS3,r3
	/* re-use the MAS8 value from the linear mapping */
	tlbwe
#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */

	PPC_TLBILX(0,0,0)
	sync
	isync

	.globl a2_tlbinit_code_end
a2_tlbinit_code_end:

	/* We translate LR and return */
	mflr	r3
	tovirt(r3,r3)
	mtlr	r3
	blr
1059
1060
/*
 * Main entry (boot CPU, thread 0)
 *
 * We enter here from head_64.S, possibly after the prom_init trampoline
 * with r3 and r4 already saved to r31 and 30 respectively and in 64 bits
 * mode. Anything else is as it was left by the bootloader
 *
 * Initial requirements of this port:
 *
 * - Kernel loaded at 0 physical
 * - A good lump of memory mapped 0:0 by UTLB entry 0
 * - MSR:IS & MSR:DS set to 0
 *
 * Note that some of the above requirements will be relaxed in the future
 * as the kernel becomes smarter at dealing with different initial conditions
 * but for now you have to be careful
 */
_GLOBAL(start_initialization_book3e)
	mflr	r28			/* save return address, bl below clobbers LR */

	/* First, we need to setup some initial TLBs to map the kernel
	 * text, data and bss at PAGE_OFFSET. We don't have a real mode
	 * and always use AS 0, so we just set it up to match our link
	 * address and never use 0 based addresses.
	 */
	bl	.initial_tlb_book3e

	/* Init global core bits */
	bl	.init_core_book3e

	/* Init per-thread bits */
	bl	.init_thread_book3e

	/* Return to common init code: translate the saved LR in case the
	 * caller's address was still a 1:1 physical one.
	 */
	tovirt(r28,r28)
	mtlr	r28
	blr
/*
 * Secondary core/processor entry
 *
 * This is entered for thread 0 of a secondary core, all other threads
 * are expected to be stopped. It's similar to start_initialization_book3e
 * except that it's generally entered from the holding loop in head_64.S
 * after CPUs have been gathered by Open Firmware.
 *
 * We assume we are in 32 bits mode running with whatever TLB entry was
 * set for us by the firmware or POR engine.
 */
_GLOBAL(book3e_secondary_core_init_tlb_set)
	li	r4,1			/* r4 != 0: initial TLB entry already set */
	b	.generic_secondary_smp_init
/* Secondary core init.  On entry r4 != 0 means the initial TLB entry is
 * already set up and we can skip straight to core init.
 */
_GLOBAL(book3e_secondary_core_init)
	mflr	r28			/* save return address across bl's */

	/* Do we need to setup initial TLB entry ? */
	cmplwi	r4,0
	bne	2f

	/* Setup TLB for this core */
	bl	.initial_tlb_book3e

	/* We can return from the above running at a different
	 * address, so recalculate r2 (TOC)
	 */
	bl	.relative_toc

	/* Init global core bits */
2:	bl	.init_core_book3e

	/* Init per-thread bits */
3:	bl	.init_thread_book3e

	/* Return to common init code at proper virtual address.
	 *
	 * Due to various previous assumptions, we know we entered this
	 * function at either the final PAGE_OFFSET mapping or using a
	 * 1:1 mapping at 0, so we don't bother doing a complicated check
	 * here, we just ensure the return address has the right top bits.
	 *
	 * Note that if we ever want to be smarter about where we can be
	 * started from, we have to be careful that by the time we reach
	 * the code below we may already be running at a different location
	 * than the one we were called from since initial_tlb_book3e can
	 * have moved us already.
	 */
	cmpdi	cr0,r28,0		/* top bits already set (negative) ? */
	blt	1f
	lis	r3,PAGE_OFFSET@highest
	sldi	r3,r3,32
	or	r28,r28,r3		/* OR in the PAGE_OFFSET top bits */
1:	mtlr	r28
	blr
/* Secondary thread init: core is already up, only per-thread state and
 * the virtual-address return fixup are needed.
 */
_GLOBAL(book3e_secondary_thread_init)
	mflr	r28
	b	3b			/* join book3e_secondary_core_init at per-thread init */
/* Per-core init: point IVPR at our exception vectors. Clobbers r3. */
_STATIC(init_core_book3e)
	/* Establish the interrupt vector base */
	LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
	mtspr	SPRN_IVPR,r3
	sync				/* ensure IVPR update is visible */
	blr
/* Per-thread init: 64-bit interrupt mode, interrupts off, timers quiesced.
 * Clobbers r3.
 */
_STATIC(init_thread_book3e)
	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
	mtspr	SPRN_EPCR,r3		/* take interrupts in 64-bit mode */

	/* Make sure interrupts are off */
	wrteei	0

	/* disable all timers and clear out status */
	li	r3,0
	mtspr	SPRN_TCR,r3
	mfspr	r3,SPRN_TSR
	mtspr	SPRN_TSR,r3		/* write back to clear pending status bits */

	blr
/* Program the base IVOR registers with the offsets of our exception
 * vectors relative to IVPR (set in init_core_book3e).
 */
_GLOBAL(__setup_base_ivors)
	SET_IVOR(0, 0x020) /* Critical Input */
	SET_IVOR(1, 0x000) /* Machine Check */
	SET_IVOR(2, 0x060) /* Data Storage */
	SET_IVOR(3, 0x080) /* Instruction Storage */
	SET_IVOR(4, 0x0a0) /* External Input */
	SET_IVOR(5, 0x0c0) /* Alignment */
	SET_IVOR(6, 0x0e0) /* Program */
	SET_IVOR(7, 0x100) /* FP Unavailable */
	SET_IVOR(8, 0x120) /* System Call */
	SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
	SET_IVOR(10, 0x160) /* Decrementer */
	SET_IVOR(11, 0x180) /* Fixed Interval Timer */
	SET_IVOR(12, 0x1a0) /* Watchdog Timer */
	SET_IVOR(13, 0x1c0) /* Data TLB Error */
	SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
	SET_IVOR(15, 0x040) /* Debug */

	sync				/* ensure IVOR updates are visible */

	blr
/* Optional IVOR: performance monitor interrupt vector */
_GLOBAL(setup_perfmon_ivor)
	SET_IVOR(35, 0x260) /* Performance Monitor */
	blr
/* Optional IVORs: doorbell vectors; guest doorbells only when the
 * hypervisor category (E.HV) is present. Clobbers r10.
 */
_GLOBAL(setup_doorbell_ivors)
	SET_IVOR(36, 0x280) /* Processor Doorbell */
	SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */

	/* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beqlr				/* no E.HV: skip guest doorbells */

	SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
	SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
	blr
/* Optional IVORs: embedded-hypervisor vectors, only if E.HV is present.
 * Clobbers r10.
 */
_GLOBAL(setup_ehv_ivors)
	/*
	 * We may be running as a guest and lack E.HV even on a chip
	 * that normally has it.
	 */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beqlr				/* MMUCFG[LPIDSIZE] == 0: no E.HV */

	SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
	SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
	blr