GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/powerpc/kernel/head_32.S
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas ([email protected])
 *
 *  Rewritten by Cort Dougan ([email protected]) for PReP
 *    Copyright (C) 1996 Cort Dougan <[email protected]>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek ([email protected]).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>

/* 601 only has IBATs; cr0.eq is set on 601 when using this macro */
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	beq	1f;			\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;	\
1:
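
/*
 * Illustrative note (not in the original source): each BATS[] entry
 * read above is assumed to hold four words per BAT pair, in the order
 * IBATnU, IBATnL, DBATnU, DBATnL (hence the n*16 stride). On the 601,
 * which only has the unified IBATs, cr0.eq is set by the caller and
 * the beq skips the DBAT loads.
 */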

	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons: the kernel used to
	 * need to look like a COFF function entry for the pmac,
	 * but we're always started by some kind of bootloader now.
	 * -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address). Address translation is turned on, with the prom
 * managing the hash table. Interrupts are disabled. The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader. The expected layout
 * of the regs is:
 *  r3: ptr to residual data
 *  r4: initrd_start, or 0 if no initrd
 *  r5: initrd_end - unused if r4 is 0
 *  r6: Start of command line string
 *  r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
	/*
	 * We have to do any OF calls before we map ourselves to KERNELBASE,
	 * because OF may have I/O devices mapped into that area
	 * (particularly on CHRP).
	 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
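
/*
 * Illustrative note (not in the original source): "bcl 20,31,$+4" is
 * the classic 32-bit PowerPC way to read the program counter -- an
 * always-taken branch-and-link to the next instruction, which leaves
 * that instruction's address in LR. The @ha/@l pair then applies a
 * 32-bit offset, with @ha rounded up to compensate for the sign of
 * the low half, roughly:
 *
 *	lo = offset & 0xffff;			// sign-extended by addi
 *	ha = (offset >> 16) + ((lo & 0x8000) ? 1 : 0);
 *	offset == (ha << 16) + (short)lo;
 */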

	/* We never return. We also hit this trap if we try to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected. */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */
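
/*
 * Illustrative note (not in the original source): the constant built
 * above is the BootX magic in ASCII -- 0x42 'B', 0x6f 'o', 0x6f 'o',
 * 0x58 'X' -- i.e. lis/ori assemble r31 = 0x426f6f58 to compare
 * against the r3 value BootX hands us (see the PMAC notes above).
 */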

1:	mr	r31,r3			/* save parameters */
	mr	r30,r4
	li	r24,0			/* cpu # */

	/*
	 * early_init() does the early machine identification and does
	 * the necessary low-level setup and clears the BSS
	 *  -- Cort <[email protected]>
	 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	bl	reloc_offset
	bl	init_idle_6xx
#endif /* CONFIG_6xx */


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * We now have the first 16M of RAM mapped with the BATs.
 * Prep needs the MMU to be turned on here, but pmac already has it on.
 * This shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	RFI				/* enables MMU */
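
/*
 * Illustrative note (not in the original source): RFI is used here as
 * a combined "mtmsr + branch" -- it loads MSR from SRR1 and the next
 * instruction address from SRR0 in one go, so setting MSR_IR|MSR_DR in
 * SRR1 and start_here in SRR0 switches translation on and jumps in a
 * single instruction, with no window where we run on a half-updated MSR.
 */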

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
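
/*
 * Illustrative only -- a rough C sketch of the hold/release handshake
 * above, not part of the original source. A secondary acknowledges by
 * storing its r3 value, then spins on physical address 0 until the
 * boot CPU writes that CPU's number there:
 *
 *	*(volatile long *)&__secondary_hold_acknowledge = my_cpu;
 *	while (*(volatile long *)0 != my_cpu)
 *		;			// released by the boot CPU
 *	__secondary_start();
 */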

/*
 * Exception entry code. This code runs with address translation
 * turned off, i.e. using physical addresses.
 * We assume sprg3 has the physical address of the current
 * task's thread_struct.
 */
#define EXCEPTION_PROLOG	\
	mtspr	SPRN_SPRG_SCRATCH0,r10;	\
	mtspr	SPRN_SPRG_SCRATCH1,r11;	\
	mfcr	r10;		\
	EXCEPTION_PROLOG_1;	\
	EXCEPTION_PROLOG_2

#define EXCEPTION_PROLOG_1	\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel */ \
	andi.	r11,r11,MSR_PR;	\
	tophys(r11,r1);			/* use tophys(r1) if kernel */ \
	beq	1f;		\
	mfspr	r11,SPRN_SPRG_THREAD;	\
	lwz	r11,THREAD_INFO-THREAD(r11);	\
	addi	r11,r11,THREAD_SIZE;	\
	tophys(r11,r11);	\
1:	subi	r11,r11,INT_FRAME_SIZE	/* alloc exc. frame */


#define EXCEPTION_PROLOG_2	\
	CLR_TOP32(r11);		\
	stw	r10,_CCR(r11);		/* save registers */ \
	stw	r12,GPR12(r11);	\
	stw	r9,GPR9(r11);	\
	mfspr	r10,SPRN_SPRG_SCRATCH0;	\
	stw	r10,GPR10(r11);	\
	mfspr	r12,SPRN_SPRG_SCRATCH1;	\
	stw	r12,GPR11(r11);	\
	mflr	r10;		\
	stw	r10,_LINK(r11);	\
	mfspr	r12,SPRN_SRR0;	\
	mfspr	r9,SPRN_SRR1;	\
	stw	r1,GPR1(r11);	\
	stw	r1,0(r11);	\
	tovirt(r1,r11);			/* set new kernel sp */	\
	li	r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
	MTMSRD(r10);			/* (except for mach check in rtas) */ \
	stw	r0,GPR0(r11);	\
	lis	r10,STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l; \
	stw	r10,8(r11);	\
	SAVE_4GPRS(3, r11);	\
	SAVE_2GPRS(7, r11)
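
/*
 * Illustrative only -- a rough sketch of what the prolog above
 * computes, not part of the original source. In C-like pseudocode:
 *
 *	if (srr1 & MSR_PR)		// came from user mode:
 *		frame = tophys(current_thread_info() + THREAD_SIZE);
 *	else				// came from kernel mode:
 *		frame = tophys(r1);	// keep using the kernel stack
 *	frame -= INT_FRAME_SIZE;	// allocate a pt_regs frame
 *
 * EXCEPTION_PROLOG_2 then spills the volatile GPRs, CR, LR and
 * SRR0/SRR1 into that frame and points r1 at it (virtual).
 */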

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
 *
 * Note2: once we have set r1 we are in a position to take exceptions
 * again, and we could thus set MSR:RI at that point.
 */

/*
 * Exception vectors.
 */
#define EXCEPTION(n, label, hdlr, xfer)	\
	. = n;			\
	DO_KVM n;		\
label:				\
	EXCEPTION_PROLOG;	\
	addi	r3,r1,STACK_FRAME_OVERHEAD;	\
	xfer(n, hdlr)

#define EXC_XFER_TEMPLATE(n, hdlr, trap, copyee, tfer, ret)	\
	li	r10,trap;	\
	stw	r10,_TRAP(r11);	\
	li	r10,MSR_KERNEL;	\
	copyee(r10, r9);	\
	bl	tfer;		\
i##n:				\
	.long	hdlr;		\
	.long	ret

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, NOCOPY, transfer_to_handler_full,	\
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(n, hdlr, n, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(n, hdlr, n+1, COPY_EE, transfer_to_handler, \
			  ret_from_except)
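
/*
 * Illustrative note (not in the original source): the four EXC_XFER
 * flavours differ along two axes. STD vs LITE selects the full or the
 * lightweight transfer/return path (LITE also stores n+1 in _TRAP; the
 * low bit appears to mark frames whose non-volatile GPRs were not
 * saved). Plain vs EE controls whether COPY_EE propagates the
 * interrupted MSR_EE bit into the handler's MSR (the rlwimi copies
 * bit 16, i.e. MSR_EE, from SRR1 in r9).
 */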

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done. */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect. The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs. We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in SPRG2. The machine check handler checks SPRG2 and uses its
 * value if it is non-zero. If we ever needed to free up SPRG2,
 * we could use a field in the thread_info or thread_struct instead.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 * -- paulus.
 */
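
/*
 * Illustrative only -- a rough C sketch of the SPRG2/RTAS convention
 * described above, not part of the original source:
 *
 *	frame = mfspr(SPRN_SPRG_RTAS);	// nonzero while inside RTAS
 *	if (frame)
 *		machine_check_in_rtas();	// use the pre-arranged frame
 *	else
 *		machine_check_exception(regs);	// normal path
 */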
	. = 0x200
	DO_KVM  0x200
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfcr	r10
#ifdef CONFIG_PPC_CHRP
	mfspr	r11,SPRN_SPRG_RTAS
	cmpwi	0,r11,0
	bne	7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	mfspr	r4,SPRN_SPRG_RTAS
	cmpwi	cr1,r4,0
	bne	cr1,1f
#endif
	EXC_XFER_STD(0x200, machine_check_exception)
#ifdef CONFIG_PPC_CHRP
1:	b	machine_check_in_rtas
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM  0x300
DataAccess:
	EXCEPTION_PROLOG
	mfspr	r10,SPRN_DSISR
	stw	r10,_DSISR(r11)
	andis.	r0,r10,0xa470		/* weird error? */
	bne	1f			/* if not, try to put a PTE */
	mfspr	r4,SPRN_DAR		/* into the hash table */
	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
1:	lwz	r5,_DSISR(r11)		/* get DSISR value */
	mfspr	r4,SPRN_DAR
	EXC_XFER_EE_LITE(0x300, handle_page_fault)


/* Instruction access exception. */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
	EXCEPTION_PROLOG
	andis.	r0,r9,0x4000		/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
	bl	hash_page
1:	mr	r4,r12
	mr	r5,r9
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG
	mfspr	r4,SPRN_DAR
	stw	r4,_DAR(r11)
	mfspr	r5,SPRN_DSISR
	stw	r5,_DSISR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have an FPU and treat fp instructions
 * as an FP Unavailable exception. Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_EE)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_EE)

/*
 * The Altivec unavailable trap is at 0x0f20. Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
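/*
 * Illustrative only -- a rough C sketch of the software walk performed
 * below, not part of the original source. The 603 has no hardware
 * table walker, so on a miss we do a two-level lookup by hand:
 *
 *	pgd = (ea < PAGE_OFFSET) ? current->thread.pgdir
 *				 : swapper_pg_dir;	// kernel address
 *	pmd = pgd[ea >> 22];			// top 10 bits of ea
 *	if (!pmd)
 *		goto invalid;
 *	pte = ((u32 *)(pmd & ~0xfff))[(ea >> 12) & 0x3ff]; // next 10 bits
 *	if (!permitted(pte))
 *		goto invalid;		// fall back to the 0x400 handler
 *	pte |= _PAGE_ACCESSED;		// then reformat for the hw TLB
 */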
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
	lis	r1,PAGE_OFFSET@h	/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2,SPRN_SPRG_THREAD
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	bge-	112f
	mfspr	r2,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwimi	r1,r2,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid /* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	ori	r0,r0,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r0,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r0,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,PAGE_OFFSET@h	/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2,SPRN_SPRG_THREAD
	li	r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
	lwz	r2,PGDIR(r2)
	bge-	112f
	mfspr	r2,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwimi	r1,r2,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r0,r0,_PAGE_ACCESSED	/* set _PAGE_ACCESSED in pte */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r0,0(r2)		/* update PTE (accessed bit) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-10,31,31	/* _PAGE_RW -> PP lsb */
	rlwinm	r2,r0,32-7,31,31	/* _PAGE_DIRTY -> PP lsb */
	and	r1,r1,r2		/* writable if _RW and _DIRTY */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? (rw&dirty? 2: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
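
/*
 * Illustrative note (not in the original source): the SW LRU section
 * above keeps a one-bit-per-TLB-set history in SPRG_603_LRU. Address
 * bits 15:19 select the TLB set; the bit for that set is flipped on
 * each miss and copied into SRR1 bit 14, which appears to steer which
 * way tlbld replaces next, giving round-robin replacement per set.
 */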
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1,PAGE_OFFSET@h	/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2,SPRN_SPRG_THREAD
	li	r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
	lwz	r2,PGDIR(r2)
	bge-	112f
	mfspr	r2,SPRN_SRR1		/* and MSR_PR bit from SRR1 */
	rlwimi	r1,r2,32-12,29,29	/* shift MSR_PR to _PAGE_USER posn */
	lis	r2,swapper_pg_dir@ha	/* if kernel address, use */
	addi	r2,r2,swapper_pg_dir@l	/* kernel page table */
112:	tophys(r2,r2)
	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_DIRTY
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	stw	r0,0(r2)		/* update PTE (accessed/dirty bits) */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	li	r1,0xe05		/* clear out reserved bits & PP lsb */
	andc	r1,r0,r1		/* PP = user? 2: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_EE)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_EE)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_EE)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_EE)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x2f00, MOLTrampoline, unknown_exception, EXC_XFER_EE_LITE)

	.globl mol_trampoline
	.set mol_trampoline, i0x2f00

	. = 0x3000

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)


/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
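
/*
 * Illustrative only -- a rough C sketch of copy_and_flush, not part of
 * the original source. Because we are copying code, each cache line is
 * flushed from the data cache and invalidated in the instruction cache
 * as it is written:
 *
 *	while (off < limit) {
 *		for (i = 0; i < L1_CACHE_BYTES; i += 4, off += 4)
 *			*(u32 *)(dst + off) = *(u32 *)(src + off);
 *		dcbst(dst + off);	// push the line to memory
 *		sync();			// order the store vs. the icbi
 *		icbi(dst + off);	// discard any stale icache copy
 *	}
 */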

#ifdef CONFIG_SMP
	.globl	__secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_6xx
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx
#endif /* CONFIG_6xx */

	/* get current_thread_info and current */
	lis	r1,secondary_ti@ha
	tophys(r1,r1)
	lwz	r1,secondary_ti@l(r1)
	tophys(r2,r1)
	lwz	r2,TI_TASK(r2)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG_THREAD,r4
	li	r3,0
	mtspr	SPRN_SPRG_RTAS,r3	/* 0 => not in RTAS */

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * These generic dummy functions are kept for CPUs not
 * included in CONFIG_6xx
 */
#if !defined(CONFIG_6xx)
_ENTRY(__save_cpu_setup)
	blr
_ENTRY(__restore_cpu_setup)
	blr
#endif /* !defined(CONFIG_6xx) */


/*
 * Load stuff into the MMU. Intended to be called with
 * IR=0 and DR=0.
 */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
	li	r0,16		/* load up segment register values */
	mtctr	r0		/* for context 0 */
	lis	r3,0x2000	/* Ku = 1, VSID = 0 */
	li	r4,0
3:	mtsrin	r3,r4
	addi	r3,r3,0x111	/* increment VSID */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b

	/* Load the BAT registers with the values set up by MMU_init.
	   MMU_init takes care of whether we're on a 601 or not. */
	mfpvr	r3
	srwi	r3,r3,16
	cmpwi	r3,1
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
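
/*
 * Illustrative only -- a rough sketch of the segment register setup in
 * load_up_mmu above, not part of the original source. For kernel
 * context 0, segment register n (covering EAs n*256MB and up) gets:
 *
 *	for (n = 0; n < 16; n++)
 *		mtsrin(0x20000000 | (n * 0x111), n << 28);  // Ku | VSID
 *
 * The 0x111 stride is the same per-segment VSID step used by
 * switch_mmu_context() below, so consecutive segments get
 * well-spread VSIDs.
 */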

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	CLR_TOP32(r4)
	mtspr	SPRN_SPRG_THREAD,r4
	li	r3,0
	mtspr	SPRN_SPRG_RTAS,r3	/* 0 => not in RTAS */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
	/*
	 * Do early platform-specific initialization,
	 * and set up the MMU.
	 */
	mr	r3,r31
	mr	r4,r30
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init

	/*
	 * Go back to running unmapped so we can load up new values
	 * for SDR1 (hash table pointer) and the segment registers
	 * and change to using our exception vectors.
	 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	FIX_SRR1(r3,r5)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	SYNC
	RFI
	/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

	/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	FIX_SRR1(r4,r5)
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
	addis	r3,r3,0x6000	/* Set Ks, Ku bits */
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lwz	r4,MM_PGD(r4)
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
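
/*
 * Illustrative only -- a rough sketch of the VSID computation above,
 * not part of the original source:
 *
 *	base = ((context * 897) << 4) & 0x00fffff0;	// skewed VSID
 *	for (n = 0; n < NUM_USER_SEGMENTS; n++)
 *		mtsrin(0x60000000 | ((base + n * 0x111) & 0x00ffffff),
 *		       n << 28);	// Ks|Ku plus VSID for segment n
 *
 * The 897 multiplier spreads consecutive context IDs across the hash
 * table so their page table entries don't all collide in the same
 * primary hash groups.
 */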

/*
 * An undocumented "feature" of the 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	r9, 1
	beq	1f

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
1:
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR	/* MMU enabled? */
	beqlr
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI

/*
 * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
 * (we keep one for debugging) and on others, we use one 256M BAT.
 */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	bne	4f
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
	addis	r11,r11,0x800000@h
	addis	r8,r8,0x800000@h
	mtspr	SPRN_IBAT1U,r11
	mtspr	SPRN_IBAT1L,r8
	addis	r11,r11,0x800000@h
	addis	r8,r8,0x800000@h
	mtspr	SPRN_IBAT2U,r11
	mtspr	SPRN_IBAT2L,r8
	isync
	blr

4:	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr


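/*
 * Illustrative note (not in the original source): decoding the non-601
 * values built above for a 256MB kernel mapping at PAGE_OFFSET
 * 0xc0000000 -> physical 0:
 *
 *	BATU = 0xc0000000 | BL_256M<<2 | 0x2 = 0xc0001ffe  (BEPI, BL, Vs)
 *	BATL = 0x00000000 | 0x12  (M=1, PP=RW; 0x02 without M on non-SMP)
 *
 * which matches the BATS[] word layout assumed in the LOAD_BAT() note
 * near the top of this file.
 */
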
#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	beq	1f
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
1:	mtspr	SPRN_IBAT3L,r8
	mtspr	SPRN_IBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8, 0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated with the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe		/* top 128K */
	ori	r8, r8, 0x002a		/* uncached, guarded, rw */
	ori	r11, r11, 0x2		/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

	.globl	intercept_table
intercept_table:
	.long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
	.long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
	.long 0, 0, 0, i0x1300, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0
	.long 0, 0, 0, 0, 0, 0, 0, 0

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8