GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/powerpc/kernel/head_44x.S
/*
 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <[email protected]>
 * Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <[email protected]>
 * Rewritten for PReP
 * Copyright (c) 1996 Paul Mackerras <[email protected]>
 * Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <[email protected]>
 * PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 * PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <[email protected]>
 * PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 * PPC405 modifications
 * PowerPC 403GCX/405GP modifications.
 * Author: MontaVista Software, Inc.
 * [email protected] or [email protected]
 * [email protected]
 * Copyright 2002-2005 MontaVista Software, Inc.
 * PowerPC 44x support, Matt Porter <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/synch.h>
#include "head_booke.h"


/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
	/*
	 * Save parameters we are passed
	 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0		/* CPU number */

	bl	init_cpu_state

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
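	/*
	 * The stwu above does two jobs at once: it zero-terminates the
	 * stack back chain (r0 is 0) and leaves r1 pointing one stack
	 * frame below the top of init_thread_union.
	 */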

	bl	early_init

	/*
	 * Decide what sort of machine this is and initialize the MMU.
	 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)
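	/*
	 * Note: the word at KERNELBASE written above is the one reserved
	 * by the nop at _start, giving an attached BDI2000 probe a fixed,
	 * known place to find abatron_pteptrs.
	 */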

	/* Clear the Machine Check Syndrome Register */
	li	r0,0
	mtspr	SPRN_MCSR,r0

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */
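	/*
	 * rfi loads the PC from SRR0 and the MSR from SRR1, so the two
	 * mtspr instructions above make it enter start_kernel and switch
	 * to the full MSR_KERNEL context in a single step.
	 */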

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Internal Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30
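	/*
	 * The rlwimi above rotates ESR left by 10 so that ESR[ST] (the
	 * "access was a store" bit) lands in the _PAGE_RW position of
	 * the required-permission mask being built in r13.
	 */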

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */
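	/*
	 * In C terms, the walk above is roughly (a sketch only; the
	 * names are illustrative, not the kernel's):
	 *
	 *	pmd = pgd[ea >> PGDIR_SHIFT];
	 *	if (!(pmd & ~0x7ff))
	 *		goto bail;
	 *	pte = *(u64 *)((pmd & ~0x7ff) | (pte_index(ea) << 3));
	 */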

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_D
tlb_44x_patch_hwater_D:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)
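	/*
	 * The cmpwi immediate above is rewritten at boot with the highest
	 * replaceable TLB index, so this round-robin pointer wraps to 0
	 * before it can reach the pinned entries set up in init_cpu_state
	 * (slot 63 for the kernel, slot 62 for the early debug UART).
	 */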

	/* Re-load the faulting address */
	mfspr	r10,SPRN_DEAR

	/* Jump to common tlb load */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_I
tlb_44x_patch_hwater_I:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_SRR0

	/* Jump to common TLB load point */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - TLB index
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_44x:
	/* Combine RPN & ERPN and write WS 0 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,PPC44x_TLB_XLAT

	/*
	 * Create WS1. This is the faulting address (EPN),
	 * page size, and valid flag.
	 */
	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
	/* Insert valid and page size */
	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */

	/* And WS 2 */
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */
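	/*
	 * For reference (per the 44x TLB word 2 layout): mask 0xf85
	 * keeps the WIMGE storage attributes plus SX and SR from the
	 * PTE low word; SW is derived from _PAGE_DIRTY by the first
	 * rlwimi, and the last rlwimi mirrors the three S permission
	 * bits into the U positions for user pages.
	 */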

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi			/* Force context change */

/* TLB error interrupts for 476
 */
#ifdef CONFIG_PPC_47x
	START_EXCEPTION(DataTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * rather than adding an instruction here. We should measure
	 * whether the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * the latched V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */
	lwz	r11,0(r12)		/* Get high word of pte entry */

	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common tlb load */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11,SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13,SPRN_SPRG_RSCRATCH3
	mfspr	r12,SPRN_SPRG_RSCRATCH2
	mfspr	r11,SPRN_SPRG_RSCRATCH1
	mfspr	r10,SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Load PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * the latched V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */

	lwz	r11,0(r12)		/* Get high word of pte entry */
	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common TLB load point */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - free to use
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - free to use
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_47x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,1

	/* And make up word 2 */
	li	r10,0xf85		/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30	/* DIRTY -> SW position */
	and	r11,r12,r10		/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER	/* User page ? */
	beq	1f			/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
1:	tlbwe	r11,r13,2

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi

#endif /* CONFIG_PPC_47x */

	/* Debug Interrupt */
	/*
	 * This statement needs to exist at the end of the IVPR
	 * definition just in case you end up taking a debug
	 * exception within another exception.
	 */
	DEBUG_CRIT_EXCEPTION

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The 44x core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The 44x core does not have an FPU.
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * Init CPU state. This is called at boot time or for secondary CPUs
 * to setup initial TLB entries, setup IVORs, etc...
 *
 */
_GLOBAL(init_cpu_state)
	mflr	r22
#ifdef CONFIG_PPC_47x
	/* We use the PVR to differentiate 44x cores from 476 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	head_start_47x
#endif /* CONFIG_PPC_47x */

	/*
	 * In case the firmware didn't do it, we apply some workarounds
	 * that are good for all 440 core variants here
	 */
	mfspr	r3,SPRN_CCR0
	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
	isync
	mtspr	SPRN_CCR0,r3
	isync
	sync

	/*
	 * Set up the initial MMU state for 44x
	 *
	 * We are still executing code at the virtual address
	 * mappings set by the firmware for the base of RAM.
	 *
	 * We first invalidate all TLB entries but the one
	 * we are running from. We then load the KERNELBASE
	 * mappings so we can begin to use kernel addresses
	 * natively and so the interrupt vector locations are
	 * permanently pinned (necessary since Book E
	 * implementations always have translation enabled).
	 *
	 * TODO: Use the known TLB entry we are running from to
	 *	 determine which physical region we are located
	 *	 in. This can be used to determine where in RAM
	 *	 (on a shared CPU system) or PCI memory space
	 *	 (on a DRAMless system) we are located.
	 *	 For now, we assume a perfect world which means
	 *	 we are located at the base of DRAM (physical 0).
	 */

	/*
	 * Search TLB for entry that we are currently using.
	 * Invalidate all entries but the one we are using.
	 */
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	wmmucr			/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	bl	invstr			/* Find our address */
invstr:	mflr	r5			/* Make it accessible */
	tlbsx	r23,0,r5		/* Find entry we are in */
	li	r4,0			/* Start at TLB entry 0 */
	li	r3,0			/* Set PAGEID inval value */
1:	cmpw	r23,r4			/* Is this our entry? */
	beq	skpinv			/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID	/* If not, inval the entry */
skpinv:	addi	r4,r4,1			/* Increment */
	cmpwi	r4,64			/* Are we done? */
	bne	1b			/* If not, repeat */
	isync				/* If so, context change */
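	/*
	 * In C terms, the loop above is roughly (a sketch):
	 *
	 *	for (i = 0; i < 64; i++)
	 *		if (i != entry_we_run_from)
	 *			write_tlb_pageid(i, 0);
	 *
	 * i.e. every entry except the one we execute from has its
	 * valid bit cleared.
	 */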

	/*
	 * Configure and load pinned entry into TLB slot 63.
	 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	/* pageid fields */
	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	/* xlat fields */
	clrrwi	r4,r4,10		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */

	/* attrib fields */
	/* Added guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
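	/*
	 * The three tlbwe instructions above pin one 256MB entry in
	 * slot 63 mapping PAGE_OFFSET to physical address 0, with
	 * supervisor read/write/execute permission and the guarded
	 * attribute set.
	 */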

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi
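	/*
	 * SRR1 holds our current MSR and SRR0 the address of label 3
	 * below, so the rfi resumes at that label with an unchanged
	 * context, but now fetching through the freshly pinned
	 * KERNELBASE mapping rather than the firmware's.
	 */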

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* pageid fields */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

	/* xlat fields */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* attrib fields */
	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
	li	r0,62			/* TLB slot 62 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID
	tlbwe	r4,r0,PPC44x_TLB_XLAT
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheck);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError44x);
	SET_IVOR(14, InstructionTLBError44x);
	SET_IVOR(15, DebugCrit);

	b	head_start_common


#ifdef CONFIG_PPC_47x

#ifdef CONFIG_SMP

	/* Entry point for secondary 47x processors */
_GLOBAL(start_secondary_47x)
	mr	r24,r3		/* CPU number */

	bl	init_cpu_state

	/* Now we need to bolt the rest of kernel memory which
	 * is done in C code. We must be careful because our task
	 * struct or our stack can (and probably will) be out
	 * of reach of the initial 256M TLB entry, so we use a
	 * small temporary stack in .bss for that. This works
	 * because only one CPU at a time can be in this code
	 */
	lis	r1,temp_boot_stack@h
	ori	r1,r1,temp_boot_stack@l
	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)
	bl	mmu_init_secondary

	/* Now we can get our task struct and real stack pointer */

	/* Get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	lwz	r2,TI_TASK(r1)

	/* Current stack pointer */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)

	/* Kernel stack for exception entry in SPRG3 */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	b	start_secondary

#endif /* CONFIG_SMP */

/*
 * Set up the initial MMU state for 47x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 */

head_start_47x:
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	1f			/* If not, leave STS=0 */
	oris	r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */
1:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	1f
1:	mflr	r23
	tlbsx	r23,0,r23
	tlbre	r24,r23,0
	tlbre	r25,r23,1
	tlbre	r26,r23,2

	/*
	 * Cleanup time
	 */

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

clear_all_utlb_entries:

	#; Set initial values.

	addis	r3,0,0x8000
	addi	r4,0,0
	addi	r5,0,0
	b	clear_utlb_entry

	#; Align the loop to speed things up.

	.align	6

clear_utlb_entry:

	tlbwe	r4,r3,0
	tlbwe	r5,r3,1
	tlbwe	r5,r3,2
	addis	r3,r3,0x2000
	cmpwi	r3,0
	bne	clear_utlb_entry
	addis	r3,0,0x8000
	addis	r4,r4,0x100
	cmpwi	r4,0
	bne	clear_utlb_entry
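	#; In other words: the inner four iterations step r3 through the
	#; four ways of a set (the 0x2000_0000 increments), and the outer
	#; 256 iterations step the EPN in r4 by 16MB so that every
	#; congruence class of the 4-way, 1024-entry UTLB receives an
	#; invalid entry.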

	#; Restore original entry.

	oris	r23,r23,0x8000		/* specify the way */
	tlbwe	r24,r23,0
	tlbwe	r25,r23,1
	tlbwe	r26,r23,2

	/*
	 * Configure and load pinned entry into TLB for the kernel core
	 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Word 0 */
	clrrwi	r3,r3,12		/* Mask off the effective page number */
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M

	/* Word 1 */
	clrrwi	r4,r4,12		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */
	/* Word 2 */
	li	r5,0
	ori	r5,r5,PPC47x_TLB2_S_RWX
#ifdef CONFIG_SMP
	ori	r5,r5,PPC47x_TLB2_M
#endif

	/* We write to way 0 and bolted 0 */
	lis	r0,0x8800
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

	/*
	 * Configure SSPCR, ISPCR and USPCR for now to search everything;
	 * we can fix them up later
	 */
	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
	mtspr	SPRN_SSPCR,r3
	mtspr	SPRN_USPCR,r3
	LOAD_REG_IMMEDIATE(r3, 0x12345670)
	mtspr	SPRN_ISPCR,r3

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* Invalidate original entry we used */
3:
	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
	tlbwe	r24,r23,0
	addi	r24,0,0
	tlbwe	r24,r23,1
	tlbwe	r24,r23,2
	isync			/* Clear out the shadow TLB entries */

#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* Word 0 */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M

	/* Word 1 */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* Word 2 */
	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)

	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
	 * congruence class as the kernel; we need to make sure of it at
	 * some point
	 */
	lis	r0,0x8d00
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheckA);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError47x);
	SET_IVOR(14, InstructionTLBError47x);
	SET_IVOR(15, DebugCrit);

	/* We configure icbi to invalidate 128 bytes at a time since the
	 * current 32-bit kernel code isn't too happy with icache != dcache
	 * block size
	 */
	mfspr	r3,SPRN_CCR0
	oris	r3,r3,0x0020
	mtspr	SPRN_CCR0,r3
	isync

#endif /* CONFIG_PPC_47x */

/*
 * Here we are back to code that is common between 44x and 47x
 *
 * We proceed to further kernel initialization and return to the
 * main kernel entry
 */
head_start_common:
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4
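	/*
	 * A vector is dispatched to (IVPR & 0xffff0000) | IVORn; the
	 * IVORs set in init_cpu_state hold the low 16 bits of each
	 * handler label, so the pair reconstructs the full addresses
	 * of the entries at interrupt_base.
	 */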

	addis	r22,r22,KERNELBASE@h
	mtlr	r22
	isync
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	PAGE_SHIFT
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_SMP
	.align	12
temp_boot_stack:
	.space	1024
#endif /* CONFIG_SMP */