/* arch/parisc/kernel/head.S - kernel startup / boot entry code */
/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <[email protected]>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf ([email protected])
 * Copyright 2000 Hewlett Packard (Paul Bame, [email protected])
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <[email protected]>
 *
 * Initial Version 04-23-1999 by Helge Deller <[email protected]>
 */
#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

	.level	1.1
__INITDATA
28
ENTRY(boot_args)
29
.word 0 /* arg0 */
30
.word 0 /* arg1 */
31
.word 0 /* arg2 */
32
.word 0 /* arg3 */
33
END(boot_args)
34
35
__HEAD
36
37
.align 4
38
.import init_task,data
39
.import init_stack,data
40
.import fault_vector_20,code /* IVA parisc 2.0 32 bit */
41
#ifndef CONFIG_64BIT
42
.import fault_vector_11,code /* IVA parisc 1.1 32 bit */
43
.import $global$ /* forward declaration */
44
#endif /*!CONFIG_64BIT*/
45
ENTRY(parisc_kernel_start)
46
.proc
47
.callinfo
48
49
/* Make sure sr4-sr7 are set to zero for the kernel address space */
50
mtsp %r0,%sr4
51
mtsp %r0,%sr5
52
mtsp %r0,%sr6
53
mtsp %r0,%sr7
54
55
/* Clear BSS (shouldn't the boot loader do this?) */
56
57
.import __bss_start,data
58
.import __bss_stop,data
59
60
load32 PA(__bss_start),%r3
61
load32 PA(__bss_stop),%r4
62
$bss_loop:
63
cmpb,<<,n %r3,%r4,$bss_loop
64
stw,ma %r0,4(%r3)
65
66
/* Save away the arguments the boot loader passed in (32 bit args) */
67
load32 PA(boot_args),%r1
68
stw,ma %arg0,4(%r1)
69
stw,ma %arg1,4(%r1)
70
stw,ma %arg2,4(%r1)
71
stw,ma %arg3,4(%r1)
72
73
#if defined(CONFIG_PA20)
74
/* check for 64-bit capable CPU as required by current kernel */
75
ldi 32,%r10
76
mtctl %r10,%cr11
77
.level 2.0
78
mfctl,w %cr11,%r10
79
.level 1.1
80
comib,<>,n 0,%r10,$cpu_ok
81
82
load32 PA(msg1),%arg0
83
ldi msg1_end-msg1,%arg1
84
$iodc_panic:
85
copy %arg0, %r10
86
copy %arg1, %r11
87
load32 PA(init_stack),%sp
88
#define MEM_CONS 0x3A0
89
ldw MEM_CONS+32(%r0),%arg0 // HPA
90
ldi ENTRY_IO_COUT,%arg1
91
ldw MEM_CONS+36(%r0),%arg2 // SPA
92
ldw MEM_CONS+8(%r0),%arg3 // layers
93
load32 PA(__bss_start),%r1
94
stw %r1,-52(%sp) // arg4
95
stw %r0,-56(%sp) // arg5
96
stw %r10,-60(%sp) // arg6 = ptr to text
97
stw %r11,-64(%sp) // arg7 = len
98
stw %r0,-68(%sp) // arg8
99
load32 PA(.iodc_panic_ret), %rp
100
ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC
101
bv,n (%r1)
102
.iodc_panic_ret:
103
b . /* wait endless with ... */
104
or %r10,%r10,%r10 /* qemu idle sleep */
105
msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
106
msg1_end:
107
108
$cpu_ok:
109
#endif
110
111
.level PA_ASM_LEVEL
112
113
/* Initialize startup VM. Just map first 16/32 MB of memory */
114
load32 PA(swapper_pg_dir),%r4
115
mtctl %r4,%cr24 /* Initialize kernel root pointer */
116
mtctl %r4,%cr25 /* Initialize user root pointer */
117
118
#if CONFIG_PGTABLE_LEVELS == 3
119
/* Set pmd in pgd */
120
load32 PA(pmd0),%r5
121
shrd %r5,PxD_VALUE_SHIFT,%r3
122
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
123
stw %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
124
ldo ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
125
#else
126
/* 2-level page table, so pmd == pgd */
127
ldo ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
128
#endif
129
130
/* Fill in pmd with enough pte directories */
131
load32 PA(pg0),%r1
132
SHRREG %r1,PxD_VALUE_SHIFT,%r3
133
ldo (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
134
135
ldi ASM_PT_INITIAL,%r1
136
137
1:
138
stw %r3,0(%r4)
139
ldo (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
140
addib,> -1,%r1,1b
141
#if CONFIG_PGTABLE_LEVELS == 3
142
ldo ASM_PMD_ENTRY_SIZE(%r4),%r4
143
#else
144
ldo ASM_PGD_ENTRY_SIZE(%r4),%r4
145
#endif
146
147
148
/* Now initialize the PTEs themselves. We use RWX for
149
* everything ... it will get remapped correctly later */
150
ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
151
load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
152
load32 PA(pg0),%r1
153
154
$pgt_fill_loop:
155
STREGM %r3,ASM_PTE_ENTRY_SIZE(%r1)
156
ldo (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
157
addib,> -1,%r11,$pgt_fill_loop
158
nop
159
160
/* Load the return address...er...crash 'n burn */
161
copy %r0,%r2
162
163
/* And the RFI Target address too */
164
load32 start_parisc,%r11
165
166
/* And the initial task pointer */
167
load32 init_task,%r6
168
mtctl %r6,%cr30
169
170
/* And the stack pointer too */
171
load32 init_stack,%sp
172
tophys_r1 %sp
173
#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
174
.import _mcount,data
175
/* initialize mcount FPTR */
176
/* Get the global data pointer */
177
loadgp
178
load32 PA(_mcount), %r10
179
std %dp,0x18(%r10)
180
#endif
181
182
#define MEM_PDC_LO 0x388
183
#define MEM_PDC_HI 0x35C
184
#ifdef CONFIG_64BIT
185
/* Get PDCE_PROC for monarch CPU. */
186
ldw MEM_PDC_LO(%r0),%r3
187
ldw MEM_PDC_HI(%r0),%r10
188
depd %r10, 31, 32, %r3 /* move to upper word */
189
#endif
190
191
192
#ifdef CONFIG_SMP
193
/* Set the smp rendezvous address into page zero.
194
** It would be safer to do this in init_smp_config() but
195
** it's just way easier to deal with here because
196
** of 64-bit function ptrs and the address is local to this file.
197
*/
198
load32 PA(smp_slave_stext),%r10
199
stw %r10,0x10(%r0) /* MEM_RENDEZ */
200
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI - assume addr < 4GB */
201
202
/* FALLTHROUGH */
203
.procend
204
205
#ifdef CONFIG_HOTPLUG_CPU
206
/* common_stext is far away in another section... jump there */
207
load32 PA(common_stext), %rp
208
bv,n (%rp)
209
210
/* common_stext and smp_slave_stext needs to be in text section */
211
.text
212
#endif
213
214
/*
215
** Code Common to both Monarch and Slave processors.
216
** Entry:
217
**
218
** 1.1:
219
** %r11 must contain RFI target address.
220
** %r25/%r26 args to pass to target function
221
** %r2 in case rfi target decides it didn't like something
222
**
223
** 2.0w:
224
** %r3 PDCE_PROC address
225
** %r11 RFI target address
226
**
227
** Caller must init: SR4-7, %sp, %r10, %cr24/25,
228
*/
229
common_stext:
230
.proc
231
.callinfo
232
#else
233
/* Clear PDC entry point - we won't use it */
234
stw %r0,0x10(%r0) /* MEM_RENDEZ */
235
stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */
236
#endif /*CONFIG_SMP*/
237
238
#ifdef CONFIG_64BIT
239
mfctl %cr30,%r6 /* PCX-W2 firmware bug */
240
tophys_r1 %r6
241
242
/* Save the rfi target address */
243
STREG %r11, TASK_PT_GR11(%r6)
244
/* Switch to wide mode Superdome doesn't support narrow PDC
245
** calls.
246
*/
247
1: mfia %rp /* clear upper part of pcoq */
248
ldo 2f-1b(%rp),%rp
249
depdi 0,31,32,%rp
250
bv (%rp)
251
ssm PSW_SM_W,%r0
252
253
/* Set Wide mode as the "Default" (eg for traps)
254
** First trap occurs *right* after (or part of) rfi for slave CPUs.
255
** Someday, palo might not do this for the Monarch either.
256
*/
257
2:
258
259
ldo PDC_PSW(%r0),%arg0 /* 21 */
260
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
261
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
262
load32 PA(stext_pdc_ret), %rp
263
bv (%r3)
264
copy %r0,%arg3
265
266
stext_pdc_ret:
267
LDREG TASK_PT_GR11(%r6), %r11
268
tovirt_r1 %r6
269
mtctl %r6,%cr30 /* restore task thread info */
270
#endif
271
272
#ifndef CONFIG_64BIT
273
/* clear all BTLBs */
274
ldi PDC_BLOCK_TLB,%arg0
275
load32 PA(stext_pdc_btlb_ret), %rp
276
ldw MEM_PDC_LO(%r0),%r3
277
bv (%r3)
278
ldi PDC_BTLB_PURGE_ALL,%arg1
279
stext_pdc_btlb_ret:
280
#endif
281
282
/* PARANOID: clear user scratch/user space SR's */
283
mtsp %r0,%sr0
284
mtsp %r0,%sr1
285
mtsp %r0,%sr2
286
mtsp %r0,%sr3
287
288
/* Initialize Protection Registers */
289
mtctl %r0,%cr8
290
mtctl %r0,%cr9
291
mtctl %r0,%cr12
292
mtctl %r0,%cr13
293
294
/* Initialize the global data pointer */
295
loadgp
296
297
/* Set up our interrupt table. HPMCs might not work after this!
298
*
299
* We need to install the correct iva for PA1.1 or PA2.0. The
300
* following short sequence of instructions can determine this
301
* (without being illegal on a PA1.1 machine).
302
*/
303
#ifndef CONFIG_64BIT
304
ldi 32,%r10
305
mtctl %r10,%cr11
306
.level 2.0
307
mfctl,w %cr11,%r10
308
.level 1.1
309
comib,<>,n 0,%r10,$is_pa20
310
ldil L%PA(fault_vector_11),%r10
311
b $install_iva
312
ldo R%PA(fault_vector_11)(%r10),%r10
313
314
$is_pa20:
315
.level PA_ASM_LEVEL /* restore 1.1 || 2.0w */
316
#endif /*!CONFIG_64BIT*/
317
load32 PA(fault_vector_20),%r10
318
319
$install_iva:
320
mtctl %r10,%cr14
321
322
b aligned_rfi /* Prepare to RFI! Man all the cannons! */
323
nop
324
325
.align 128
326
aligned_rfi:
327
pcxt_ssm_bug
328
329
copy %r3, %arg0 /* PDCE_PROC for smp_callin() */
330
331
rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */
332
/* Don't need NOPs, have 8 compliant insn before rfi */
333
334
mtctl %r0,%cr17 /* Clear IIASQ tail */
335
mtctl %r0,%cr17 /* Clear IIASQ head */
336
337
/* Load RFI target into PC queue */
338
mtctl %r11,%cr18 /* IIAOQ head */
339
ldo 4(%r11),%r11
340
mtctl %r11,%cr18 /* IIAOQ tail */
341
342
load32 KERNEL_PSW,%r10
343
mtctl %r10,%ipsw
344
345
tovirt_r1 %sp
346
347
/* Jump through hyperspace to Virt Mode */
348
rfi
349
nop
350
351
.procend
352
353
#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
	.proc
	.callinfo
	break	1,1		/*  Break if returned from start_secondary */
	nop
	nop
	.procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, registers values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
	.proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

#ifdef CONFIG_64BIT
	/*
	 *  Enable Wide mode early, in case the task_struct for the idle
	 *  task in smp_init_current_idle_task was allocated above 4GB.
	 */
1:	mfia		%rp		/* clear upper part of pcoq */
	ldo		2f-1b(%rp),%rp
	depdi		0,31,32,%rp
	bv		(%rp)
	ssm		PSW_SM_W,%r0
2:
#endif

	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%r6
	LDREG		0(%r6),%r6
	mtctl		%r6,%cr30
	tophys_r1	%r6
	LDREG		TASK_STACK(%r6),%sp
	tophys_r1	%sp
	ldo		FRAME_SIZE(%sp),%sp

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy		%arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address.  */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */
432
433
#ifndef CONFIG_64BIT
	/* $global$ is the 32-bit PA-RISC global data pointer ($dp) anchor;
	 * it is read-only after init and referenced by loadgp. */
	.section .data..ro_after_init

	.align	4
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/
444
445