/* arch/parisc/kernel/head.S -- PA-RISC kernel boot/startup code */
/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <[email protected]>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf ([email protected])
 * Copyright 2000 Hewlett Packard (Paul Bame, [email protected])
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <[email protected]>
 *
 * Initial Version 04-23-1999 by Helge Deller <[email protected]>
 */
#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>
#include <asm/pgtable.h>

#include <linux/linkage.h>
#include <linux/init.h>

	/* LEVEL comes from <asm/assembly.h>: PA 1.1 for 32-bit builds,
	 * 2.0w for 64-bit builds. */
	.level	LEVEL

	__INITDATA
	/* Scratch area where stext saves the four 32-bit arguments the
	 * boot loader passed in %arg0-%arg3 (see the stw,ma sequence in
	 * stext below). */
ENTRY(boot_args)
	.word 0			/* arg0 */
	.word 0			/* arg1 */
	.word 0			/* arg2 */
	.word 0			/* arg3 */
END(boot_args)

	__HEAD

	.align	4
	.import init_thread_union,data
	.import fault_vector_20,code	/* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
	.import fault_vector_11,code	/* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
	.export _stext,data		/* Kernel want it this way! */
_stext:
/* stext: kernel entry point from the boot loader (monarch CPU).
 * Clears BSS, saves boot-loader args, builds the initial page tables,
 * then falls through / branches into common_stext, which ends with an
 * rfi into start_parisc in virtual mode.
 */
ENTRY(stext)
	.proc
	.callinfo

	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data

	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
$bss_loop:
	/* The stw,ma in the delay slot stores zero and post-increments
	 * %r3 by 4 each iteration; loop while %r3 < %r4 (unsigned). */
	cmpb,<<,n	%r3,%r4,$bss_loop
	stw,ma		%r0,4(%r3)

	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma		%arg0,4(%r1)
	stw,ma		%arg1,4(%r1)
	stw,ma		%arg2,4(%r1)
	stw,ma		%arg3,4(%r1)

	/* Initialize startup VM.  Just map first 8/16 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#if PT_NLEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd		%r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

	/* Fill in pmd with enough pte directories */
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1

1:
	/* %r1 = entries remaining; the ldo in the delay slot advances
	 * %r4 to the next pmd/pgd slot while the branch is taken. */
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#if PT_NLEVELS == 3
	ldo		ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo		ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif


	/* Now initialize the PTEs themselves.  We use RWX for
	 * everything ... it will get remapped correctly later */
	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1

$pgt_fill_loop:
	STREGM		%r3,ASM_PTE_ENTRY_SIZE(%r1)
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop

	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2

	/* And the RFI Target address too */
	load32		start_parisc,%r11

	/* And the initial task pointer */
	load32		init_thread_union,%r6
	mtctl		%r6,%cr30

	/* And the stack pointer too */
	ldo		THREAD_SZ_ALGN(%r6),%sp

#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config() but
	** it's just way easier to deal with here because
	** of 64-bit function ptrs and the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */

	/* FALLTHROUGH */
	.procend

	/*
	** Code Common to both Monarch and Slave processors.
	** Entry:
	**
	** 1.1:
	**	%r11 must contain RFI target address.
	**	%r25/%r26 args to pass to target function
	**	%r2 in case rfi target decides it didn't like something
	**
	** 2.0w:
	**	%r3  PDCE_PROC address
	**	%r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %cr24/25,
	*/
common_stext:
	.proc
	.callinfo
#else
	/* Clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef CONFIG_64BIT
	tophys_r1	%sp

	/* Save the rfi target address */
	ldd		TI_TASK-THREAD_SZ_ALGN(%sp), %r10
	tophys_r1	%r10
	std		%r11, TASK_PT_GR11(%r10)
	/* Switch to wide mode Superdome doesn't support narrow PDC
	** calls.
	*/
1:	mfia	%rp		/* clear upper part of pcoq */
	ldo	2f-1b(%rp),%rp
	depdi	0,31,32,%rp
	bv	(%rp)
	ssm	PSW_SM_W,%r0	/* delay slot: set PSW W (wide) bit */

	/* Set Wide mode as the "Default" (eg for traps)
	** First trap occurs *right* after (or part of) rfi for slave CPUs.
	** Someday, palo might not do this for the Monarch either.
	*/
2:
	/* Fixed page-zero locations holding the PDC entry point
	 * (low/high words); combined below into a 64-bit address. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
	ldw		MEM_PDC_LO(%r0),%r3
	ldw		MEM_PDC_HI(%r0),%r6
	depd		%r6, 31, 32, %r3	/* move to upper word */

	/* PDC call: pdc(PDC_PSW, PDC_PSW_SET_DEFAULTS, PDC_PSW_WIDE_BIT, 0),
	 * returning to stext_pdc_ret (physical address). */
	ldo		PDC_PSW(%r0),%arg0		/* 21 */
	ldo		PDC_PSW_SET_DEFAULTS(%r0),%arg1	/* 2 */
	ldo		PDC_PSW_WIDE_BIT(%r0),%arg2	/* 2 */
	load32		PA(stext_pdc_ret), %rp
	bv		(%r3)
	copy		%r0,%arg3

stext_pdc_ret:
	/* restore rfi target address*/
	ldd		TI_TASK-THREAD_SZ_ALGN(%sp), %r10
	tophys_r1	%r10
	ldd		TASK_PT_GR11(%r10), %r11
	tovirt_r1	%sp
#endif

	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3

	/* Initialize Protection Registers */
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Initialize the global data pointer */
	loadgp

	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0. The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef CONFIG_64BIT
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10	/* PA2.0-only form; reads back 0 on PA1.1 */
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
	.level		LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14	/* cr14 = IVA (interrupt vector address) */

	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align 128
aligned_rfi:
	pcxt_ssm_bug

	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
	/* Don't need NOPs, have 8 compliant insn before rfi */

	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into PC queue */
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw

	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

	.procend
#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
	/* RFI *return* address for slave CPUs (32-bit only): smp_callin
	 * is never supposed to return, so trap if it does. */
smp_callin_rtn:
	.proc
	.callinfo
	break	1,1	/*  Break if returned from start_secondary */
	nop
	nop
	.procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, registers values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
	.proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%sp
	LDREG		0(%sp),%sp	/* load task address */
	tophys_r1	%sp
	LDREG		TASK_THREAD_INFO(%sp),%sp
	mtctl		%sp,%cr30	/* store in cr30 */
	ldo		THREAD_SZ_ALGN(%sp),%sp

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry (common_stext expects it in %r3 for 2.0w) */
	copy		%arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address.  */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */

ENDPROC(stext)
348
#ifndef CONFIG_64BIT
	.section .data..read_mostly

	.align	4
	/* 32-bit only: the $global$ symbol imported near the top of this
	 * file.  A single zero-initialized word; presumably the value
	 * loaded as the global data pointer by loadgp -- confirm against
	 * the loadgp definition in <asm/assembly.h>. */
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/