GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/powerpc/aim/locore64.S

/*-
 * Copyright (C) 2010-2016 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "assym.inc"

#include <sys/syscall.h>

#include <machine/trap.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/asm.h>
#include <machine/vmparam.h>

#ifdef _CALL_ELF
.abiversion _CALL_ELF
#endif

/* Glue for linker script */
.globl  kernbase
.set    kernbase, KERNBASE

/*
 * Globals
 */
        .data
        .align 3
GLOBAL(__startkernel)
        .llong  begin
GLOBAL(__endkernel)
        .llong  end
GLOBAL(can_wakeup)
        .llong  0x0

        .align  4
#define TMPSTKSZ        16384           /* 16K temporary stack */
GLOBAL(tmpstk)
        .space  TMPSTKSZ
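
/*
 * TOC_ENTRY reserves a 64-bit slot in the TOC so the symbol's address
 * can be fetched with a single TOC-relative load through r2.
 */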
TOC_ENTRY(tmpstk)
TOC_ENTRY(can_wakeup)

#ifdef KDB
#define TRAPSTKSZ       8192            /* 8k trap stack */
GLOBAL(trapstk)
        .space  TRAPSTKSZ
TOC_ENTRY(trapstk)
#endif


/*
 * Entry point for bootloaders that do not fully implement ELF and start
 * at the beginning of the image (kexec, notably). In its own section so
 * that it ends up before any linker-generated call stubs and actually at
 * the beginning of the image. kexec on some systems also enters at
 * (start of image) + 0x60, so put a spin loop there.
 */
        .section ".text.kboot", "x", @progbits
kbootentry:
#ifdef __LITTLE_ENDIAN__
        RETURN_TO_NATIVE_ENDIAN
#endif
        b       __start
. = kbootentry + 0x40   /* Magic address used in platform layer */
        .global smp_spin_sem
ap_kexec_spin_sem:
        .long   -1
. = kbootentry + 0x60   /* Entry point for kexec APs */
ap_kexec_start:         /* At 0x60 past start, copied to 0x60 by kexec */
        /* r3 set to CPU ID by kexec */

        /* Invalidate icache for low-memory copy and jump there */
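        /*
         * dcbst flushes the copied line to memory, sync orders the store,
         * icbi discards any stale instruction-cache copy, and isync drops
         * prefetched instructions: the architected sequence for executing
         * freshly written code.
         */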
        li      %r0,0x80
        dcbst   0,%r0
        sync
        icbi    0,%r0
        isync
        ba      0x80                    /* Absolute branch to next inst */

. = kbootentry + 0x80   /* Aligned to cache line */
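        /*
         * The or rN,rN,rN forms are SMT thread-priority hints: 31 lowers
         * priority while spinning, 2 restores it. A base register of 0
         * reads as literal zero, so 0x40(0) is an absolute load from the
         * low-memory copy of ap_kexec_spin_sem.
         */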
1:      or      31,31,31                /* yield */
        sync
        lwz     %r1,0x40(0)             /* Spin on ap_kexec_spin_sem */
        cmpw    %r1,%r3                 /* Until it equals our CPU ID */
        bne     1b

        /* Released */
        or      2,2,2                   /* unyield */

        /* Make sure that it will be software reset. Clear SRR1 */
        li      %r1,0
        mtsrr1  %r1
        ba      EXC_RST

/*
 * Now start the real text section
 */

        .text
        .globl  btext
btext:

/*
 * Main kernel entry point.
 *
 * Calling convention:
 * r3: Flattened Device Tree pointer (or zero)
 * r4: ignored
 * r5: OF client interface pointer (or zero)
 * r6: Loader metadata pointer (or zero)
 * r7: Magic cookie (0xfb5d104d) to indicate that r6 has loader metadata
 */
        .text
_NAKED_ENTRY(__start)

#ifdef __LITTLE_ENDIAN__
        RETURN_TO_NATIVE_ENDIAN
#endif
        /* Set 64-bit mode if not yet set before branching to C */
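        /* insrdi deposits a 1 into MSR bit 0 (MSR[SF]), the 64-bit-mode bit. */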
        mfmsr   %r20
        li      %r21,1
        insrdi  %r20,%r21,1,0
        mtmsrd  %r20
        isync
        nop     /* Make this block a multiple of 8 bytes */

        /* Set up the TOC pointer */
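        /*
         * PC-relative trick: bl sets LR to the address of the .llong data
         * word, and the word itself holds the link-time offset from that
         * address to __tocbase + 0x8000, so their sum is the run-time TOC
         * pointer at any load address. The 0x8000 bias is the usual ELF
         * convention: r2 points 32KB past the TOC start so signed 16-bit
         * displacements cover the whole 64KB TOC.
         */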
        b       0f
        .align 3
0:      nop
        bl      1f
        .llong  __tocbase + 0x8000 - .
1:      mflr    %r2
        ld      %r1,0(%r2)
        add     %r2,%r1,%r2

        /* Get load offset */
        ld      %r31,-0x8000(%r2)       /* First TOC entry is TOC base */
        subf    %r31,%r31,%r2           /* Subtract from real TOC base to get base */
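        /* r31 now holds the load offset used for self-relocation below. */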

        /* Set up the stack pointer */
        bl      1f
        .llong  tmpstk + TMPSTKSZ - 96 - .
1:      mflr    %r30
        ld      %r1,0(%r30)
        add     %r1,%r1,%r30
        nop
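        /*
         * The 96 bytes left above the stack pointer form the initial call
         * frame; the boot arguments are saved into it at 48..80(%r1) so
         * they survive the elf_reloc_self call.
         */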

        /* Relocate kernel */
        std     %r3,48(%r1)
        std     %r4,56(%r1)
        std     %r5,64(%r1)
        std     %r6,72(%r1)
        std     %r7,80(%r1)
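        /* elf_reloc_self takes the run-time address of _DYNAMIC in r3 and
         * the load offset in r4, and applies the kernel's own relocations. */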

        bl      1f
        .llong  _DYNAMIC-.
1:      mflr    %r3
        ld      %r4,0(%r3)
        add     %r3,%r4,%r3
        mr      %r4,%r31
        bl      elf_reloc_self
        nop
        ld      %r3,48(%r1)
        ld      %r4,56(%r1)
        ld      %r5,64(%r1)
        ld      %r6,72(%r1)
        ld      %r7,80(%r1)

        /* Begin CPU init */
        mr      %r4,%r2 /* Replace ignored r4 with tocbase for trap handlers */
        bl      powerpc_init
        nop

        /* Set stack pointer to new value and branch to mi_startup */
        mr      %r1, %r3
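        /* Store a NULL back chain so stack unwinding terminates here. */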
        li      %r3, 0
        std     %r3, 0(%r1)
        bl      mi_startup
        nop

        /* Unreachable */
        b       .
_END(__start)

ASENTRY_NOPROF(__restartkernel_virtual)
        /*
         * When coming in via this entry point, we need to alter the SLB to
         * shadow the segment register emulation entries in DMAP space.
         * We need to do this dance because we are running with virtual-mode
         * OpenFirmware and have not yet taken over the MMU.
         *
         * Assumptions:
         * 1) The kernel is currently identity-mapped.
         * 2) We are currently executing at an address compatible with
         *    real mode.
         * 3) The first 16 SLB entries are emulating SRs.
         * 4) The rest of the SLB is not in use.
         * 5) OpenFirmware is not manipulating the SLB at runtime.
         * 6) We are running on 64-bit AIM.
         *
         * Tested on a G5.
         */
        mfmsr   %r14
        /* Switch to real mode because we are about to mess with the SLB. */
        andi.   %r14, %r14, ~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l
        mtmsr   %r14
        isync
        /* Prepare variables for later use. */
        li      %r14, 0
        li      %r18, 0
        oris    %r18, %r18, 0xc000
        sldi    %r18, %r18, 32          /* r18: 0xc000000000000000 */
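        /* 0xc000000000000000 is the base of the kernel's direct map (DMAP);
         * ORing it into each ESID moves the mapping into DMAP space. */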
1:
        /*
         * Loop over the first 16 SLB entries.
         * Offset the SLBE into the DMAP, add 16 to the index, and write
         * it back to the SLB.
         */
        /* XXX add more safety checks */
        slbmfev %r15, %r14
        slbmfee %r16, %r14
        or      %r16, %r16, %r14        /* index is 0-15 */
        ori     %r16, %r16, 0x10        /* add 16 to index. */
        or      %r16, %r16, %r18        /* SLBE DMAP offset */
        rldicr  %r17, %r16, 0, 37       /* Invalidation SLBE */
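        /* rldicr masks off the low-order (valid/index) bits, leaving only
         * the ESID portion that slbie expects as its operand. */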

        isync
        slbie   %r17
        /* isync */
        slbmte  %r15, %r16
        isync
        addi    %r14, %r14, 1
        cmpdi   %r14, 16
        blt     1b

        /*
         * Now that we are set up with a temporary direct map, we can
         * continue with __restartkernel. Translation will be switched
         * back on at the rfid, at which point we will be executing from
         * the temporary direct map we just installed, until the kernel
         * takes over responsibility for the MMU.
         */
        bl      __restartkernel
        nop
ASEND(__restartkernel_virtual)

ASENTRY_NOPROF(__restartkernel)
        /*
         * r3-r7: arguments to go to __start
         * r8: offset from current kernel address to apply
         * r9: MSR to set when (atomically) jumping to __start + r8
         */
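        /* rfid loads the MSR from SRR1 and the PC from SRR0 in a single
         * operation, so the MSR change and the jump to the relocated 2:
         * label happen atomically. */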
        mtsrr1  %r9
        bl      1f
1:      mflr    %r25
        add     %r25,%r8,%r25
        addi    %r25,%r25,2f-1b
        mtsrr0  %r25
        rfid
2:      bl      __start
        nop
ASEND(__restartkernel)

#include <powerpc/aim/trap_subr64.S>
289