GitHub Repository: torvalds/linux
Path: blob/master/arch/sh/kernel/head_32.S
/* SPDX-License-Identifier: GPL-2.0
 * $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 *
 *  arch/sh/kernel/head.S
 *
 *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *  Copyright (C) 2010  Matt Fleming
 *
 * Head.S contains the SH exception handlers and startup code.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <cpu/mmu_context.h>

#ifdef CONFIG_CPU_SH4A
#define SYNCO()			synco

#define PREFI(label, reg)	\
	mov.l	label, reg;	\
	prefi	@reg
#else
#define SYNCO()
#define PREFI(label, reg)
#endif
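
/*
 * Note: on non-SH-4A parts both helpers expand to nothing, so the
 * instruction prefetches below and the SYNCO() barrier taken before
 * jumping to start_kernel only take effect when CONFIG_CPU_SH4A is set.
 */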

	.section	.empty_zero_page, "aw"
ENTRY(empty_zero_page)
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00000000	/* INITRD_START */
	.long	0x00000000	/* INITRD_SIZE */
#ifdef CONFIG_32BIT
	.long	0x53453f00 + 32	/* "SE?" = 32 bit */
#else
	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
#endif
1:
	.skip	PAGE_SIZE - empty_zero_page - 1b
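
/*
 * Note: the words above are the SH boot parameter block that a boot
 * loader may patch before entering the kernel (the setup code reads
 * them back out of empty_zero_page); the .skip pads the section out so
 * that empty_zero_page really is one full page.
 */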

	__HEAD

/*
 * Condition at the entry of _stext:
 *
 *   BSC has already been initialized.
 *   INTC may or may not be initialized.
 *   VBR may or may not be initialized.
 *   MMU may or may not be initialized.
 *   Cache may or may not be initialized.
 *   Hardware (including on-chip modules) may or may not be initialized.
 *
 */
ENTRY(_stext)
	!			Initialize Status Register
	mov.l	1f, r0		! MD=1, RB=0, BL=0, IMASK=0xF
	ldc	r0, sr
	!			Initialize global interrupt mask
#ifdef CONFIG_CPU_HAS_SR_RB
	mov	#0, r0
	ldc	r0, r6_bank
#endif

#ifdef CONFIG_OF_EARLY_FLATTREE
	mov	r4, r12		! Store device tree blob pointer in r12
#endif

	/*
	 * Prefetch if possible to reduce cache miss penalty.
	 *
	 * We do this early on for SH-4A as a micro-optimization,
	 * as later on we will have speculative execution enabled
	 * and this will become less of an issue.
	 */
	PREFI(5f, r0)
	PREFI(6f, r0)

	!
	mov.l	2f, r0
	mov	r0, r15		! Set initial r15 (stack pointer)
#ifdef CONFIG_CPU_HAS_SR_RB
	mov.l	7f, r0
	ldc	r0, r7_bank	! ... and initial thread_info
#endif
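
	/*
	 * Note: with CONFIG_CPU_HAS_SR_RB the CPU has banked registers;
	 * r6_bank holds the global interrupt mask used by the SH entry
	 * code, and r7_bank is expected to point at the current
	 * thread_info (see current_thread_info() in <asm/thread_info.h>),
	 * which is why both are seeded here before any exception can hit.
	 */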

#ifdef CONFIG_PMB
/*
 * Reconfigure the initial PMB mappings set up by the hardware.
 *
 * When we boot in 32-bit MMU mode there are 2 PMB entries already
 * set up for us.
 *
 * Entry  VPN         PPN         V   SZ      C   UB  WT
 * ---------------------------------------------------------------
 *   0    0x80000000  0x00000000  1   512MB   1   0   1
 *   1    0xA0000000  0x00000000  1   512MB   0   0   0
 *
 * But we reprogram them here because we want complete control over
 * our address space and the initial mappings may not map PAGE_OFFSET
 * to __MEMORY_START (or even map all of our RAM).
 *
 * Once we've set up cached and uncached mappings we clear the rest of the
 * PMB entries. This clearing also deals with the fact that PMB entries
 * can persist across reboots. The PMB could have been left in any state
 * when the reboot occurred, so to be safe we clear all entries and start
 * with a clean slate.
 *
 * The uncached mapping is constructed using the smallest possible
 * mapping with a single unbufferable page. Only the kernel text needs to
 * be covered via the uncached mapping so that certain functions can be
 * run uncached.
 *
 * Drivers and the like that have previously abused the 1:1 identity
 * mapping are unsupported in 32-bit mode and must specify their caching
 * preference when page tables are constructed.
 *
 * This frees up the P2 space for more nefarious purposes.
 *
 * Register utilization is as follows:
 *
 *	r0 = PMB_DATA data field
 *	r1 = PMB_DATA address field
 *	r2 = PMB_ADDR data field
 *	r3 = PMB_ADDR address field
 *	r4 = PMB_E_SHIFT
 *	r5 = remaining amount of RAM to map
 *	r6 = PMB mapping size we're trying to use
 *	r7 = cached_to_uncached
 *	r8 = scratch register
 *	r9 = scratch register
 *	r10 = number of PMB entries we've set up
 *	r11 = scratch register
 */

	mov.l	.LMMUCR, r1	/* Flush the TLB */
	mov.l	@r1, r0
	or	#MMUCR_TI, r0
	mov.l	r0, @r1

	mov.l	.LMEMORY_SIZE, r5

	mov	#PMB_E_SHIFT, r0
	mov	#0x1, r4
	shld	r0, r4
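
	/*
	 * Note: shld shifts r4 left by the amount in r0, so r4 now holds
	 * 1 << PMB_E_SHIFT -- the byte stride between consecutive entries
	 * in the memory-mapped PMB_ADDR/PMB_DATA arrays. It is used below
	 * as "add r4, r1" / "add r4, r3" to step to the next PMB entry.
	 */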

	mov.l	.LFIRST_DATA_ENTRY, r0
	mov.l	.LPMB_DATA, r1
	mov.l	.LFIRST_ADDR_ENTRY, r2
	mov.l	.LPMB_ADDR, r3

	/*
	 * First we need to walk the PMB and figure out if there are any
	 * existing mappings that match the initial mappings VPN/PPN.
	 * If these have already been established by the bootloader, we
	 * don't bother setting up new entries here, and let the late PMB
	 * initialization take care of things instead.
	 *
	 * Note that we may need to coalesce and merge entries in order
	 * to reclaim more available PMB slots, which is much more than
	 * we want to do at this early stage.
	 */
	mov	#0, r10
	mov	#NR_PMB_ENTRIES, r9

	mov	r1, r7		/* temporary PMB_DATA iter */

.Lvalidate_existing_mappings:

	mov.l	.LPMB_DATA_MASK, r11
	mov.l	@r7, r8
	and	r11, r8
	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
	bt	.Lpmb_done

	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r9, r10
	bf/s	.Lvalidate_existing_mappings
	 add	r4, r7		/* Increment to the next PMB_DATA entry */
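
	/*
	 * Note: the add above sits in the delay slot of the bf/s; SH
	 * delayed branches execute the following instruction as part of
	 * the branch, so r7 is advanced on every pass through the loop.
	 */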

	/*
	 * If we've fallen through, continue with setting up the initial
	 * mappings.
	 */

	mov	r5, r7		/* cached_to_uncached */
	mov	#0, r10

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Uncached mapping
	 */
	mov	#(PMB_SZ_16M >> 2), r9
	shll2	r9

	mov	#(PMB_UB >> 8), r8
	shll8	r8

	or	r0, r8
	or	r9, r8
	mov.l	r8, @r1
	mov	r2, r8
	add	r7, r8
	mov.l	r8, @r3

	add	r4, r1
	add	r4, r3
	add	#1, r10
#endif
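
/*
 * Note: SH "mov #imm, Rn" only takes an 8-bit sign-extended immediate,
 * which is presumably why the PMB field constants above (and in the
 * macro below) are loaded in a shifted-down form and then rebuilt with
 * shll2/shll8 instead of being or'd in directly.
 */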

/*
 * Iterate over all of the available sizes from largest to
 * smallest for constructing the cached mapping.
 */
#define __PMB_ITER_BY_SIZE(size)			\
.L##size:						\
	mov	#(size >> 4), r6;			\
	shll16	r6;					\
	shll8	r6;					\
							\
	cmp/hi	r5, r6;					\
	bt	9999f;					\
							\
	mov	#(PMB_SZ_##size##M >> 2), r9;		\
	shll2	r9;					\
							\
	/*						\
	 * Cached mapping				\
	 */						\
	mov	#PMB_C, r8;				\
	or	r0, r8;					\
	or	r9, r8;					\
	mov.l	r8, @r1;				\
	mov.l	r2, @r3;				\
							\
	/* Increment to the next PMB_DATA entry */	\
	add	r4, r1;					\
	/* Increment to the next PMB_ADDR entry */	\
	add	r4, r3;					\
	/* Increment number of PMB entries */		\
	add	#1, r10;				\
							\
	sub	r6, r5;					\
	add	r6, r0;					\
	add	r6, r2;					\
							\
	bra	.L##size;				\
9999:

	__PMB_ITER_BY_SIZE(512)
	__PMB_ITER_BY_SIZE(128)
	__PMB_ITER_BY_SIZE(64)
	__PMB_ITER_BY_SIZE(16)
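
/*
 * Note: in the macro above, r6 = (size >> 4) << 24, which works out to
 * "size" megabytes in bytes -- e.g. for 512: 512 >> 4 = 32 and
 * 32 << 24 = 0x20000000 = 512MB. Each pass maps one chunk of that size,
 * advances the VPN/PPN fields in r2/r0 by r6, subtracts r6 from the
 * remaining RAM in r5, and loops until the next chunk would overshoot
 * (cmp/hi r5, r6), at which point it falls through to the next smaller
 * size.
 */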

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Now that we can access it, update cached_to_uncached and
	 * uncached_size.
	 */
	mov.l	.Lcached_to_uncached, r0
	mov.l	r7, @r0

	mov.l	.Luncached_size, r0
	mov	#1, r7
	shll16	r7
	shll8	r7
	mov.l	r7, @r0
#endif
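
/*
 * Note: the shifts above compute r7 = 1 << 24 = 0x01000000, i.e. 16MB,
 * matching the single PMB_SZ_16M entry used for the uncached mapping
 * earlier, so uncached_size ends up describing that mapping.
 */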

	/*
	 * Clear the remaining PMB entries.
	 *
	 * r3 = entry to begin clearing from
	 * r10 = number of entries we've set up so far
	 */
	mov	#0, r1
	mov	#NR_PMB_ENTRIES, r0

.Lagain:
	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r0, r10
	bf/s	.Lagain
	 add	r4, r3		/* Increment to the next PMB_ADDR entry */

	mov.l	6f, r0
	icbi	@r0

.Lpmb_done:
#endif /* CONFIG_PMB */
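
/*
 * Note: 6f holds the address of cpu_init, so the icbi above invalidates
 * the instruction cache block covering code we are about to execute;
 * this appears to act as a barrier so that instruction fetch observes
 * the newly programmed PMB mappings before we branch there.
 */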

#ifndef CONFIG_SH_NO_BSS_INIT
	/*
	 * Don't clear BSS if running on slow platforms such as an RTL simulation,
	 * remote memory via SHdebug link, etc. For these the memory can be guaranteed
	 * to be all zero on boot anyway.
	 */
				! Clear BSS area
#ifdef CONFIG_SMP
	mov.l	3f, r0
	cmp/eq	#0, r0		! skip clear if set to zero
	bt	10f
#endif

	mov.l	3f, r1
	add	#4, r1
	mov.l	4f, r2
	mov	#0, r0
9:	cmp/hs	r2, r1
	bf/s	9b		! while (r1 < r2)
	 mov.l	r0,@-r2

10:
#endif
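
/*
 * Note: the loop above is roughly "while (__bss_start + 4 < p) *--p = 0;"
 * walking down from _end (4f) toward __bss_start (3f), with the zeroing
 * store sitting in the bf/s delay slot; the "add #4, r1" appears to bias
 * the bound to account for the delay-slot store also running on the
 * final, exiting pass.
 */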

#ifdef CONFIG_OF_EARLY_FLATTREE
	mov.l	8f, r0		! Make flat device tree available early.
	jsr	@r0
	 mov	r12, r4
#endif
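
	/*
	 * Note: "mov r12, r4" is in the jsr delay slot, so it runs before
	 * sh_fdt_init is entered; r4 is the first argument register in
	 * the SH calling convention, so this passes the device tree blob
	 * pointer that was stashed in r12 at _stext.
	 */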

	!			Additional CPU initialization
	mov.l	6f, r0
	jsr	@r0
	 nop

	SYNCO()			! Wait for pending instructions..

	!			Start kernel
	mov.l	5f, r0
	jmp	@r0
	 nop

	.balign 4
#if defined(CONFIG_CPU_SH2)
1:	.long	0x000000F0	! IMASK=0xF
#else
1:	.long	0x500080F0	! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2:	.long	init_thread_union+THREAD_SIZE
3:	.long	__bss_start
4:	.long	_end
5:	.long	start_kernel
6:	.long	cpu_init
7:	.long	init_thread_union
#if defined(CONFIG_OF_EARLY_FLATTREE)
8:	.long	sh_fdt_init
#endif
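
/*
 * Note: the numeric labels above form the literal pool for _stext; the
 * "mov.l 1f, r0" style loads earlier assemble to PC-relative loads of
 * these 32-bit constants, the usual SH way of materializing full-width
 * addresses and immediates.
 */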

#ifdef CONFIG_PMB
.LPMB_ADDR:		.long	PMB_ADDR
.LPMB_DATA:		.long	PMB_DATA
.LPMB_DATA_MASK:	.long	PMB_PFN_MASK | PMB_V
.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
.LMMUCR:		.long	MMUCR
.LMEMORY_SIZE:		.long	__MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached:	.long	cached_to_uncached
.Luncached_size:	.long	uncached_size
#endif
#endif