GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/sh/kernel/head_32.S
/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 *
 * arch/sh/kernel/head.S
 *
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2010 Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Head.S contains the SH exception handlers and startup code.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <cpu/mmu_context.h>

#ifdef CONFIG_CPU_SH4A
#define SYNCO()		synco

#define PREFI(label, reg)	\
	mov.l	label, reg;	\
	prefi	@reg
#else
#define SYNCO()
#define PREFI(label, reg)
#endif
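
/*
 * For reference: on SH-4A, "synco" waits for all preceding instructions
 * to complete and "prefi" prefetches an instruction cache block, so
 * e.g. PREFI(5f, r0) expands to
 *
 *	mov.l	5f, r0
 *	prefi	@r0
 *
 * On all other CPUs both macros expand to nothing.
 */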

	.section	.empty_zero_page, "aw"
ENTRY(empty_zero_page)
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00000000	/* INITRD_START */
	.long	0x00000000	/* INITRD_SIZE */
#ifdef CONFIG_32BIT
	.long	0x53453f00 + 32	/* "SE?" = 32 bit */
#else
	.long	0x53453f00 + 29	/* "SE?" = 29 bit */
#endif
1:
	.skip	PAGE_SIZE - empty_zero_page - 1b
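
/*
 * Note: 0x53453f00 packs the ASCII bytes 'S' (0x53), 'E' (0x45) and
 * '?' (0x3f); the low byte (32 or 29) flags whether this kernel was
 * built for 32-bit or 29-bit physical addressing. The .skip above pads
 * .empty_zero_page out to a full PAGE_SIZE.
 */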

__HEAD

/*
 * Condition at the entry of _stext:
 *
 *   BSC has already been initialized.
 *   INTC may or may not be initialized.
 *   VBR may or may not be initialized.
 *   MMU may or may not be initialized.
 *   Cache may or may not be initialized.
 *   Hardware (including on-chip modules) may or may not be initialized.
 *
 */
ENTRY(_stext)
	! Initialize Status Register
	mov.l	1f, r0		! MD=1, RB=0, BL=1, IMASK=0xF (see 1f below)
	ldc	r0, sr
	! Initialize global interrupt mask
#ifdef CONFIG_CPU_HAS_SR_RB
	mov	#0, r0
	ldc	r0, r6_bank
#endif

	/*
	 * Prefetch if possible to reduce cache miss penalty.
	 *
	 * We do this early on for SH-4A as a micro-optimization,
	 * as later on we will have speculative execution enabled
	 * and this will become less of an issue.
	 */
	PREFI(5f, r0)
	PREFI(6f, r0)

	!
	mov.l	2f, r0
	mov	r0, r15		! Set initial r15 (stack pointer)
#ifdef CONFIG_CPU_HAS_SR_RB
	mov.l	7f, r0
	ldc	r0, r7_bank	! ... and initial thread_info
#endif
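
/*
 * Note: on CPUs with SR.RB, r6_bank/r7_bank address the bank-1 shadow
 * copies of r6/r7. The SH kernel keeps the current thread_info pointer
 * in r7_bank, which is why it is seeded here together with the initial
 * stack pointer (init_thread_union + THREAD_SIZE, i.e. the stack top).
 */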

#ifdef CONFIG_PMB
/*
 * Reconfigure the initial PMB mappings set up by the hardware.
 *
 * When we boot in 32-bit MMU mode there are 2 PMB entries already
 * set up for us.
 *
 * Entry  VPN         PPN         V   SZ     C  UB  WT
 * ---------------------------------------------------------------
 *   0    0x80000000  0x00000000  1   512MB  1  0   1
 *   1    0xA0000000  0x00000000  1   512MB  0  0   0
 *
 * But we reprogram them here because we want complete control over
 * our address space and the initial mappings may not map PAGE_OFFSET
 * to __MEMORY_START (or even map all of our RAM).
 *
 * Once we've set up cached and uncached mappings we clear the rest of the
 * PMB entries. This clearing also deals with the fact that PMB entries
 * can persist across reboots. The PMB could have been left in any state
 * when the reboot occurred, so to be safe we clear all entries and start
 * with a clean slate.
 *
 * The uncached mapping is constructed using the smallest possible
 * mapping with a single unbufferable page. Only the kernel text needs to
 * be covered via the uncached mapping so that certain functions can be
 * run uncached.
 *
 * Drivers and the like that have previously abused the 1:1 identity
 * mapping are unsupported in 32-bit mode and must specify their caching
 * preference when page tables are constructed.
 *
 * This frees up the P2 space for more nefarious purposes.
 *
 * Register utilization is as follows:
 *
 *	r0 = PMB_DATA data field
 *	r1 = PMB_DATA address field
 *	r2 = PMB_ADDR data field
 *	r3 = PMB_ADDR address field
 *	r4 = PMB_E_SHIFT
 *	r5 = remaining amount of RAM to map
 *	r6 = PMB mapping size we're trying to use
 *	r7 = cached_to_uncached
 *	r8 = scratch register
 *	r9 = scratch register
 *	r10 = number of PMB entries we've set up
 *	r11 = scratch register
 */

	mov.l	.LMMUCR, r1	/* Flush the TLB */
	mov.l	@r1, r0
	or	#MMUCR_TI, r0
	mov.l	r0, @r1

	mov.l	.LMEMORY_SIZE, r5

	mov	#PMB_E_SHIFT, r0
	mov	#0x1, r4
	shld	r0, r4
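
/*
 * r4 now holds 1 << PMB_E_SHIFT, the stride between consecutive PMB
 * entry registers; "add r4, rN" below steps an iterator from one PMB
 * slot to the next.
 */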

	mov.l	.LFIRST_DATA_ENTRY, r0
	mov.l	.LPMB_DATA, r1
	mov.l	.LFIRST_ADDR_ENTRY, r2
	mov.l	.LPMB_ADDR, r3

	/*
	 * First we need to walk the PMB and figure out if there are any
	 * existing mappings that match the initial mappings' VPN/PPN.
	 * If these have already been established by the bootloader, we
	 * don't bother setting up new entries here, and let the late PMB
	 * initialization take care of things instead.
	 *
	 * Note that we may need to coalesce and merge entries in order
	 * to reclaim more available PMB slots, which is much more work
	 * than we want to do at this early stage.
	 */
	mov	#0, r10
	mov	#NR_PMB_ENTRIES, r9

	mov	r1, r7		/* temporary PMB_DATA iter */

.Lvalidate_existing_mappings:

	mov.l	.LPMB_DATA_MASK, r11
	mov.l	@r7, r8
	and	r11, r8
	cmp/eq	r0, r8		/* Check for valid __MEMORY_START mappings */
	bt	.Lpmb_done

	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r9, r10
	bf/s	.Lvalidate_existing_mappings
	add	r4, r7		/* Increment to the next PMB_DATA entry */
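
/*
 * Note: the "add r4, r7" sits in the bf/s delay slot, so it executes on
 * every pass, advancing the PMB_DATA iterator whether or not the branch
 * back is taken.
 */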

	/*
	 * If we've fallen through, continue with setting up the initial
	 * mappings.
	 */

	mov	r5, r7		/* cached_to_uncached */
	mov	#0, r10

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Uncached mapping
	 */
	mov	#(PMB_SZ_16M >> 2), r9
	shll2	r9

	mov	#(PMB_UB >> 8), r8
	shll8	r8

	or	r0, r8
	or	r9, r8
	mov.l	r8, @r1
	mov	r2, r8
	add	r7, r8
	mov.l	r8, @r3

	add	r4, r1
	add	r4, r3
	add	#1, r10
#endif
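
/*
 * Note on the shift idiom above: SH "mov #imm" only takes an 8-bit
 * sign-extended immediate, so wider constants such as PMB_SZ_16M and
 * PMB_UB are loaded pre-shifted (>> 2, >> 8) and then rebuilt with
 * shll2/shll8 into the full PMB_DATA field values.
 */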

/*
 * Iterate over all of the available sizes from largest to
 * smallest for constructing the cached mapping.
 */
#define __PMB_ITER_BY_SIZE(size)			\
.L##size:						\
	mov	#(size >> 4), r6;			\
	shll16	r6;					\
	shll8	r6;					\
							\
	cmp/hi	r5, r6;					\
	bt	9999f;					\
							\
	mov	#(PMB_SZ_##size##M >> 2), r9;		\
	shll2	r9;					\
							\
	/*						\
	 * Cached mapping				\
	 */						\
	mov	#PMB_C, r8;				\
	or	r0, r8;					\
	or	r9, r8;					\
	mov.l	r8, @r1;				\
	mov.l	r2, @r3;				\
							\
	/* Increment to the next PMB_DATA entry */	\
	add	r4, r1;					\
	/* Increment to the next PMB_ADDR entry */	\
	add	r4, r3;					\
	/* Increment number of PMB entries */		\
	add	#1, r10;				\
							\
	sub	r6, r5;					\
	add	r6, r0;					\
	add	r6, r2;					\
							\
	bra	.L##size;				\
9999:

__PMB_ITER_BY_SIZE(512)
__PMB_ITER_BY_SIZE(128)
__PMB_ITER_BY_SIZE(64)
__PMB_ITER_BY_SIZE(16)
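
/*
 * Worked example: for __PMB_ITER_BY_SIZE(512), "mov #(512 >> 4), r6"
 * loads 32, and shll16 + shll8 shift it left by 24 bits, giving
 * 32 << 24 = 0x20000000 = 512MB. Each expansion keeps mapping entries
 * of its size while r5 (remaining RAM) still covers one, then falls
 * through to the next smaller size.
 */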

#ifdef CONFIG_UNCACHED_MAPPING
	/*
	 * Now that we can access it, update cached_to_uncached and
	 * uncached_size.
	 */
	mov.l	.Lcached_to_uncached, r0
	mov.l	r7, @r0

	mov.l	.Luncached_size, r0
	mov	#1, r7
	shll16	r7
	shll8	r7
	mov.l	r7, @r0
#endif
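
/*
 * The value stored in uncached_size is 1 << 24 = 0x01000000 (16MB),
 * matching the single PMB_SZ_16M uncached entry programmed earlier.
 */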

	/*
	 * Clear the remaining PMB entries.
	 *
	 * r3 = entry to begin clearing from
	 * r10 = number of entries we've set up so far
	 */
	mov	#0, r1
	mov	#NR_PMB_ENTRIES, r0

.Lagain:
	mov.l	r1, @r3		/* Clear PMB_ADDR entry */
	add	#1, r10		/* Increment the loop counter */
	cmp/eq	r0, r10
	bf/s	.Lagain
	add	r4, r3		/* Increment to the next PMB_ADDR entry */

	mov.l	6f, r0
	icbi	@r0
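
/*
 * 6f holds the address of cpu_init; invalidating its instruction cache
 * block here means the upcoming jsr fetches it through the freshly
 * programmed PMB mappings rather than from a stale cache line.
 */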

.Lpmb_done:
#endif /* CONFIG_PMB */

#ifndef CONFIG_SH_NO_BSS_INIT
	/*
	 * Don't clear BSS if running on slow platforms such as an RTL simulation,
	 * remote memory via SHdebug link, etc. For these the memory can be guaranteed
	 * to be all zero on boot anyway.
	 */
	! Clear BSS area
#ifdef CONFIG_SMP
	mov.l	3f, r0
	cmp/eq	#0, r0		! skip clear if set to zero
	bt	10f
#endif

	mov.l	3f, r1
	add	#4, r1
	mov.l	4f, r2
	mov	#0, r0
9:	cmp/hs	r2, r1
	bf/s	9b		! while (r1 < r2)
	mov.l	r0,@-r2
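
/*
 * r1 starts at __bss_start + 4 because the store sits in the bf/s delay
 * slot and so runs once more after the comparison finally fails; that
 * last pre-decremented store lands on __bss_start itself, leaving the
 * whole of [__bss_start, _end) zeroed from the top down.
 */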

10:
#endif

	! Additional CPU initialization
	mov.l	6f, r0
	jsr	@r0
	nop

	SYNCO()			! Wait for pending instructions.

	! Start kernel
	mov.l	5f, r0
	jmp	@r0
	nop
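
/*
 * The 1:-7: words below form the literal pool for the PC-relative
 * "mov.l Nf, rX" loads used above; SH has no instruction that loads a
 * full 32-bit immediate directly.
 */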

	.balign 4
#if defined(CONFIG_CPU_SH2)
1:	.long	0x000000F0	! IMASK=0xF
#else
1:	.long	0x500080F0	! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2:	.long	init_thread_union+THREAD_SIZE
3:	.long	__bss_start
4:	.long	_end
5:	.long	start_kernel
6:	.long	cpu_init
7:	.long	init_thread_union

#ifdef CONFIG_PMB
.LPMB_ADDR:		.long	PMB_ADDR
.LPMB_DATA:		.long	PMB_DATA
.LPMB_DATA_MASK:	.long	PMB_PFN_MASK | PMB_V
.LFIRST_ADDR_ENTRY:	.long	PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY:	.long	__MEMORY_START | PMB_V
.LMMUCR:		.long	MMUCR
.LMEMORY_SIZE:		.long	__MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached:	.long	cached_to_uncached
.Luncached_size:	.long	uncached_size
#endif
#endif