GitHub Repository: torvalds/linux
Path: blob/master/arch/microblaze/kernel/head.S
/*
 * Copyright (C) 2007-2009 Michal Simek <[email protected]>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * MMU code derived from arch/ppc/kernel/head_4xx.S:
 *	Copyright (c) 1995-1996 Gary Thomas <[email protected]>
 *		Initial PowerPC version.
 *	Copyright (c) 1996 Cort Dougan <[email protected]>
 *		Rewritten for PReP
 *	Copyright (c) 1996 Paul Mackerras <[email protected]>
 *		Low-level exception handlers, MMU support, and rewrite.
 *	Copyright (c) 1997 Dan Malek <[email protected]>
 *		PowerPC 8xx modifications.
 *	Copyright (c) 1998-1999 TiVo, Inc.
 *		PowerPC 403GCX modifications.
 *	Copyright (c) 1999 Grant Erickson <[email protected]>
 *		PowerPC 403GCX/405GP modifications.
 *	Copyright 2000 MontaVista Software Inc.
 *		PPC405 modifications
 *		PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		[email protected] or [email protected]
 *		[email protected]
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <linux/of_fdt.h>		/* for OF_DT_HEADER */

#include <asm/setup.h>			/* COMMAND_LINE_SIZE */
#include <asm/mmu.h>
#include <asm/processor.h>

	.section .data
	.global empty_zero_page
	.align	12
empty_zero_page:
	.space	PAGE_SIZE
	.global	swapper_pg_dir
swapper_pg_dir:
	.space	PAGE_SIZE

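/*
 * Both objects above are PAGE_SIZE bytes: empty_zero_page provides the
 * kernel's shared page of zeroes, and swapper_pg_dir is the initial
 * top-level page directory (both standard kernel symbols).
 */
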
	.section .rodata
	.align	4
endian_check:
	.word	1

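/*
 * endian_check is read back below with a byte load: the word value 1 stored
 * here yields 0 from its first byte on a big-endian build and 1 on a
 * little-endian build, which is how the FDT header load is selected.
 */
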
	__HEAD
ENTRY(_start)
#if CONFIG_KERNEL_BASE_ADDR == 0
	brai	TOPHYS(real_start)
	.org	0x100
real_start:
#endif

	mts	rmsr, r0
	/* Disable stack protection from bootloader */
	mts	rslr, r0
	addi	r8, r0, 0xFFFFFFFF
	mts	rshr, r8
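	/*
	 * rslr/rshr are the MicroBlaze stack low/high limit registers; setting
	 * the limits to 0 and 0xFFFFFFFF opens up the full address range, so
	 * any stack protection configured by the bootloader cannot trap us here.
	 */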
	/*
	 * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
	 * if the msrclr instruction is not enabled. We use this to detect
	 * if the opcode is available, by issuing msrclr and then testing
	 * the result.
	 * r8 == 0 - msr instructions are implemented
	 * r8 != 0 - msr instructions are not implemented
	 */
	mfs	r1, rmsr
	msrclr	r8, 0			/* clear nothing - just read msr for test */
	cmpu	r8, r8, r1		/* r1 must contain msr reg content */
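	/*
	 * After the compare, r8 is 0 only if msrclr returned the same value
	 * that mfs read from rmsr, i.e. the MSR instructions are implemented;
	 * otherwise r8 is non-zero. r8 is not touched again in this path,
	 * presumably so the early C setup code can consume the result.
	 */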

	/* r7 may point to an FDT, or there may be one linked in.
	   If it's in r7, we've got to save it away ASAP.
	   We ensure r7 points to a valid FDT, just in case the bootloader
	   is broken or non-existent. */
	beqi	r7, no_fdt_arg			/* NULL pointer? don't copy */
	/* Does r7 point to a valid FDT? Load HEADER magic number */
	/* Run-time big/little endian platform */
	/* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */
	lbui	r11, r0, TOPHYS(endian_check)
	beqid	r11, big_endian			/* DO NOT break delay stop dependency */
	lw	r11, r0, r7			/* Big endian load in delay slot */
	lwr	r11, r0, r7			/* Little endian load */
big_endian:
	rsubi	r11, r11, OF_DT_HEADER		/* Check FDT header */
	beqi	r11, _prepare_copy_fdt
	or	r7, r0, r0			/* clear r7 when not a valid DTB */
	bnei	r11, no_fdt_arg			/* No - get out of here */
_prepare_copy_fdt:
	or	r11, r0, r0			/* zero the copy offset */
	ori	r4, r0, TOPHYS(_fdt_start)
	ori	r3, r0, (0x10000 - 4)
_copy_fdt:
	lw	r12, r7, r11			/* r12 = r7 + r11 */
	sw	r12, r4, r11			/* addr[r4 + r11] = r12 */
	addik	r11, r11, 4			/* increment offset */
	bgtid	r3, _copy_fdt			/* loop for all entries */
	addik	r3, r3, -4			/* decrement loop counter */
no_fdt_arg:

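	/*
	 * At this point either r7 was NULL/invalid (and has been cleared), or
	 * the loop above copied a fixed 64 KB (0x10000 byte) window of the
	 * device tree, word by word, into the kernel's own _fdt_start buffer.
	 */
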
#ifndef CONFIG_CMDLINE_BOOL
	/*
	 * Handle the command line: copy it directly to cmd_line, which is
	 * placed in the data section.
	 */
	beqid	r5, skip			/* Skip if NULL pointer */
	or	r11, r0, r0			/* zero the copy offset */
	ori	r4, r0, cmd_line		/* load address of command line */
	tophys(r4,r4)				/* convert to phys address */
	ori	r3, r0, COMMAND_LINE_SIZE - 1	/* number of loops */
_copy_command_line:
	/* r2 = r5 + r11 - r5 contains the pointer to the command line */
	lbu	r2, r5, r11
	beqid	r2, skip			/* Skip if no data */
	sb	r2, r4, r11			/* addr[r4 + r11] = r2 */
	addik	r11, r11, 1			/* increment offset */
	bgtid	r3, _copy_command_line		/* loop for all entries */
	addik	r3, r3, -1			/* decrement loop counter */
	addik	r5, r4, 0			/* point r5 at the copied command line */
	tovirt(r5,r5)
skip:
#endif /* CONFIG_CMDLINE_BOOL */

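	/*
	 * After the copy, r5 holds the virtual address of the kernel's own
	 * copy of the command line rather than the bootloader's buffer; the
	 * copy stops at the first NUL byte and is bounded by COMMAND_LINE_SIZE.
	 */
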
#ifdef NOT_COMPILE
	/* save bram context */
	or	r11, r0, r0			/* zero the copy offset */
	ori	r4, r0, TOPHYS(_bram_load_start)	/* save bram context */
	ori	r3, r0, (LMB_SIZE - 4)
_copy_bram:
	lw	r7, r0, r11			/* r7 = r0 + r11 */
	sw	r7, r4, r11			/* addr[r4 + r11] = r7 */
	addik	r11, r11, 4			/* increment offset */
	bgtid	r3, _copy_bram			/* loop for all entries */
	addik	r3, r3, -4			/* decrement loop counter */
#endif
	/* We have to turn on the MMU right away. */

	/*
	 * Set up the initial MMU state so we can do the first level of
	 * kernel initialization. This maps the first 16 MBytes of memory 1:1
	 * virtual to physical.
	 */
	nop
	addik	r3, r0, MICROBLAZE_TLB_SIZE - 1	/* Invalidate all TLB entries */
_invalidate:
	mts	rtlbx, r3
	mts	rtlbhi, r0			/* flush: ensure V is clear */
	mts	rtlblo, r0
	bgtid	r3, _invalidate			/* loop for all entries */
	addik	r3, r3, -1
	/* sync */

	/* Setup the kernel PID */
	mts	rpid,r0				/* Load the kernel PID */
	nop
	bri	4

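	/*
	 * The loop above walks TLB indices MICROBLAZE_TLB_SIZE-1 down to 0,
	 * selecting each entry through rtlbx and writing zero to rtlbhi so its
	 * valid bit is cleared. The bri 4 simply branches to the next
	 * instruction, apparently serving as a pipeline flush after the rpid
	 * write.
	 */
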
	/*
	 * We should still be executing code at physical address area
	 * RAM_BASEADDR at this point. However, kernel code is at
	 * a virtual address. So, set up a TLB mapping to cover this once
	 * translation is enabled.
	 */

	addik	r3, r0, CONFIG_KERNEL_START	/* Load the kernel virtual address */
	tophys(r4,r3)				/* Load the kernel physical address */

	/* start the TLB size calculation */
	addik	r12, r0, _end
	rsub	r12, r3, r12
	addik	r12, r12, CONFIG_LOWMEM_SIZE >> PTE_SHIFT	/* that's the pad */

	or	r9, r0, r0			/* TLB0 = 0 */
	or	r10, r0, r0			/* TLB1 = 0 */

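	/*
	 * r12 now holds the amount of memory that must be covered: the kernel
	 * image (_end - CONFIG_KERNEL_START) plus the pad above. The branches
	 * below choose TLB0 (r9) and, if needed, TLB1 (r10) sizes from 1MB,
	 * 4MB or 16MB so that the two pinned entries span that amount.
	 */
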
	addik	r11, r12, -0x1000000
	bgei	r11, GT16			/* size is greater than 16MB */
	addik	r11, r12, -0x0800000
	bgei	r11, GT8			/* size is greater than 8MB */
	addik	r11, r12, -0x0400000
	bgei	r11, GT4			/* size is greater than 4MB */
	/* size is less than 4MB */
	addik	r11, r12, -0x0200000
	bgei	r11, GT2			/* size is greater than 2MB */
	addik	r9, r0, 0x0100000		/* TLB0 must be 1MB */
	addik	r11, r12, -0x0100000
	bgei	r11, GT1			/* size is greater than 1MB */
	/* TLB1 is 0, which was set up above */
	bri	tlb_end
GT4:	/* r11 contains the rest - TLB1 will be either 1MB or 4MB */
	ori	r9, r0, 0x400000		/* TLB0 is 4MB */
	bri	TLB1
GT16:	/* TLB0 is 16MB */
	addik	r9, r0, 0x1000000		/* means TLB0 is 16MB */
TLB1:
	/* r2 must be used here so r11 keeps the remainder if the test fails */
	addik	r2, r11, -0x0400000
	bgei	r2, GT20			/* remainder is at least 4MB */
	/* size is >16MB and <20MB */
	addik	r11, r11, -0x0100000
	bgei	r11, GT17			/* size is greater than 17MB */
	/* kernel is >16MB and <17MB */
GT1:
	addik	r10, r0, 0x0100000		/* means TLB1 is 1MB */
	bri	tlb_end
GT2:	/* TLB0 is 0 and TLB1 will be 4MB */
GT17:	/* TLB1 is 4MB - kernel size <20MB */
	addik	r10, r0, 0x0400000		/* means TLB1 is 4MB */
	bri	tlb_end
GT8:	/* TLB0 is still zero, that's why only TLB1 can be used */
GT20:	/* TLB1 is 16MB - kernel size >20MB */
	addik	r10, r0, 0x1000000		/* means TLB1 is 16MB */
tlb_end:

	/*
	 * Configure and load two entries into TLB slots 0 and 1.
	 * In case we are pinning TLBs, these are reserved by the
	 * other TLB functions. If not reserving, then it doesn't
	 * matter where they are loaded.
	 */
	andi	r4,r4,0xfffffc00		/* Mask off the real page number */
	ori	r4,r4,(TLB_WR | TLB_EX)		/* Set the write and execute bits */

	/*
	 * TLB0 is always used - check that it is not zero (r9 holds the TLB0
	 * size). If it is zero, use the TLB1 value instead and clear TLB1
	 * (r10 holds the TLB1 size).
	 */
	bnei	r9, tlb0_not_zero
	add	r9, r10, r0
	add	r10, r0, r0
tlb0_not_zero:

	/* encode the TLB0 mapping size (r9) into the SIZE field in r30 */
	ori	r30, r0, 0x200
	andi	r29, r9, 0x100000
	bneid	r29, 1f
	addik	r30, r30, 0x80
	andi	r29, r9, 0x400000
	bneid	r29, 1f
	addik	r30, r30, 0x80
	andi	r29, r9, 0x1000000
	bneid	r29, 1f
	addik	r30, r30, 0x80
1:
	andi	r3,r3,0xfffffc00		/* Mask off the effective page number */
	ori	r3,r3,(TLB_VALID)
	or	r3, r3, r30

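	/*
	 * Note on the size encoding above: r30 starts at 0x200 and gains 0x80
	 * per size step (each delay slot executes even when the branch is
	 * taken), ending at 0x280, 0x300 or 0x380 for a 1MB, 4MB or 16MB
	 * mapping. Assuming the usual MicroBlaze TLB_PAGESZ() layout (size
	 * code shifted left by 7, with 1MB=5, 4MB=6, 16MB=7), this is exactly
	 * the SIZE field OR-ed into rtlbhi with the effective page number and
	 * TLB_VALID.
	 */
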
	/* Load tlb_skip, the index of the first unused TLB entry */
	lwi	r11, r0, TOPHYS(tlb_skip)
	mts	rtlbx,r11			/* select that TLB slot */

	mts	rtlblo,r4			/* Load the data portion of the entry */
	mts	rtlbhi,r3			/* Load the tag portion of the entry */

	/* Increase tlb_skip size */
	addik	r11, r11, 1
	swi	r11, r0, TOPHYS(tlb_skip)

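	/*
	 * rtlbhi carries the tag half of the entry (effective page number,
	 * SIZE field and valid bit) and rtlblo the data half (real page number
	 * plus the write/execute permissions set in r4). Bumping tlb_skip
	 * records that one more low entry is in use, tlb_skip being the index
	 * of the first unused TLB entry as noted above.
	 */
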
	/* TLB1 can be zero, in which case there is nothing to set up */
	beqi	r10, jump_over2

	/* encode the TLB1 mapping size (r10) into the SIZE field, as above */
	ori	r30, r0, 0x200
	andi	r29, r10, 0x100000
	bneid	r29, 1f
	addik	r30, r30, 0x80
	andi	r29, r10, 0x400000
	bneid	r29, 1f
	addik	r30, r30, 0x80
	andi	r29, r10, 0x1000000
	bneid	r29, 1f
	addik	r30, r30, 0x80
1:
	addk	r4, r4, r9			/* previous addr + TLB0 size */
	addk	r3, r3, r9

	andi	r3,r3,0xfffffc00		/* Mask off the effective page number */
	ori	r3,r3,(TLB_VALID)
	or	r3, r3, r30

	lwi	r11, r0, TOPHYS(tlb_skip)
	mts	rtlbx, r11			/* select the next free TLB slot */

	mts	rtlblo,r4			/* Load the data portion of the entry */
	mts	rtlbhi,r3			/* Load the tag portion of the entry */

	/* Increase tlb_skip size */
	addik	r11, r11, 1
	swi	r11, r0, TOPHYS(tlb_skip)

jump_over2:
	/*
	 * Load a TLB entry for LMB, since we need access to
	 * the exception vectors, using a 4k real==virtual mapping.
	 */
	/* Use temporary TLB_ID for LMB - clear this temporary mapping later */
	ori	r11, r0, MICROBLAZE_LMB_TLB_ID
	mts	rtlbx,r11

	ori	r4,r0,(TLB_WR | TLB_EX)
	ori	r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))

	mts	rtlblo,r4			/* Load the data portion of the entry */
	mts	rtlbhi,r3			/* Load the tag portion of the entry */

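	/*
	 * Since neither r3 nor r4 carries any address bits here, this entry
	 * maps virtual address 0 to physical address 0 with a 4k page,
	 * covering the LMB/exception-vector area until it is torn down in
	 * kernel_load_context below.
	 */
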
	/*
	 * We now have the lower 16 Meg of RAM mapped into TLB entries, and the
	 * caches ready to work.
	 */
turn_on_mmu:
	ori	r15,r0,start_here
	ori	r4,r0,MSR_KERNEL_VMS
	mts	rmsr,r4
	nop
	rted	r15,0				/* enables MMU */
	nop

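	/*
	 * rted is "return from exception, delayed": it jumps to r15 and at the
	 * same time copies the MSR shadow bits VMS/UMS into VM/UM. With
	 * MSR_KERNEL_VMS staged in rmsr above, execution therefore resumes at
	 * the virtual address start_here with address translation enabled.
	 */
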
start_here:

	/* Initialize small data anchors */
	addik	r13, r0, _KERNEL_SDA_BASE_
	addik	r2, r0, _KERNEL_SDA2_BASE_

	/* Initialize stack pointer */
	addik	r1, r0, init_thread_union + THREAD_SIZE - 4

	/* Initialize r31 with current task address */
	addik	r31, r0, init_task

	addik	r11, r0, machine_early_init
	brald	r15, r11
	nop

	/*
	 * Initialize the MMU.
	 */
	bralid	r15, mmu_init
	nop

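	/*
	 * brald/bralid are absolute branch-and-link forms with a delay slot:
	 * r15 records the call site for the return and the following nop fills
	 * the delay slot, so machine_early_init and mmu_init are called as
	 * ordinary C functions while running on the pinned mappings set up
	 * above.
	 */
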
	/*
	 * Go back to running unmapped so we can load up new values
	 * and change to using our exception vectors.
	 * On the MicroBlaze, all we do is invalidate the used TLB entries to
	 * clear the old 16M byte TLB mappings.
	 */
	ori	r15,r0,TOPHYS(kernel_load_context)
	ori	r4,r0,MSR_KERNEL
	mts	rmsr,r4
	nop
	bri	4
	rted	r15,0
	nop

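	/*
	 * This is the mirror image of turn_on_mmu: MSR_KERNEL, unlike
	 * MSR_KERNEL_VMS, should leave the VMS shadow bit clear, so the rted
	 * above switches translation off and continues at the physical-address
	 * copy of kernel_load_context.
	 */
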
	/* Load up the kernel context */
kernel_load_context:
	ori	r5, r0, MICROBLAZE_LMB_TLB_ID
	mts	rtlbx,r5
	nop
	mts	rtlbhi,r0
	nop
	addi	r15, r0, machine_halt
	ori	r17, r0, start_kernel
	ori	r4, r0, MSR_KERNEL_VMS
	mts	rmsr, r4
	nop
	rted	r17, 0				/* enable MMU and jump to start_kernel */
	nop
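	/*
	 * Writing zero to rtlbhi of the MICROBLAZE_LMB_TLB_ID slot drops the
	 * temporary 4k identity mapping created earlier. r15 is pointed at
	 * machine_halt so that an unexpected return from start_kernel would
	 * halt the machine, and the final rted re-enables translation and
	 * transfers control to start_kernel.
	 */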