/*
 * linux/arch/unicore32/kernel/head.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <generated/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>
#include <asm/pgtable-hwdef.h>

#if (PHYS_OFFSET & 0x003fffff)
#error "PHYS_OFFSET must be at an even 4MiB boundary!"
#endif
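
/*
 * Editorial note: the mask is (1 << 22) - 1 = 0x003fffff, so the check
 * rejects any PHYS_OFFSET that is not a multiple of 4MiB. With purely
 * illustrative values, 0x40000000 passes while 0x40100000 fails, because
 * the latter has nonzero bits below bit 22.
 */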

#define KERNEL_RAM_VADDR	(PAGE_OFFSET + KERNEL_IMAGE_START)
#define KERNEL_RAM_PADDR	(PHYS_OFFSET + KERNEL_IMAGE_START)

#define KERNEL_PGD_PADDR	(KERNEL_RAM_PADDR - 0x1000)
#define KERNEL_PGD_VADDR	(KERNEL_RAM_VADDR - 0x1000)

#define KERNEL_START		KERNEL_RAM_VADDR
#define KERNEL_END		_end

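/*
 * Worked example with illustrative values only: if PAGE_OFFSET were
 * 0xc0000000, PHYS_OFFSET 0x40000000 and KERNEL_IMAGE_START 0x00008000
 * (consistent with the 0xc0008000 link address mentioned below), then
 * KERNEL_RAM_VADDR = 0xc0008000, KERNEL_RAM_PADDR = 0x40008000, and the
 * 4KiB page directory sits at 0xc0007000 (virtual) / 0x40007000
 * (physical), immediately below the kernel image.
 */
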
/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 4K below KERNEL_RAM_VADDR. Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x1000.
 */
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - 0x1000

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code. The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 */
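
/*
 * Editorial note: __pa() translates a kernel virtual address to its
 * physical address as __pa(v) = v - PAGE_OFFSET + PHYS_OFFSET. With the
 * illustrative values above (PAGE_OFFSET = 0xc0000000, PHYS_OFFSET =
 * 0x40000000), __pa(0xc0008000) = 0x40008000, which is where the
 * decompressor would branch with the MMU still off.
 */
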
	__HEAD
ENTRY(stext)
	@ set asr
	mov	r0, #PRIV_MODE			@ ensure priv mode
	or	r0, #PSR_R_BIT | PSR_I_BIT	@ disable irqs
	mov.a	asr, r0

	@ processor identification
	movc	r0, p0.c0, #0			@ cpuid
	movl	r1, 0xff00ffff			@ mask
	movl	r2, 0x4d000863			@ value
	and	r0, r1, r0
	cxor.a	r0, r2
	bne	__error_p			@ invalid processor id
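
/*
 * Editorial note: the check accepts any CPU whose ID register satisfies
 * (cpuid & 0xff00ffff) == 0x4d000863; bits 16-23 are masked off,
 * presumably a revision/variant field. Anything else branches to
 * __error_p.
 */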

/*
 * Clear the 4K level 1 swapper page table
 */
	movl	r0, #KERNEL_PGD_PADDR		@ page table address
	mov	r1, #0
	add	r2, r0, #0x1000
101:	stw.w	r1, [r0]+, #4
	stw.w	r1, [r0]+, #4
	stw.w	r1, [r0]+, #4
	stw.w	r1, [r0]+, #4
	cxor.a	r0, r2
	bne	101b
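
/*
 * Editorial note: the table is 0x1000 bytes = 1024 four-byte entries,
 * and the loop is unrolled to four stores per iteration, so it runs 256
 * times before r0 reaches r2 (= base + 0x1000).
 */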

	movl	r4, #KERNEL_PGD_PADDR		@ page table address
	mov	r7, #PMD_TYPE_SECT | PMD_PRESENT	@ page size: section
	or	r7, r7, #PMD_SECT_CACHEABLE		@ cacheable
	or	r7, r7, #PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC

/*
 * Create identity mapping for first 4MB of kernel to
 * cater for the MMU enable. This identity mapping
 * will be removed by paging_init(). We use our current program
 * counter to determine corresponding section base address.
 */
	mov	r6, pc
	mov	r6, r6 >> #22			@ start of kernel section
	or	r1, r7, r6 << #22		@ flags + kernel base
	stw	r1, [r4+], r6 << #2		@ identity mapping
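
/*
 * Worked example (illustrative): if this code is running at physical
 * 0x40008000, then r6 = 0x40008000 >> 22 = 0x100. The descriptor
 * (0x100 << 22) | flags = 0x40000000 | flags is stored at pgd + 0x100 * 4,
 * the entry that translates virtual 0x40000000-0x403fffff, so that 4MB
 * region maps to itself once the MMU comes on.
 */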

/*
 * Now set up the page tables for our kernel direct
 * mapped region.
 */
	add	r0, r4, #(KERNEL_START & 0xff000000) >> 20
	stw.w	r1, [r0+], #(KERNEL_START & 0x00c00000) >> 20
	movl	r6, #(KERNEL_END - 1)
	add	r0, r0, #4
	add	r6, r4, r6 >> #20
102:	csub.a	r0, r6
	add	r1, r1, #1 << 22
	bua	103f
	stw.w	r1, [r0]+, #4
	b	102b
103:
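
/*
 * Editorial note on the indexing above: each entry maps 4MB and is 4
 * bytes wide, so for a 4MB-aligned address v the entry lives at byte
 * offset (v >> 22) * 4, i.e. v >> 20. KERNEL_START's offset is built
 * from two shifted immediates (the & 0xff000000 and & 0x00c00000 halves)
 * because of immediate-range limits, and r6 = pgd + ((KERNEL_END - 1)
 * >> 20) serves as the loop bound: r1 advances one section (1 << 22) and
 * r0 one entry per iteration until r0 passes r6.
 */
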
/*
 * Then map the first 4MB of RAM in case it contains our boot params.
 */
	add	r0, r4, #PAGE_OFFSET >> 20
	or	r6, r7, #(PHYS_OFFSET & 0xffc00000)
	stw	r6, [r0]

	ldw	r15, __switch_data		@ address to jump to after
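
/*
 * Editorial note: the first word of __switch_data (see the table below)
 * is the virtual address of __mmap_switched, so r15 now holds the
 * link-time address that __turn_mmu_on will branch to once translation
 * is enabled.
 */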

/*
 * Initialise TLB, Caches, and MMU state ready to switch the MMU
 * on.
 */
	mov	r0, #0
	movc	p0.c5, r0, #28		@ cache invalidate all
	nop8
	movc	p0.c6, r0, #6		@ TLB invalidate all
	nop8

/*
 * ..V. .... ..TB IDAM
 * ..1. .... ..01 1111
 */
	movl	r0, #0x201f		@ control register setting

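/*
 * Editorial note: reading the bit diagram against 0x201f =
 * 0b0010_0000_0001_1111, the M (MMU), A (alignment), D (D-cache),
 * I (I-cache) and B bits are set, T is clear, and V (bit 13) is set.
 * The CONFIG_* blocks below clear individual enable bits before the
 * value is written to the control register.
 */
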
/*
 * Set up common bits before finally enabling the MMU. Essentially
 * this is just loading the page table pointer.
 */
#ifndef CONFIG_ALIGNMENT_TRAP
	andn	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	andn	r0, r0, #CR_D
#endif
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	andn	r0, r0, #CR_B
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	andn	r0, r0, #CR_I
#endif

	movc	p0.c2, r4, #0		@ set pgd
	b	__turn_mmu_on
ENDPROC(stext)

/*
 * Enable the MMU. This completely changes the structure of the visible
 * memory space. You will not be able to trace execution through this.
 *
 * r0  = cp#0 control register
 * r15 = *virtual* address to jump to upon completion
 */
	.align	5
__turn_mmu_on:
	mov	r0, r0
	movc	p0.c1, r0, #0		@ write control reg
	nop				@ fetch inst by phys addr
	mov	pc, r15
	nop8				@ fetch inst by phys addr
ENDPROC(__turn_mmu_on)
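
/*
 * Editorial note: the .align 5 places this sequence on a 32-byte
 * boundary, presumably so the whole switch-over is fetched together.
 * The nops cover instructions still being fetched by physical address
 * while the control register write takes effect; "mov pc, r15" is then
 * the first branch executed under translation, landing at the virtual
 * address loaded from __switch_data.
 */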

/*
 * Set up the initial page tables. We set up only the bare minimum
 * required to get the kernel running, which generally means mapping in
 * the kernel code.
 *
 * r9  = cpuid
 * r10 = procinfo
 *
 * Returns:
 *  r0, r3, r6, r7 corrupted
 *  r4 = physical page table address
 */
	.ltorg

	.align	2
	.type	__switch_data, %object
__switch_data:
	.long	__mmap_switched
	.long	__bss_start			@ r6
	.long	_end				@ r7
	.long	cr_alignment			@ r8
	.long	init_thread_union + THREAD_START_SP @ sp
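
/*
 * Editorial note: the first word was already consumed by the
 * "ldw r15, __switch_data" above. __mmap_switched below loads the rest:
 * BSS start into r6, _end into r7, the address of cr_alignment into r8,
 * and the top of the init task's stack into sp.
 */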

/*
 * The following fragment of code is executed with the MMU on, and uses
 * absolute addresses; this is not position independent.
 *
 * r0 = cp#0 control register
 */
__mmap_switched:
	adr	r3, __switch_data + 4

	ldm.w	(r6, r7, r8), [r3]+
	ldw	sp, [r3]

	mov	fp, #0				@ Clear BSS (and zero fp)
203:	csub.a	r6, r7
	bea	204f
	stw.w	fp, [r6]+, #4
	b	203b
204:
	andn	r1, r0, #CR_A			@ Clear 'A' bit
	stm	(r0, r1), [r8]+			@ Save control register values
	b	start_kernel
ENDPROC(__mmap_switched)
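
/*
 * Editorial note: the stm stores two consecutive words at cr_alignment:
 * the control register value as written (r0) and the same value with the
 * alignment-trap bit cleared (r1), presumably backing adjacent
 * cr_alignment / cr_no_alignment variables as in the ARM port this code
 * mirrors.
 */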

/*
 * Exception handling. Something went wrong and we can't proceed. We
 * ought to tell the user, but since we don't have any guarantee that
 * we're even running on the right architecture, we do virtually nothing.
 *
 * If CONFIG_DEBUG_LL is set, we try to print out something about the error
 * and hope for the best (useful if the bootloader fails to pass a proper
 * machine ID, for example).
 */
__error_p:
#ifdef CONFIG_DEBUG_LL
	adr	r0, str_p1
	b.l	printascii
	mov	r0, r9
	b.l	printhex8
	adr	r0, str_p2
	b.l	printascii
901:	nop8
	b	901b
str_p1:	.asciz	"\nError: unrecognized processor variant (0x"
str_p2:	.asciz	").\n"
	.align
#endif
ENDPROC(__error_p)