GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/tile/kernel/head_64.S

/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * TILE startup code.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
#include <arch/spr_def.h>

/*
 * This module contains the entry code for kernel images. It performs the
 * minimal setup needed to call the generic C routines.
 */
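
/*
 * A note on notation: each "{ ... }" group below is one TILE-Gx
 * instruction bundle; the instructions inside a bundle issue
 * together in a single cycle.
 */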

        __HEAD
ENTRY(_start)
        /* Notify the hypervisor of what version of the API we want */
        {
          movei r1, TILE_CHIP
          movei r2, TILE_CHIP_REV
        }
        {
          moveli r0, _HV_VERSION
          jal hv_init
        }
        /* Get a reasonable default ASID in r0 */
        {
          move r0, zero
          jal hv_inquire_asid
        }

        /*
         * Install the default page table. The relocation required to
         * statically define the table is a bit too complex, so we have
         * to plug in the pointer from the L0 to the L1 table by hand.
         * We only do this on the first cpu to boot, though, since the
         * other CPUs should see a properly-constructed page table.
         */
        {
          v4int_l r2, zero, r0  /* ASID for hv_install_context */
          moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
        }
        {
          shl16insli r4, r4, hw0(swapper_pgprot - PAGE_OFFSET)
        }
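        /*
         * Note: moveli/shl16insli chains like the one above build wide
         * constants 16 bits at a time; roughly, hwN() selects the Nth
         * 16-bit chunk of the value, and the _last variants sign-extend
         * to fill the upper bits. The longer hw2_last/hw1/hw0 sequences
         * below build full 48-bit addresses the same way.
         */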
        {
          ld r1, r4             /* access_pte for hv_install_context */
        }
        {
          moveli r0, hw1_last(.Lsv_data_pmd - PAGE_OFFSET)
          moveli r6, hw1_last(temp_data_pmd - PAGE_OFFSET)
        }
        {
          /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
          bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
          inv r4
        }
        bnez r7, .Lno_write
        {
          shl16insli r0, r0, hw0(.Lsv_data_pmd - PAGE_OFFSET)
          shl16insli r6, r6, hw0(temp_data_pmd - PAGE_OFFSET)
        }
        {
          /* Cut off the low bits of the PT address. */
          shrui r6, r6, HV_LOG2_PAGE_TABLE_ALIGN
          /* Start with our access pte. */
          move r5, r1
        }
        {
          /* Stuff the address into the page table pointer slot of the PTE. */
          bfins r5, r6, HV_PTE_INDEX_PTFN, \
                HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
        }
        {
          /* Store the L0 data PTE. */
          st r0, r5
          addli r6, r6, (temp_code_pmd - temp_data_pmd) >> \
                HV_LOG2_PAGE_TABLE_ALIGN
        }
        {
          addli r0, r0, .Lsv_code_pmd - .Lsv_data_pmd
          bfins r5, r6, HV_PTE_INDEX_PTFN, \
                HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
        }
        /* Store the L0 code PTE. */
        st r0, r5

.Lno_write:
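        /*
         * Call hv_install_context(page_table, access, asid, flags):
         * r1 (the access PTE) and r2 (the ASID) were set up above;
         * r0 (the PA of swapper_pg_dir) and r3 (flags, zero) are
         * loaded below. lr is set to label 1 by hand so the hypervisor
         * call, entered with a plain "j", returns there.
         */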
        moveli lr, hw2_last(1f)
        {
          shl16insli lr, lr, hw1(1f)
          moveli r0, hw1_last(swapper_pg_dir - PAGE_OFFSET)
        }
        {
          shl16insli lr, lr, hw0(1f)
          shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
        }
        {
          move r3, zero
          j hv_install_context
        }
1:

        /* Install the interrupt base. */
        moveli r0, hw2_last(MEM_SV_START)
        shl16insli r0, r0, hw1(MEM_SV_START)
        shl16insli r0, r0, hw0(MEM_SV_START)
        mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0

        /*
         * Get our processor number and save it away in SAVE_K_0.
         * Extract stuff from the topology structure: r4 = y, r6 = x,
         * r5 = width. FIXME: consider whether we want to just make these
         * 64-bit values (and if so fix smp_topology write below, too).
         */
        jal hv_inquire_topology
        {
          v4int_l r5, zero, r1  /* r5 = width */
          shrui r4, r0, 32      /* r4 = y */
        }
        {
          v4int_l r6, zero, r0  /* r6 = x */
          mul_lu_lu r4, r4, r5
        }
        {
          add r4, r4, r6        /* r4 == cpu == y*width + x */
        }
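        /*
         * Illustrative example (grid size assumed): on an 8x8 mesh,
         * the tile at x=3, y=2 computes cpu = 2*8 + 3 = 19.
         */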

#ifdef CONFIG_SMP
        /*
         * Load up our per-cpu offset. When the first (master) tile
         * boots, this value is still zero, so we will load boot_pc
         * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
         * The master tile initializes the per-cpu offset array, so that
         * when subsequent (secondary) tiles boot, they will instead load
         * from their per-cpu versions of boot_sp and boot_pc.
         */
        moveli r5, hw2_last(__per_cpu_offset)
        shl16insli r5, r5, hw1(__per_cpu_offset)
        shl16insli r5, r5, hw0(__per_cpu_offset)
        shl3add r5, r4, r5
        ld r5, r5
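        /*
         * shl3add computes (cpu << 3) + base, so the ld above fetched
         * the 8-byte entry __per_cpu_offset[cpu] into r5 (still zero
         * on the boot tile, per the comment above).
         */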
        bnez r5, 1f

        /*
         * Save the width and height to the smp_topology variable
         * for later use.
         */
        moveli r0, hw2_last(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
        shl16insli r0, r0, hw1(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
        shl16insli r0, r0, hw0(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
        st r0, r1
1:
#else
        move r5, zero
#endif

        /* Load and go with the correct pc and sp. */
        {
          moveli r1, hw2_last(boot_sp)
          moveli r0, hw2_last(boot_pc)
        }
        {
          shl16insli r1, r1, hw1(boot_sp)
          shl16insli r0, r0, hw1(boot_pc)
        }
        {
          shl16insli r1, r1, hw0(boot_sp)
          shl16insli r0, r0, hw0(boot_pc)
        }
        {
          add r1, r1, r5
          add r0, r0, r5
        }
        ld r0, r0
        ld sp, r1
        or r4, sp, r4
        mtspr SPR_SYSTEM_SAVE_K_0, r4  /* save ksp0 + cpu */
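        /*
         * The "or" above works as an add: boot_sp points at a
         * THREAD_SIZE-aligned stack top (init_stack + THREAD_SIZE per
         * the comment above), so its low bits are clear and can carry
         * the cpu number, letting later code recover both ksp0 and
         * the cpu from this single SPR.
         */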
        addi sp, sp, -STACK_TOP_DELTA
        {
          move lr, zero  /* stop backtraces in the called function */
          jr r0
        }
        ENDPROC(_start)

        __PAGE_ALIGNED_BSS
        .align PAGE_SIZE
ENTRY(empty_zero_page)
        .fill PAGE_SIZE,1,0
        END(empty_zero_page)

        .macro PTE cpa, bits1
        .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
              HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
              (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
        .endm
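/*
 * Each PTE emitted by this macro is marked present, dirty, accessed,
 * and global, with cache mode HV_PTE_MODE_CACHE_NO_L3; \bits1 supplies
 * the permission bits, and \cpa is the client physical address whose
 * page frame number lands in the PFN field.
 */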

        __PAGE_ALIGNED_DATA
        .align PAGE_SIZE
ENTRY(swapper_pg_dir)
        .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
.Lsv_data_pmd:
        .quad 0  /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
        .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
.Lsv_code_pmd:
        .quad 0  /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
        .org swapper_pg_dir + HV_L0_SIZE
        END(swapper_pg_dir)
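/*
 * The two zeroed .quad slots above are the L0 entries that _start
 * fills in by hand on the boot cpu, pointing them at temp_data_pmd
 * and temp_code_pmd respectively (the .Lsv_data_pmd/.Lsv_code_pmd
 * stores earlier in this file).
 */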

        .align HV_PAGE_TABLE_ALIGN
ENTRY(temp_data_pmd)
        /*
         * We fill the PAGE_OFFSET pmd with huge pages with
         * VA = PA + PAGE_OFFSET. We remap things with more precise access
         * permissions later.
         */
        .set addr, 0
        .rept HV_L1_ENTRIES
        PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
        .set addr, addr + HV_PAGE_SIZE_LARGE
        .endr
        .org temp_data_pmd + HV_L1_SIZE
        END(temp_data_pmd)

        .align HV_PAGE_TABLE_ALIGN
ENTRY(temp_code_pmd)
        /*
         * We fill the MEM_SV_START pmd with huge pages with
         * VA = PA + MEM_SV_START. We remap things with more precise access
         * permissions later.
         */
        .set addr, 0
        .rept HV_L1_ENTRIES
        PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
        .set addr, addr + HV_PAGE_SIZE_LARGE
        .endr
        .org temp_code_pmd + HV_L1_SIZE
        END(temp_code_pmd)

/*
 * Isolate swapper_pgprot to its own cache line, since each cpu
 * starting up will read it using VA-is-PA and local homing.
 * This would otherwise likely conflict with other data on the cache
 * line, once we have set its permanent home in the page tables.
 */
        __INITDATA
        .align CHIP_L2_LINE_SIZE()
ENTRY(swapper_pgprot)
        .quad HV_PTE_PRESENT | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
        .align CHIP_L2_LINE_SIZE()
        END(swapper_pgprot)