GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/kvm/switch.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>

#define HGPR_OFFSET(x)		(PT_R0 + 8*x)
#define GGPR_OFFSET(x)		(KVM_ARCH_GGPR + 8*x)
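/*
 * PT_R0 and KVM_ARCH_GGPR are asm-offsets constants: host GPRs live in
 * a struct pt_regs frame on the stack, guest GPRs in the vcpu's GPR
 * array; each register slot is 8 bytes, hence the 8*x scaling.
 */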
.macro kvm_save_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, HGPR_OFFSET(\n)
	.endr
.endm

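/*
 * Only $r1 (ra), $r2 (tp), $r3 (sp) and the callee-saved $r22-$r31
 * (fp, s0-s8) need to be preserved for the host; the calling convention
 * allows everything else to be clobbered across kvm_enter_guest().
 */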
/*
 * Save and restore all GPRs except the base register, which defaults
 * to a2 (i.e. $r6, hence 6 is absent from the lists below). $r0 is
 * skipped as well, being the hard-wired zero register.
 */
.macro kvm_save_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	st.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	ld.d	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

/*
 * Prepare the switch to guest mode: save host regs and restore guest regs.
 * a2: kvm_vcpu_arch, must not be touched until 'ertn'
 * t0, t1: temporary registers
 */
.macro kvm_switch_to_guest
	/* Set host ECFG.VS=0, all exceptions share one exception entry */
	csrrd		t0, LOONGARCH_CSR_ECFG
	bstrins.w	t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT
	csrwr		t0, LOONGARCH_CSR_ECFG

	/* Load up the new EENTRY */
	ld.d	t0, a2, KVM_ARCH_GEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Set Guest ERA */
	ld.d	t0, a2, KVM_ARCH_GPC
	csrwr	t0, LOONGARCH_CSR_ERA

	/* Load PGD for KVM hypervisor */
	ld.d	t0, a2, KVM_ARCH_KVMPGD
	csrwr	t0, LOONGARCH_CSR_PGDL

	/* Mix GID and RID: copy GSTAT.GID into the GTLBC.TGID field */
	csrrd		t1, LOONGARCH_CSR_GSTAT
	bstrpick.w	t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT
	csrrd		t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr		t0, LOONGARCH_CSR_GTLBC

	/*
	 * Enable interrupts in root mode for the coming ertn, so that host
	 * interrupts can be serviced while the VM runs.
	 * The guest's CRMD comes from the separate GCSR_CRMD register.
	 */
	ori	t0, zero, CSR_PRMD_PIE
	csrwr	t0, LOONGARCH_CSR_PRMD

	/* Set the PVM bit so that ertn switches to guest context */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	t0, t0, LOONGARCH_CSR_GSTAT

	/* Load guest GPRs */
	kvm_restore_guest_gprs	a2
	/* Load a2 itself last, since it is the KVM_ARCH base register */
	ld.d	a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	ertn /* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */
.endm

/*
 * Exception entry for general exceptions from guest mode:
 *  - IRQs are disabled
 *  - kernel privilege in root mode
 *  - page mode kept unchanged from the previous PRMD in root mode
 *  - FIXME: TLB exceptions cannot be handled here, since the TLB-related
 *    registers (pgd table, vmid registers, etc.) are still in guest mode;
 *    this will be fixed once hardware page walk is enabled
 * Load kvm_vcpu from the reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS
 */
	.text
	.cfi_sections	.debug_frame
SYM_CODE_START(kvm_exc_entry)
	UNWIND_HINT_UNDEFINED
	csrwr	a2, KVM_TEMP_KS
	csrrd	a2, KVM_VCPU_KS
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* With the GPRs saved, any GPR is now free to use */
	kvm_save_guest_gprs	a2
	/* Save the guest's a2, which was parked in KVM_TEMP_KS at entry */
	csrrd	t0, KVM_TEMP_KS
	st.d	t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2)

	/* a2 is kvm_vcpu_arch; stash vcpu in s1 and run in s0, which must
	 * survive the exit-handler call below */
	csrrd	s1, KVM_VCPU_KS
	ld.d	s0, s1, KVM_VCPU_RUN

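	/*
	 * Record the exit state for the C exit handler: ESTAT (exception
	 * status), ERA (guest PC), BADV (bad virtual address) and BADI
	 * (bad instruction word).
	 */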
	csrrd	t0, LOONGARCH_CSR_ESTAT
	st.d	t0, a2, KVM_ARCH_HESTAT
	csrrd	t0, LOONGARCH_CSR_ERA
	st.d	t0, a2, KVM_ARCH_GPC
	csrrd	t0, LOONGARCH_CSR_BADV
	st.d	t0, a2, KVM_ARCH_HBADV
	csrrd	t0, LOONGARCH_CSR_BADI
	st.d	t0, a2, KVM_ARCH_HBADI

	/* Restore host ECFG.VS */
	csrrd	t0, LOONGARCH_CSR_ECFG
	ld.d	t1, a2, KVM_ARCH_HECFG
	or	t0, t0, t1
	csrwr	t0, LOONGARCH_CSR_ECFG

	/* Restore host EENTRY */
	ld.d	t0, a2, KVM_ARCH_HEENTRY
	csrwr	t0, LOONGARCH_CSR_EENTRY

	/* Restore host pgd table */
	ld.d	t0, a2, KVM_ARCH_HPGD
	csrwr	t0, LOONGARCH_CSR_PGDL

	/*
	 * Clear the PVM bit so the next ertn enters root mode by default
	 */
	ori	t0, zero, CSR_GSTAT_PVM
	csrxchg	zero, t0, LOONGARCH_CSR_GSTAT

	/*
	 * Clear the GTLBC.TGID field:
	 *  0: subsequent TLB instructions update the root TLB
	 *  others: subsequent TLB instructions update the guest TLB,
	 *          e.g. GPA-to-HPA mappings
	 */
	csrrd		t0, LOONGARCH_CSR_GTLBC
	bstrins.w	t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
	csrwr		t0, LOONGARCH_CSR_GTLBC
	ld.d	tp, a2, KVM_ARCH_HTP
	ld.d	sp, a2, KVM_ARCH_HSP
	/* Restore the per-CPU base register */
	ld.d	u0, a2, KVM_ARCH_HPERCPU
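	/*
	 * Step sp down past the pt_regs frame that kvm_enter_guest()
	 * carved out for the host GPRs, so that the C code called below
	 * cannot clobber it.
	 */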
	addi.d	sp, sp, -PT_SIZE

	/* Call the C exit handler with a0 = kvm_run, a1 = kvm_vcpu */
	or	a0, s0, zero
	or	a1, s1, zero
	ld.d	t8, a2, KVM_ARCH_HANDLE_EXIT
	jirl	ra, t8, 0

	or	a2, s1, zero
	addi.d	a2, a2, KVM_VCPU_ARCH

	/* Resume the host when the handler returns <= 0 */
	blez	a0, ret_to_host

	/*
	 * Return to the guest.
	 * Save the per-CPU base register again, as the vCPU may have been
	 * migrated to another CPU.
	 */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	s1, KVM_VCPU_KS
	kvm_switch_to_guest

ret_to_host:
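	/*
	 * Recompute the host-GPR frame address (the saved HSP minus
	 * PT_SIZE) and restore the registers saved by kvm_enter_guest().
	 */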
	ld.d	a2, a2, KVM_ARCH_HSP
	addi.d	a2, a2, -PT_SIZE
	kvm_restore_host_gpr	a2
	jr	ra

SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
SYM_CODE_END(kvm_exc_entry)

/*
 * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
 *
 * Register parameters:
 *  a0: struct kvm_run  *run
 *  a1: struct kvm_vcpu *vcpu
 */
SYM_FUNC_START(kvm_enter_guest)
	/* Carve a pt_regs frame out of the stack bottom for the host GPRs */
	addi.d	a2, sp, -PT_SIZE
	/* Save host GPRs */
	kvm_save_host_gpr	a2

	addi.d	a2, a1, KVM_VCPU_ARCH
	st.d	sp, a2, KVM_ARCH_HSP
	st.d	tp, a2, KVM_ARCH_HTP
	/* Save the per-CPU base register */
	st.d	u0, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	a1, KVM_VCPU_KS
	kvm_switch_to_guest
SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
SYM_FUNC_END(kvm_enter_guest)

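/*
 * FPU/SIMD context save/restore helpers. In each of these, a0 holds the
 * base address of the FPU context to operate on, and t1/t2 are scratch
 * registers for the asmmacro helpers.
 */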
SYM_FUNC_START(kvm_save_fpu)
	fpu_save_csr	a0 t1
	fpu_save_double	a0 t1
	fpu_save_cc	a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_save_fpu)

SYM_FUNC_START(kvm_restore_fpu)
	fpu_restore_double	a0 t1
	fpu_restore_csr		a0 t1 t2
	fpu_restore_cc		a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_fpu)

#ifdef CONFIG_CPU_HAS_LSX
SYM_FUNC_START(kvm_save_lsx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lsx_save_data	a0 t1
	jr	ra
SYM_FUNC_END(kvm_save_lsx)

SYM_FUNC_START(kvm_restore_lsx)
	lsx_restore_data	a0 t1
	fpu_restore_cc		a0 t1 t2
	fpu_restore_csr		a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_lsx)
#endif

#ifdef CONFIG_CPU_HAS_LASX
SYM_FUNC_START(kvm_save_lasx)
	fpu_save_csr	a0 t1
	fpu_save_cc	a0 t1 t2
	lasx_save_data	a0 t1
	jr	ra
SYM_FUNC_END(kvm_save_lasx)

SYM_FUNC_START(kvm_restore_lasx)
	lasx_restore_data	a0 t1
	fpu_restore_cc		a0 t1 t2
	fpu_restore_csr		a0 t1 t2
	jr	ra
SYM_FUNC_END(kvm_restore_lasx)
#endif
.section ".rodata"
267
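/* Export the sizes of the two entry stubs above to C code */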
SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)

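/*
 * STACK_FRAME_NON_STANDARD exempts a function from objtool's stack
 * validation; these exemptions are applied only when LBT is configured.
 */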
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD kvm_restore_fpu
#ifdef CONFIG_CPU_HAS_LSX
STACK_FRAME_NON_STANDARD kvm_restore_lsx
#endif
#ifdef CONFIG_CPU_HAS_LASX
STACK_FRAME_NON_STANDARD kvm_restore_lasx
#endif
#endif