GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/reset.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <[email protected]>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;
unsigned int __ro_after_init kvm_host_sve_max_vl;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_EL2	(PSR_MODE_EL2h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)
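
/*
 * Editor's note: these reset values correspond to the architected warm-reset
 * state: handler-mode stack selection at the reset exception level (EL1h or
 * EL2h, or SVC mode for AArch32 guests), with the PSTATE.{D,A,I,F} masks
 * (A/I/F only for AArch32) set, so a freshly reset vcpu starts with debug,
 * SError, IRQ and FIQ exceptions masked.
 */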

unsigned int __ro_after_init kvm_sve_max_vl;

int __init kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();
		kvm_host_sve_max_vl = sve_max_vl();
		kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
			kvm_sve_max_vl = VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	set_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &vcpu->kvm->arch.flags);
}

/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;
	size_t reg_sz;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > VL_ARCH_MAX))
		return -EIO;

	reg_sz = vcpu_sve_state_size(vcpu);
	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		kfree(buf);
		return ret;
	}

	vcpu->arch.sve_state = buf;
	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
	return 0;
}
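
/*
 * Editor's note: userspace reaches the finalization below via the
 * KVM_ARM_VCPU_FINALIZE vcpu ioctl. For SVE, the expected sequence (see
 * Documentation/virt/kvm/api.rst) is roughly: enable KVM_ARM_VCPU_SVE in
 * KVM_ARM_VCPU_INIT, optionally write KVM_REG_ARM64_SVE_VLS to narrow the
 * set of vector lengths, then issue KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE)
 * before accessing SVE registers or running the vcpu.
 */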

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

	kvm_unshare_hyp(vcpu, vcpu + 1);
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
	free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
	kfree(vcpu->arch.vncr_tlb);
	kfree(vcpu->arch.ccsidr);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load.
 */
void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	bool loaded;
	u32 pstate;

	spin_lock(&vcpu->arch.mp_state_lock);
	reset_state = vcpu->arch.reset_state;
	vcpu->arch.reset_state.reset = false;
	spin_unlock(&vcpu->arch.mp_state_lock);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
			kvm_vcpu_enable_sve(vcpu);
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (vcpu_el1_is_32bit(vcpu))
		pstate = VCPU_RESET_PSTATE_SVC;
	else if (vcpu_has_nv(vcpu))
		pstate = VCPU_RESET_PSTATE_EL2;
	else
		pstate = VCPU_RESET_PSTATE_EL1;

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~1UL;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	kvm_timer_vcpu_reset(vcpu);

	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
}

u32 kvm_get_pa_bits(struct kvm *kvm)
{
	/* Fixed limit until we can configure ID_AA64MMFR0.PARange */
	return kvm_ipa_limit;
}

u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

int __init kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	/*
	 * IPA size beyond 48 bits for 4K and 16K page size is only supported
	 * when LPA2 is available. So if we have LPA2, enable it, else cap to 48
	 * bits, in case it's reported as larger on the system.
	 */
	if (!kvm_lpa2_is_enabled() && PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}