GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/hypercalls.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>

#define KVM_ARM_SMCCC_STD_FEATURES \
        GENMASK(KVM_REG_ARM_STD_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_STD_HYP_FEATURES \
        GENMASK(KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES \
        GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT - 1, 0)
#define KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2 \
        GENMASK(KVM_REG_ARM_VENDOR_HYP_BMAP_2_BIT_COUNT - 1, 0)
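
/*
 * Each mask above covers every feature bit KVM implements for the
 * corresponding pseudo-firmware bitmap register, i.e. the full set of
 * bits userspace is allowed to leave enabled when writing the register.
 */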

static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
{
        struct system_time_snapshot systime_snapshot;
        u64 cycles = ~0UL;
        u32 feature;

        /*
         * The system time and counter value must be captured at the
         * same time to keep consistency and precision.
         */
        ktime_get_snapshot(&systime_snapshot);

        /*
         * This is only valid if the current clocksource is the
         * architected counter, as this is the only one the guest
         * can see.
         */
        if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER)
                return;

        /*
         * The guest selects one of the two reference counters
         * (virtual or physical) with the first argument of the SMCCC
         * call. In case the identifier is not supported, error out.
         */
        feature = smccc_get_arg1(vcpu);
        switch (feature) {
        case KVM_PTP_VIRT_COUNTER:
                cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset;
                break;
        case KVM_PTP_PHYS_COUNTER:
                cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.poffset;
                break;
        default:
                return;
        }

        /*
         * This relies on the top bit of val[0] never being set for
         * valid values of system time, because that is *really* far
         * in the future (about 292 years from 1970, and at that stage
         * nobody will give a damn about it).
         */
        val[0] = upper_32_bits(systime_snapshot.real);
        val[1] = lower_32_bits(systime_snapshot.real);
        val[2] = upper_32_bits(cycles);
        val[3] = lower_32_bits(cycles);
}
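
/*
 * For illustration only: a guest-side consumer of the PTP call (such as
 * a KVM PTP clock driver) would typically reassemble the snapshot from
 * the four 32-bit halves set above, along the lines of:
 *
 *	ktime  = ((u64)res.a0 << 32) | res.a1;
 *	cycles = ((u64)res.a2 << 32) | res.a3;
 *
 * where res holds the SMCCC return values a0-a3.
 */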

static bool kvm_smccc_default_allowed(u32 func_id)
{
        switch (func_id) {
        /*
         * List of function-ids that are not gated with the bitmapped
         * feature firmware registers, and are to be allowed for
         * servicing the call by default.
         */
        case ARM_SMCCC_VERSION_FUNC_ID:
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                return true;
        default:
                /* PSCI 0.2 and up is in the 0:0x1f range */
                if (ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
                    ARM_SMCCC_FUNC_NUM(func_id) <= 0x1f)
                        return true;

                /*
                 * KVM's PSCI 0.1 doesn't comply with SMCCC, and has
                 * its own function-id base and range
                 */
                if (func_id >= KVM_PSCI_FN(0) && func_id <= KVM_PSCI_FN(3))
                        return true;

                return false;
        }
}

static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id)
{
        struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;

        switch (func_id) {
        case ARM_SMCCC_TRNG_VERSION:
        case ARM_SMCCC_TRNG_FEATURES:
        case ARM_SMCCC_TRNG_GET_UUID:
        case ARM_SMCCC_TRNG_RND32:
        case ARM_SMCCC_TRNG_RND64:
                return test_bit(KVM_REG_ARM_STD_BIT_TRNG_V1_0,
                                &smccc_feat->std_bmap);
        case ARM_SMCCC_HV_PV_TIME_FEATURES:
        case ARM_SMCCC_HV_PV_TIME_ST:
                return test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
                                &smccc_feat->std_hyp_bmap);
        case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
        case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
                return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT,
                                &smccc_feat->vendor_hyp_bmap);
        case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
                return test_bit(KVM_REG_ARM_VENDOR_HYP_BIT_PTP,
                                &smccc_feat->vendor_hyp_bmap);
        default:
                return false;
        }
}
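
/*
 * The bitmaps tested above are exposed to userspace as the
 * KVM_REG_ARM_*_BMAP pseudo-firmware registers (see the accessors at the
 * bottom of this file), so a VMM can mask individual hypercall services
 * off before the VM first runs.
 */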

#define SMC32_ARCH_RANGE_BEGIN  ARM_SMCCC_VERSION_FUNC_ID
#define SMC32_ARCH_RANGE_END    ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
                                                   ARM_SMCCC_SMC_32,    \
                                                   0, ARM_SMCCC_FUNC_MASK)

#define SMC64_ARCH_RANGE_BEGIN  ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
                                                   ARM_SMCCC_SMC_64,    \
                                                   0, 0)
#define SMC64_ARCH_RANGE_END    ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
                                                   ARM_SMCCC_SMC_64,    \
                                                   0, ARM_SMCCC_FUNC_MASK)
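
/*
 * Together these two ranges span every fast call owned by the Arm
 * Architecture service (owner 0), in both the SMC32 and SMC64 calling
 * conventions.
 */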

static int kvm_smccc_filter_insert_reserved(struct kvm *kvm)
{
        int r;

        /*
         * Prevent userspace from handling any SMCCC calls in the architecture
         * range, avoiding the risk of misrepresenting Spectre mitigation status
         * to the guest.
         */
        r = mtree_insert_range(&kvm->arch.smccc_filter,
                               SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END,
                               xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
                               GFP_KERNEL_ACCOUNT);
        if (r)
                goto out_destroy;

        r = mtree_insert_range(&kvm->arch.smccc_filter,
                               SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END,
                               xa_mk_value(KVM_SMCCC_FILTER_HANDLE),
                               GFP_KERNEL_ACCOUNT);
        if (r)
                goto out_destroy;

        return 0;
out_destroy:
        mtree_destroy(&kvm->arch.smccc_filter);
        return r;
}

static bool kvm_smccc_filter_configured(struct kvm *kvm)
{
        return !mtree_empty(&kvm->arch.smccc_filter);
}

static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr)
{
        const void *zero_page = page_to_virt(ZERO_PAGE(0));
        struct kvm_smccc_filter filter;
        u32 start, end;
        int r;

        if (copy_from_user(&filter, uaddr, sizeof(filter)))
                return -EFAULT;

        if (memcmp(filter.pad, zero_page, sizeof(filter.pad)))
                return -EINVAL;

        start = filter.base;
        end = start + filter.nr_functions - 1;

        if (end < start || filter.action >= NR_SMCCC_FILTER_ACTIONS)
                return -EINVAL;

        mutex_lock(&kvm->arch.config_lock);

        if (kvm_vm_has_ran_once(kvm)) {
                r = -EBUSY;
                goto out_unlock;
        }

        if (!kvm_smccc_filter_configured(kvm)) {
                r = kvm_smccc_filter_insert_reserved(kvm);
                if (WARN_ON_ONCE(r))
                        goto out_unlock;
        }

        r = mtree_insert_range(&kvm->arch.smccc_filter, start, end,
                               xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT);
out_unlock:
        mutex_unlock(&kvm->arch.config_lock);
        return r;
}
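
/*
 * Illustrative userspace usage, as a sketch: assuming this is reached via
 * KVM_SET_DEVICE_ATTR on the VM fd with the KVM_ARM_VM_SMCCC_CTRL group
 * (see kvm_vm_smccc_set_attr() below), a VMM could forward a range of
 * function IDs to itself with something like:
 *
 *	struct kvm_smccc_filter filter = {
 *		.base		= first_func_id,
 *		.nr_functions	= n,
 *		.action		= KVM_SMCCC_FILTER_FWD_TO_USER,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VM_SMCCC_CTRL,
 *		.attr	= KVM_ARM_VM_SMCCC_FILTER,
 *		.addr	= (__u64)(unsigned long)&filter,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Ranges must not overlap and must be installed before any vCPU has run.
 */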

static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id)
{
        unsigned long idx = func_id;
        void *val;

        if (!kvm_smccc_filter_configured(kvm))
                return KVM_SMCCC_FILTER_HANDLE;

        /*
         * But where's the error handling, you say?
         *
         * mt_find() returns NULL if no entry was found, which just so happens
         * to match KVM_SMCCC_FILTER_HANDLE.
         */
        val = mt_find(&kvm->arch.smccc_filter, &idx, idx);
        return xa_to_value(val);
}

static u8 kvm_smccc_get_action(struct kvm_vcpu *vcpu, u32 func_id)
{
        /*
         * Intervening actions in the SMCCC filter take precedence over the
         * pseudo-firmware register bitmaps.
         */
        u8 action = kvm_smccc_filter_get_action(vcpu->kvm, func_id);
        if (action != KVM_SMCCC_FILTER_HANDLE)
                return action;

        if (kvm_smccc_test_fw_bmap(vcpu, func_id) ||
            kvm_smccc_default_allowed(func_id))
                return KVM_SMCCC_FILTER_HANDLE;

        return KVM_SMCCC_FILTER_DENY;
}
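
/*
 * Resolution order, in short: an explicit filter entry wins; otherwise a
 * call is handled if its pseudo-firmware feature bit is set or if it is on
 * the default-allowed list; everything else is denied.
 */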

static void kvm_prepare_hypercall_exit(struct kvm_vcpu *vcpu, u32 func_id)
{
        u8 ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
        struct kvm_run *run = vcpu->run;
        u64 flags = 0;

        if (ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64)
                flags |= KVM_HYPERCALL_EXIT_SMC;

        if (!kvm_vcpu_trap_il_is32bit(vcpu))
                flags |= KVM_HYPERCALL_EXIT_16BIT;

        run->exit_reason = KVM_EXIT_HYPERCALL;
        run->hypercall = (typeof(run->hypercall)) {
                .nr = func_id,
                .flags = flags,
        };
}
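
/*
 * Userspace then sees a KVM_EXIT_HYPERCALL exit: run->hypercall.nr holds
 * the function ID, KVM_HYPERCALL_EXIT_SMC tells SMC apart from HVC as the
 * conduit, and KVM_HYPERCALL_EXIT_16BIT flags a 16-bit trapped instruction.
 */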

int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
{
        struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
        u32 func_id = smccc_get_function(vcpu);
        u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
        u32 feature;
        u8 action;
        gpa_t gpa;
        uuid_t uuid;

        action = kvm_smccc_get_action(vcpu, func_id);
        switch (action) {
        case KVM_SMCCC_FILTER_HANDLE:
                break;
        case KVM_SMCCC_FILTER_DENY:
                goto out;
        case KVM_SMCCC_FILTER_FWD_TO_USER:
                kvm_prepare_hypercall_exit(vcpu, func_id);
                return 0;
        default:
                WARN_RATELIMIT(1, "Unhandled SMCCC filter action: %d\n", action);
                goto out;
        }

        switch (func_id) {
        case ARM_SMCCC_VERSION_FUNC_ID:
                val[0] = ARM_SMCCC_VERSION_1_1;
                break;
        case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
                feature = smccc_get_arg1(vcpu);
                switch (feature) {
                case ARM_SMCCC_ARCH_WORKAROUND_1:
                        switch (arm64_get_spectre_v2_state()) {
                        case SPECTRE_VULNERABLE:
                                break;
                        case SPECTRE_MITIGATED:
                                val[0] = SMCCC_RET_SUCCESS;
                                break;
                        case SPECTRE_UNAFFECTED:
                                val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
                                break;
                        }
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_2:
                        switch (arm64_get_spectre_v4_state()) {
                        case SPECTRE_VULNERABLE:
                                break;
                        case SPECTRE_MITIGATED:
                                /*
                                 * SSBS everywhere: Indicate no firmware
                                 * support, as the SSBS support will be
                                 * indicated to the guest and the default is
                                 * safe.
                                 *
                                 * Otherwise, expose a permanent mitigation
                                 * to the guest, and hide SSBS so that the
                                 * guest stays protected.
                                 */
                                if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP))
                                        break;
                                fallthrough;
                        case SPECTRE_UNAFFECTED:
                                val[0] = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_3:
                        switch (arm64_get_spectre_bhb_state()) {
                        case SPECTRE_VULNERABLE:
                                break;
                        case SPECTRE_MITIGATED:
                                val[0] = SMCCC_RET_SUCCESS;
                                break;
                        case SPECTRE_UNAFFECTED:
                                val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
                                break;
                        }
                        break;
                case ARM_SMCCC_HV_PV_TIME_FEATURES:
                        if (test_bit(KVM_REG_ARM_STD_HYP_BIT_PV_TIME,
                                     &smccc_feat->std_hyp_bmap))
                                val[0] = SMCCC_RET_SUCCESS;
                        break;
                }
                break;
        case ARM_SMCCC_HV_PV_TIME_FEATURES:
                val[0] = kvm_hypercall_pv_features(vcpu);
                break;
        case ARM_SMCCC_HV_PV_TIME_ST:
                gpa = kvm_init_stolen_time(vcpu);
                if (gpa != INVALID_GPA)
                        val[0] = gpa;
                break;
        case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
                uuid = ARM_SMCCC_VENDOR_HYP_UID_KVM;
                val[0] = smccc_uuid_to_reg(&uuid, 0);
                val[1] = smccc_uuid_to_reg(&uuid, 1);
                val[2] = smccc_uuid_to_reg(&uuid, 2);
                val[3] = smccc_uuid_to_reg(&uuid, 3);
                break;
        case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
                val[0] = smccc_feat->vendor_hyp_bmap;
                /* Function numbers 2-63 are reserved for pKVM for now */
                val[2] = smccc_feat->vendor_hyp_bmap_2;
                break;
        case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
                kvm_ptp_get_time(vcpu, val);
                break;
        case ARM_SMCCC_TRNG_VERSION:
        case ARM_SMCCC_TRNG_FEATURES:
        case ARM_SMCCC_TRNG_GET_UUID:
        case ARM_SMCCC_TRNG_RND32:
        case ARM_SMCCC_TRNG_RND64:
                return kvm_trng_call(vcpu);
        default:
                return kvm_psci_call(vcpu);
        }

out:
        smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
        return 1;
}
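
/*
 * As with other exit handlers, the return value follows the usual KVM
 * convention: a positive value resumes the guest (with the SMCCC return
 * registers already set), zero exits to userspace, and kvm_psci_call()
 * may also pass back a negative error.
 */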

static const u64 kvm_arm_fw_reg_ids[] = {
        KVM_REG_ARM_PSCI_VERSION,
        KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
        KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
        KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3,
        KVM_REG_ARM_STD_BMAP,
        KVM_REG_ARM_STD_HYP_BMAP,
        KVM_REG_ARM_VENDOR_HYP_BMAP,
        KVM_REG_ARM_VENDOR_HYP_BMAP_2,
};

void kvm_arm_init_hypercalls(struct kvm *kvm)
{
        struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;

        smccc_feat->std_bmap = KVM_ARM_SMCCC_STD_FEATURES;
        smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES;
        smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;

        mt_init(&kvm->arch.smccc_filter);
}

void kvm_arm_teardown_hypercalls(struct kvm *kvm)
{
        mtree_destroy(&kvm->arch.smccc_filter);
}

int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(kvm_arm_fw_reg_ids);
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(kvm_arm_fw_reg_ids); i++) {
                if (put_user(kvm_arm_fw_reg_ids[i], uindices++))
                        return -EFAULT;
        }

        return 0;
}
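
/*
 * A userspace sketch for reading one of these registers (illustrative,
 * using the standard KVM_GET_ONE_REG vCPU ioctl):
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id	= KVM_REG_ARM_PSCI_VERSION,
 *		.addr	= (__u64)(unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */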

#define KVM_REG_FEATURE_LEVEL_MASK GENMASK(3, 0)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(struct kvm_vcpu *vcpu, u64 regid)
{
        switch (regid) {
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
                switch (arm64_get_spectre_v2_state()) {
                case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
                case SPECTRE_MITIGATED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
                case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
                }
                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                switch (arm64_get_spectre_v4_state()) {
                case SPECTRE_MITIGATED:
                        /*
                         * As for the hypercall discovery, we pretend we
                         * don't have any FW mitigation if SSBS is there at
                         * all times.
                         */
                        if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SSBS, IMP))
                                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                        fallthrough;
                case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
                case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                }
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
                switch (arm64_get_spectre_bhb_state()) {
                case SPECTRE_VULNERABLE:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
                case SPECTRE_MITIGATED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL;
                case SPECTRE_UNAFFECTED:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED;
                }
                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL;
        }

        return -EINVAL;
}

int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        struct kvm_smccc_features *smccc_feat = &vcpu->kvm->arch.smccc_feat;
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;

        switch (reg->id) {
        case KVM_REG_ARM_PSCI_VERSION:
                val = kvm_psci_version(vcpu);
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
                val = get_kernel_wa_level(vcpu, reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
                break;
        case KVM_REG_ARM_STD_BMAP:
                val = READ_ONCE(smccc_feat->std_bmap);
                break;
        case KVM_REG_ARM_STD_HYP_BMAP:
                val = READ_ONCE(smccc_feat->std_hyp_bmap);
                break;
        case KVM_REG_ARM_VENDOR_HYP_BMAP:
                val = READ_ONCE(smccc_feat->vendor_hyp_bmap);
                break;
        case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
                val = READ_ONCE(smccc_feat->vendor_hyp_bmap_2);
                break;
        default:
                return -ENOENT;
        }

        if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

static int kvm_arm_set_fw_reg_bmap(struct kvm_vcpu *vcpu, u64 reg_id, u64 val)
{
        int ret = 0;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_smccc_features *smccc_feat = &kvm->arch.smccc_feat;
        unsigned long *fw_reg_bmap, fw_reg_features;

        switch (reg_id) {
        case KVM_REG_ARM_STD_BMAP:
                fw_reg_bmap = &smccc_feat->std_bmap;
                fw_reg_features = KVM_ARM_SMCCC_STD_FEATURES;
                break;
        case KVM_REG_ARM_STD_HYP_BMAP:
                fw_reg_bmap = &smccc_feat->std_hyp_bmap;
                fw_reg_features = KVM_ARM_SMCCC_STD_HYP_FEATURES;
                break;
        case KVM_REG_ARM_VENDOR_HYP_BMAP:
                fw_reg_bmap = &smccc_feat->vendor_hyp_bmap;
                fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES;
                break;
        case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
                fw_reg_bmap = &smccc_feat->vendor_hyp_bmap_2;
                fw_reg_features = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES_2;
                break;
        default:
                return -ENOENT;
        }

        /* Check for unsupported bit */
        if (val & ~fw_reg_features)
                return -EINVAL;

        mutex_lock(&kvm->arch.config_lock);

        if (kvm_vm_has_ran_once(kvm) && val != *fw_reg_bmap) {
                ret = -EBUSY;
                goto out;
        }

        WRITE_ONCE(*fw_reg_bmap, val);
out:
        mutex_unlock(&kvm->arch.config_lock);
        return ret;
}
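
/*
 * Note that rewriting the current value is still accepted once the VM has
 * run; only an actual change is refused with -EBUSY. This keeps blind
 * save/restore of the register list working.
 */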

int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        void __user *uaddr = (void __user *)(long)reg->addr;
        u64 val;
        int wa_level;

        if (KVM_REG_SIZE(reg->id) != sizeof(val))
                return -ENOENT;
        if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg->id) {
        case KVM_REG_ARM_PSCI_VERSION:
        {
                bool wants_02;

                wants_02 = vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2);

                switch (val) {
                case KVM_ARM_PSCI_0_1:
                        if (wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;
                        return 0;
                case KVM_ARM_PSCI_0_2:
                case KVM_ARM_PSCI_1_0:
                case KVM_ARM_PSCI_1_1:
                case KVM_ARM_PSCI_1_2:
                case KVM_ARM_PSCI_1_3:
                        if (!wants_02)
                                return -EINVAL;
                        vcpu->kvm->arch.psci_version = val;
                        return 0;
                }
                break;
        }

        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3:
                if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
                        return -EINVAL;

                if (get_kernel_wa_level(vcpu, reg->id) < val)
                        return -EINVAL;

                return 0;

        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
                            KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
                        return -EINVAL;

                /* The enabled bit must not be set unless the level is AVAIL. */
                if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
                    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
                        return -EINVAL;

                /*
                 * Map all the possible incoming states to the only two we
                 * really want to deal with.
                 */
                switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
                        wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                        break;
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
                        wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
                        break;
                default:
                        return -EINVAL;
                }

                /*
                 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
                 * other way around.
                 */
                if (get_kernel_wa_level(vcpu, reg->id) < wa_level)
                        return -EINVAL;

                return 0;
        case KVM_REG_ARM_STD_BMAP:
        case KVM_REG_ARM_STD_HYP_BMAP:
        case KVM_REG_ARM_VENDOR_HYP_BMAP:
        case KVM_REG_ARM_VENDOR_HYP_BMAP_2:
                return kvm_arm_set_fw_reg_bmap(vcpu, reg->id, val);
        default:
                return -ENOENT;
        }

        return -EINVAL;
}

int kvm_vm_smccc_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VM_SMCCC_FILTER:
                return 0;
        default:
                return -ENXIO;
        }
}

int kvm_vm_smccc_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        void __user *uaddr = (void __user *)attr->addr;

        switch (attr->attr) {
        case KVM_ARM_VM_SMCCC_FILTER:
                return kvm_smccc_set_filter(kvm, uaddr);
        default:
                return -ENXIO;
        }
}