GitHub Repository: torvalds/linux
Path: arch/riscv/kvm/vcpu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <[email protected]>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nacl.h>
#include <asm/kvm_vcpu_vector.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

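/*
 * Per-VCPU statistics exposed to userspace through KVM's binary stats
 * interface: the generic KVM VCPU stats plus RISC-V specific exit counters.
 */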
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, wrs_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits),
	STATS_DESC_COUNTER(VCPU, instr_illegal_exits),
	STATS_DESC_COUNTER(VCPU, load_misaligned_exits),
	STATS_DESC_COUNTER(VCPU, store_misaligned_exits),
	STATS_DESC_COUNTER(VCPU, load_access_exits),
	STATS_DESC_COUNTER(VCPU, store_access_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

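/*
 * Reset the guest general-purpose context and shadow CSRs, preserving the
 * vector data pointer and, for SBI-initiated resets, loading the saved SBI
 * reset state.
 */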
static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,
					 bool kvm_sbi_reset)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	void *vector_datap = cntx->vector.datap;

	memset(cntx, 0, sizeof(*cntx));
	memset(csr, 0, sizeof(*csr));
	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));

	/* Restore datap as it's not a part of the guest context. */
	cntx->vector.datap = vector_datap;

	if (kvm_sbi_reset)
		kvm_riscv_vcpu_sbi_load_reset_state(vcpu);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx->sstatus = SR_SPP | SR_SPIE;

	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;
}

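/*
 * Fully reset a VCPU: guest context, FP/vector state, timer, AIA, pending
 * interrupts, PMU, HFENCE queue, and SBI state. Called at VCPU creation,
 * for KVM_REQ_VCPU_RESET, and from the KVM_MP_STATE_INIT_RECEIVED path.
 */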
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
{
	bool loaded;

	/*
	 * Preemption must be disabled here because this function races with
	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
	 * which also call vcpu_load()/vcpu_put().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset);

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_vector_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	kvm_riscv_vcpu_aia_reset(vcpu);

	bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
	bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);

	kvm_riscv_vcpu_pmu_reset(vcpu);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	kvm_riscv_vcpu_sbi_reset(vcpu);

	/* Reset the guest CSRs for the hotplug use case */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int rc;

	spin_lock_init(&vcpu->arch.mp_state_lock);

	/* Mark this VCPU as never having run */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	kvm_riscv_vcpu_setup_isa(vcpu);

	/* Setup vendor, arch, and implementation details */
	vcpu->arch.mvendorid = sbi_get_mvendorid();
	vcpu->arch.marchid = sbi_get_marchid();
	vcpu->arch.mimpid = sbi_get_mimpid();

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	spin_lock_init(&vcpu->arch.reset_state.lock);

	rc = kvm_riscv_vcpu_alloc_vector_context(vcpu);
	if (rc)
		return rc;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Setup performance monitoring */
	kvm_riscv_vcpu_pmu_init(vcpu);

	/* Setup VCPU AIA */
	kvm_riscv_vcpu_aia_init(vcpu);

	/*
	 * Setup SBI extensions
	 * NOTE: This must be the last thing to be initialized.
	 */
	kvm_riscv_vcpu_sbi_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu, false);

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * The vcpu with id 0 is the designated boot cpu.
	 * Keep all vcpus with a non-zero id in the power-off state so that
	 * they can be brought up using the SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_riscv_vcpu_sbi_deinit(vcpu);

	/* Cleanup VCPU AIA context */
	kvm_riscv_vcpu_aia_deinit(vcpu);

	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	kvm_riscv_vcpu_pmu_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);

	/* Free vector context space for host and guest kernel */
	kvm_riscv_vcpu_free_vector_context(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_context.sepc;
}
#endif

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned int n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	default:
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

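/*
 * Transfer software-pending interrupt updates into the shadow HVIP CSR
 * image before entering the guest.
 */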
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0);
		val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}

	/* Flush AIA high interrupts */
	kvm_riscv_vcpu_aia_flush_interrupts(vcpu);
}

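/*
 * Pull interrupt state changes made by the guest while it was running back
 * into the software copies after a VM-exit.
 */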
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = ncsr_read(CSR_VSIE);

	/* Sync up the HVIP.VSSIP bit changes done by the guest */
	hvip = ncsr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, v->irqs_pending);
		}
	}

	/* Sync up the HVIP.LCOFIP bit changes (only clear) by the guest */
	if ((csr->hvip ^ hvip) & (1UL << IRQ_PMU_OVF)) {
		if (!(hvip & (1UL << IRQ_PMU_OVF)) &&
		    !test_and_set_bit(IRQ_PMU_OVF, v->irqs_pending_mask))
			clear_bit(IRQ_PMU_OVF, v->irqs_pending);
	}

	/* Sync up AIA high interrupts */
	kvm_riscv_vcpu_aia_sync_interrupts(vcpu);

	/* Sync up timer CSRs */
	kvm_riscv_vcpu_timer_sync(vcpu);
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, counter overflow, and
	 * external interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privilege specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT &&
	    irq != IRQ_PMU_OVF)
		return -EINVAL;

	set_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	/*
	 * We only allow VS-mode software, timer, counter overflow, and
	 * external interrupts when irq is one of the local interrupts
	 * defined by the RISC-V privilege specification.
	 */
	if (irq < IRQ_LOCAL_MAX &&
	    irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT &&
	    irq != IRQ_PMU_OVF)
		return -EINVAL;

	clear_bit(irq, vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, vcpu->arch.irqs_pending_mask);

	return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
{
	unsigned long ie;

	ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
	      << VSIP_TO_HVIP_SHIFT) & (unsigned long)mask;
	ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK &
	      (unsigned long)mask;
	if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie)
		return true;

	/* Check AIA high interrupts */
	return kvm_riscv_vcpu_aia_has_interrupts(vcpu, mask);
}

void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_riscv_vcpu_power_off(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE);
	kvm_vcpu_wake_up(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_riscv_vcpu_power_on(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = READ_ONCE(vcpu->arch.mp_state);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
		break;
	case KVM_MP_STATE_STOPPED:
		__kvm_riscv_vcpu_power_off(vcpu);
		break;
	case KVM_MP_STATE_INIT_RECEIVED:
		if (vcpu->kvm->arch.mp_state_reset)
			kvm_riscv_reset_vcpu(vcpu, false);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		vcpu->arch.cfg.hedeleg &= ~BIT(EXC_BREAKPOINT);
	} else {
		vcpu->guest_debug = 0;
		vcpu->arch.cfg.hedeleg |= BIT(EXC_BREAKPOINT);
	}

	return 0;
}

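/*
 * Derive the one-time CSR configuration (henvcfg, hstateen0, hedeleg)
 * from the ISA extensions available to this VCPU.
 */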
static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu)
{
	const unsigned long *isa = vcpu->arch.isa;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	if (riscv_isa_extension_available(isa, SVPBMT))
		cfg->henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		cfg->henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	if (riscv_isa_extension_available(isa, ZICBOZ))
		cfg->henvcfg |= ENVCFG_CBZE;

	if (riscv_isa_extension_available(isa, SVADU) &&
	    !riscv_isa_extension_available(isa, SVADE))
		cfg->henvcfg |= ENVCFG_ADUE;

	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
		cfg->hstateen0 |= SMSTATEEN0_HSENVCFG;
		if (riscv_isa_extension_available(isa, SSAIA))
			cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC |
					  SMSTATEEN0_AIA |
					  SMSTATEEN0_AIA_ISEL;
		if (riscv_isa_extension_available(isa, SMSTATEEN))
			cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0;
	}

	cfg->hedeleg = KVM_HEDELEG_DEFAULT;
	if (vcpu->guest_debug)
		cfg->hedeleg &= ~BIT(EXC_BREAKPOINT);
}

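/*
 * Called when this VCPU is scheduled in on a host CPU: restore the guest
 * VS-level CSRs (directly or via the NACL shared memory), the G-stage page
 * table, and the timer, FP, vector, and AIA state.
 */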
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	void *nsh;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	if (kvm_riscv_nacl_sync_csr_available()) {
		nsh = nacl_shmem();
		nacl_csr_write(nsh, CSR_VSSTATUS, csr->vsstatus);
		nacl_csr_write(nsh, CSR_VSIE, csr->vsie);
		nacl_csr_write(nsh, CSR_VSTVEC, csr->vstvec);
		nacl_csr_write(nsh, CSR_VSSCRATCH, csr->vsscratch);
		nacl_csr_write(nsh, CSR_VSEPC, csr->vsepc);
		nacl_csr_write(nsh, CSR_VSCAUSE, csr->vscause);
		nacl_csr_write(nsh, CSR_VSTVAL, csr->vstval);
		nacl_csr_write(nsh, CSR_HEDELEG, cfg->hedeleg);
		nacl_csr_write(nsh, CSR_HVIP, csr->hvip);
		nacl_csr_write(nsh, CSR_VSATP, csr->vsatp);
		nacl_csr_write(nsh, CSR_HENVCFG, cfg->henvcfg);
		if (IS_ENABLED(CONFIG_32BIT))
			nacl_csr_write(nsh, CSR_HENVCFGH, cfg->henvcfg >> 32);
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
			nacl_csr_write(nsh, CSR_HSTATEEN0, cfg->hstateen0);
			if (IS_ENABLED(CONFIG_32BIT))
				nacl_csr_write(nsh, CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
		}
	} else {
		csr_write(CSR_VSSTATUS, csr->vsstatus);
		csr_write(CSR_VSIE, csr->vsie);
		csr_write(CSR_VSTVEC, csr->vstvec);
		csr_write(CSR_VSSCRATCH, csr->vsscratch);
		csr_write(CSR_VSEPC, csr->vsepc);
		csr_write(CSR_VSCAUSE, csr->vscause);
		csr_write(CSR_VSTVAL, csr->vstval);
		csr_write(CSR_HEDELEG, cfg->hedeleg);
		csr_write(CSR_HVIP, csr->hvip);
		csr_write(CSR_VSATP, csr->vsatp);
		csr_write(CSR_HENVCFG, cfg->henvcfg);
		if (IS_ENABLED(CONFIG_32BIT))
			csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32);
		if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) {
			csr_write(CSR_HSTATEEN0, cfg->hstateen0);
			if (IS_ENABLED(CONFIG_32BIT))
				csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32);
		}
	}

	kvm_riscv_mmu_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context,
					    vcpu->arch.isa);

	kvm_riscv_vcpu_aia_load(vcpu, cpu);

	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);

	vcpu->cpu = cpu;
}

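/*
 * Called when this VCPU is scheduled out: save the guest FP, vector, timer,
 * AIA, and VS-level CSR state back into the VCPU structure.
 */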
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	void *nsh;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_aia_put(vcpu);

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);
	kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context,
					 vcpu->arch.isa);
	kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context);

	if (kvm_riscv_nacl_available()) {
		nsh = nacl_shmem();
		csr->vsstatus = nacl_csr_read(nsh, CSR_VSSTATUS);
		csr->vsie = nacl_csr_read(nsh, CSR_VSIE);
		csr->vstvec = nacl_csr_read(nsh, CSR_VSTVEC);
		csr->vsscratch = nacl_csr_read(nsh, CSR_VSSCRATCH);
		csr->vsepc = nacl_csr_read(nsh, CSR_VSEPC);
		csr->vscause = nacl_csr_read(nsh, CSR_VSCAUSE);
		csr->vstval = nacl_csr_read(nsh, CSR_VSTVAL);
		csr->hvip = nacl_csr_read(nsh, CSR_HVIP);
		csr->vsatp = nacl_csr_read(nsh, CSR_VSATP);
	} else {
		csr->vsstatus = csr_read(CSR_VSSTATUS);
		csr->vsie = csr_read(CSR_VSIE);
		csr->vstvec = csr_read(CSR_VSTVEC);
		csr->vsscratch = csr_read(CSR_VSSCRATCH);
		csr->vsepc = csr_read(CSR_VSEPC);
		csr->vscause = csr_read(CSR_VSCAUSE);
		csr->vstval = csr_read(CSR_VSTVAL);
		csr->hvip = csr_read(CSR_HVIP);
		csr->vsatp = csr_read(CSR_VSATP);
	}
}

/**
 * kvm_riscv_check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu: the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	   0 if we should exit to userspace
 */
static int kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
					   (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
					   TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) {
				/*
				 * Awakened to handle a signal; request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu, true);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_mmu_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvm_riscv_tlb_flush_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);

		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
			kvm_riscv_vcpu_record_steal_time(vcpu);

		if (kvm_dirty_ring_check_request(vcpu))
			return 0;
	}

	return 1;
}

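/* Write the software-maintained HVIP value into hardware and update AIA. */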
static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	ncsr_write(CSR_HVIP, csr->hvip);
	kvm_riscv_vcpu_aia_update_hvip(vcpu);
}

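/*
 * Swap the guest values of SCOUNTEREN, SENVCFG and, when Smstateen is
 * available, SSTATEEN0 into hardware, saving the host values for the
 * return path.
 */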
static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	vcpu->arch.host_scounteren = csr_swap(CSR_SCOUNTEREN, csr->scounteren);
	vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg);
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
		vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0,
						     smcsr->sstateen0);
}

static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;

	csr->scounteren = csr_swap(CSR_SCOUNTEREN, vcpu->arch.host_scounteren);
	csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg);
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) &&
	    (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0))
		smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0,
					    vcpu->arch.host_sstateen0);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu,
					      struct kvm_cpu_trap *trap)
{
	void *nsh;
	struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *hcntx = &vcpu->arch.host_context;

	/*
	 * We save trap CSRs (such as SEPC, SCAUSE, STVAL, HTVAL, and
	 * HTINST) here because we do local_irq_enable() after this
	 * function in kvm_arch_vcpu_ioctl_run() which can result in
	 * an interrupt immediately after local_irq_enable() and can
	 * potentially change trap CSRs.
	 */

	kvm_riscv_vcpu_swap_in_guest_state(vcpu);
	guest_state_enter_irqoff();

	if (kvm_riscv_nacl_sync_sret_available()) {
		nsh = nacl_shmem();

		if (kvm_riscv_nacl_autoswap_csr_available()) {
			hcntx->hstatus =
				nacl_csr_read(nsh, CSR_HSTATUS);
			nacl_scratch_write_long(nsh,
						SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
						SBI_NACL_SHMEM_AUTOSWAP_HSTATUS,
						gcntx->hstatus);
			nacl_scratch_write_long(nsh,
						SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
						SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS);
		} else if (kvm_riscv_nacl_sync_csr_available()) {
			hcntx->hstatus = nacl_csr_swap(nsh,
						       CSR_HSTATUS, gcntx->hstatus);
		} else {
			hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);
		}

		nacl_scratch_write_longs(nsh,
					 SBI_NACL_SHMEM_SRET_OFFSET +
					 SBI_NACL_SHMEM_SRET_X(1),
					 &gcntx->ra,
					 SBI_NACL_SHMEM_SRET_X_LAST);

		__kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL,
					   SBI_EXT_NACL_SYNC_SRET);

		if (kvm_riscv_nacl_autoswap_csr_available()) {
			nacl_scratch_write_long(nsh,
						SBI_NACL_SHMEM_AUTOSWAP_OFFSET,
						0);
			gcntx->hstatus = nacl_scratch_read_long(nsh,
								SBI_NACL_SHMEM_AUTOSWAP_OFFSET +
								SBI_NACL_SHMEM_AUTOSWAP_HSTATUS);
		} else {
			gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);
		}

		trap->htval = nacl_csr_read(nsh, CSR_HTVAL);
		trap->htinst = nacl_csr_read(nsh, CSR_HTINST);
	} else {
		hcntx->hstatus = csr_swap(CSR_HSTATUS, gcntx->hstatus);

		__kvm_riscv_switch_to(&vcpu->arch);

		gcntx->hstatus = csr_swap(CSR_HSTATUS, hcntx->hstatus);

		trap->htval = csr_read(CSR_HTVAL);
		trap->htinst = csr_read(CSR_HTINST);
	}

	trap->sepc = gcntx->sepc;
	trap->scause = csr_read(CSR_SCAUSE);
	trap->stval = csr_read(CSR_STVAL);

	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
	kvm_riscv_vcpu_swap_in_host_state(vcpu);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	if (!vcpu->arch.ran_atleast_once)
		kvm_riscv_vcpu_setup_config(vcpu);

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}

	if (!vcpu->wants_to_run) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (ret)
			continue;
		ret = 1;

		kvm_riscv_gstage_vmid_update(vcpu);

		ret = kvm_riscv_check_vcpu_requests(vcpu);
		if (ret <= 0)
			continue;

		preempt_disable();

		/* Update AIA HW state before entering guest */
		ret = kvm_riscv_vcpu_aia_update(vcpu);
		if (ret <= 0) {
			preempt_enable();
			continue;
		}

		local_irq_disable();

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * We might have got VCPU interrupts updated asynchronously,
		 * so update it in HW.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu) ||
		    xfer_to_guest_mode_work_pending()) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			preempt_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		/*
		 * Sanitize VMID mappings cached (TLB) on current CPU
		 *
		 * Note: This should be done after G-stage VMID has been
		 * updated using kvm_riscv_gstage_vmid_ver_changed()
		 */
		kvm_riscv_gstage_vmid_sanitize(vcpu);

		trace_kvm_entry(vcpu);

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu, &trap);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/* Sync up interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		trace_kvm_exit(&trap);

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}