GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/riscv/vmm/vmm.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015 Mihai Carabas <[email protected]>
 * Copyright (c) 2024 Ruslan Bukin <[email protected]>
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory (Department of Computer Science and Technology) under Innovate
 * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
 * Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/riscvreg.h>
#include <machine/cpu.h>
#include <machine/fpe.h>
#include <machine/machdep.h>
#include <machine/pcb.h>
#include <machine/smp.h>
#include <machine/vm.h>
#include <machine/vmparam.h>
#include <machine/vmm.h>
#include <machine/vmm_instruction_emul.h>

#include <dev/pci/pcireg.h>

#include <dev/vmm/vmm_dev.h>
#include <dev/vmm/vmm_ktr.h>
#include <dev/vmm/vmm_mem.h>
#include <dev/vmm/vmm_vm.h>

#include "vmm_stat.h"
#include "riscv.h"

#include "vmm_aplic.h"

static MALLOC_DEFINE(M_VMM, "vmm", "vmm");

/* statistics */
static VMM_STAT(VCPU_TOTAL_RUNTIME, "vcpu total runtime");

/* global statistics */
VMM_STAT(VMEXIT_COUNT, "total number of vm exits");
VMM_STAT(VMEXIT_IRQ, "number of vmexits for an irq");
VMM_STAT(VMEXIT_UNHANDLED, "number of vmexits for an unhandled exception");

static void
vcpu_cleanup(struct vcpu *vcpu, bool destroy)
{
        vmmops_vcpu_cleanup(vcpu->cookie);
        vcpu->cookie = NULL;
        if (destroy) {
                vmm_stat_free(vcpu->stats);
                fpu_save_area_free(vcpu->guestfpu);
                vcpu_lock_destroy(vcpu);
                free(vcpu, M_VMM);
        }
}

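/*
 * Allocate a vcpu and set up its machine-independent state: the vcpu
 * lock, idle state, FPU save area, and statistics buffer.  Backend
 * (vmmops) state is created later by vcpu_init().
 */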
static struct vcpu *
vcpu_alloc(struct vm *vm, int vcpu_id)
{
        struct vcpu *vcpu;

        KASSERT(vcpu_id >= 0 && vcpu_id < vm->maxcpus,
            ("vcpu_alloc: invalid vcpu %d", vcpu_id));

        vcpu = malloc(sizeof(*vcpu), M_VMM, M_WAITOK | M_ZERO);
        vcpu_lock_init(vcpu);
        vcpu->state = VCPU_IDLE;
        vcpu->hostcpu = NOCPU;
        vcpu->vcpuid = vcpu_id;
        vcpu->vm = vm;
        vcpu->guestfpu = fpu_save_area_alloc();
        vcpu->stats = vmm_stat_alloc();
        return (vcpu);
}

static void
vcpu_init(struct vcpu *vcpu)
{
        vcpu->cookie = vmmops_vcpu_init(vcpu->vm->cookie, vcpu, vcpu->vcpuid);
        MPASS(vcpu->cookie != NULL);
        fpu_save_area_reset(vcpu->guestfpu);
        vmm_stat_init(vcpu->stats);
}

struct vm_exit *
vm_exitinfo(struct vcpu *vcpu)
{
        return (&vcpu->exitinfo);
}

int
vmm_modinit(void)
{
        return (vmmops_modinit());
}

int
vmm_modcleanup(void)
{
        return (vmmops_modcleanup());
}

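/*
 * Initialize (or reinitialize) the machine-dependent part of the VM:
 * create the backend cookie for the guest pmap and clear the CPU sets,
 * suspend state, and MMIO region table.  On reinitialization, existing
 * vcpus are reinitialized as well.
 */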
static void
vm_init(struct vm *vm, bool create)
{
        int i;

        vm->cookie = vmmops_init(vm, vmspace_pmap(vm_vmspace(vm)));
        MPASS(vm->cookie != NULL);

        CPU_ZERO(&vm->active_cpus);
        CPU_ZERO(&vm->debug_cpus);

        vm->suspend = 0;
        CPU_ZERO(&vm->suspended_cpus);

        memset(vm->mmio_region, 0, sizeof(vm->mmio_region));

        if (!create) {
                for (i = 0; i < vm->maxcpus; i++) {
                        if (vm->vcpu[i] != NULL)
                                vcpu_init(vm->vcpu[i]);
                }
        }
}

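/*
 * Look up the vcpu for 'vcpuid', creating it on first use.  The common
 * case is a lock-free acquire load of the vcpu pointer; creation is
 * serialized by vcpus_init_lock, and the pointer is published with a
 * release store so unlocked readers only see fully constructed vcpus.
 */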
struct vcpu *
vm_alloc_vcpu(struct vm *vm, int vcpuid)
{
        struct vcpu *vcpu;

        if (vcpuid < 0 || vcpuid >= vm_get_maxcpus(vm))
                return (NULL);

        vcpu = (struct vcpu *)
            atomic_load_acq_ptr((uintptr_t *)&vm->vcpu[vcpuid]);
        if (__predict_true(vcpu != NULL))
                return (vcpu);

        sx_xlock(&vm->vcpus_init_lock);
        vcpu = vm->vcpu[vcpuid];
        if (vcpu == NULL && !vm->dying) {
                vcpu = vcpu_alloc(vm, vcpuid);
                vcpu_init(vcpu);

                /*
                 * Ensure vCPU is fully created before updating pointer
                 * to permit unlocked reads above.
                 */
                atomic_store_rel_ptr((uintptr_t *)&vm->vcpu[vcpuid],
                    (uintptr_t)vcpu);
        }
        sx_xunlock(&vm->vcpus_init_lock);
        return (vcpu);
}

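/*
 * Create a new VM.  The guest physical address space spans 0 to 2^39,
 * and the vcpu pointer array is allocated up front while individual
 * vcpus are created lazily by vm_alloc_vcpu().
 */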
int
vm_create(const char *name, struct vm **retvm)
{
        struct vm *vm;
        int error;

        vm = malloc(sizeof(struct vm), M_VMM, M_WAITOK | M_ZERO);
        error = vm_mem_init(&vm->mem, 0, 1ul << 39);
        if (error != 0) {
                free(vm, M_VMM);
                return (error);
        }
        strcpy(vm->name, name);
        mtx_init(&vm->rendezvous_mtx, "vm rendezvous lock", 0, MTX_DEF);
        sx_init(&vm->vcpus_init_lock, "vm vcpus");

        vm->sockets = 1;
        vm->cores = 1;                  /* XXX backwards compatibility */
        vm->threads = 1;                /* XXX backwards compatibility */
        vm->maxcpus = vm_maxcpu;

        vm->vcpu = malloc(sizeof(*vm->vcpu) * vm->maxcpus, M_VMM,
            M_WAITOK | M_ZERO);

        vm_init(vm, true);

        *retvm = vm;
        return (0);
}

static void
vm_cleanup(struct vm *vm, bool destroy)
{
        int i;

        if (destroy)
                vm_xlock_memsegs(vm);
        else
                vm_assert_memseg_xlocked(vm);

        aplic_detach_from_vm(vm->cookie);

        for (i = 0; i < vm->maxcpus; i++) {
                if (vm->vcpu[i] != NULL)
                        vcpu_cleanup(vm->vcpu[i], destroy);
        }

        vmmops_cleanup(vm->cookie);

        vm_mem_cleanup(vm);
        if (destroy) {
                vm_mem_destroy(vm);

                free(vm->vcpu, M_VMM);
                sx_destroy(&vm->vcpus_init_lock);
        }
}

void
vm_destroy(struct vm *vm)
{
        vm_cleanup(vm, true);
        free(vm, M_VMM);
}

void
vm_reset(struct vm *vm)
{
        vm_cleanup(vm, false);
        vm_init(vm, false);
}

int
vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, int prot, uint64_t *gpa, int *is_fault)
{
        return (vmmops_gla2gpa(vcpu->cookie, paging, gla, prot, gpa, is_fault));
}

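/*
 * Register an in-kernel MMIO emulation handler for the guest physical
 * range [start, start + size).  Panics if the fixed-size MMIO region
 * table is already full.
 */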
void
vm_register_inst_handler(struct vm *vm, uint64_t start, uint64_t size,
    mem_region_read_t mmio_read, mem_region_write_t mmio_write)
{
        int i;

        for (i = 0; i < nitems(vm->mmio_region); i++) {
                if (vm->mmio_region[i].start == 0 &&
                    vm->mmio_region[i].end == 0) {
                        vm->mmio_region[i].start = start;
                        vm->mmio_region[i].end = start + size;
                        vm->mmio_region[i].read = mmio_read;
                        vm->mmio_region[i].write = mmio_write;
                        return;
                }
        }

        panic("%s: No free MMIO region", __func__);
}

void
vm_deregister_inst_handler(struct vm *vm, uint64_t start, uint64_t size)
{
        int i;

        for (i = 0; i < nitems(vm->mmio_region); i++) {
                if (vm->mmio_region[i].start == start &&
                    vm->mmio_region[i].end == start + size) {
                        memset(&vm->mmio_region[i], 0,
                            sizeof(vm->mmio_region[i]));
                        return;
                }
        }

        panic("%s: Invalid MMIO region: %lx - %lx", __func__, start,
            start + size);
}

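/*
 * Emulate a faulting guest load/store.  If the fault address falls in
 * a registered MMIO region the instruction is emulated in the kernel;
 * otherwise (or when no APLIC is attached) the exit is passed up to
 * userspace by setting *retu.
 */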
static int
vm_handle_inst_emul(struct vcpu *vcpu, bool *retu)
{
        struct vm *vm;
        struct vm_exit *vme;
        struct vie *vie;
        struct hyp *hyp;
        uint64_t fault_ipa;
        struct vm_guest_paging *paging;
        struct vmm_mmio_region *vmr;
        int error, i;

        vm = vcpu->vm;
        hyp = vm->cookie;
        if (!hyp->aplic_attached)
                goto out_user;

        vme = &vcpu->exitinfo;
        vie = &vme->u.inst_emul.vie;
        paging = &vme->u.inst_emul.paging;

        fault_ipa = vme->u.inst_emul.gpa;

        vmr = NULL;
        for (i = 0; i < nitems(vm->mmio_region); i++) {
                if (vm->mmio_region[i].start <= fault_ipa &&
                    vm->mmio_region[i].end > fault_ipa) {
                        vmr = &vm->mmio_region[i];
                        break;
                }
        }
        if (vmr == NULL)
                goto out_user;

        error = vmm_emulate_instruction(vcpu, fault_ipa, vie, paging,
            vmr->read, vmr->write, retu);
        return (error);

out_user:
        *retu = true;
        return (0);
}

void
vm_exit_suspended(struct vcpu *vcpu, uint64_t pc)
{
        struct vm *vm = vcpu->vm;
        struct vm_exit *vmexit;

        KASSERT(vm->suspend > VM_SUSPEND_NONE && vm->suspend < VM_SUSPEND_LAST,
            ("vm_exit_suspended: invalid suspend type %d", vm->suspend));

        vmexit = vm_exitinfo(vcpu);
        vmexit->pc = pc;
        vmexit->inst_length = 4;
        vmexit->exitcode = VM_EXITCODE_SUSPENDED;
        vmexit->u.suspended.how = vm->suspend;
}

void
vm_exit_debug(struct vcpu *vcpu, uint64_t pc)
{
        struct vm_exit *vmexit;

        vmexit = vm_exitinfo(vcpu);
        vmexit->pc = pc;
        vmexit->inst_length = 4;
        vmexit->exitcode = VM_EXITCODE_DEBUG;
}

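/*
 * Switch the FPU from host to guest context: flush the host FP state
 * to the PCB, load the guest state, and leave the FPU disabled so any
 * host access traps until save_guest_fpustate() runs.
 */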
static void
restore_guest_fpustate(struct vcpu *vcpu)
{

        /* Flush host state to the pcb. */
        fpe_state_save(curthread);

        /* Ensure the VFP state will be re-loaded when exiting the guest. */
        PCPU_SET(fpcurthread, NULL);

        /* restore guest FPU state */
        fpe_enable();
        fpe_restore(vcpu->guestfpu);

        /*
         * The FPU is now "dirty" with the guest's state so turn on emulation
         * to trap any access to the FPU by the host.
         */
        fpe_disable();
}

static void
save_guest_fpustate(struct vcpu *vcpu)
{

        /* Save guest FPE state. */
        fpe_enable();
        fpe_store(vcpu->guestfpu);
        fpe_disable();

        KASSERT(PCPU_GET(fpcurthread) == NULL,
            ("%s: fpcurthread set with guest registers", __func__));
}

static void
vcpu_require_state(struct vcpu *vcpu, enum vcpu_state newstate)
{
        int error;

        if ((error = vcpu_set_state(vcpu, newstate, false)) != 0)
                panic("Error %d setting state to %d\n", error, newstate);
}

static void
vcpu_require_state_locked(struct vcpu *vcpu, enum vcpu_state newstate)
{
        int error;

        if ((error = vcpu_set_state_locked(vcpu, newstate, false)) != 0)
                panic("Error %d setting state to %d", error, newstate);
}

int
vm_get_capability(struct vcpu *vcpu, int type, int *retval)
{

        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (vmmops_getcap(vcpu->cookie, type, retval));
}

int
vm_set_capability(struct vcpu *vcpu, int type, int val)
{

        if (type < 0 || type >= VM_CAP_MAX)
                return (EINVAL);

        return (vmmops_setcap(vcpu->cookie, type, val));
}

void *
vcpu_get_cookie(struct vcpu *vcpu)
{

        return (vcpu->cookie);
}

int
vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
{
        if (reg < 0 || reg >= VM_REG_LAST)
                return (EINVAL);

        return (vmmops_getreg(vcpu->cookie, reg, retval));
}

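/*
 * Set a guest register.  A write to SEPC also updates nextpc so the
 * next vmmops_run() resumes the guest at the newly requested PC.
 */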
int
vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
        int error;

        if (reg < 0 || reg >= VM_REG_LAST)
                return (EINVAL);
        error = vmmops_setreg(vcpu->cookie, reg, val);
        if (error || reg != VM_REG_GUEST_SEPC)
                return (error);

        vcpu->nextpc = val;

        return (0);
}

void *
vm_get_cookie(struct vm *vm)
{

        return (vm->cookie);
}

int
vm_inject_exception(struct vcpu *vcpu, uint64_t scause)
{

        return (vmmops_exception(vcpu->cookie, scause));
}

int
vm_attach_aplic(struct vm *vm, struct vm_aplic_descr *descr)
{

        return (aplic_attach_to_vm(vm->cookie, descr));
}

int
vm_assert_irq(struct vm *vm, uint32_t irq)
{

        return (aplic_inject_irq(vm->cookie, -1, irq, true));
}

int
vm_deassert_irq(struct vm *vm, uint32_t irq)
{

        return (aplic_inject_irq(vm->cookie, -1, irq, false));
}

int
vm_raise_msi(struct vm *vm, uint64_t msg, uint64_t addr, int bus, int slot,
    int func)
{

        return (aplic_inject_msi(vm->cookie, msg, addr));
}

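/*
 * Handle a guest WFI: sleep until an interrupt, IPI, or suspend
 * request makes the vcpu runnable again, waking periodically so that
 * pending signals are noticed.
 */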
static int
vm_handle_wfi(struct vcpu *vcpu, struct vm_exit *vme, bool *retu)
{
        struct vm *vm;

        vm = vcpu->vm;
        vcpu_lock(vcpu);
        while (1) {
                if (vm->suspend)
                        break;

                if (aplic_check_pending(vcpu->cookie))
                        break;

                if (riscv_check_ipi(vcpu->cookie, false))
                        break;

                if (riscv_check_interrupts_pending(vcpu->cookie))
                        break;

                if (vcpu_should_yield(vcpu))
                        break;

                vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
                /*
                 * XXX msleep_spin() cannot be interrupted by signals so
                 * wake up periodically to check pending signals.
                 */
                msleep_spin(vcpu, &vcpu->mtx, "vmidle", hz);
                vcpu_require_state_locked(vcpu, VCPU_FROZEN);
        }
        vcpu_unlock(vcpu);

        *retu = false;

        return (0);
}

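/*
 * Handle a guest (second-stage) page fault.  The faulting guest
 * physical address is recovered from htval; a fast pmap fixup is
 * tried first, falling back to vm_fault() on the VM's vmspace map.
 */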
static int
vm_handle_paging(struct vcpu *vcpu, bool *retu)
{
        struct vm *vm;
        struct vm_exit *vme;
        struct vm_map *map;
        uint64_t addr;
        pmap_t pmap;
        int ftype, rv;

        vm = vcpu->vm;
        vme = &vcpu->exitinfo;

        pmap = vmspace_pmap(vm_vmspace(vm));
        addr = (vme->htval << 2) & ~(PAGE_SIZE - 1);

        dprintf("%s: %lx\n", __func__, addr);

        switch (vme->scause) {
        case SCAUSE_STORE_GUEST_PAGE_FAULT:
                ftype = VM_PROT_WRITE;
                break;
        case SCAUSE_FETCH_GUEST_PAGE_FAULT:
                ftype = VM_PROT_EXECUTE;
                break;
        case SCAUSE_LOAD_GUEST_PAGE_FAULT:
                ftype = VM_PROT_READ;
                break;
        default:
                panic("unknown page trap: %lu", vme->scause);
        }

        /* The page exists, but the page table needs to be updated. */
        if (pmap_fault(pmap, addr, ftype))
                return (0);

        map = &vm_vmspace(vm)->vm_map;
        rv = vm_fault(map, addr, ftype, VM_FAULT_NORMAL, NULL);
        if (rv != KERN_SUCCESS) {
                printf("%s: vm_fault failed, addr %lx, ftype %d, err %d\n",
                    __func__, addr, ftype, rv);
                return (EFAULT);
        }

        return (0);
}

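/*
 * Park this vcpu after a suspend request and wait until every active
 * vcpu has suspended as well, then wake the other sleepers and return
 * to userspace with the suspend exit.
 */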
static int
vm_handle_suspend(struct vcpu *vcpu, bool *retu)
{
        struct vm *vm = vcpu->vm;
        int error, i;
        struct thread *td;

        error = 0;
        td = curthread;

        CPU_SET_ATOMIC(vcpu->vcpuid, &vm->suspended_cpus);

        /*
         * Wait until all 'active_cpus' have suspended themselves.
         *
         * Since a VM may be suspended at any time including when one or
         * more vcpus are doing a rendezvous we need to call the rendezvous
         * handler while we are waiting to prevent a deadlock.
         */
        vcpu_lock(vcpu);
        while (error == 0) {
                if (CPU_CMP(&vm->suspended_cpus, &vm->active_cpus) == 0)
                        break;

                vcpu_require_state_locked(vcpu, VCPU_SLEEPING);
                msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
                vcpu_require_state_locked(vcpu, VCPU_FROZEN);
                if (td_ast_pending(td, TDA_SUSPEND)) {
                        vcpu_unlock(vcpu);
                        error = thread_check_susp(td, false);
                        vcpu_lock(vcpu);
                }
        }
        vcpu_unlock(vcpu);

        /*
         * Wakeup the other sleeping vcpus and return to userspace.
         */
        for (i = 0; i < vm->maxcpus; i++) {
                if (CPU_ISSET(i, &vm->suspended_cpus)) {
                        vcpu_notify_event(vm_vcpu(vm, i));
                }
        }

        *retu = true;
        return (error);
}

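/*
 * Main vcpu run loop: enter the guest via vmmops_run() and handle
 * exits in the kernel where possible (MMIO emulation, WFI, paging,
 * suspend), returning to userspace only for exits that cannot be
 * handled here.
 */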
int
vm_run(struct vcpu *vcpu)
{
        struct vm_eventinfo evinfo;
        struct vm_exit *vme;
        struct vm *vm;
        pmap_t pmap;
        int error;
        int vcpuid;
        bool retu;

        vm = vcpu->vm;

        dprintf("%s\n", __func__);

        vcpuid = vcpu->vcpuid;

        if (!CPU_ISSET(vcpuid, &vm->active_cpus))
                return (EINVAL);

        if (CPU_ISSET(vcpuid, &vm->suspended_cpus))
                return (EINVAL);

        pmap = vmspace_pmap(vm_vmspace(vm));
        vme = &vcpu->exitinfo;
        evinfo.rptr = NULL;
        evinfo.sptr = &vm->suspend;
        evinfo.iptr = NULL;
restart:
        critical_enter();

        restore_guest_fpustate(vcpu);

        vcpu_require_state(vcpu, VCPU_RUNNING);
        error = vmmops_run(vcpu->cookie, vcpu->nextpc, pmap, &evinfo);
        vcpu_require_state(vcpu, VCPU_FROZEN);

        save_guest_fpustate(vcpu);

        critical_exit();

        if (error == 0) {
                retu = false;
                switch (vme->exitcode) {
                case VM_EXITCODE_INST_EMUL:
                        vcpu->nextpc = vme->pc + vme->inst_length;
                        error = vm_handle_inst_emul(vcpu, &retu);
                        break;
                case VM_EXITCODE_WFI:
                        vcpu->nextpc = vme->pc + vme->inst_length;
                        error = vm_handle_wfi(vcpu, vme, &retu);
                        break;
                case VM_EXITCODE_ECALL:
                        /* Handle in userland. */
                        vcpu->nextpc = vme->pc + vme->inst_length;
                        retu = true;
                        break;
                case VM_EXITCODE_PAGING:
                        vcpu->nextpc = vme->pc;
                        error = vm_handle_paging(vcpu, &retu);
                        break;
                case VM_EXITCODE_BOGUS:
                        vcpu->nextpc = vme->pc;
                        retu = false;
                        error = 0;
                        break;
                case VM_EXITCODE_SUSPENDED:
                        vcpu->nextpc = vme->pc;
                        error = vm_handle_suspend(vcpu, &retu);
                        break;
                default:
                        /* Handle in userland. */
                        vcpu->nextpc = vme->pc;
                        retu = true;
                        break;
                }
        }

        if (error == 0 && retu == false)
                goto restart;

        return (error);
}