GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/kvm/exit.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"

static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int rd, rj;
	unsigned int index, ret;

	if (inst.reg2_format.opcode != cpucfg_op)
		return EMULATE_FAIL;

	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	++vcpu->stat.cpucfg_exits;
	index = vcpu->arch.gprs[rj];

	/*
	 * Per LoongArch Reference Manual 2.2.10.5
	 * Return value is 0 for undefined CPUCFG index
	 *
	 * Disable preemption since hw gcsr is accessed
	 */
	preempt_disable();
	switch (index) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
		break;
	case CPUCFG_KVM_SIG:
		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
		break;
	case CPUCFG_KVM_FEATURE:
		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		vcpu->arch.gprs[rd] = ret;
		break;
	default:
		vcpu->arch.gprs[rd] = 0;
		break;
	}
	preempt_enable();

	return EMULATE_DONE;
}

static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 4.2.1
	 * For undefined CSR id, return value is 0
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}

static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * CSR value mask imm
	 * rj = 0 means csrrd
	 * rj = 1 means csrwr
	 * rj != 0,1 means csrxchg
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
		if (kvm_guest_has_pmu(&vcpu->arch)) {
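			/*
			 * The caller (kvm_trap_handle_gspr()) has already advanced
			 * the PC; step it back so the CSR access is re-executed once
			 * the pending KVM_REQ_PMU request has been handled.
			 */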
			vcpu->arch.pc -= 4;
			kvm_make_request(KVM_REQ_PMU, vcpu);
			return EMULATE_DONE;
		}
	}

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}

int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int idx, ret;
	unsigned long *val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR access has its own opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;
	val = &vcpu->arch.gprs[rd];

	/* LoongArch is Little endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		return EMULATE_FAIL;
	}

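	/*
	 * Try the in-kernel IOCSR device first; only fall back to user space
	 * (EMULATE_DO_IOCSR) if no device on KVM_IOCSR_BUS claims the address.
	 */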
	if (run->iocsr_io.is_write) {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save data and let user space write it */
			memcpy(run->iocsr_io.data, val, run->iocsr_io.len);
		}
		trace_kvm_iocsr(KVM_TRACE_IOCSR_WRITE, run->iocsr_io.len, addr, val);
	} else {
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, run->iocsr_io.len, val);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (ret == 0)
			ret = EMULATE_DONE;
		else {
			ret = EMULATE_DO_IOCSR;
			/* Save register id for iocsr read completion */
			vcpu->arch.io_gpr = rd;
		}
		trace_kvm_iocsr(KVM_TRACE_IOCSR_READ, run->iocsr_io.len, addr, NULL);
	}

	return ret;
}

int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
				run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);

	return EMULATE_DONE;
}

static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		trace_kvm_exit_cpucfg(vcpu, KVM_TRACE_EXIT_CPUCFG);
		er = kvm_emu_cpucfg(vcpu, inst);
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		trace_kvm_exit_csr(vcpu, KVM_TRACE_EXIT_CSR);
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

/*
 * Trigger GSPR:
 * 1) Execute CPUCFG instruction;
 * 2) Execute CACOP/IDLE instructions;
 * 3) Access to unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu, int ecode)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}

int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len, run->mmio.phys_addr, NULL);

		/*
		 * If mmio device such as PCH-PIC is emulated in KVM,
		 * it need not return to user space to handle the mmio
		 * exception.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, vcpu->arch.badv,
				run->mmio.len, &vcpu->arch.gprs[rd]);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret) {
			update_pc(&vcpu->arch);
			vcpu->mmio_needed = 0;
			return EMULATE_DONE;
		}

		/* Set for kvm_complete_mmio_read() use */
		vcpu->arch.io_gpr = rd;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
		return EMULATE_DO_MMIO;
	}

	kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->mmio_needed = 0;

	return ret;
}

int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Update with new PC */
	update_pc(&vcpu->arch);
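	/* mmio_needed == 2 requests sign extension; it is set in kvm_emu_mmio_read() */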
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
				run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
			run->mmio.phys_addr, run->mmio.data);

	return er;
}

int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int idx, ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update PC and hold onto the current PC in case there is
	 * an error and we want to roll back the PC
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len, run->mmio.phys_addr, data);

		/*
		 * If mmio device such as PCH-PIC is emulated in KVM,
		 * it need not return to user space to handle the mmio
		 * exception.
		 */
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, vcpu->arch.badv, run->mmio.len, data);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		if (!ret)
			return EMULATE_DONE;

		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		return EMULATE_DO_MMIO;
	}

	/* Roll back the PC since emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
	kvm_arch_vcpu_dump_regs(vcpu);

	return ret;
}

static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write, int ecode)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	/* Inject an ADE exception if the address exceeds the max GPA size */
	if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		return RESUME_GUEST;
	}

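	/*
	 * A zero return means the fault hit a valid memslot and the guest
	 * mapping has been fixed up; otherwise fall back to MMIO emulation.
	 */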
	ret = kvm_handle_mm_fault(vcpu, badv, write, ecode);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}

static int kvm_handle_read_fault(struct kvm_vcpu *vcpu, int ecode)
{
	return kvm_handle_rdwr_fault(vcpu, false, ecode);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu, int ecode)
{
	return kvm_handle_rdwr_fault(vcpu, true, ecode);
}

int kvm_complete_user_service(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	update_pc(&vcpu->arch);
	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, run->hypercall.ret);

	return 0;
}

/**
 * kvm_handle_fpu_disabled() - Guest used the FPU while it is disabled at host
 * @vcpu: Virtual CPU context.
 * @ecode: Exception code.
 *
 * Handle when the guest attempts to use the FPU when it hasn't been allowed
 * by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	struct kvm_run *run = vcpu->run;

	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}

	/*
	 * If guest FPU not present, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If FPU already in use, we shouldn't get this at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}

static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
	unsigned long id, data;

	id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
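	/*
	 * A1 selects the feature; for steal time, A2 carries the guest
	 * physical address of the steal-time area plus a valid bit.
	 */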
	switch (id) {
	case BIT(KVM_FEATURE_STEAL_TIME):
		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
			return KVM_HCALL_INVALID_PARAMETER;

		vcpu->arch.st.guest_addr = data;
		if (!(data & KVM_STEAL_PHYS_VALID))
			return 0;

		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
		return 0;
	default:
		return KVM_HCALL_INVALID_CODE;
	}

	return KVM_HCALL_INVALID_CODE;
}

/*
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu: Virtual CPU context.
 * @ecode: Exception code.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	if (kvm_own_lsx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

/*
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu: Virtual CPU context.
 * @ecode: Exception code.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	if (kvm_own_lasx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode)
{
	if (kvm_own_lbt(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

static void kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
	unsigned int min, cpu;
	struct kvm_vcpu *dest;
	DECLARE_BITMAP(ipi_bitmap, BITS_PER_LONG * 2) = {
		kvm_read_reg(vcpu, LOONGARCH_GPR_A1),
		kvm_read_reg(vcpu, LOONGARCH_GPR_A2)
	};

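	/* A1/A2 form a 128-bit bitmap of target vCPUs, offset by the base id in A3 */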
	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
	for_each_set_bit(cpu, ipi_bitmap, BITS_PER_LONG * 2) {
		dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
		if (!dest)
			continue;

		/* Send SWI0 to dest vcpu to emulate IPI interrupt */
		kvm_queue_irq(dest, INT_SWI0);
		kvm_vcpu_kick(dest);
	}
}

/*
 * Hypercall emulation always returns to the guest; the caller should check retval.
 */
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
	long ret = KVM_HCALL_INVALID_CODE;
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

	switch (func) {
	case KVM_HCALL_FUNC_IPI:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
			kvm_send_pv_ipi(vcpu);
			ret = KVM_HCALL_SUCCESS;
		}
		break;
	case KVM_HCALL_FUNC_NOTIFY:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
			ret = kvm_save_notify(vcpu);
		break;
	default:
		break;
	}

	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}

static int kvm_handle_hypercall(struct kvm_vcpu *vcpu, int ecode)
{
	int ret;
	larch_inst inst;
	unsigned int code;

	inst.word = vcpu->arch.badi;
	code = inst.reg0i15_format.immediate;
	ret = RESUME_GUEST;

	switch (code) {
	case KVM_HCALL_SERVICE:
		vcpu->stat.hypercall_exits++;
		kvm_handle_service(vcpu);
		break;
	case KVM_HCALL_USER_SERVICE:
		if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_USER_HCALL)) {
			kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
			break;
		}

		vcpu->stat.hypercall_exits++;
		vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
		vcpu->run->hypercall.nr = KVM_HCALL_USER_SERVICE;
		vcpu->run->hypercall.args[0] = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);
		vcpu->run->hypercall.args[1] = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
		vcpu->run->hypercall.args[2] = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
		vcpu->run->hypercall.args[3] = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
		vcpu->run->hypercall.args[4] = kvm_read_reg(vcpu, LOONGARCH_GPR_A4);
		vcpu->run->hypercall.args[5] = kvm_read_reg(vcpu, LOONGARCH_GPR_A5);
		vcpu->run->hypercall.flags = 0;
		/*
		 * Set an invalid return value by default; let the user-mode VMM modify it.
		 */
		vcpu->run->hypercall.ret = KVM_HCALL_INVALID_CODE;
		ret = RESUME_HOST;
		break;
	case KVM_HCALL_SWDBG:
		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			ret = RESUME_HOST;
			break;
		}
		fallthrough;
	default:
		/* Treat it as a no-op instruction, only set the return value */
		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
		break;
	}

	if (ret == RESUME_GUEST)
		update_pc(&vcpu->arch);

	return ret;
}

/*
 * LoongArch KVM callback handling for unimplemented guest exits
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu, int ecode)
{
	unsigned int inst;
	unsigned long badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
			ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

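/* Exit handlers indexed by exception code; anything unhandled falls back to kvm_fault_ni() */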
static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1]	= kvm_fault_ni,
	[EXCCODE_TLBI]			= kvm_handle_read_fault,
	[EXCCODE_TLBL]			= kvm_handle_read_fault,
	[EXCCODE_TLBS]			= kvm_handle_write_fault,
	[EXCCODE_TLBM]			= kvm_handle_write_fault,
	[EXCCODE_FPDIS]			= kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS]		= kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS]		= kvm_handle_lasx_disabled,
	[EXCCODE_BTDIS]			= kvm_handle_lbt_disabled,
	[EXCCODE_GSPR]			= kvm_handle_gspr,
	[EXCCODE_HVC]			= kvm_handle_hypercall,
};

int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu, fault);
}