GitHub Repository: torvalds/linux
Path: blob/master/arch/mips/kvm/mips.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <[email protected]>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, wait_exits),
	STATS_DESC_COUNTER(VCPU, cache_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
	STATS_DESC_COUNTER(VCPU, tlbmod_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
	STATS_DESC_COUNTER(VCPU, break_inst_exits),
	STATS_DESC_COUNTER(VCPU, trap_inst_exits),
	STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
	STATS_DESC_COUNTER(VCPU, fpe_exits),
	STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
	STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
	STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_hc_exits),
	STATS_DESC_COUNTER(VCPU, vz_grr_exits),
	STATS_DESC_COUNTER(VCPU, vz_gva_exits),
	STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
	STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
	STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = true;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = false;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_enable_virtualization_cpu(void)
{
	return kvm_mips_callbacks->enable_virtualization_cpu();
}

void kvm_arch_disable_virtualization_cpu(void)
{
	kvm_mips_callbacks->disable_virtualization_cpu();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
	case KVM_VM_MIPS_AUTO:
		break;
	case KVM_VM_MIPS_VZ:
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

#ifdef CONFIG_CPU_LOONGSON64
	kvm_init_loongson_ipi(kvm);
#endif

	return 0;
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
	kvm_flush_remote_tlbs(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	kvm_flush_remote_tlbs_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE: No dirty mappings will already exist.
	 * MOVE/DELETE: The old mappings will already have been cleaned up by
	 *              kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		if (needs_flush)
			kvm_flush_remote_tlbs_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}

static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	rcuwait_wake_up(&vcpu->wait);

	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	kvm_debug("kvm @ %p: create cpu %d at %p\n",
		  vcpu->kvm, vcpu->vcpu_id, vcpu);

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_setup(&vcpu->arch.comparecount_timer, kvm_mips_comparecount_wakeup, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_vcpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %p\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	/* Initial guest state */
	err = kvm_mips_callbacks->vcpu_setup(vcpu);
	if (err)
		goto out_free_gebase;

	return 0;

out_free_gebase:
	kfree(gebase);
out_uninit_vcpu:
	kvm_mips_callbacks->vcpu_uninit(vcpu);
	return err;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);

	kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_enter_irqoff();
	ret = kvm_mips_callbacks->vcpu_run(vcpu);
	guest_state_exit_irqoff();

	return ret;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
	}

	if (!vcpu->wants_to_run)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_timing_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_vcpu_enter_exit(vcpu);

	/*
	 * We must ensure that any pending interrupts are taken before
	 * we exit guest timing so that timer ticks are accounted as
	 * guest time. Transiently unmask interrupts so that any
	 * pending interrupts are taken.
	 *
	 * TODO: is there a barrier which ensures that pending interrupts are
	 * recognised? Currently this just hopes that the CPU takes any pending
	 * interrupts between the enable and disable.
	 */
	local_irq_enable();
	local_irq_disable();

	trace_kvm_out(vcpu);
	guest_timing_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
	    intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);

	if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	rcuwait_wake_up(&dvcpu->wait);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
			       unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}
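
/*
 * Illustrative sketch (not part of the original file): from userspace, the
 * KVM_INTERRUPT ioctl handled above is issued on a vCPU file descriptor with
 * a struct kvm_mips_interrupt. A positive irq number queues an interrupt and
 * a negative one dequeues it, mirroring kvm_vcpu_ioctl_interrupt(). The
 * vcpu_fd below is assumed to come from a prior KVM_CREATE_VCPU call.
 *
 *	struct kvm_mips_interrupt irq = {
 *		.cpu = -1,	// deliver to this vCPU itself
 *		.irq = 2,	// queue I/O interrupt line 2
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0)
 *		perror("KVM_INTERRUPT");
 */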

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

	vcpu_put(vcpu);
	return r;
}
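
/*
 * Illustrative sketch (not part of the original file): userspace reaches the
 * KVM_GET_ONE_REG path above by passing a struct kvm_one_reg whose addr field
 * points at a buffer of the size encoded in the register id. For example,
 * reading the guest PC (vcpu_fd assumed to be an existing vCPU descriptor):
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_MIPS_PC,
 *		.addr = (__u64)&pc,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */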

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_mips_callbacks->prepare_flush_shadow(kvm);
	return 1;
}

int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	int r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}
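
/*
 * Illustrative sketch (not part of the original file): the capabilities
 * reported above are what userspace sees through KVM_CHECK_EXTENSION,
 * typically queried on the VM (or /dev/kvm) file descriptor before enabling
 * optional features such as the guest FPU:
 *
 *	int has_fpu = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_FPU);
 *	if (has_fpu > 0)
 *		;	// KVM_ENABLE_CAP(KVM_CAP_MIPS_FPU) may then be used per vCPU
 */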

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = &vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(&vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}
	return ret;
}

int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_exit_irqoff();
	ret = __kvm_mips_handle_exit(vcpu);
	guest_state_enter_irqoff();

	return ret;
}

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = &vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
					     KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU or MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1] = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};

static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1] = C_IRQ0,
	[MIPS_EXC_INT_IO_2] = C_IRQ1,
	[MIPS_EXC_INT_IPI_1] = C_IRQ4,
};

u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;

u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
			return i;
	}

	return MIPS_EXC_MAX;
}

static int __init kvm_mips_init(void)
{
	int ret;

	if (cpu_has_mmid) {
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
		return -EOPNOTSUPP;
	}

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_mips_emulation_init();
	if (ret)
		return ret;

	if (boot_cpu_type() == CPU_LOONGSON64)
		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret) {
		unregister_die_notifier(&kvm_mips_csr_die_notifier);
		return ret;
	}
	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);