GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/kvm/vcpu.c
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4
*/
5
6
#include <linux/kvm_host.h>
7
#include <linux/entry-kvm.h>
8
#include <asm/fpu.h>
9
#include <asm/lbt.h>
10
#include <asm/loongarch.h>
11
#include <asm/setup.h>
12
#include <asm/time.h>
13
14
#define CREATE_TRACE_POINTS
15
#include "trace.h"
16
17
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
18
KVM_GENERIC_VCPU_STATS(),
19
STATS_DESC_COUNTER(VCPU, int_exits),
20
STATS_DESC_COUNTER(VCPU, idle_exits),
21
STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22
STATS_DESC_COUNTER(VCPU, signal_exits),
23
STATS_DESC_COUNTER(VCPU, hypercall_exits),
24
STATS_DESC_COUNTER(VCPU, ipi_read_exits),
25
STATS_DESC_COUNTER(VCPU, ipi_write_exits),
26
STATS_DESC_COUNTER(VCPU, eiointc_read_exits),
27
STATS_DESC_COUNTER(VCPU, eiointc_write_exits),
28
STATS_DESC_COUNTER(VCPU, pch_pic_read_exits),
29
STATS_DESC_COUNTER(VCPU, pch_pic_write_exits)
30
};
31
32
const struct kvm_stats_header kvm_vcpu_stats_header = {
33
.name_size = KVM_STATS_NAME_SIZE,
34
.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
35
.id_offset = sizeof(struct kvm_stats_header),
36
.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
37
.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
38
sizeof(kvm_vcpu_stats_desc),
39
};
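/*
* A sketch of the binary stats layout implied by the offsets above (based
* only on the fields set here; the data is exposed to userspace through the
* stats file descriptor obtained with KVM_GET_STATS_FD):
*
*	[0, sizeof(header))		struct kvm_stats_header
*	[id_offset, +name_size)		NUL-padded id string
*	[desc_offset, ...)		kvm_vcpu_stats_desc[] descriptors
*	[data_offset, ...)		one u64 value per counter above
*/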
40
41
static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
42
{
43
struct kvm_context *context;
44
45
context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
46
context->perf_cntr[0] = read_csr_perfcntr0();
47
context->perf_cntr[1] = read_csr_perfcntr1();
48
context->perf_cntr[2] = read_csr_perfcntr2();
49
context->perf_cntr[3] = read_csr_perfcntr3();
50
context->perf_ctrl[0] = write_csr_perfctrl0(0);
51
context->perf_ctrl[1] = write_csr_perfctrl1(0);
52
context->perf_ctrl[2] = write_csr_perfctrl2(0);
53
context->perf_ctrl[3] = write_csr_perfctrl3(0);
54
}
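/*
* The write_csr_perfctrl*() helpers hand back the previous register value
* (the underlying csrwr access behaves as an exchange), so the assignments
* above stop the host counters by writing 0 and capture their old control
* configuration in one step. This reading is inferred from the assignment
* pattern rather than stated anywhere in this file.
*/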
55
56
static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
57
{
58
struct kvm_context *context;
59
60
context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
61
write_csr_perfcntr0(context->perf_cntr[0]);
62
write_csr_perfcntr1(context->perf_cntr[1]);
63
write_csr_perfcntr2(context->perf_cntr[2]);
64
write_csr_perfcntr3(context->perf_cntr[3]);
65
write_csr_perfctrl0(context->perf_ctrl[0]);
66
write_csr_perfctrl1(context->perf_ctrl[1]);
67
write_csr_perfctrl2(context->perf_ctrl[2]);
68
write_csr_perfctrl3(context->perf_ctrl[3]);
69
}
70
71
72
static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
73
{
74
struct loongarch_csrs *csr = vcpu->arch.csr;
75
76
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
77
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
78
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
79
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
80
kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
81
kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
82
kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
83
kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
84
}
85
86
static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
87
{
88
struct loongarch_csrs *csr = vcpu->arch.csr;
89
90
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
91
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
92
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
93
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
94
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
95
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
96
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
97
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
98
}
99
100
static int kvm_own_pmu(struct kvm_vcpu *vcpu)
101
{
102
unsigned long val;
103
104
if (!kvm_guest_has_pmu(&vcpu->arch))
105
return -EINVAL;
106
107
kvm_save_host_pmu(vcpu);
108
109
/* Set PM0-PM(num) to guest */
110
val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
111
val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
112
write_csr_gcfg(val);
113
114
kvm_restore_guest_pmu(vcpu);
115
116
return 0;
117
}
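/*
* GCFG.GPERF holds the number of performance counters handed directly to
* the guest. As an illustration (inferred from the "+ 1" above, not from
* the manual): if kvm_get_pmu_num() returns 3, the field is written as 4
* and the guest accesses PM0-PM3 without trapping.
*/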
118
119
static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
120
{
121
unsigned long val;
122
struct loongarch_csrs *csr = vcpu->arch.csr;
123
124
if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
125
return;
126
127
kvm_save_guest_pmu(vcpu);
128
129
/* Disable pmu access from guest */
130
write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
131
132
/*
* Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
* exiting the guest, so that on the next trap into the guest we do
* not need to deal with the PMU CSR context at all.
*/
137
val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
138
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
139
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
140
val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
141
if (!(val & KVM_PMU_EVENT_ENABLED))
142
vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
143
144
kvm_restore_host_pmu(vcpu);
145
}
146
147
static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
148
{
149
if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
150
kvm_make_request(KVM_REQ_PMU, vcpu);
151
}
152
153
static void kvm_check_pmu(struct kvm_vcpu *vcpu)
154
{
155
if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
156
kvm_own_pmu(vcpu);
157
vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
158
}
159
}
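/*
* PMU ownership is therefore taken lazily: _kvm_setcsr() and kvm_restore_pmu()
* only raise KVM_REQ_PMU, and the actual hand-over to the guest happens here,
* with irqs disabled, right before guest entry.
*/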
160
161
static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
162
{
163
u32 version;
164
u64 steal;
165
gpa_t gpa;
166
struct kvm_memslots *slots;
167
struct kvm_steal_time __user *st;
168
struct gfn_to_hva_cache *ghc;
169
170
ghc = &vcpu->arch.st.cache;
171
gpa = vcpu->arch.st.guest_addr;
172
if (!(gpa & KVM_STEAL_PHYS_VALID))
173
return;
174
175
gpa &= KVM_STEAL_PHYS_MASK;
176
slots = kvm_memslots(vcpu->kvm);
177
if (slots->generation != ghc->generation || gpa != ghc->gpa) {
178
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
179
ghc->gpa = INVALID_GPA;
180
return;
181
}
182
}
183
184
st = (struct kvm_steal_time __user *)ghc->hva;
185
unsafe_get_user(version, &st->version, out);
186
if (version & 1)
187
version += 1; /* first time write, random junk */
188
189
version += 1;
190
unsafe_put_user(version, &st->version, out);
191
smp_wmb();
192
193
unsafe_get_user(steal, &st->steal, out);
194
steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
195
vcpu->arch.st.last_steal = current->sched_info.run_delay;
196
unsafe_put_user(steal, &st->steal, out);
197
198
smp_wmb();
199
version += 1;
200
unsafe_put_user(version, &st->version, out);
201
out:
202
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
203
}
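/*
* The version field works like a seqcount: it is odd while the record is
* being updated and even when the contents are stable. A guest-side reader
* could therefore use a loop along these lines (an illustrative sketch of
* the consumer, not code from this file):
*
*	do {
*		ver = st->version;
*		smp_rmb();
*		steal = st->steal;
*		smp_rmb();
*	} while ((ver & 1) || ver != st->version);
*/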
204
205
/*
206
* kvm_check_requests - check and handle pending vCPU requests
207
*
208
* Return: RESUME_GUEST if we should enter the guest
209
* RESUME_HOST if we should exit to userspace
210
*/
211
static int kvm_check_requests(struct kvm_vcpu *vcpu)
212
{
213
if (!kvm_request_pending(vcpu))
214
return RESUME_GUEST;
215
216
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
217
vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */
218
219
if (kvm_dirty_ring_check_request(vcpu))
220
return RESUME_HOST;
221
222
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
223
kvm_update_stolen_time(vcpu);
224
225
return RESUME_GUEST;
226
}
227
228
static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
229
{
230
lockdep_assert_irqs_disabled();
231
if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
232
if (vcpu->arch.flush_gpa != INVALID_GPA) {
233
kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
234
vcpu->arch.flush_gpa = INVALID_GPA;
235
}
236
}
237
238
/*
* Check and handle pending signals and vCPU requests etc.
* Runs with irqs and preemption enabled.
*
* Return: RESUME_GUEST if we should enter the guest
* RESUME_HOST if we should exit to userspace
* < 0 if we should exit to userspace, where the return value
* indicates an error
*/
247
static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
248
{
249
int idx, ret;
250
251
/*
252
* Check conditions before entering the guest
253
*/
254
ret = xfer_to_guest_mode_handle_work(vcpu);
255
if (ret < 0)
256
return ret;
257
258
idx = srcu_read_lock(&vcpu->kvm->srcu);
259
ret = kvm_check_requests(vcpu);
260
srcu_read_unlock(&vcpu->kvm->srcu, idx);
261
262
return ret;
263
}
264
265
/*
266
* Called with irq enabled
267
*
268
* Return: RESUME_GUEST if we should enter the guest, and irq disabled
269
* Others if we should exit to userspace
270
*/
271
static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
272
{
273
int ret;
274
275
do {
276
ret = kvm_enter_guest_check(vcpu);
277
if (ret != RESUME_GUEST)
278
break;
279
280
/*
* Handle the vcpu timer and interrupts, check requests and
* check the vmid before the vcpu enters the guest
*/
284
local_irq_disable();
285
kvm_deliver_intr(vcpu);
286
kvm_deliver_exception(vcpu);
287
/* Make sure the vcpu mode has been written */
288
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
289
kvm_check_vpid(vcpu);
290
kvm_check_pmu(vcpu);
291
292
/*
* Must be called after kvm_check_vpid(), since that function updates
* CSR.GSTAT, which is used by kvm_flush_tlb_gpa(), and it may also
* clear the pending KVM_REQ_TLB_FLUSH_GPA bit
*/
297
kvm_late_check_requests(vcpu);
298
vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
299
/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
300
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
301
302
if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
303
kvm_lose_pmu(vcpu);
304
/* make sure the vcpu mode has been written */
305
smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
306
local_irq_enable();
307
ret = -EAGAIN;
308
}
309
} while (ret != RESUME_GUEST);
310
311
return ret;
312
}
313
314
/*
315
* Return 1 for resume guest and "<= 0" for resume host.
316
*/
317
static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
318
{
319
int ret = RESUME_GUEST;
320
unsigned long estat = vcpu->arch.host_estat;
321
u32 intr = estat & CSR_ESTAT_IS;
322
u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
323
324
vcpu->mode = OUTSIDE_GUEST_MODE;
325
326
/* Set a default exit reason */
327
run->exit_reason = KVM_EXIT_UNKNOWN;
328
329
kvm_lose_pmu(vcpu);
330
331
guest_timing_exit_irqoff();
332
guest_state_exit_irqoff();
333
local_irq_enable();
334
335
trace_kvm_exit(vcpu, ecode);
336
if (ecode) {
337
ret = kvm_handle_fault(vcpu, ecode);
338
} else {
339
WARN(!intr, "vm exiting with suspicious irq\n");
340
++vcpu->stat.int_exits;
341
}
342
343
if (ret == RESUME_GUEST)
344
ret = kvm_pre_enter_guest(vcpu);
345
346
if (ret != RESUME_GUEST) {
347
local_irq_disable();
348
return ret;
349
}
350
351
guest_timing_enter_irqoff();
352
guest_state_enter_irqoff();
353
trace_kvm_reenter(vcpu);
354
355
return RESUME_GUEST;
356
}
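/*
* Note on the flow above: when the fault handler returns RESUME_GUEST, this
* function heads straight back toward guest entry (kvm_pre_enter_guest()
* leaves irqs disabled and RESUME_GUEST is returned to the low-level switch
* code); only non-RESUME_GUEST results unwind to kvm_arch_vcpu_ioctl_run().
*/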
357
358
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
359
{
360
return !!(vcpu->arch.irq_pending) &&
361
vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
362
}
363
364
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
365
{
366
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
367
}
368
369
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
370
{
371
unsigned long val;
372
373
preempt_disable();
374
val = gcsr_read(LOONGARCH_CSR_CRMD);
375
preempt_enable();
376
377
return (val & CSR_PRMD_PPLV) == PLV_KERN;
378
}
379
380
#ifdef CONFIG_GUEST_PERF_EVENTS
381
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
382
{
383
return vcpu->arch.pc;
384
}
385
386
/*
* Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
* arrived in guest context. For LoongArch64, if the PMU is not passed through
* to the VM, any event that arrives while a vCPU is loaded is considered to be
* "in guest".
*/
391
bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
392
{
393
return (vcpu && !(vcpu->arch.aux_inuse & KVM_LARCH_PMU));
394
}
395
#endif
396
397
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu)
398
{
399
return false;
400
}
401
402
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
403
{
404
return VM_FAULT_SIGBUS;
405
}
406
407
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
408
struct kvm_translation *tr)
409
{
410
return -EINVAL;
411
}
412
413
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
414
{
415
int ret;
416
417
/* Protect from TOD sync and vcpu_load/put() */
418
preempt_disable();
419
ret = kvm_pending_timer(vcpu) ||
420
kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
421
preempt_enable();
422
423
return ret;
424
}
425
426
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
427
{
428
int i;
429
430
kvm_debug("vCPU Register Dump:\n");
431
kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
432
kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
433
434
for (i = 0; i < 32; i += 4) {
435
kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
436
vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
437
vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
438
}
439
440
kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
441
kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
442
kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
443
444
kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
445
446
return 0;
447
}
448
449
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
450
struct kvm_mp_state *mp_state)
451
{
452
*mp_state = vcpu->arch.mp_state;
453
454
return 0;
455
}
456
457
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
458
struct kvm_mp_state *mp_state)
459
{
460
int ret = 0;
461
462
switch (mp_state->mp_state) {
463
case KVM_MP_STATE_RUNNABLE:
464
vcpu->arch.mp_state = *mp_state;
465
break;
466
default:
467
ret = -EINVAL;
468
}
469
470
return ret;
471
}
472
473
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
474
struct kvm_guest_debug *dbg)
475
{
476
if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
477
return -EINVAL;
478
479
if (dbg->control & KVM_GUESTDBG_ENABLE)
480
vcpu->guest_debug = dbg->control;
481
else
482
vcpu->guest_debug = 0;
483
484
return 0;
485
}
486
487
static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
488
{
489
int cpuid;
490
struct kvm_phyid_map *map;
491
struct loongarch_csrs *csr = vcpu->arch.csr;
492
493
if (val >= KVM_MAX_PHYID)
494
return -EINVAL;
495
496
map = vcpu->kvm->arch.phyid_map;
497
cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
498
499
spin_lock(&vcpu->kvm->arch.phyid_map_lock);
500
if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
501
/* Discard duplicated CPUID set operation */
502
if (cpuid == val) {
503
spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
504
return 0;
505
}
506
507
/*
* The CPUID has already been set
* Forbid changing it to a different CPUID at runtime
*/
511
spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
512
return -EINVAL;
513
}
514
515
if (map->phys_map[val].enabled) {
516
/* Discard duplicated CPUID set operation */
517
if (vcpu == map->phys_map[val].vcpu) {
518
spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
519
return 0;
520
}
521
522
/*
* The new CPUID is already in use by another vcpu
* Forbid sharing the same CPUID between different vcpus
*/
526
spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
527
return -EINVAL;
528
}
529
530
kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
531
map->phys_map[val].enabled = true;
532
map->phys_map[val].vcpu = vcpu;
533
spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
534
535
return 0;
536
}
537
538
static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
539
{
540
int cpuid;
541
struct kvm_phyid_map *map;
542
struct loongarch_csrs *csr = vcpu->arch.csr;
543
544
map = vcpu->kvm->arch.phyid_map;
545
cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
546
547
if (cpuid >= KVM_MAX_PHYID)
548
return;
549
550
spin_lock(&vcpu->kvm->arch.phyid_map_lock);
551
if (map->phys_map[cpuid].enabled) {
552
map->phys_map[cpuid].vcpu = NULL;
553
map->phys_map[cpuid].enabled = false;
554
kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
555
}
556
spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
557
}
558
559
struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
560
{
561
struct kvm_phyid_map *map;
562
563
if (cpuid >= KVM_MAX_PHYID)
564
return NULL;
565
566
map = kvm->arch.phyid_map;
567
if (!map->phys_map[cpuid].enabled)
568
return NULL;
569
570
return map->phys_map[cpuid].vcpu;
571
}
572
573
static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
574
{
575
unsigned long gintc;
576
struct loongarch_csrs *csr = vcpu->arch.csr;
577
578
if (get_gcsr_flag(id) & INVALID_GCSR)
579
return -EINVAL;
580
581
if (id == LOONGARCH_CSR_ESTAT) {
582
preempt_disable();
583
vcpu_load(vcpu);
584
/*
* Sync pending interrupts into ESTAT so that they are
* preserved across the VM migration stage
*/
588
kvm_deliver_intr(vcpu);
589
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
590
vcpu_put(vcpu);
591
preempt_enable();
592
593
/* ESTAT IP0~IP7 get from GINTC */
594
gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
595
*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
596
return 0;
597
}
598
599
/*
* Return the software CSR state; for a synchronous ioctl the
* software state is consistent with the hardware state
*/
603
*val = kvm_read_sw_gcsr(csr, id);
604
605
return 0;
606
}
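/*
* Worked example of the composition above: ESTAT.IS bits IP0-IP7 sit at
* bits 2-9, hence the "<< 2"; a GINTC value with bit 0 set is reported to
* userspace as bit 2 (IP0) of the combined ESTAT value.
*/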
607
608
static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
609
{
610
int ret = 0, gintc;
611
struct loongarch_csrs *csr = vcpu->arch.csr;
612
613
if (get_gcsr_flag(id) & INVALID_GCSR)
614
return -EINVAL;
615
616
if (id == LOONGARCH_CSR_CPUID)
617
return kvm_set_cpuid(vcpu, val);
618
619
if (id == LOONGARCH_CSR_ESTAT) {
620
/* ESTAT IP0~IP7 inject through GINTC */
621
gintc = (val >> 2) & 0xff;
622
kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
623
624
gintc = val & ~(0xffUL << 2);
625
kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
626
627
return ret;
628
}
629
630
kvm_write_sw_gcsr(csr, id, val);
631
632
/*
* After modifying a PMU CSR value of the vcpu, request KVM_REQ_PMU
* if any of the PMU CSRs now enable an event.
*/
636
if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
637
unsigned long val;
638
639
val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
640
kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
641
kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
642
kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
643
644
if (val & KVM_PMU_EVENT_ENABLED)
645
kvm_make_request(KVM_REQ_PMU, vcpu);
646
}
647
648
return ret;
649
}
650
651
static int _kvm_get_cpucfg_mask(int id, u64 *v)
652
{
653
if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
654
return -EINVAL;
655
656
switch (id) {
657
case LOONGARCH_CPUCFG0:
658
*v = GENMASK(31, 0);
659
return 0;
660
case LOONGARCH_CPUCFG1:
661
/* CPUCFG1_MSGINT is not supported by KVM */
662
*v = GENMASK(25, 0);
663
return 0;
664
case LOONGARCH_CPUCFG2:
665
/* CPUCFG2 features unconditionally supported by KVM */
666
*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
667
CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
668
CPUCFG2_LSPW | CPUCFG2_LAM;
669
/*
670
* For the ISA extensions listed below, if one is supported
671
* by the host, then it is also supported by KVM.
672
*/
673
if (cpu_has_lsx)
674
*v |= CPUCFG2_LSX;
675
if (cpu_has_lasx)
676
*v |= CPUCFG2_LASX;
677
if (cpu_has_lbt_x86)
678
*v |= CPUCFG2_X86BT;
679
if (cpu_has_lbt_arm)
680
*v |= CPUCFG2_ARMBT;
681
if (cpu_has_lbt_mips)
682
*v |= CPUCFG2_MIPSBT;
683
684
return 0;
685
case LOONGARCH_CPUCFG3:
686
*v = GENMASK(16, 0);
687
return 0;
688
case LOONGARCH_CPUCFG4:
689
case LOONGARCH_CPUCFG5:
690
*v = GENMASK(31, 0);
691
return 0;
692
case LOONGARCH_CPUCFG6:
693
if (cpu_has_pmp)
694
*v = GENMASK(14, 0);
695
else
696
*v = 0;
697
return 0;
698
case LOONGARCH_CPUCFG16:
699
*v = GENMASK(16, 0);
700
return 0;
701
case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
702
*v = GENMASK(30, 0);
703
return 0;
704
default:
705
/*
706
* CPUCFG bits should be zero if reserved by HW or not
707
* supported by KVM.
708
*/
709
*v = 0;
710
return 0;
711
}
712
}
713
714
static int kvm_check_cpucfg(int id, u64 val)
715
{
716
int ret;
717
u64 mask = 0;
718
719
ret = _kvm_get_cpucfg_mask(id, &mask);
720
if (ret)
721
return ret;
722
723
if (val & ~mask)
724
/* Unsupported features and/or the higher 32 bits should not be set */
725
return -EINVAL;
726
727
switch (id) {
728
case LOONGARCH_CPUCFG2:
729
if (!(val & CPUCFG2_LLFTP))
730
/* Guests must have a constant timer */
731
return -EINVAL;
732
if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
733
/* Single- and double-precision floating point must both be set when FP is enabled */
734
return -EINVAL;
735
if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
736
/* LSX architecturally implies FP but val does not satisfy that */
737
return -EINVAL;
738
if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
739
/* LASX architecturally implies LSX and FP but val does not satisfy that */
740
return -EINVAL;
741
return 0;
742
case LOONGARCH_CPUCFG6:
743
if (val & CPUCFG6_PMP) {
744
u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
745
if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
746
return -EINVAL;
747
if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
748
return -EINVAL;
749
if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
750
return -EINVAL;
751
}
752
return 0;
753
default:
754
/*
755
* Values for the other CPUCFG IDs are not being further validated
756
* besides the mask check above.
757
*/
758
return 0;
759
}
760
}
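/*
* The CPUCFG2 checks above encode the dependency chain
* LASX -> LSX -> FP -> (FPSP && FPDP), plus the mandatory constant timer
* bit (CPUCFG2_LLFTP). For example, a value with CPUCFG2_LASX set but
* CPUCFG2_LSX clear is rejected with -EINVAL.
*/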
761
762
static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
763
const struct kvm_one_reg *reg, u64 *v)
764
{
765
int id, ret = 0;
766
u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
767
768
switch (type) {
769
case KVM_REG_LOONGARCH_CSR:
770
id = KVM_GET_IOC_CSR_IDX(reg->id);
771
ret = _kvm_getcsr(vcpu, id, v);
772
break;
773
case KVM_REG_LOONGARCH_CPUCFG:
774
id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
775
if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
776
*v = vcpu->arch.cpucfg[id];
777
else
778
ret = -EINVAL;
779
break;
780
case KVM_REG_LOONGARCH_LBT:
781
if (!kvm_guest_has_lbt(&vcpu->arch))
782
return -ENXIO;
783
784
switch (reg->id) {
785
case KVM_REG_LOONGARCH_LBT_SCR0:
786
*v = vcpu->arch.lbt.scr0;
787
break;
788
case KVM_REG_LOONGARCH_LBT_SCR1:
789
*v = vcpu->arch.lbt.scr1;
790
break;
791
case KVM_REG_LOONGARCH_LBT_SCR2:
792
*v = vcpu->arch.lbt.scr2;
793
break;
794
case KVM_REG_LOONGARCH_LBT_SCR3:
795
*v = vcpu->arch.lbt.scr3;
796
break;
797
case KVM_REG_LOONGARCH_LBT_EFLAGS:
798
*v = vcpu->arch.lbt.eflags;
799
break;
800
case KVM_REG_LOONGARCH_LBT_FTOP:
801
*v = vcpu->arch.fpu.ftop;
802
break;
803
default:
804
ret = -EINVAL;
805
break;
806
}
807
break;
808
case KVM_REG_LOONGARCH_KVM:
809
switch (reg->id) {
810
case KVM_REG_LOONGARCH_COUNTER:
811
*v = drdtime() + vcpu->kvm->arch.time_offset;
812
break;
813
case KVM_REG_LOONGARCH_DEBUG_INST:
814
*v = INSN_HVCL | KVM_HCALL_SWDBG;
815
break;
816
default:
817
ret = -EINVAL;
818
break;
819
}
820
break;
821
default:
822
ret = -EINVAL;
823
break;
824
}
825
826
return ret;
827
}
828
829
static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
830
{
831
int ret = 0;
832
u64 v, size = reg->id & KVM_REG_SIZE_MASK;
833
834
switch (size) {
835
case KVM_REG_SIZE_U64:
836
ret = kvm_get_one_reg(vcpu, reg, &v);
837
if (ret)
838
return ret;
839
ret = put_user(v, (u64 __user *)(long)reg->addr);
840
break;
841
default:
842
ret = -EINVAL;
843
break;
844
}
845
846
return ret;
847
}
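/*
* Illustrative userspace read of a guest CSR through this path (a sketch;
* vcpu_fd is assumed to come from KVM_CREATE_VCPU and the id composition
* below is approximate, the exact helpers live in the uapi header):
*
*	__u64 val;
*	struct kvm_one_reg reg = {
*		.id   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | LOONGARCH_CSR_ESTAT,
*		.addr = (__u64)(unsigned long)&val,
*	};
*	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
*/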
848
849
static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
850
const struct kvm_one_reg *reg, u64 v)
851
{
852
int id, ret = 0;
853
u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
854
855
switch (type) {
856
case KVM_REG_LOONGARCH_CSR:
857
id = KVM_GET_IOC_CSR_IDX(reg->id);
858
ret = _kvm_setcsr(vcpu, id, v);
859
break;
860
case KVM_REG_LOONGARCH_CPUCFG:
861
id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
862
ret = kvm_check_cpucfg(id, v);
863
if (ret)
864
break;
865
vcpu->arch.cpucfg[id] = (u32)v;
866
if (id == LOONGARCH_CPUCFG6)
867
vcpu->arch.max_pmu_csrid =
868
LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
869
break;
870
case KVM_REG_LOONGARCH_LBT:
871
if (!kvm_guest_has_lbt(&vcpu->arch))
872
return -ENXIO;
873
874
switch (reg->id) {
875
case KVM_REG_LOONGARCH_LBT_SCR0:
876
vcpu->arch.lbt.scr0 = v;
877
break;
878
case KVM_REG_LOONGARCH_LBT_SCR1:
879
vcpu->arch.lbt.scr1 = v;
880
break;
881
case KVM_REG_LOONGARCH_LBT_SCR2:
882
vcpu->arch.lbt.scr2 = v;
883
break;
884
case KVM_REG_LOONGARCH_LBT_SCR3:
885
vcpu->arch.lbt.scr3 = v;
886
break;
887
case KVM_REG_LOONGARCH_LBT_EFLAGS:
888
vcpu->arch.lbt.eflags = v;
889
break;
890
case KVM_REG_LOONGARCH_LBT_FTOP:
891
vcpu->arch.fpu.ftop = v;
892
break;
893
default:
894
ret = -EINVAL;
895
break;
896
}
897
break;
898
case KVM_REG_LOONGARCH_KVM:
899
switch (reg->id) {
900
case KVM_REG_LOONGARCH_COUNTER:
901
/*
* The guest counter offset is per board, not per vcpu, so for an
* SMP guest it is only set once, via the first vcpu
*/
905
if (vcpu->vcpu_id == 0)
906
vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
907
break;
908
case KVM_REG_LOONGARCH_VCPU_RESET:
909
vcpu->arch.st.guest_addr = 0;
910
memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
911
memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
912
913
/*
* On vCPU reset, clear the ESTAT and GINTC registers here;
* the other CSR registers are cleared via _kvm_setcsr().
*/
917
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
918
kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
919
break;
920
default:
921
ret = -EINVAL;
922
break;
923
}
924
break;
925
default:
926
ret = -EINVAL;
927
break;
928
}
929
930
return ret;
931
}
932
933
static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
934
{
935
int ret = 0;
936
u64 v, size = reg->id & KVM_REG_SIZE_MASK;
937
938
switch (size) {
939
case KVM_REG_SIZE_U64:
940
ret = get_user(v, (u64 __user *)(long)reg->addr);
941
if (ret)
942
return ret;
943
break;
944
default:
945
return -EINVAL;
946
}
947
948
return kvm_set_one_reg(vcpu, reg, v);
949
}
950
951
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
952
{
953
return -ENOIOCTLCMD;
954
}
955
956
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
957
{
958
return -ENOIOCTLCMD;
959
}
960
961
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
962
{
963
int i;
964
965
for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
966
regs->gpr[i] = vcpu->arch.gprs[i];
967
968
regs->pc = vcpu->arch.pc;
969
970
return 0;
971
}
972
973
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
974
{
975
int i;
976
977
for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
978
vcpu->arch.gprs[i] = regs->gpr[i];
979
980
vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
981
vcpu->arch.pc = regs->pc;
982
983
return 0;
984
}
985
986
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
987
struct kvm_enable_cap *cap)
988
{
989
/* FPU is enabled by default, will support LSX/LASX later. */
990
return -EINVAL;
991
}
992
993
static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
994
struct kvm_device_attr *attr)
995
{
996
switch (attr->attr) {
997
case LOONGARCH_CPUCFG2:
998
case LOONGARCH_CPUCFG6:
999
return 0;
1000
case CPUCFG_KVM_FEATURE:
1001
return 0;
1002
default:
1003
return -ENXIO;
1004
}
1005
1006
return -ENXIO;
1007
}
1008
1009
static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
1010
struct kvm_device_attr *attr)
1011
{
1012
if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1013
|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1014
return -ENXIO;
1015
1016
return 0;
1017
}
1018
1019
static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
1020
struct kvm_device_attr *attr)
1021
{
1022
int ret = -ENXIO;
1023
1024
switch (attr->group) {
1025
case KVM_LOONGARCH_VCPU_CPUCFG:
1026
ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
1027
break;
1028
case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1029
ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
1030
break;
1031
default:
1032
break;
1033
}
1034
1035
return ret;
1036
}
1037
1038
static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1039
struct kvm_device_attr *attr)
1040
{
1041
int ret = 0;
1042
uint64_t val;
1043
uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1044
1045
switch (attr->attr) {
1046
case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1047
ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1048
if (ret)
1049
return ret;
1050
break;
1051
case CPUCFG_KVM_FEATURE:
1052
val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1053
break;
1054
default:
1055
return -ENXIO;
1056
}
1057
1058
put_user(val, uaddr);
1059
1060
return ret;
1061
}
1062
1063
static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1064
struct kvm_device_attr *attr)
1065
{
1066
u64 gpa;
1067
u64 __user *user = (u64 __user *)attr->addr;
1068
1069
if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1070
|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1071
return -ENXIO;
1072
1073
gpa = vcpu->arch.st.guest_addr;
1074
if (put_user(gpa, user))
1075
return -EFAULT;
1076
1077
return 0;
1078
}
1079
1080
static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1081
struct kvm_device_attr *attr)
1082
{
1083
int ret = -ENXIO;
1084
1085
switch (attr->group) {
1086
case KVM_LOONGARCH_VCPU_CPUCFG:
1087
ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1088
break;
1089
case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1090
ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1091
break;
1092
default:
1093
break;
1094
}
1095
1096
return ret;
1097
}
1098
1099
static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1100
struct kvm_device_attr *attr)
1101
{
1102
u64 val, valid;
1103
u64 __user *user = (u64 __user *)attr->addr;
1104
struct kvm *kvm = vcpu->kvm;
1105
1106
switch (attr->attr) {
1107
case CPUCFG_KVM_FEATURE:
1108
if (get_user(val, user))
1109
return -EFAULT;
1110
1111
valid = LOONGARCH_PV_FEAT_MASK;
1112
if (val & ~valid)
1113
return -EINVAL;
1114
1115
/* All vCPUs need to set the same PV features */
1116
if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1117
&& ((kvm->arch.pv_features & valid) != val))
1118
return -EINVAL;
1119
kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1120
return 0;
1121
default:
1122
return -ENXIO;
1123
}
1124
}
1125
1126
static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1127
struct kvm_device_attr *attr)
1128
{
1129
int idx, ret = 0;
1130
u64 gpa, __user *user = (u64 __user *)attr->addr;
1131
struct kvm *kvm = vcpu->kvm;
1132
1133
if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1134
|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1135
return -ENXIO;
1136
1137
if (get_user(gpa, user))
1138
return -EFAULT;
1139
1140
if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1141
return -EINVAL;
1142
1143
if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1144
vcpu->arch.st.guest_addr = gpa;
1145
return 0;
1146
}
1147
1148
/* Check the address is in a valid memslot */
1149
idx = srcu_read_lock(&kvm->srcu);
1150
if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1151
ret = -EINVAL;
1152
srcu_read_unlock(&kvm->srcu, idx);
1153
1154
if (!ret) {
1155
vcpu->arch.st.guest_addr = gpa;
1156
vcpu->arch.st.last_steal = current->sched_info.run_delay;
1157
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1158
}
1159
1160
return ret;
1161
}
1162
1163
static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1164
struct kvm_device_attr *attr)
1165
{
1166
int ret = -ENXIO;
1167
1168
switch (attr->group) {
1169
case KVM_LOONGARCH_VCPU_CPUCFG:
1170
ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1171
break;
1172
case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1173
ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1174
break;
1175
default:
1176
break;
1177
}
1178
1179
return ret;
1180
}
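/*
* Illustrative userspace use of the attribute interface (a sketch; vcpu_fd
* and guest_pa are assumptions). Enabling steal time, for instance, would
* look roughly like:
*
*	__u64 gpa = guest_pa | KVM_STEAL_PHYS_VALID;
*	struct kvm_device_attr attr = {
*		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
*		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
*		.addr  = (__u64)(unsigned long)&gpa,
*	};
*	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
*/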
1181
1182
long kvm_arch_vcpu_ioctl(struct file *filp,
1183
unsigned int ioctl, unsigned long arg)
1184
{
1185
long r;
1186
struct kvm_device_attr attr;
1187
void __user *argp = (void __user *)arg;
1188
struct kvm_vcpu *vcpu = filp->private_data;
1189
1190
/*
* Only the software CSR state should be modified here.
*
* If any hardware CSR register were modified, a vcpu_load/vcpu_put pair
* should be used: the hardware CSR registers are owned by this vcpu, and
* after switching to another vcpu, that vcpu would need to reload them.
*
* If the software CSR state is modified, the KVM_LARCH_HWCSR_USABLE bit
* should be cleared in vcpu->arch.aux_inuse, so that vcpu_load checks the
* aux_inuse flag and reloads the CSR registers from software.
*/
1201
1202
switch (ioctl) {
1203
case KVM_SET_ONE_REG:
1204
case KVM_GET_ONE_REG: {
1205
struct kvm_one_reg reg;
1206
1207
r = -EFAULT;
1208
if (copy_from_user(&reg, argp, sizeof(reg)))
1209
break;
1210
if (ioctl == KVM_SET_ONE_REG) {
1211
r = kvm_set_reg(vcpu, &reg);
1212
vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1213
} else
1214
r = kvm_get_reg(vcpu, &reg);
1215
break;
1216
}
1217
case KVM_ENABLE_CAP: {
1218
struct kvm_enable_cap cap;
1219
1220
r = -EFAULT;
1221
if (copy_from_user(&cap, argp, sizeof(cap)))
1222
break;
1223
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1224
break;
1225
}
1226
case KVM_HAS_DEVICE_ATTR: {
1227
r = -EFAULT;
1228
if (copy_from_user(&attr, argp, sizeof(attr)))
1229
break;
1230
r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1231
break;
1232
}
1233
case KVM_GET_DEVICE_ATTR: {
1234
r = -EFAULT;
1235
if (copy_from_user(&attr, argp, sizeof(attr)))
1236
break;
1237
r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1238
break;
1239
}
1240
case KVM_SET_DEVICE_ATTR: {
1241
r = -EFAULT;
1242
if (copy_from_user(&attr, argp, sizeof(attr)))
1243
break;
1244
r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1245
break;
1246
}
1247
default:
1248
r = -ENOIOCTLCMD;
1249
break;
1250
}
1251
1252
return r;
1253
}
1254
1255
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1256
{
1257
int i = 0;
1258
1259
fpu->fcc = vcpu->arch.fpu.fcc;
1260
fpu->fcsr = vcpu->arch.fpu.fcsr;
1261
for (i = 0; i < NUM_FPU_REGS; i++)
1262
memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1263
1264
return 0;
1265
}
1266
1267
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1268
{
1269
int i = 0;
1270
1271
vcpu->arch.fpu.fcc = fpu->fcc;
1272
vcpu->arch.fpu.fcsr = fpu->fcsr;
1273
for (i = 0; i < NUM_FPU_REGS; i++)
1274
memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1275
1276
return 0;
1277
}
1278
1279
#ifdef CONFIG_CPU_HAS_LBT
1280
int kvm_own_lbt(struct kvm_vcpu *vcpu)
1281
{
1282
if (!kvm_guest_has_lbt(&vcpu->arch))
1283
return -EINVAL;
1284
1285
preempt_disable();
1286
if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1287
set_csr_euen(CSR_EUEN_LBTEN);
1288
_restore_lbt(&vcpu->arch.lbt);
1289
vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1290
}
1291
preempt_enable();
1292
1293
return 0;
1294
}
1295
1296
static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1297
{
1298
preempt_disable();
1299
if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1300
_save_lbt(&vcpu->arch.lbt);
1301
clear_csr_euen(CSR_EUEN_LBTEN);
1302
vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1303
}
1304
preempt_enable();
1305
}
1306
1307
static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1308
{
1309
/*
* If TM mode is enabled, saving/restoring the top register will
* cause an LBT exception, so enable LBT here in advance
*/
1313
if (fcsr & FPU_CSR_TM)
1314
kvm_own_lbt(vcpu);
1315
}
1316
1317
static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1318
{
1319
if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1320
if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1321
return;
1322
kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1323
}
1324
}
1325
#else
1326
static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1327
static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1328
static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1329
#endif
1330
1331
/* Enable FPU and restore context */
1332
void kvm_own_fpu(struct kvm_vcpu *vcpu)
1333
{
1334
preempt_disable();
1335
1336
/*
1337
* Enable FPU for guest
1338
* Set FR and FRE according to guest context
1339
*/
1340
kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1341
set_csr_euen(CSR_EUEN_FPEN);
1342
1343
kvm_restore_fpu(&vcpu->arch.fpu);
1344
vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1345
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1346
1347
preempt_enable();
1348
}
1349
1350
#ifdef CONFIG_CPU_HAS_LSX
1351
/* Enable LSX and restore context */
1352
int kvm_own_lsx(struct kvm_vcpu *vcpu)
1353
{
1354
if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1355
return -EINVAL;
1356
1357
preempt_disable();
1358
1359
/* Enable LSX for guest */
1360
kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1361
set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1362
switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1363
case KVM_LARCH_FPU:
1364
/*
1365
* Guest FPU state already loaded,
1366
* only restore upper LSX state
1367
*/
1368
_restore_lsx_upper(&vcpu->arch.fpu);
1369
break;
1370
default:
1371
/*
* Neither FP nor LSX is already active,
* restore the full LSX state
*/
1374
kvm_restore_lsx(&vcpu->arch.fpu);
1375
break;
1376
}
1377
1378
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1379
vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1380
preempt_enable();
1381
1382
return 0;
1383
}
1384
#endif
1385
1386
#ifdef CONFIG_CPU_HAS_LASX
1387
/* Enable LASX and restore context */
1388
int kvm_own_lasx(struct kvm_vcpu *vcpu)
1389
{
1390
if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1391
return -EINVAL;
1392
1393
preempt_disable();
1394
1395
kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1396
set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1397
switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1398
case KVM_LARCH_LSX:
1399
case KVM_LARCH_LSX | KVM_LARCH_FPU:
1400
/* Guest LSX state already loaded, only restore upper LASX state */
1401
_restore_lasx_upper(&vcpu->arch.fpu);
1402
break;
1403
case KVM_LARCH_FPU:
1404
/* Guest FP state already loaded, only restore upper LSX & LASX state */
1405
_restore_lsx_upper(&vcpu->arch.fpu);
1406
_restore_lasx_upper(&vcpu->arch.fpu);
1407
break;
1408
default:
1409
/* Neither FP nor LSX is already active, restore the full LASX state */
1410
kvm_restore_lasx(&vcpu->arch.fpu);
1411
break;
1412
}
1413
1414
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1415
vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1416
preempt_enable();
1417
1418
return 0;
1419
}
1420
#endif
1421
1422
/* Save context and disable FPU */
1423
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1424
{
1425
preempt_disable();
1426
1427
kvm_check_fcsr_alive(vcpu);
1428
if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1429
kvm_save_lasx(&vcpu->arch.fpu);
1430
vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1431
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1432
1433
/* Disable LASX & LSX & FPU */
1434
clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1435
} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1436
kvm_save_lsx(&vcpu->arch.fpu);
1437
vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1438
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1439
1440
/* Disable LSX & FPU */
1441
clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1442
} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1443
kvm_save_fpu(&vcpu->arch.fpu);
1444
vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1445
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1446
1447
/* Disable FPU */
1448
clear_csr_euen(CSR_EUEN_FPEN);
1449
}
1450
kvm_lose_lbt(vcpu);
1451
1452
preempt_enable();
1453
}
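/*
* Summary of the lazy FPU/SIMD handling above: aux_inuse tracks which guest
* register sets are live in hardware (KVM_LARCH_FPU/LSX/LASX), the widest
* live set is saved on exit and the matching EUEN enable bits are cleared,
* so a later guest access traps again and re-owns the unit through
* kvm_own_fpu()/kvm_own_lsx()/kvm_own_lasx().
*/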
1454
1455
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1456
{
1457
int intr = (int)irq->irq;
1458
1459
if (intr > 0)
1460
kvm_queue_irq(vcpu, intr);
1461
else if (intr < 0)
1462
kvm_dequeue_irq(vcpu, -intr);
1463
else {
1464
kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1465
return -EINVAL;
1466
}
1467
1468
kvm_vcpu_kick(vcpu);
1469
1470
return 0;
1471
}
1472
1473
long kvm_arch_vcpu_async_ioctl(struct file *filp,
1474
unsigned int ioctl, unsigned long arg)
1475
{
1476
void __user *argp = (void __user *)arg;
1477
struct kvm_vcpu *vcpu = filp->private_data;
1478
1479
if (ioctl == KVM_INTERRUPT) {
1480
struct kvm_interrupt irq;
1481
1482
if (copy_from_user(&irq, argp, sizeof(irq)))
1483
return -EFAULT;
1484
1485
kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1486
1487
return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1488
}
1489
1490
return -ENOIOCTLCMD;
1491
}
1492
1493
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1494
{
1495
return 0;
1496
}
1497
1498
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1499
{
1500
unsigned long timer_hz;
1501
struct loongarch_csrs *csr;
1502
1503
vcpu->arch.vpid = 0;
1504
vcpu->arch.flush_gpa = INVALID_GPA;
1505
1506
hrtimer_setup(&vcpu->arch.swtimer, kvm_swtimer_wakeup, CLOCK_MONOTONIC,
1507
HRTIMER_MODE_ABS_PINNED_HARD);
1508
1509
/* Get GPA (=HVA) of PGD for kvm hypervisor */
1510
vcpu->arch.kvm_pgd = __pa(vcpu->kvm->arch.pgd);
1511
1512
/*
* Get the PGD for the primary mmu. The virtual address is used since
* memory is accessed after loading from CSR_PGD in the TLB exception
* fast path.
*/
1516
vcpu->arch.host_pgd = (unsigned long)vcpu->kvm->mm->pgd;
1517
1518
vcpu->arch.handle_exit = kvm_handle_exit;
1519
vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1520
vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1521
if (!vcpu->arch.csr)
1522
return -ENOMEM;
1523
1524
/*
* All kvm exceptions share one exception entry, and the host <-> guest
* switch also switches the ECFG.VS field, so keep the host ECFG.VS
* value here.
*/
1528
vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1529
1530
/* Init */
1531
vcpu->arch.last_sched_cpu = -1;
1532
1533
/* Init ipi_state lock */
1534
spin_lock_init(&vcpu->arch.ipi_state.lock);
1535
1536
/*
1537
* Initialize guest register state to valid architectural reset state.
1538
*/
1539
timer_hz = calc_const_freq();
1540
kvm_init_timer(vcpu, timer_hz);
1541
1542
/* Set Initialize mode for guest */
1543
csr = vcpu->arch.csr;
1544
kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1545
1546
/* Set cpuid */
1547
kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1548
kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1549
1550
/* Start with no pending virtual guest interrupts */
1551
csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1552
1553
return 0;
1554
}
1555
1556
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1557
{
1558
}
1559
1560
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1561
{
1562
int cpu;
1563
struct kvm_context *context;
1564
1565
hrtimer_cancel(&vcpu->arch.swtimer);
1566
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1567
kvm_drop_cpuid(vcpu);
1568
kfree(vcpu->arch.csr);
1569
1570
/*
1571
* If the vCPU is freed and reused as another vCPU, we don't want the
1572
* matching pointer wrongly hanging around in last_vcpu.
1573
*/
1574
for_each_possible_cpu(cpu) {
1575
context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1576
if (context->last_vcpu == vcpu)
1577
context->last_vcpu = NULL;
1578
}
1579
}
1580
1581
static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1582
{
1583
bool migrated;
1584
struct kvm_context *context;
1585
struct loongarch_csrs *csr = vcpu->arch.csr;
1586
1587
/*
1588
* Have we migrated to a different CPU?
1589
* If so, any old guest TLB state may be stale.
1590
*/
1591
migrated = (vcpu->arch.last_sched_cpu != cpu);
1592
1593
/*
1594
* Was this the last vCPU to run on this CPU?
1595
* If not, any old guest state from this vCPU will have been clobbered.
1596
*/
1597
context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1598
if (migrated || (context->last_vcpu != vcpu))
1599
vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1600
context->last_vcpu = vcpu;
1601
1602
/* Restore timer state regardless */
1603
kvm_restore_timer(vcpu);
1604
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1605
1606
/* Restore hardware PMU CSRs */
1607
kvm_restore_pmu(vcpu);
1608
1609
/* Don't bother restoring registers multiple times unless necessary */
1610
if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1611
return 0;
1612
1613
write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1614
1615
/* Restore guest CSR registers */
1616
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1617
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1618
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1619
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1620
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1621
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1622
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1623
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1624
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1625
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1626
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1627
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1628
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1629
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1630
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1631
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1632
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1633
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1634
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1635
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1636
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1637
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1638
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1639
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1640
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1641
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1642
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1643
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1644
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1645
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1646
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1647
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1648
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1649
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1650
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1651
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1652
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1653
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1654
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1655
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1656
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1657
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1658
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1659
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1660
1661
/* Restore Root.GINTC from unused Guest.GINTC register */
1662
write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1663
1664
/*
1665
* We should clear linked load bit to break interrupted atomics. This
1666
* prevents a SC on the next vCPU from succeeding by matching a LL on
1667
* the previous vCPU.
1668
*/
1669
if (vcpu->kvm->created_vcpus > 1)
1670
set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1671
1672
vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1673
1674
return 0;
1675
}
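/*
* KVM_LARCH_HWCSR_USABLE thus acts as a cache-valid bit: as long as this
* vcpu was the last one to run on this physical CPU and has not migrated,
* the guest CSRs are still live in hardware and the bulk restore above is
* skipped on the next load.
*/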
1676
1677
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1678
{
1679
unsigned long flags;
1680
1681
local_irq_save(flags);
1682
/* Restore guest state to registers */
1683
_kvm_vcpu_load(vcpu, cpu);
1684
local_irq_restore(flags);
1685
}
1686
1687
static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1688
{
1689
struct loongarch_csrs *csr = vcpu->arch.csr;
1690
1691
kvm_lose_fpu(vcpu);
1692
1693
/*
* Update the software CSR state from hardware if it is stale. Most CSR
* registers are kept unchanged across a process context switch, except
* for CSR registers such as the remaining timer tick value and the
* injected interrupt state.
*/
1699
if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1700
goto out;
1701
1702
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1703
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1704
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1705
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1706
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1707
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1708
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1709
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1710
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1711
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1712
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1713
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1714
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1715
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1716
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1717
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1718
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1719
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1720
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1721
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1722
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1723
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1724
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1725
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1726
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1727
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1728
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1729
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1730
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1731
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1732
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1733
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1734
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1735
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1736
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1737
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1738
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1739
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1740
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1741
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1742
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1743
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1744
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1745
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1746
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1747
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1748
kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1749
1750
vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1751
1752
out:
1753
kvm_save_timer(vcpu);
1754
/* Save Root.GINTC into unused Guest.GINTC register */
1755
csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1756
1757
return 0;
1758
}
1759
1760
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1761
{
1762
int cpu;
1763
unsigned long flags;
1764
1765
local_irq_save(flags);
1766
cpu = smp_processor_id();
1767
vcpu->arch.last_sched_cpu = cpu;
1768
1769
/* Save guest state in registers */
1770
_kvm_vcpu_put(vcpu, cpu);
1771
local_irq_restore(flags);
1772
}
1773
1774
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1775
{
1776
int r = -EINTR;
1777
struct kvm_run *run = vcpu->run;
1778
1779
if (vcpu->mmio_needed) {
1780
if (!vcpu->mmio_is_write)
1781
kvm_complete_mmio_read(vcpu, run);
1782
vcpu->mmio_needed = 0;
1783
}
1784
1785
switch (run->exit_reason) {
1786
case KVM_EXIT_HYPERCALL:
1787
kvm_complete_user_service(vcpu, run);
1788
break;
1789
case KVM_EXIT_LOONGARCH_IOCSR:
1790
if (!run->iocsr_io.is_write)
1791
kvm_complete_iocsr_read(vcpu, run);
1792
break;
1793
}
1794
1795
if (!vcpu->wants_to_run)
1796
return r;
1797
1798
/* Clear exit_reason */
1799
run->exit_reason = KVM_EXIT_UNKNOWN;
1800
lose_fpu(1);
1801
vcpu_load(vcpu);
1802
kvm_sigset_activate(vcpu);
1803
r = kvm_pre_enter_guest(vcpu);
1804
if (r != RESUME_GUEST)
1805
goto out;
1806
1807
guest_timing_enter_irqoff();
1808
guest_state_enter_irqoff();
1809
trace_kvm_enter(vcpu);
1810
r = kvm_loongarch_ops->enter_guest(run, vcpu);
1811
1812
trace_kvm_out(vcpu);
1813
/*
* The guest exit has already been recorded in kvm_handle_exit();
* the return value must not be RESUME_GUEST here
*/
1817
local_irq_enable();
1818
out:
1819
kvm_sigset_deactivate(vcpu);
1820
vcpu_put(vcpu);
1821
1822
return r;
1823
}