GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/kvm/book3s.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <[email protected]>
 *     Kevin Wolf <[email protected]>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <[email protected]>.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

/* #define EXIT_DEBUG */

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, num_2M_pages),
	STATS_DESC_ICOUNTER(VM, num_1G_pages)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
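
/*
 * Layout note: these offsets describe the self-describing binary stats
 * blob exposed through KVM's stats file descriptor: the header comes
 * first, then the id string (KVM_STATS_NAME_SIZE bytes), then the
 * descriptor array (each entry carries its name inline), then the data
 * values. A userspace reader would fetch a counter roughly like this
 * (sketch only; "blob" and "desc" are hypothetical locals):
 *
 *	u64 value = *(u64 *)(blob + header->data_offset + desc->offset);
 */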

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, sum_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, light_exits),
	STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, isi_exits),
	STATS_DESC_COUNTER(VCPU, dsi_exits),
	STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
	STATS_DESC_COUNTER(VCPU, dec_exits),
	STATS_DESC_COUNTER(VCPU, ext_intr_exits),
	STATS_DESC_COUNTER(VCPU, halt_successful_wait),
	STATS_DESC_COUNTER(VCPU, dbell_exits),
	STATS_DESC_COUNTER(VCPU, gdbell_exits),
	STATS_DESC_COUNTER(VCPU, ld),
	STATS_DESC_COUNTER(VCPU, st),
	STATS_DESC_COUNTER(VCPU, pf_storage),
	STATS_DESC_COUNTER(VCPU, pf_instruc),
	STATS_DESC_COUNTER(VCPU, sp_storage),
	STATS_DESC_COUNTER(VCPU, sp_instruc),
	STATS_DESC_COUNTER(VCPU, queue_intr),
	STATS_DESC_COUNTER(VCPU, ld_slow),
	STATS_DESC_COUNTER(VCPU, st_slow),
	STATS_DESC_COUNTER(VCPU, pthru_all),
	STATS_DESC_COUNTER(VCPU, pthru_host),
	STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
					     unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}
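
/*
 * Rough usage sketch for the check above: a PR guest using the shared
 * (magic) page marks a critical section by mirroring its stack pointer
 * into the critical field, e.g.
 *
 *	shared->critical = r1;		(enter critical section)
 *	shared->critical = ~0;		(leave critical section)
 *
 * While the stored value equals r1 and MSR_PR is clear, maskable
 * interrupt delivery below is deferred. This is the convention implied
 * by the crit_raw == crit_r1 comparison, as I read it.
 */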

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;  break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;  break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;  break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;  break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;  break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;      break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;     break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;       break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;    break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;   break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;       break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;         break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;       break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;           break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;   break;
	default:    prio = BOOK3S_IRQPRIO_MAX;           break;
	}

	return prio;
}
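
/*
 * The priority returned above is a bit index into
 * vcpu->arch.pending_exceptions. Delivery scans that bitmap from the
 * lowest set bit upward (see kvmppc_core_prepare_to_enter() below), so
 * a smaller BOOK3S_IRQPRIO_* value means the interrupt is considered
 * earlier; unknown vectors collapse to BOOK3S_IRQPRIO_MAX, which the
 * delivery loop never reaches.
 */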

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
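
/*
 * Typical flow, as a sketch: exit handlers queue by architectural
 * vector, and the pending bit only becomes a real injection on the next
 * transition back into the guest:
 *
 *	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
 *	...
 *	kvmppc_core_prepare_to_enter(vcpu);	(delivers, then clears
 *						 one-shot priorities)
 */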

void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
}
EXPORT_SYMBOL(kvmppc_core_queue_syscall);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, srr1_flags);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, srr1_flags);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, srr1_flags);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong srr1_flags,
				    ulong dar, ulong dsisr)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, dsisr);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong srr1_flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, srr1_flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);

static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = !kvmhv_is_nestedv2() && (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}
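
/*
 * Only the decrementer and external cases above are gated: both are
 * maskable, so they wait for MSR_EE and for the guest to be outside a
 * paravirt critical section. The kvmhv_is_nestedv2() checks additionally
 * suppress local injection under the nested v2 API, where (as I
 * understand it) the hypervisor underneath delivers these itself. All
 * other priorities inject as soon as their bit is pending.
 */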

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL:
		/*
		 * External interrupts get cleared by userspace
		 * except when set by the KVM_INTERRUPT ioctl with
		 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
		 */
		if (vcpu->arch.external_oneshot) {
			vcpu->arch.external_oneshot = 0;
			return true;
		}
		return false;
	}

	return true;
}

int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
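
/*
 * The loop above walks pending_exceptions from the lowest set bit
 * upward, so lower-numbered priorities win. It stops after the first
 * interrupt that was both delivered and is of a one-shot type
 * (clear_irqprio() returned true); level-style sources such as the
 * decrementer keep their bit set until dequeued explicitly.
 */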

kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			    bool *writable, struct page **page)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		*page = pfn_to_page(pfn);
		get_page(*page);
		if (writable)
			*writable = true;
		return pfn;
	}

	return kvm_faultin_pfn(vcpu, gfn, writing, writable, page);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
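
/*
 * The magic-page override above is what backs the paravirt shared page:
 * when the faulting gpa matches magic_page_pa, the pfn of the host
 * kernel page holding vcpu->arch.shared is returned directly, with its
 * refcount raised, instead of going through the memslot-backed
 * kvm_faultin_pfn() path.
 */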

int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}
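
/*
 * In the !relocated branch the effective address maps one to one onto
 * guest real memory, clipped by KVM_PAM, with all permissions granted;
 * e.g. a real-mode fetch at 0x100 simply yields raddr == 0x100. The
 * VSID_REAL tag keeps these real-mode translations distinct from
 * relocated ones that share the same effective address.
 */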

/*
 * Returns prefixed instructions with the prefix in the high 32 bits
 * of *inst and suffix in the low 32 bits.  This is the same convention
 * as used in HEIR, vcpu->arch.last_inst and vcpu->arch.emul_inst.
 * Like vcpu->arch.last_inst but unlike vcpu->arch.emul_inst, each
 * half of the value needs byte-swapping if the guest endianness is
 * different from the host endianness.
 */
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
			  enum instruction_fetch_type type, unsigned long *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;
	u32 iw;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), &iw, false);
	if (r != EMULATE_DONE)
		return EMULATE_AGAIN;
	/*
	 * If [H]SRR1 indicates that the instruction that caused the
	 * current interrupt is a prefixed instruction, get the suffix.
	 */
	if (kvmppc_get_msr(vcpu) & SRR1_PREFIXED) {
		u32 suffix;
		pc += 4;
		r = kvmppc_ld(vcpu, &pc, sizeof(u32), &suffix, false);
		if (r != EMULATE_DONE)
			return EMULATE_AGAIN;
		*inst = ((u64)iw << 32) | suffix;
	} else {
		*inst = iw;
	}
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
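
/*
 * Packing example for the convention documented above: for a prefixed
 * load such as pld, with the prefix word at pc and the suffix at pc + 4,
 * the result is
 *
 *	*inst = ((u64)prefix << 32) | suffix;
 *
 * whereas a plain instruction lands zero-extended in the low 32 bits.
 */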

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = kvmppc_get_pid(vcpu);
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}
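
/*
 * KVM_GET_FPU/KVM_SET_FPU are unimplemented here because on Book3S the
 * floating point, VMX and VSX state is exposed through the ONE_REG
 * interface below instead (KVM_REG_PPC_FPR0..31, KVM_REG_PPC_FPSCR,
 * KVM_REG_PPC_VSR0..31 and friends).
 */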

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
				val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, kvmppc_get_tar(vcpu));
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, kvmppc_get_ebbhr(vcpu));
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, kvmppc_get_ebbrr(vcpu));
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, kvmppc_get_bescr(vcpu));
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, kvmppc_get_ic(vcpu));
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
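
/*
 * Note the fallback pattern: the active backend gets first refusal via
 * kvm_ops->get_one_reg(), and only if it returns -EINVAL does the
 * generic switch above handle the register. The setter below mirrors
 * this, so a backend can override any register without touching the
 * common code.
 */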

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
				kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			/* FSCR (facility status/control), not the FP FPSCR above */
			kvmppc_set_fscr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_TAR:
			kvmppc_set_tar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_EBBHR:
			kvmppc_set_ebbhr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_EBBRR:
			kvmppc_set_ebbrr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_BESCR:
			kvmppc_set_bescr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_IC:
			kvmppc_set_ic(vcpu, set_reg_val(id, *val));
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm->arch.kvm_ops->free_memslot(slot);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      const struct kvm_memory_slot *old,
				      struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, old, new, change);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new,
				      enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, old, new, change);
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->age_gfn(kvm, range);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

#ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE and XICS devices which are not directly freed by the
	 * device 'release' method
	 */
	kfree(kvm->arch.xive_devices.native);
	kvm->arch.xive_devices.native = NULL;
	kfree(kvm->arch.xive_devices.xics_on_xive);
	kvm->arch.xive_devices.xics_on_xive = NULL;
	kfree(kvm->arch.xics_device);
	kvm->arch.xics_device = NULL;
#endif /* CONFIG_KVM_XICS */
}
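
/*
 * Nearly everything above is thin dispatch through kvm->arch.kvm_ops.
 * That indirection is how one kernel image hosts both backends: kvm_ops
 * points at either the HV (book3s_hv.c) or PR (book3s_pr.c)
 * implementation, selected when the VM is created.
 */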
933
934
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
935
{
936
unsigned long size = kvmppc_get_gpr(vcpu, 4);
937
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
938
u64 buf;
939
int srcu_idx;
940
int ret;
941
942
if (!is_power_of_2(size) || (size > sizeof(buf)))
943
return H_TOO_HARD;
944
945
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
946
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
947
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
948
if (ret != 0)
949
return H_TOO_HARD;
950
951
switch (size) {
952
case 1:
953
kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
954
break;
955
956
case 2:
957
kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
958
break;
959
960
case 4:
961
kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
962
break;
963
964
case 8:
965
kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
966
break;
967
968
default:
969
BUG();
970
}
971
972
return H_SUCCESS;
973
}
974
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
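
/*
 * Calling convention of the hypercall above, as the code reads: the
 * guest passes the access size in r4 and the logical address in r5; the
 * loaded value is returned in r4, converted from big-endian for the
 * multi-byte sizes. Returning H_TOO_HARD punts the call up for
 * emulation elsewhere (typically userspace) rather than failing it.
 */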

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */
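
/*
 * The routing stubs above keep the GSI namespace trivial on Book3S:
 * each GSI maps to exactly one irqchip pin of the same number, and
 * kvm_set_irq() forwards to whichever of XICS or XIVE is live, so there
 * is no routing table to maintain.
 */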

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported())
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif