GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/vgic/vgic-init.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"

/*
 * Initialization rules: there are multiple stages to the vgic
 * initialization, both for the distributor and the CPU interfaces. The basic
 * idea is that even when the VGIC is not functional or has not been requested
 * by user space, the critical path of the run loop can still call VGIC
 * functions that just won't do anything, without them having to check
 * additional initialization flags to ensure they don't look at uninitialized
 * data structures.
 *
 * Distributor:
 *
 * - kvm_vgic_early_init(): initialization of static data that doesn't
 *   depend on any sizing information or emulation type. No allocation
 *   is allowed there.
 *
 * - vgic_init(): allocation and initialization of the generic data
 *   structures that depend on sizing information (number of CPUs,
 *   number of interrupts). Also initializes the vcpu-specific data
 *   structures. Can be executed lazily for GICv2.
 *
 * CPU Interface:
 *
 * - kvm_vgic_vcpu_init(): initialization of static data that doesn't depend
 *   on any sizing information. Private interrupts are allocated if not
 *   already allocated at vgic-creation time.
 */
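/*
 * Illustrative sketch (editor's addition, not part of the kernel file):
 * the userspace side of the staged initialization described above, for a
 * GICv3. The ioctls and attribute groups are the documented KVM device
 * API; vm_fd and the distributor base address are hypothetical. Guarded
 * with #if 0 so it is never built.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int create_and_init_vgic_v3(int vm_fd)
{
        struct kvm_create_device cd = {
                .type = KVM_DEV_TYPE_ARM_VGIC_V3,
        };
        __u64 dist_base = 0x08000000;           /* hypothetical guest PA */
        struct kvm_device_attr addr = {
                .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
                .attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
                .addr  = (__u64)(unsigned long)&dist_base,
        };
        struct kvm_device_attr init = {
                .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
                .attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
        };

        /* Instantiate the device; this reaches kvm_vgic_create(). */
        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                return -1;
        /* Place the distributor before finalizing. */
        if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &addr) < 0)
                return -1;
        /* Finalize: for a GICv3 this is what ends up in vgic_init(). */
        return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &init);
}
#endif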

/* EARLY INIT */

/**
 * kvm_vgic_early_init() - Initialize static VGIC VCPU data structures
 * @kvm: The VM whose VGIC distributor should be initialized
 *
 * Only do initialization of static structures that don't require any
 * allocation or sizing information from userspace. vgic_init() calls
 * kvm_vgic_dist_init() which takes care of the rest.
 */
void kvm_vgic_early_init(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
}

/* CREATION */

static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type);

/**
 * kvm_vgic_create: triggered by the instantiation of the VGIC device by
 * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
 * or through the generic KVM_CREATE_DEVICE API ioctl.
 * irqchip_in_kernel() tells you if this function succeeded or not.
 * @kvm: kvm struct pointer
 * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
 */
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
        struct kvm_vcpu *vcpu;
        u64 aa64pfr0, pfr1;
        unsigned long i;
        int ret;

        /*
         * This function is also called by the KVM_CREATE_IRQCHIP handler,
         * which had no chance yet to check the availability of the GICv2
         * emulation. So check this here again. KVM_CREATE_DEVICE does
         * the proper checks already.
         */
        if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
            !kvm_vgic_global_state.can_emulate_gicv2)
                return -ENODEV;

        /*
         * Ensure mutual exclusion with vCPU creation and any vCPU ioctls by:
         *
         * - Holding kvm->lock to prevent KVM_CREATE_VCPU from reaching
         *   kvm_arch_vcpu_precreate() and ensuring created_vcpus is stable.
         *   This alone is insufficient, as kvm_vm_ioctl_create_vcpu() drops
         *   the kvm->lock before completing the vCPU creation.
         */
        lockdep_assert_held(&kvm->lock);

        /*
         * - Acquiring the vCPU mutex for every *online* vCPU to prevent
         *   concurrent vCPU ioctls for vCPUs already visible to userspace.
         */
        ret = -EBUSY;
        if (kvm_trylock_all_vcpus(kvm))
                return ret;

        /*
         * - Taking the config_lock which protects VGIC data structures such
         *   as the per-vCPU arrays of private IRQs (SGIs, PPIs).
         */
        mutex_lock(&kvm->arch.config_lock);

        /*
         * - Bailing on the entire thing if a vCPU is in the middle of creation,
         *   dropped the kvm->lock, but hasn't reached kvm_arch_vcpu_create().
         *
         * The whole combination of this guarantees that no vCPU can get into
         * KVM with a VGIC configuration inconsistent with the VM's VGIC.
         */
        if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
                goto out_unlock;

        if (irqchip_in_kernel(kvm)) {
                ret = -EEXIST;
                goto out_unlock;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu_has_run_once(vcpu))
                        goto out_unlock;
        }
        ret = 0;

        if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
                kvm->max_vcpus = VGIC_V2_MAX_CPUS;
        else
                kvm->max_vcpus = VGIC_V3_MAX_CPUS;

        if (atomic_read(&kvm->online_vcpus) > kvm->max_vcpus) {
                ret = -E2BIG;
                goto out_unlock;
        }

        kvm->arch.vgic.in_kernel = true;
        kvm->arch.vgic.vgic_model = type;
        kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                ret = vgic_allocate_private_irqs_locked(vcpu, type);
                if (ret)
                        break;
        }

        if (ret) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
                        kfree(vgic_cpu->private_irqs);
                        vgic_cpu->private_irqs = NULL;
                }

                goto out_unlock;
        }

        kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;

        aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
        pfr1 = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;

        if (type == KVM_DEV_TYPE_ARM_VGIC_V2) {
                kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
        } else {
                INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
                aa64pfr0 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
                pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
        }

        kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0);
        kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1);

        if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
                kvm->arch.vgic.nassgicap = system_supports_direct_sgis();

out_unlock:
        mutex_unlock(&kvm->arch.config_lock);
        kvm_unlock_all_vcpus(kvm);
        return ret;
}
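
/*
 * Illustrative sketch (editor's addition): the other userspace entry
 * point named in the kernel-doc above. KVM_CREATE_IRQCHIP is the legacy,
 * GICv2-only path; KVM_CREATE_DEVICE (shown earlier) is the generic one.
 * vm_fd is a hypothetical KVM VM file descriptor.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int create_vgic_v2_legacy(int vm_fd)
{
        /* Ends up in kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2). */
        return ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);
}
#endif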

/* INIT/DESTROY */

/**
 * kvm_vgic_dist_init: initialize the dist data structures
 * @kvm: kvm struct pointer
 * @nr_spis: number of spis, frozen by caller
 */
static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
        int i;

        dist->active_spis = (atomic_t)ATOMIC_INIT(0);
        dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
        if (!dist->spis)
                return -ENOMEM;

        /*
         * In the following code we do not take the irq struct lock since
         * no other action on irq structs can happen while the VGIC is
         * not initialized yet:
         * If someone wants to inject an interrupt or does an MMIO access, we
         * require prior initialization in case of a virtual GICv3 or trigger
         * initialization when using a virtual GICv2.
         */
        for (i = 0; i < nr_spis; i++) {
                struct vgic_irq *irq = &dist->spis[i];

                irq->intid = i + VGIC_NR_PRIVATE_IRQS;
                INIT_LIST_HEAD(&irq->ap_list);
                raw_spin_lock_init(&irq->irq_lock);
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu0;
                refcount_set(&irq->refcount, 0);
                switch (dist->vgic_model) {
                case KVM_DEV_TYPE_ARM_VGIC_V2:
                        irq->targets = 0;
                        irq->group = 0;
                        break;
                case KVM_DEV_TYPE_ARM_VGIC_V3:
                        irq->mpidr = 0;
                        irq->group = 1;
                        break;
                default:
                        kfree(dist->spis);
                        dist->spis = NULL;
                        return -EINVAL;
                }
        }
        return 0;
}
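
/*
 * Editor's note: dist->spis[] is indexed by SPI number, not by INTID;
 * SPI i gets INTID i + VGIC_NR_PRIVATE_IRQS, since the first 32 INTIDs
 * are the per-vCPU SGIs and PPIs. A minimal sketch of the reverse
 * mapping, assuming intid is a valid SPI (hypothetical helper, guarded
 * out of the build):
 */
#if 0
static struct vgic_irq *spi_intid_to_irq(struct vgic_dist *dist, u32 intid)
{
        /* Undo the intid = i + VGIC_NR_PRIVATE_IRQS assignment above. */
        return &dist->spis[intid - VGIC_NR_PRIVATE_IRQS];
}
#endif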

/* Default GICv3 Maintenance Interrupt INTID, as per SBSA */
#define DEFAULT_MI_INTID        25

int kvm_vgic_vcpu_nv_init(struct kvm_vcpu *vcpu)
{
        int ret;

        guard(mutex)(&vcpu->kvm->arch.config_lock);

        /*
         * Matching the tradition established with the timers, provide
         * a default PPI for the maintenance interrupt. It makes
         * things easier to reason about.
         */
        if (vcpu->kvm->arch.vgic.mi_intid == 0)
                vcpu->kvm->arch.vgic.mi_intid = DEFAULT_MI_INTID;
        ret = kvm_vgic_set_owner(vcpu, vcpu->kvm->arch.vgic.mi_intid, vcpu);

        return ret;
}
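
/*
 * Editor's note: guard(mutex)(...) above is the scope-based lock helper
 * from linux/cleanup.h; the mutex is released automatically when the
 * function returns. A minimal sketch of the equivalent open-coded form
 * of the same function (illustrative only, guarded out of the build):
 */
#if 0
static int kvm_vgic_vcpu_nv_init_open_coded(struct kvm_vcpu *vcpu)
{
        int ret;

        mutex_lock(&vcpu->kvm->arch.config_lock);
        if (vcpu->kvm->arch.vgic.mi_intid == 0)
                vcpu->kvm->arch.vgic.mi_intid = DEFAULT_MI_INTID;
        ret = kvm_vgic_set_owner(vcpu, vcpu->kvm->arch.vgic.mi_intid, vcpu);
        mutex_unlock(&vcpu->kvm->arch.config_lock);   /* manual unlock on every path */

        return ret;
}
#endif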

static int vgic_allocate_private_irqs_locked(struct kvm_vcpu *vcpu, u32 type)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        int i;

        lockdep_assert_held(&vcpu->kvm->arch.config_lock);

        if (vgic_cpu->private_irqs)
                return 0;

        vgic_cpu->private_irqs = kcalloc(VGIC_NR_PRIVATE_IRQS,
                                         sizeof(struct vgic_irq),
                                         GFP_KERNEL_ACCOUNT);

        if (!vgic_cpu->private_irqs)
                return -ENOMEM;

        /*
         * Enable and configure all SGIs to be edge-triggered and
         * configure all PPIs as level-triggered.
         */
        for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
                struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

                INIT_LIST_HEAD(&irq->ap_list);
                raw_spin_lock_init(&irq->irq_lock);
                irq->intid = i;
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu;
                refcount_set(&irq->refcount, 0);
                if (vgic_irq_is_sgi(i)) {
                        /* SGIs */
                        irq->enabled = 1;
                        irq->config = VGIC_CONFIG_EDGE;
                } else {
                        /* PPIs */
                        irq->config = VGIC_CONFIG_LEVEL;
                }

                switch (type) {
                case KVM_DEV_TYPE_ARM_VGIC_V3:
                        irq->group = 1;
                        irq->mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
                        break;
                case KVM_DEV_TYPE_ARM_VGIC_V2:
                        irq->group = 0;
                        irq->targets = BIT(vcpu->vcpu_id);
                        break;
                }
        }

        return 0;
}

static int vgic_allocate_private_irqs(struct kvm_vcpu *vcpu, u32 type)
{
        int ret;

        mutex_lock(&vcpu->kvm->arch.config_lock);
        ret = vgic_allocate_private_irqs_locked(vcpu, type);
        mutex_unlock(&vcpu->kvm->arch.config_lock);

        return ret;
}

/**
 * kvm_vgic_vcpu_init() - Initialize static VGIC VCPU data
 * structures and register VCPU-specific KVM iodevs
 *
 * @vcpu: pointer to the VCPU being created and initialized
 *
 * Only do initialization, but do not actually enable the
 * VGIC CPU interface
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        int ret = 0;

        vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;

        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
        raw_spin_lock_init(&vgic_cpu->ap_list_lock);
        atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);

        if (!irqchip_in_kernel(vcpu->kvm))
                return 0;

        ret = vgic_allocate_private_irqs(vcpu, dist->vgic_model);
        if (ret)
                return ret;

        /*
         * If we are creating a VCPU with a GICv3 we must also register the
         * KVM io device for the redistributor that belongs to this VCPU.
         */
        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                mutex_lock(&vcpu->kvm->slots_lock);
                ret = vgic_register_redist_iodev(vcpu);
                mutex_unlock(&vcpu->kvm->slots_lock);
        }
        return ret;
}

static void kvm_vgic_vcpu_reset(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_reset(vcpu);
        else
                vgic_v3_reset(vcpu);
}

/*
 * vgic_init: allocates and initializes dist and vcpu data structures
 * depending on two dimensioning parameters:
 * - the number of spis
 * - the number of vcpus
 * The function is generally called when nr_spis has been explicitly set
 * by userspace through the KVM device API. If it was not, nr_spis defaults
 * to the legacy value of 256 total INTIDs, i.e. 224 SPIs.
 * vgic_initialized() returns true when this function has succeeded.
 */
int vgic_init(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int ret = 0;
        unsigned long idx;

        lockdep_assert_held(&kvm->arch.config_lock);

        if (vgic_initialized(kvm))
                return 0;

        /* Are we also in the middle of creating a VCPU? */
        if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
                return -EBUSY;

        /* freeze the number of spis */
        if (!dist->nr_spis)
                dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;

        ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
        if (ret)
                goto out;

        /*
         * Ensure vPEs are allocated if direct IRQ injection (e.g. vSGIs,
         * vLPIs) is supported.
         */
        if (vgic_supports_direct_irqs(kvm)) {
                ret = vgic_v4_init(kvm);
                if (ret)
                        goto out;
        }

        kvm_for_each_vcpu(idx, vcpu, kvm)
                kvm_vgic_vcpu_reset(vcpu);

        ret = kvm_vgic_setup_default_irq_routing(kvm);
        if (ret)
                goto out;

        vgic_debug_init(kvm);
        dist->initialized = true;
out:
        return ret;
}
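
/*
 * Illustrative sketch (editor's addition): how userspace typically
 * freezes nr_spis before initialization, via the documented
 * KVM_DEV_ARM_VGIC_GRP_NR_IRQS attribute. The value counts all INTIDs
 * including the 32 private ones, so 96 here yields nr_spis == 64.
 * vgic_fd is a hypothetical device fd returned by KVM_CREATE_DEVICE.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_nr_irqs(int vgic_fd)
{
        __u32 nr_irqs = 96;                     /* hypothetical sizing */
        struct kvm_device_attr attr = {
                .group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
                .attr  = 0,
                .addr  = (__u64)(unsigned long)&nr_irqs,
        };

        return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
}
#endif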

static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg, *next;

        dist->ready = false;
        dist->initialized = false;

        kfree(dist->spis);
        dist->spis = NULL;
        dist->nr_spis = 0;
        dist->vgic_dist_base = VGIC_ADDR_UNDEF;

        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
                        vgic_v3_free_redist_region(kvm, rdreg);
                INIT_LIST_HEAD(&dist->rd_regions);
        } else {
                dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
        }

        if (vgic_supports_direct_irqs(kvm))
                vgic_v4_teardown(kvm);

        xa_destroy(&dist->lpi_xa);
}

static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        /*
         * Retire all pending LPIs on this vcpu anyway as we're
         * going to destroy it.
         */
        vgic_flush_pending_lpis(vcpu);

        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
        kfree(vgic_cpu->private_irqs);
        vgic_cpu->private_irqs = NULL;

        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                /*
                 * If this vCPU is being destroyed because of a failed creation
                 * then unregister the redistributor to avoid leaving behind a
                 * dangling pointer to the vCPU struct.
                 *
                 * vCPUs that have been successfully created (i.e. added to
                 * kvm->vcpu_array) get unregistered in kvm_vgic_destroy(), as
                 * this function gets called while holding kvm->arch.config_lock
                 * in the VM teardown path and would otherwise introduce a lock
                 * inversion w.r.t. kvm->srcu.
                 *
                 * vCPUs that failed creation are torn down outside of the
                 * kvm->arch.config_lock and do not get unregistered in
                 * kvm_vgic_destroy(), meaning it is both safe and necessary to
                 * do so here.
                 */
                if (kvm_get_vcpu_by_id(vcpu->kvm, vcpu->vcpu_id) != vcpu)
                        vgic_unregister_redist_iodev(vcpu);

                vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
        }
}

void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;

        mutex_lock(&kvm->slots_lock);
        __kvm_vgic_vcpu_destroy(vcpu);
        mutex_unlock(&kvm->slots_lock);
}

void kvm_vgic_destroy(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        mutex_lock(&kvm->slots_lock);
        mutex_lock(&kvm->arch.config_lock);

        vgic_debug_destroy(kvm);

        kvm_for_each_vcpu(i, vcpu, kvm)
                __kvm_vgic_vcpu_destroy(vcpu);

        kvm_vgic_dist_destroy(kvm);

        mutex_unlock(&kvm->arch.config_lock);

        if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        vgic_unregister_redist_iodev(vcpu);

        mutex_unlock(&kvm->slots_lock);
}

/**
 * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
 * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
 * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
 * @kvm: kvm struct pointer
 */
int vgic_lazy_init(struct kvm *kvm)
{
        int ret = 0;

        if (unlikely(!vgic_initialized(kvm))) {
                /*
                 * We only provide the automatic initialization of the VGIC
                 * for the legacy case of a GICv2. Any other type must
                 * be explicitly initialized once set up with the respective
                 * KVM device call.
                 */
                if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
                        return -EBUSY;

                mutex_lock(&kvm->arch.config_lock);
                ret = vgic_init(kvm);
                mutex_unlock(&kvm->arch.config_lock);
        }

        return ret;
}

/* RESOURCE MAPPING */

/**
 * kvm_vgic_map_resources - map the MMIO regions
 * @kvm: kvm struct pointer
 *
 * Map the MMIO regions depending on the VGIC model exposed to the guest,
 * called on the first VCPU run.
 * Also map the virtual CPU interface into the VM.
 * v2 calls vgic_init() if not already done.
 * v3 and derivatives return an error if the VGIC is not initialized.
 */
int kvm_vgic_map_resources(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        enum vgic_type type;
        gpa_t dist_base;
        int ret = 0;

        if (likely(smp_load_acquire(&dist->ready)))
                return 0;

        mutex_lock(&kvm->slots_lock);
        mutex_lock(&kvm->arch.config_lock);
        if (dist->ready)
                goto out;

        if (!irqchip_in_kernel(kvm))
                goto out;

        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                ret = vgic_v2_map_resources(kvm);
                type = VGIC_V2;
        } else {
                ret = vgic_v3_map_resources(kvm);
                type = VGIC_V3;
        }

        if (ret)
                goto out;

        dist_base = dist->vgic_dist_base;
        mutex_unlock(&kvm->arch.config_lock);

        ret = vgic_register_dist_iodev(kvm, dist_base, type);
        if (ret) {
                kvm_err("Unable to register VGIC dist MMIO regions\n");
                goto out_slots;
        }

        smp_store_release(&dist->ready, true);
        goto out_slots;
out:
        mutex_unlock(&kvm->arch.config_lock);
out_slots:
        if (ret)
                kvm_vm_dead(kvm);

        mutex_unlock(&kvm->slots_lock);

        return ret;
}
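
/*
 * Editor's note: kvm_vgic_map_resources() uses a double-checked,
 * acquire/release publication of dist->ready so the vcpu run path can
 * skip both locks once mapping is done. A minimal generic sketch of
 * that pattern, with hypothetical names (do_setup() stands in for the
 * one-time work); guarded out of the build:
 */
#if 0
static bool ready;
static DEFINE_MUTEX(setup_lock);

static int ensure_setup(void)
{
        int ret = 0;

        if (smp_load_acquire(&ready))   /* fast path, pairs with release */
                return 0;

        mutex_lock(&setup_lock);
        if (!ready) {                   /* re-check under the lock */
                ret = do_setup();       /* hypothetical one-time setup */
                if (!ret)
                        smp_store_release(&ready, true);
        }
        mutex_unlock(&setup_lock);
        return ret;
}
#endif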

/* GENERIC PROBE */

void kvm_vgic_cpu_up(void)
{
        enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
}

void kvm_vgic_cpu_down(void)
{
        disable_percpu_irq(kvm_vgic_global_state.maint_irq);
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
        struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)data;

        /*
         * We cannot rely on the vgic maintenance interrupt to be
         * delivered synchronously. This means we can only use it to
         * exit the VM, and we perform the handling of EOIed
         * interrupts on the exit path (see vgic_fold_lr_state).
         *
         * Of course, NV throws a wrench in this plan, and needs
         * something special.
         */
        if (vcpu && vgic_state_is_nested(vcpu))
                vgic_v3_handle_nested_maint_irq(vcpu);

        return IRQ_HANDLED;
}

static struct gic_kvm_info *gic_kvm_info;

void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
{
        BUG_ON(gic_kvm_info != NULL);
        gic_kvm_info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (gic_kvm_info)
                *gic_kvm_info = *info;
}

/**
 * kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
 *
 * For a specific CPU, initialize the GIC VE hardware.
 */
void kvm_vgic_init_cpu_hardware(void)
{
        BUG_ON(preemptible());

        /*
         * We want to make sure the list registers start out clear so that we
         * only have to program the used registers.
         */
        if (kvm_vgic_global_state.type == VGIC_V2) {
                vgic_v2_init_lrs();
        } else if (kvm_vgic_global_state.type == VGIC_V3 ||
                   kvm_vgic_global_state.has_gcie_v3_compat) {
                kvm_call_hyp(__vgic_v3_init_lrs);
        }
}

/**
 * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
 * according to the host GIC model. Accordingly calls either
 * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
 * instantiated by a guest later on.
 */
int kvm_vgic_hyp_init(void)
{
        bool has_mask;
        int ret;

        if (!gic_kvm_info)
                return -ENODEV;

        has_mask = !gic_kvm_info->no_maint_irq_mask;

        if (has_mask && !gic_kvm_info->maint_irq) {
                kvm_err("No vgic maintenance irq\n");
                return -ENXIO;
        }

        /*
         * If we get one of these oddball non-GICs, taint the kernel,
         * as we have no idea of how they *really* behave.
         */
        if (gic_kvm_info->no_hw_deactivation) {
                kvm_info("Non-architectural vgic, tainting kernel\n");
                add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
                kvm_vgic_global_state.no_hw_deactivation = true;
        }

        switch (gic_kvm_info->type) {
        case GIC_V2:
                ret = vgic_v2_probe(gic_kvm_info);
                break;
        case GIC_V3:
                ret = vgic_v3_probe(gic_kvm_info);
                if (!ret) {
                        static_branch_enable(&kvm_vgic_global_state.gicv3_cpuif);
                        kvm_info("GIC system register CPU interface enabled\n");
                }
                break;
        case GIC_V5:
                ret = vgic_v5_probe(gic_kvm_info);
                break;
        default:
                ret = -ENODEV;
        }

        kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;

        kfree(gic_kvm_info);
        gic_kvm_info = NULL;

        if (ret)
                return ret;

        if (!has_mask && !kvm_vgic_global_state.maint_irq)
                return 0;

        ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
                                 vgic_maintenance_handler,
                                 "vgic", kvm_get_running_vcpus());
        if (ret) {
                kvm_err("Cannot register interrupt %d\n",
                        kvm_vgic_global_state.maint_irq);
                return ret;
        }

        kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
        return 0;
}