GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/vgic/vgic-mmio.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

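/*
 * The trivial accessors below implement the architectural RAZ
 * ("read-as-zero"), RAO ("read-as-ones") and WI ("writes-ignored")
 * register behaviours defined by the ARM GIC architecture.
 */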
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

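/*
 * Worked example of the bitmap decoding above, assuming the usual
 * 1-bit-per-interrupt register layout: a 32-bit read at byte offset 4
 * into the range gives VGIC_ADDR_TO_INTID(addr, 1) == 32, and the
 * len * 8 == 32 loop iterations then cover INTIDs 32..63, one bit per
 * interrupt.
 */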
static void vgic_update_vsgi(struct vgic_irq *irq)
{
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

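/*
 * For hardware-backed SGIs (GICv4.1 vSGIs), vgic_update_vsgi() above
 * pushes the new group (and, from the priority handler further down,
 * the new priority) out to the host via its_prop_update_vsgi(), so the
 * host GIC's configuration stays in sync with the guest-visible
 * registers.
 */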
/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			if (!irq->enabled) {
				struct irq_data *data;

				irq->enabled = true;
				data = &irq_to_desc(irq->host_irq)->irq_data;
				while (irqd_irq_disabled(data))
					enable_irq(irq->host_irq);
			}

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		} else if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);
			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

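/*
 * Note on the enable_irq() loop above: enable_irq() decrements the
 * host interrupt's disable depth, so the loop keeps calling it until
 * irqd_irq_disabled() reports the line as actually enabled again,
 * undoing any nested disable_irq_nosync() calls.
 */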
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
			disable_irq_nosync(irq->host_irq);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

static unsigned long __read_pending(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    bool is_user)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
		unsigned long flags;
		bool val;

		/*
		 * When used from userspace with a GICv3 model:
		 *
		 * Pending state of interrupt is latched in pending_latch
		 * variable. Userspace will save and restore pending state
		 * and line_level separately.
		 * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
		 * for handling of ISPENDR and ICPENDR.
		 */
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			val = false;
			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &val);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		} else if (!is_user && vgic_irq_is_mapped_level(irq)) {
			val = vgic_get_phys_line_level(irq);
		} else {
			switch (vcpu->kvm->arch.vgic.vgic_model) {
			case KVM_DEV_TYPE_ARM_VGIC_V3:
				if (is_user) {
					val = irq->pending_latch;
					break;
				}
				fallthrough;
			default:
				val = irq_is_pending(irq);
				break;
			}
		}

		value |= ((u32)val << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

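/*
 * In short, __read_pending() derives the pending bit from one of three
 * places: the host GIC (via irq_get_irqchip_state()) for HW-backed
 * SGIs, the physical line level for mapped level interrupts read by
 * the guest, and the software pending state otherwise (with userspace
 * on a GICv3 model seeing only pending_latch).
 */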
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, false);
}

unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}

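/*
 * GICv2 SGIs carry the originating CPU's ID with them, so the VGIC
 * tracks pending GICv2 SGIs as a bitmap of source VCPUs in
 * irq->source rather than as a single pending bit; the helpers below
 * use is_vgic_v2_sgi() to pick that representation.
 */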
static void __set_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len,
			  unsigned long val, bool is_user)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI when written from the guest. */
		if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * GICv2 SGIs are terribly broken. We can't restore
		 * the source of the interrupt, so just pick the vcpu
		 * itself as the source...
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source |= BIT(vcpu->vcpu_id);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to inject it */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		irq->pending_latch = true;
		if (irq->hw && !is_user)
			vgic_irq_set_phys_active(irq, true);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	__set_pending(vcpu, addr, len, val, false);
}

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	__set_pending(vcpu, addr, len, val, true);
	return 0;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

static void __clear_pending(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val, bool is_user)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI when written from the guest. */
		if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * More fun with GICv2 SGIs! If we're clearing one of them
		 * from userspace, which source vcpu to clear? Let's not
		 * even think of it, and blow the whole set.
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source = 0;

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    false);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw && !is_user)
			vgic_hw_irq_cpending(vcpu, irq);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	__clear_pending(vcpu, addr, len, val, false);
}

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	__clear_pending(vcpu, addr, len, val, true);
	return 0;
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts accessed from the
 * non-owning CPU, we have to stop all the VCPUs because interrupts can be
 * migrated while we don't hold the IRQ locks and we don't want to be chasing
 * moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
	     vcpu != kvm_get_running_vcpu()) ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_access_active_prepare */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 &&
	     vcpu != kvm_get_running_vcpu()) ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->arch.config_lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return val;
}

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}

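/*
 * The uaccess variant above skips the vgic_access_active_prepare() /
 * vgic_access_active_finish() halt-resume dance; in line with the
 * comment above those helpers, userspace accesses are expected to take
 * place while the VCPUs are already stopped.
 */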
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 VSGI feature doesn't track an active state,
		 * so let's not kid ourselves, there is nothing we can
		 * do here.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->arch.config_lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->arch.config_lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

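/*
 * Example of the priority narrowing above, assuming VGIC_PRI_BITS == 5:
 * GENMASK(7, 8 - VGIC_PRI_BITS) is then 0xf8, so a guest write of 0x41
 * is stored (and later read back) as 0x40 -- only the top priority
 * bits are implemented, as the architecture permits.
 */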
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

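/*
 * The config registers use two bits per interrupt, of which only the
 * upper bit is meaningful here: 0b00 is level-triggered, 0b10 is
 * edge-triggered. Hence the read above reports edge as (2U << (i * 2))
 * and the write tests bit (i * 2 + 1) of the written value.
 */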
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u32 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_vcpu_irq(vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u32 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_vcpu_irq(vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

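/*
 * Note that bsearch() requires the regions array to be sorted by
 * ascending reg_offset; match_region() steers the binary search by
 * reporting whether the offset falls below, inside, or above a
 * region's [reg_offset, reg_offset + len) window.
 */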
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

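/*
 * Endianness example for the two helpers above: a guest 32-bit store
 * of 0x01020304 reaches the buffer as the LE byte sequence
 * 04 03 02 01; le32_to_cpu() recovers 0x01020304 regardless of the
 * host CPU's native byte order, and cpu_to_le32() performs the reverse
 * conversion for reads.
 */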
static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

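/*
 * check_region() therefore rejects, for example, a 2-byte access (no
 * case in the switch above), a misaligned access, or an access that
 * decodes to an INTID beyond the allocated SPIs; the callers below
 * then treat the access as RAZ/WI.
 */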
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			     gpa_t addr, u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			      gpa_t addr, const u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, dev, offset, val);
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

const struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG();
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				       len, &io_device->dev);
}

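/*
 * kvm_io_bus_register_dev() above hooks the distributor window into
 * KVM's MMIO bus, so guest accesses to [dist_base_address,
 * dist_base_address + len) are routed to kvm_io_gic_ops and from there
 * dispatched through the region table installed by
 * vgic_v2_init_dist_iodev() or vgic_v3_init_dist_iodev().
 */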