GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/hyp/vgic-v3-sr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <hyp/adjust_pc.h>

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#include "../../vgic/vgic.h"

#define vtr_to_max_lr_idx(v)	((v) & 0xf)
#define vtr_to_nr_pre_bits(v)	((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)	(1 << (vtr_to_nr_pre_bits(v) - 5))

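/*
 * System register names are encoded in the instruction itself, so the
 * ICH_LR<n>_EL2 registers cannot be indexed at run time; the LR accessors
 * below demultiplex the index with a switch statement instead.
 */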
u64 __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

void __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

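/*
 * Accessors for the ICH_AP0Rn_EL2/ICH_AP1Rn_EL2 active priority
 * registers, which also need the register number demultiplexed.
 */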
static void __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

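/*
 * Combine the guest-visible ICH_HCR_EL2 value with the trap bits the host
 * wants enforced while the guest runs (as provided by
 * vgic_ich_hcr_trap_bits()).
 */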
static u64 compute_ich_hcr(struct vgic_v3_cpu_if *cpu_if)
{
	return cpu_if->vgic_hcr | vgic_ich_hcr_trap_bits();
}

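/*
 * Save the in-use LRs (clearing them on the way out), the VMCR and, if
 * LRENPIE is in use, the current EOIcount, then disable the vGIC by
 * zeroing ICH_HCR_EL2.
 */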
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELRSR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}

	cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);

	if (cpu_if->vgic_hcr & ICH_HCR_EL2_LRENPIE) {
		u64 val = read_gicreg(ICH_HCR_EL2);
		cpu_if->vgic_hcr &= ~ICH_HCR_EL2_EOIcount;
		cpu_if->vgic_hcr |= val & ICH_HCR_EL2_EOIcount;
	}

	write_gicreg(0, ICH_HCR_EL2);

	/*
	 * Hack alert: On NV, this results in a trap so that the above write
	 * actually takes effect... No synchronisation is necessary, as we
	 * only care about the effects when this traps.
	 */
	read_gicreg(ICH_MISR_EL2);
}

void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	write_gicreg(compute_ich_hcr(cpu_if), ICH_HCR_EL2);

	for (i = 0; i < used_lrs; i++)
		__gic_v3_set_lr(cpu_if->vgic_lr[i], i);

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems ensure that
	 * the write to the VMCR in __vgic_v3_activate_traps(), will have
	 * reached the (re)distributors. This ensures the guest will read the
	 * correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 *
	 * However, if the vgic is disabled (ICH_HCR_EL2.EN==0), no GIC is
	 * provisioned at all. In order to prevent illegal accesses to the
	 * system registers from trapping to EL1 (duh), force ICC_SRE_EL1.SRE
	 * to 1 so that the trap bits can take effect. Yes, we *loves* the GIC.
	 */
	if (!(cpu_if->vgic_hcr & ICH_HCR_EL2_En)) {
		write_gicreg(ICC_SRE_EL1_SRE, ICC_SRE_EL1);
		isb();
	} else if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/* Only disable SRE if the host implements the GICv2 interface */
	if (static_branch_unlikely(&vgic_v3_has_v2_compat)) {
		/*
		 * Prevent the guest from touching the ICC_SRE_EL1 system
		 * register. Note that this may not have any effect, as
		 * ICC_SRE_EL2.Enable being RAO/WI is a valid implementation.
		 */
		write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
			     ICC_SRE_EL2);
	}

	/*
	 * If we need to trap system registers, we must write ICH_HCR_EL2
	 * anyway, even if no interrupts are being injected. Note that this
	 * also applies if we don't expect any system register access (no
	 * vgic at all). In any case, no need to provide MI configuration.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre)
		write_gicreg(vgic_ich_hcr_trap_bits() | ICH_HCR_EL2_En, ICH_HCR_EL2);
}

void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;

	/* Only restore SRE if the host implements the GICv2 interface */
	if (static_branch_unlikely(&vgic_v3_has_v2_compat)) {
		val = read_gicreg(ICC_SRE_EL2);
		write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

		if (!cpu_if->vgic_sre) {
			/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
			isb();
			write_gicreg(1, ICC_SRE_EL1);
		}
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm || !cpu_if->vgic_sre)
		write_gicreg(0, ICH_HCR_EL2);
}

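/*
 * Only the AP0Rn/AP1Rn registers that exist for the implemented number of
 * preemption bits are saved/restored: 5 bits imply a single register per
 * group, 6 bits two, and 7 bits four (see vtr_to_nr_pre_bits()).
 */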
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		fallthrough;
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		fallthrough;
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		fallthrough;
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		fallthrough;
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

static void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		fallthrough;
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		fallthrough;
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		fallthrough;
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		fallthrough;
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

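/* Reset all implemented LRs to an empty state. */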
void __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

/*
 * Return the GIC CPU configuration:
 * - [31:0]  ICH_VTR_EL2
 * - [62:32] RES0
 * - [63]    MMIO (GICv2) capable
 */
u64 __vgic_v3_get_gic_config(void)
{
	u64 val, sre;
	unsigned long flags = 0;

	/*
	 * In compat mode, we cannot access ICC_SRE_EL1 at any EL
	 * other than EL1 itself; just return the
	 * ICH_VTR_EL2. ICC_IDR0_EL1 is only implemented on a GICv5
	 * system, so we first check if we have GICv5 support.
	 */
	if (cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
		return read_gicreg(ICH_VTR_EL2);

	sre = read_gicreg(ICC_SRE_EL1);
	/*
	 * To check whether we have a MMIO-based (GICv2 compatible)
	 * CPU interface, we need to disable the system register
	 * view.
	 *
	 * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
	 * that to be able to set ICC_SRE_EL1.SRE to 0, all the
	 * interrupt overrides must be set. You've got to love this.
	 *
	 * As we always run VHE with HCR_xMO set, no extra xMO
	 * manipulation is required in that case.
	 *
	 * To safely disable SRE, we have to prevent any interrupt
	 * from firing (which would be deadly). This only makes sense
	 * on VHE, as interrupts are already masked for nVHE as part
	 * of the exception entry to EL2.
	 */
	if (has_vhe()) {
		flags = local_daif_save();
	} else {
		sysreg_clear_set_hcr(0, HCR_AMO | HCR_FMO | HCR_IMO);
		isb();
	}

	write_gicreg(0, ICC_SRE_EL1);
	isb();

	val = read_gicreg(ICC_SRE_EL1);

	write_gicreg(sre, ICC_SRE_EL1);
	isb();

	if (has_vhe()) {
		local_daif_restore(flags);
	} else {
		sysreg_clear_set_hcr(HCR_AMO | HCR_FMO | HCR_IMO, 0);
		isb();
	}

	val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
	val |= read_gicreg(ICH_VTR_EL2);

	return val;
}

static void __vgic_v3_compat_mode_enable(void)
{
	if (!cpus_have_final_cap(ARM64_HAS_GICV5_CPUIF))
		return;

	sysreg_clear_set_s(SYS_ICH_VCTLR_EL2, 0, ICH_VCTLR_EL2_V3);
	/* Wait for V3 to become enabled */
	isb();
}

static u64 __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

static void __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	__vgic_v3_compat_mode_enable();

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (cpu_if->vgic_sre)
		__vgic_v3_write_vmcr(cpu_if->vgic_vmcr);
	__vgic_v3_restore_aprs(cpu_if);
}

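/*
 * The smallest BPR value the guest can use, derived from the number of
 * implemented preemption bits.
 */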
static int __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

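/*
 * Derive the interrupt group from the trapped encoding: the Group-0
 * registers handled here (IAR0, EOIR0, HPPIR0, BPR0, AP0Rn) are all
 * encoded with CRm == 8, so anything else is a Group-1 access.
 */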
static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

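/*
 * Scan the in-use LRs for the highest priority pending interrupt that
 * belongs to an enabled group, returning its index (or -1, with a
 * spurious INTID in *lr_val, if there is none).
 */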
static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
					 u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_EL2_VENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_EL2_VENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

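/*
 * Look for an LR holding the given INTID in the active state; returns
 * the LR index, or -1 if no such LR exists.
 */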
static int __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu, int intid,
				    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

static int __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static unsigned int __vgic_v3_get_bpr0(u32 vmcr)
{
	return FIELD_GET(ICH_VMCR_EL2_VBPR0, vmcr);
}

static unsigned int __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_EL2_VCBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = FIELD_GET(ICH_VMCR_EL2_VBPR1, vmcr);
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

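/*
 * Clear the most significant active priority bit across the AP0Rn/AP1Rn
 * registers (i.e. perform a priority drop) and return the corresponding
 * priority, rescaled to 8 bits.
 */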
static int __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

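/*
 * Emulate a read of ICC_IAR{0,1}_EL1: pick the highest priority pending
 * LR of the right group, check it against the PMR and the current active
 * priorities, then mark it active and return its INTID (or a spurious
 * INTID if nothing can be acknowledged).
 */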
static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = FIELD_GET(ICH_VMCR_EL2_VPMR, vmcr);
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

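/*
 * Deactivate the interrupt held in an LR, propagating the deactivation
 * to the physical interrupt when the LR has the HW bit set.
 */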
static void __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

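/*
 * Account for a priority drop/deactivation that could not be matched to
 * any LR by incrementing ICH_HCR_EL2.EOIcount.
 */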
static void __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EL2_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static int ___vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EL2_VEOIM_MASK))
		return 1;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return 1;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr != -1) {
		__vgic_v3_clear_active_lr(lr, lr_val);
		return 1;
	}

	return 0;
}

static void __vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	if (!___vgic_v3_write_dir(vcpu, vmcr, rt))
		__vgic_v3_bump_eoicount();
}

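/*
 * Emulate a write to ICC_EOIR{0,1}_EL1: always drop the active priority
 * and, when EOImode==0, also deactivate the matching LR if its group and
 * priority are consistent with the drop.
 */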
static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		/* Do not bump EOIcount for LPIs that aren't in the LRs */
		if (!(vid >= VGIC_MIN_LPI))
			__vgic_v3_bump_eoicount();
		return;
	}

	/* EOImode == 1 and not an LPI, nothing to be done here */
	if ((vmcr & ICH_VMCR_EL2_VEOIM_MASK) && !(vid >= VGIC_MIN_LPI))
		return;

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, FIELD_GET(ICH_VMCR_EL2_VENG0, vmcr));
}

static void __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, FIELD_GET(ICH_VMCR_EL2_VENG1, vmcr));
}

static void __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	FIELD_MODIFY(ICH_VMCR_EL2_VENG0, &vmcr, val & 1);

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	FIELD_MODIFY(ICH_VMCR_EL2_VENG1, &vmcr, val & 1);

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	FIELD_MODIFY(ICH_VMCR_EL2_VBPR0, &vmcr, val);

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (FIELD_GET(ICH_VMCR_EL2_VCBPR, val))
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	FIELD_MODIFY(ICH_VMCR_EL2_VBPR1, &vmcr, val);

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
				 u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
				 u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_read_hppir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, FIELD_GET(ICH_VMCR_EL2_VPMR, vmcr));
}

static void __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	FIELD_MODIFY(ICH_VMCR_EL2_VPMR, &vmcr, val);

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __vgic_v3_read_rpr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();
	vcpu_set_reg(vcpu, rt, val);
}

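/*
 * Synthesise the guest view of ICC_CTLR_EL1 from the fixed fields of
 * ICH_VTR_EL2 (PRIbits, IDbits, A3V) and the EOImode/CBPR bits held in
 * ICH_VMCR_EL2.
 */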
static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK,
			  FIELD_GET(ICH_VMCR_EL2_VEOIM, vmcr));
	/* CBPR */
	val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK,
			  FIELD_GET(ICH_VMCR_EL2_VCBPR, vmcr));

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	FIELD_MODIFY(ICH_VMCR_EL2_VCBPR, &vmcr,
		     FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val));

	FIELD_MODIFY(ICH_VMCR_EL2_VEOIM, &vmcr,
		     FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val));

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

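/*
 * When running a nested guest, decide whether a trapped GICv3 CPU
 * interface access must be forwarded to the L1 hypervisor, based on the
 * trap bits it has set in its ICH_HCR_EL2 and fine-grained trap registers.
 */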
static bool __vgic_v3_check_trap_forwarding(struct kvm_vcpu *vcpu,
					    u32 sysreg, bool is_read)
{
	u64 ich_hcr;

	if (!is_nested_ctxt(vcpu))
		return false;

	ich_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);

	switch (sysreg) {
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read &&
		    (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGRTR_EL2_ICC_IGRPENn_EL1))
			return true;

		if (!is_read &&
		    (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGWTR_EL2_ICC_IGRPENn_EL1))
			return true;

		fallthrough;

	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_BPR0_EL1:
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_IAR0_EL1:
		return ich_hcr & ICH_HCR_EL2_TALL0;

	case SYS_ICC_IGRPEN1_EL1:
		if (is_read &&
		    (__vcpu_sys_reg(vcpu, HFGRTR_EL2) & HFGRTR_EL2_ICC_IGRPENn_EL1))
			return true;

		if (!is_read &&
		    (__vcpu_sys_reg(vcpu, HFGWTR_EL2) & HFGWTR_EL2_ICC_IGRPENn_EL1))
			return true;

		fallthrough;

	case SYS_ICC_AP1Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(3):
	case SYS_ICC_BPR1_EL1:
	case SYS_ICC_EOIR1_EL1:
	case SYS_ICC_HPPIR1_EL1:
	case SYS_ICC_IAR1_EL1:
		return ich_hcr & ICH_HCR_EL2_TALL1;

	case SYS_ICC_DIR_EL1:
		if (ich_hcr & ICH_HCR_EL2_TDIR)
			return true;

		fallthrough;

	case SYS_ICC_RPR_EL1:
	case SYS_ICC_CTLR_EL1:
	case SYS_ICC_PMR_EL1:
		return ich_hcr & ICH_HCR_EL2_TC;

	default:
		return false;
	}
}

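/*
 * Main handler for trapped GICv3 CPU interface accesses: decode the
 * trapped system register, dispatch to the matching emulation helper and
 * skip the instruction. Returns 1 if the access was handled here, 0 if
 * it must be handled by the host.
 */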
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u64 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	if (kern_hyp_va(vcpu->kvm)->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return 0;

	esr = kvm_vcpu_get_esr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	if (__vgic_v3_check_trap_forwarding(vcpu, sysreg, is_read))
		return 0;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		/*
		 * Full exit if required to handle overflow deactivation,
		 * unless we can emulate it in the LRs (likely the majority
		 * of the cases).
		 */
		if (vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr & ICH_HCR_EL2_TDIR) {
			int ret;

			ret = ___vgic_v3_write_dir(vcpu, __vgic_v3_read_vmcr(),
						   kvm_vcpu_sys_get_rt(vcpu));
			if (ret)
				__kvm_skip_instr(vcpu);

			return ret;
		}
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);

	return 1;
}