GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/vgic-sys-reg-v3.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC system registers handling functions for AArch64 mode
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include "vgic/vgic.h"
#include "sys_regs.h"
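
/*
 * These handlers back the KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS device
 * attribute group: each GIC system register gets a get_user/set_user
 * pair that userspace reaches via KVM_GET/SET_DEVICE_ATTR on the vgic
 * device fd. Most of the EL1 state lives in the virtual machine control
 * register, so the accessors share a pattern: fetch a software copy
 * with vgic_get_vmcr(), update one field, write it back with
 * vgic_set_vmcr().
 */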

static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	/*
	 * Disallow restoring VM state if not supported by this
	 * hardware.
	 */
	host_pri_bits = FIELD_GET(ICC_CTLR_EL1_PRI_BITS_MASK, val) + 1;
	if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
		return -EINVAL;

	vgic_v3_cpu->num_pri_bits = host_pri_bits;

	host_id_bits = FIELD_GET(ICC_CTLR_EL1_ID_BITS_MASK, val);
	if (host_id_bits > vgic_v3_cpu->num_id_bits)
		return -EINVAL;

	vgic_v3_cpu->num_id_bits = host_id_bits;

	host_seis = FIELD_GET(ICH_VTR_EL2_SEIS, kvm_vgic_global_state.ich_vtr_el2);
	seis = FIELD_GET(ICC_CTLR_EL1_SEIS_MASK, val);
	if (host_seis != seis)
		return -EINVAL;

	host_a3v = FIELD_GET(ICH_VTR_EL2_A3V, kvm_vgic_global_state.ich_vtr_el2);
	a3v = FIELD_GET(ICC_CTLR_EL1_A3V_MASK, val);
	if (host_a3v != a3v)
		return -EINVAL;

	/*
	 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
	 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
	 */
	vmcr.cbpr = FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val);
	vmcr.eoim = FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *valp)
{
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;
	u64 val;

	vgic_get_vmcr(vcpu, &vmcr);
	val = 0;
	val |= FIELD_PREP(ICC_CTLR_EL1_PRI_BITS_MASK, vgic_v3_cpu->num_pri_bits - 1);
	val |= FIELD_PREP(ICC_CTLR_EL1_ID_BITS_MASK, vgic_v3_cpu->num_id_bits);
	val |= FIELD_PREP(ICC_CTLR_EL1_SEIS_MASK,
			  FIELD_GET(ICH_VTR_EL2_SEIS,
				    kvm_vgic_global_state.ich_vtr_el2));
	val |= FIELD_PREP(ICC_CTLR_EL1_A3V_MASK,
			  FIELD_GET(ICH_VTR_EL2_A3V, kvm_vgic_global_state.ich_vtr_el2));
	/*
	 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
	 * Extract it directly using ICC_CTLR_EL1 reg definitions.
	 */
	val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK, vmcr.cbpr);
	val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK, vmcr.eoim);

	*valp = val;

	return 0;
}

static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.pmr = FIELD_GET(ICC_PMR_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_PMR_EL1_MASK, vmcr.pmr);

	return 0;
}

static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.bpr = FIELD_GET(ICC_BPR0_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_BPR0_EL1_MASK, vmcr.bpr);

	return 0;
}
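
/*
 * When ICC_CTLR_EL1.CBPR is set, ICC_BPR1_EL1 is an alias of
 * ICC_BPR0_EL1 plus one: writes are ignored and reads return
 * BPR0 + 1, saturated at 7. The two accessors below mirror that
 * architectural behaviour.
 */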

static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr) {
		vmcr.abpr = FIELD_GET(ICC_BPR1_EL1_MASK, val);
		vgic_set_vmcr(vcpu, &vmcr);
	}

	return 0;
}

static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr)
		*val = FIELD_PREP(ICC_BPR1_EL1_MASK, vmcr.abpr);
	else
		*val = min((vmcr.bpr + 1), 7U);

	return 0;
}

static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen0 = FIELD_GET(ICC_IGRPEN0_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_IGRPEN0_EL1_MASK, vmcr.grpen0);

	return 0;
}

static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen1 = FIELD_GET(ICC_IGRPEN1_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_IGRPEN1_EL1_MASK, vmcr.grpen1);

	return 0;
}
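
/*
 * The active priority registers live directly in the vgic_v3 cpu
 * interface state rather than in the VMCR. How many AP0Rn/AP1Rn
 * registers actually exist depends on the number of implemented
 * priority bits, so every accessor bounds-checks the register index
 * against vgic_v3_max_apr_idx().
 */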

static void set_apr_reg(struct kvm_vcpu *vcpu, u64 val, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (apr)
		vgicv3->vgic_ap1r[idx] = val;
	else
		vgicv3->vgic_ap0r[idx] = val;
}

static u64 get_apr_reg(struct kvm_vcpu *vcpu, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (apr)
		return vgicv3->vgic_ap1r[idx];
	else
		return vgicv3->vgic_ap0r[idx];
}

static int set_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 0, idx);
	return 0;
}

static int get_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 0, idx);

	return 0;
}

static int set_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 1, idx);
	return 0;
}

static int get_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 1, idx);

	return 0;
}

static int set_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	/* Validate SRE bit */
	if (!(val & ICC_SRE_EL1_SRE))
		return -EINVAL;

	return 0;
}

static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	*val = vgicv3->vgic_sre;

	return 0;
}
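
/*
 * Everything from here down services the EL2 view (ICH_* and
 * ICC_SRE_EL2) used with nested virtualisation. Most of these are
 * plain vcpu sysreg file entries; the read-only ones (ICC_SRE_EL2,
 * ICH_VTR_EL2) instead have fixed values that a userspace write must
 * match exactly for a restore to succeed.
 */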

static int set_gic_ich_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			   u64 val)
{
	__vcpu_assign_sys_reg(vcpu, r->reg, val);
	return 0;
}

static int get_gic_ich_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			   u64 *val)
{
	*val = __vcpu_sys_reg(vcpu, r->reg);
	return 0;
}

static int set_gic_ich_apr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			   u64 val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	return set_gic_ich_reg(vcpu, r, val);
}

static int get_gic_ich_apr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			   u64 *val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	return get_gic_ich_reg(vcpu, r, val);
}

static int set_gic_icc_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			   u64 val)
{
	if (val != KVM_ICC_SRE_EL2)
		return -EINVAL;
	return 0;
}

static int get_gic_icc_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			   u64 *val)
{
	*val = KVM_ICC_SRE_EL2;
	return 0;
}

static int set_gic_ich_vtr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			   u64 val)
{
	if (val != kvm_get_guest_vtr_el2())
		return -EINVAL;
	return 0;
}

static int get_gic_ich_vtr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			   u64 *val)
{
	*val = kvm_get_guest_vtr_el2();
	return 0;
}

static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	return vcpu_has_nv(vcpu) ? 0 : REG_HIDDEN;
}
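
/*
 * __EL2_REG() builds a descriptor wired to the accessors above and
 * hidden unless the vcpu has nested virt. The third argument is the
 * vcpu sysreg file index; EL2_REG_RO() passes 0 because the read-only
 * accessors never dereference r->reg.
 */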

#define __EL2_REG(r, acc, i)			\
	{					\
		SYS_DESC(SYS_ ## r),		\
		.get_user = get_gic_ ## acc,	\
		.set_user = set_gic_ ## acc,	\
		.reg = i,			\
		.visibility = el2_visibility,	\
	}

#define EL2_REG(r, acc) __EL2_REG(r, acc, r)

#define EL2_REG_RO(r, acc) __EL2_REG(r, acc, 0)

static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	{ SYS_DESC(SYS_ICC_PMR_EL1),
	  .set_user = set_gic_pmr, .get_user = get_gic_pmr, },
	{ SYS_DESC(SYS_ICC_BPR0_EL1),
	  .set_user = set_gic_bpr0, .get_user = get_gic_bpr0, },
	{ SYS_DESC(SYS_ICC_AP0R0_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R1_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R2_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R3_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP1R0_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R1_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R2_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R3_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_BPR1_EL1),
	  .set_user = set_gic_bpr1, .get_user = get_gic_bpr1, },
	{ SYS_DESC(SYS_ICC_CTLR_EL1),
	  .set_user = set_gic_ctlr, .get_user = get_gic_ctlr, },
	{ SYS_DESC(SYS_ICC_SRE_EL1),
	  .set_user = set_gic_sre, .get_user = get_gic_sre, },
	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1),
	  .set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1),
	  .set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
	EL2_REG(ICH_AP0R0_EL2, ich_apr),
	EL2_REG(ICH_AP0R1_EL2, ich_apr),
	EL2_REG(ICH_AP0R2_EL2, ich_apr),
	EL2_REG(ICH_AP0R3_EL2, ich_apr),
	EL2_REG(ICH_AP1R0_EL2, ich_apr),
	EL2_REG(ICH_AP1R1_EL2, ich_apr),
	EL2_REG(ICH_AP1R2_EL2, ich_apr),
	EL2_REG(ICH_AP1R3_EL2, ich_apr),
	EL2_REG_RO(ICC_SRE_EL2, icc_sre),
	EL2_REG(ICH_HCR_EL2, ich_reg),
	EL2_REG_RO(ICH_VTR_EL2, ich_vtr),
	EL2_REG(ICH_VMCR_EL2, ich_reg),
	EL2_REG(ICH_LR0_EL2, ich_reg),
	EL2_REG(ICH_LR1_EL2, ich_reg),
	EL2_REG(ICH_LR2_EL2, ich_reg),
	EL2_REG(ICH_LR3_EL2, ich_reg),
	EL2_REG(ICH_LR4_EL2, ich_reg),
	EL2_REG(ICH_LR5_EL2, ich_reg),
	EL2_REG(ICH_LR6_EL2, ich_reg),
	EL2_REG(ICH_LR7_EL2, ich_reg),
	EL2_REG(ICH_LR8_EL2, ich_reg),
	EL2_REG(ICH_LR9_EL2, ich_reg),
	EL2_REG(ICH_LR10_EL2, ich_reg),
	EL2_REG(ICH_LR11_EL2, ich_reg),
	EL2_REG(ICH_LR12_EL2, ich_reg),
	EL2_REG(ICH_LR13_EL2, ich_reg),
	EL2_REG(ICH_LR14_EL2, ich_reg),
	EL2_REG(ICH_LR15_EL2, ich_reg),
};

const struct sys_reg_desc *vgic_v3_get_sysreg_table(unsigned int *sz)
{
	*sz = ARRAY_SIZE(gic_v3_icc_reg_descs);
	return gic_v3_icc_reg_descs;
}
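
/*
 * A CPU sysreg device attribute encodes the Op0/Op1/CRn/CRm/Op2 fields
 * of the system register instruction; attr_to_id() repacks them into
 * the KVM_REG_ARM64 sysreg id space so the table above can be searched
 * with the generic get_reg_by_id()/kvm_sys_reg_*_user() helpers.
 */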

static u64 attr_to_id(u64 attr)
{
	return ARM64_SYS_REG(FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP1_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRN_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRM_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP2_MASK, attr));
}

int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	const struct sys_reg_desc *r;

	r = get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs,
			  ARRAY_SIZE(gic_v3_icc_reg_descs));

	if (r && !sysreg_hidden(vcpu, r))
		return 0;

	return -ENXIO;
}

int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
				struct kvm_device_attr *attr,
				bool is_write)
{
	struct kvm_one_reg reg = {
		.id = attr_to_id(attr->attr),
		.addr = attr->addr,
	};

	if (is_write)
		return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
					    ARRAY_SIZE(gic_v3_icc_reg_descs));
	else
		return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
					    ARRAY_SIZE(gic_v3_icc_reg_descs));
}
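
/*
 * Userspace usage sketch (hypothetical, not part of this file): a VMM
 * reads one of these registers with KVM_GET_DEVICE_ATTR on the vgic
 * device fd, packing the target vcpu's MPIDR affinity and the sysreg
 * instruction encoding into the attr field as described in
 * Documentation/virt/kvm/devices/arm-vgic-v3.rst. vgic_fd, mpidr and
 * encoding are assumed to have been obtained elsewhere:
 *
 *	__u64 val;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
 *		.attr  = (mpidr << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) |
 *			 (encoding & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK),
 *		.addr  = (__u64)&val,
 *	};
 *
 *	if (ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr))
 *		err(1, "KVM_GET_DEVICE_ATTR");
 */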