GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/sys_regs.c
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright (C) 2012,2013 - ARM Ltd
4
* Author: Marc Zyngier <[email protected]>
5
*
6
* Derived from arch/arm/kvm/coproc.c:
7
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
8
* Authors: Rusty Russell <[email protected]>
9
* Christoffer Dall <[email protected]>
10
*/
11
12
#include <linux/bitfield.h>
13
#include <linux/bsearch.h>
14
#include <linux/cacheinfo.h>
15
#include <linux/debugfs.h>
16
#include <linux/kvm_host.h>
17
#include <linux/mm.h>
18
#include <linux/printk.h>
19
#include <linux/uaccess.h>
20
#include <linux/irqchip/arm-gic-v3.h>
21
22
#include <asm/arm_pmuv3.h>
23
#include <asm/cacheflush.h>
24
#include <asm/cputype.h>
25
#include <asm/debug-monitors.h>
26
#include <asm/esr.h>
27
#include <asm/kvm_arm.h>
28
#include <asm/kvm_emulate.h>
29
#include <asm/kvm_hyp.h>
30
#include <asm/kvm_mmu.h>
31
#include <asm/kvm_nested.h>
32
#include <asm/perf_event.h>
33
#include <asm/sysreg.h>
34
35
#include <trace/events/kvm.h>
36
37
#include "sys_regs.h"
38
#include "vgic/vgic.h"
39
40
#include "trace.h"
41
42
/*
43
* For AArch32, we only take care of what is being trapped. Anything
44
* that has to do with init and userspace access has to go via the
45
* 64bit interface.
46
*/
47
48
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
49
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
50
u64 val);
51
52
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
53
const struct sys_reg_desc *r)
54
{
55
kvm_inject_undefined(vcpu);
56
return false;
57
}
58
59
static bool bad_trap(struct kvm_vcpu *vcpu,
60
struct sys_reg_params *params,
61
const struct sys_reg_desc *r,
62
const char *msg)
63
{
64
WARN_ONCE(1, "Unexpected %s\n", msg);
65
print_sys_reg_instr(params);
66
return undef_access(vcpu, params, r);
67
}
68
69
static bool read_from_write_only(struct kvm_vcpu *vcpu,
70
struct sys_reg_params *params,
71
const struct sys_reg_desc *r)
72
{
73
return bad_trap(vcpu, params, r,
74
"sys_reg read to write-only register");
75
}
76
77
static bool write_to_read_only(struct kvm_vcpu *vcpu,
78
struct sys_reg_params *params,
79
const struct sys_reg_desc *r)
80
{
81
return bad_trap(vcpu, params, r,
82
"sys_reg write to read-only register");
83
}
84
85
enum sr_loc_attr {
86
SR_LOC_MEMORY = 0, /* Register definitely in memory */
87
SR_LOC_LOADED = BIT(0), /* Register on CPU, unless it cannot */
88
SR_LOC_MAPPED = BIT(1), /* Register in a different CPU register */
89
SR_LOC_XLATED = BIT(2), /* Register translated to fit another reg */
90
SR_LOC_SPECIAL = BIT(3), /* Demanding register, implies loaded */
91
};
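/*
 * Illustrative note (derived from locate_register() below): with the vcpu's
 * sysregs loaded on the CPU and the vcpu running in its (virtual) EL2
 * context, SCTLR_EL2 lives in the CPU's SCTLR_EL1, i.e.
 * SR_LOC_LOADED | SR_LOC_MAPPED, and is additionally SR_LOC_XLATED when the
 * guest's HCR_EL2.E2H is clear.
 */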
92
93
struct sr_loc {
94
enum sr_loc_attr loc;
95
enum vcpu_sysreg map_reg;
96
u64 (*xlate)(u64);
97
};
98
99
static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
100
enum vcpu_sysreg reg)
101
{
102
switch (reg) {
103
case SCTLR_EL1:
104
case CPACR_EL1:
105
case TTBR0_EL1:
106
case TTBR1_EL1:
107
case TCR_EL1:
108
case TCR2_EL1:
109
case PIR_EL1:
110
case PIRE0_EL1:
111
case POR_EL1:
112
case ESR_EL1:
113
case AFSR0_EL1:
114
case AFSR1_EL1:
115
case FAR_EL1:
116
case MAIR_EL1:
117
case VBAR_EL1:
118
case CONTEXTIDR_EL1:
119
case AMAIR_EL1:
120
case CNTKCTL_EL1:
121
case ELR_EL1:
122
case SPSR_EL1:
123
case ZCR_EL1:
124
case SCTLR2_EL1:
125
/*
126
* EL1 registers which have an ELx2 mapping are loaded if
127
* we're not in hypervisor context.
128
*/
129
return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
130
131
case TPIDR_EL0:
132
case TPIDRRO_EL0:
133
case TPIDR_EL1:
134
case PAR_EL1:
135
case DACR32_EL2:
136
case IFSR32_EL2:
137
case DBGVCR32_EL2:
138
/* These registers are always loaded, no matter what */
139
return SR_LOC_LOADED;
140
141
default:
142
/* Non-mapped EL2 registers are by definition in memory. */
143
return SR_LOC_MEMORY;
144
}
145
}
146
147
static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
148
enum vcpu_sysreg reg,
149
enum vcpu_sysreg map_reg,
150
u64 (*xlate)(u64),
151
struct sr_loc *loc)
152
{
153
if (!is_hyp_ctxt(vcpu)) {
154
loc->loc = SR_LOC_MEMORY;
155
return;
156
}
157
158
loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
159
loc->map_reg = map_reg;
160
161
WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
162
163
if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
164
loc->loc |= SR_LOC_XLATED;
165
loc->xlate = xlate;
166
}
167
}
168
169
#define MAPPED_EL2_SYSREG(r, m, t) \
170
case r: { \
171
locate_mapped_el2_register(vcpu, r, m, t, loc); \
172
break; \
173
}
174
175
static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
176
struct sr_loc *loc)
177
{
178
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
179
loc->loc = SR_LOC_MEMORY;
180
return;
181
}
182
183
switch (reg) {
184
MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
185
translate_sctlr_el2_to_sctlr_el1 );
186
MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
187
translate_cptr_el2_to_cpacr_el1 );
188
MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1,
189
translate_ttbr0_el2_to_ttbr0_el1 );
190
MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL );
191
MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1,
192
translate_tcr_el2_to_tcr_el1 );
193
MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1, NULL );
194
MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1, NULL );
195
MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1, NULL );
196
MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL );
197
MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL );
198
MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL );
199
MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1, NULL );
200
MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1, NULL );
201
MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1, NULL );
202
MAPPED_EL2_SYSREG(POR_EL2, POR_EL1, NULL );
203
MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
204
MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
205
MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
206
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
207
MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
208
case CNTHCTL_EL2:
209
/* CNTHCTL_EL2 is super special, until we support NV2.1 */
210
loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
211
SR_LOC_SPECIAL : SR_LOC_MEMORY);
212
break;
213
default:
214
loc->loc = locate_direct_register(vcpu, reg);
215
}
216
}
217
218
static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
219
{
220
u64 val = 0x8badf00d8badf00d;
221
222
switch (reg) {
223
case SCTLR_EL1: val = read_sysreg_s(SYS_SCTLR_EL12); break;
224
case CPACR_EL1: val = read_sysreg_s(SYS_CPACR_EL12); break;
225
case TTBR0_EL1: val = read_sysreg_s(SYS_TTBR0_EL12); break;
226
case TTBR1_EL1: val = read_sysreg_s(SYS_TTBR1_EL12); break;
227
case TCR_EL1: val = read_sysreg_s(SYS_TCR_EL12); break;
228
case TCR2_EL1: val = read_sysreg_s(SYS_TCR2_EL12); break;
229
case PIR_EL1: val = read_sysreg_s(SYS_PIR_EL12); break;
230
case PIRE0_EL1: val = read_sysreg_s(SYS_PIRE0_EL12); break;
231
case POR_EL1: val = read_sysreg_s(SYS_POR_EL12); break;
232
case ESR_EL1: val = read_sysreg_s(SYS_ESR_EL12); break;
233
case AFSR0_EL1: val = read_sysreg_s(SYS_AFSR0_EL12); break;
234
case AFSR1_EL1: val = read_sysreg_s(SYS_AFSR1_EL12); break;
235
case FAR_EL1: val = read_sysreg_s(SYS_FAR_EL12); break;
236
case MAIR_EL1: val = read_sysreg_s(SYS_MAIR_EL12); break;
237
case VBAR_EL1: val = read_sysreg_s(SYS_VBAR_EL12); break;
238
case CONTEXTIDR_EL1: val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
239
case AMAIR_EL1: val = read_sysreg_s(SYS_AMAIR_EL12); break;
240
case CNTKCTL_EL1: val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
241
case ELR_EL1: val = read_sysreg_s(SYS_ELR_EL12); break;
242
case SPSR_EL1: val = read_sysreg_s(SYS_SPSR_EL12); break;
243
case ZCR_EL1: val = read_sysreg_s(SYS_ZCR_EL12); break;
244
case SCTLR2_EL1: val = read_sysreg_s(SYS_SCTLR2_EL12); break;
245
case TPIDR_EL0: val = read_sysreg_s(SYS_TPIDR_EL0); break;
246
case TPIDRRO_EL0: val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
247
case TPIDR_EL1: val = read_sysreg_s(SYS_TPIDR_EL1); break;
248
case PAR_EL1: val = read_sysreg_par(); break;
249
case DACR32_EL2: val = read_sysreg_s(SYS_DACR32_EL2); break;
250
case IFSR32_EL2: val = read_sysreg_s(SYS_IFSR32_EL2); break;
251
case DBGVCR32_EL2: val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
252
default: WARN_ON_ONCE(1);
253
}
254
255
return val;
256
}
257
258
static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
259
{
260
switch (reg) {
261
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
262
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
263
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
264
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
265
case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
266
case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
267
case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
268
case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
269
case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
270
case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
271
case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
272
case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
273
case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
274
case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
275
case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
276
case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
277
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
278
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
279
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
280
case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
281
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
282
case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
283
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
284
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
285
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
286
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
287
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
288
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
289
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
290
default: WARN_ON_ONCE(1);
291
}
292
}
293
294
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
295
{
296
struct sr_loc loc = {};
297
298
locate_register(vcpu, reg, &loc);
299
300
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
301
302
if (loc.loc & SR_LOC_SPECIAL) {
303
u64 val;
304
305
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
306
307
/*
308
* CNTHCTL_EL2 requires some special treatment to account
309
* for the bits that can be set via CNTKCTL_EL1 when E2H==1.
310
*/
311
switch (reg) {
312
case CNTHCTL_EL2:
313
val = read_sysreg_el1(SYS_CNTKCTL);
314
val &= CNTKCTL_VALID_BITS;
315
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
316
return val;
317
default:
318
WARN_ON_ONCE(1);
319
}
320
}
321
322
if (loc.loc & SR_LOC_LOADED) {
323
enum vcpu_sysreg map_reg = reg;
324
325
if (loc.loc & SR_LOC_MAPPED)
326
map_reg = loc.map_reg;
327
328
if (!(loc.loc & SR_LOC_XLATED)) {
329
u64 val = read_sr_from_cpu(map_reg);
330
331
if (reg >= __SANITISED_REG_START__)
332
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
333
334
return val;
335
}
336
}
337
338
return __vcpu_sys_reg(vcpu, reg);
339
}
340
341
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
342
{
343
struct sr_loc loc = {};
344
345
locate_register(vcpu, reg, &loc);
346
347
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
348
349
if (loc.loc & SR_LOC_SPECIAL) {
350
351
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
352
353
switch (reg) {
354
case CNTHCTL_EL2:
355
/*
356
* If E2H=1, some of the bits are backed by
357
* CNTKCTL_EL1, while the rest is kept in memory.
358
* Yes, this is fun stuff.
359
*/
360
write_sysreg_el1(val, SYS_CNTKCTL);
361
break;
362
default:
363
WARN_ON_ONCE(1);
364
}
365
}
366
367
if (loc.loc & SR_LOC_LOADED) {
368
enum vcpu_sysreg map_reg = reg;
369
u64 xlated_val;
370
371
if (reg >= __SANITISED_REG_START__)
372
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
373
374
if (loc.loc & SR_LOC_MAPPED)
375
map_reg = loc.map_reg;
376
377
if (loc.loc & SR_LOC_XLATED)
378
xlated_val = loc.xlate(val);
379
else
380
xlated_val = val;
381
382
write_sr_to_cpu(map_reg, xlated_val);
383
384
/*
385
* Fall through to write the backing store anyway, which
386
* allows translated registers to be directly read without a
387
* reverse translation.
388
*/
389
}
390
391
__vcpu_assign_sys_reg(vcpu, reg, val);
392
}
393
394
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
395
#define CSSELR_MAX 14
396
397
/*
398
* Returns the minimum line size for the selected cache, expressed as
399
* Log2(bytes).
400
*/
401
static u8 get_min_cache_line_size(bool icache)
402
{
403
u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
404
u8 field;
405
406
if (icache)
407
field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
408
else
409
field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);
410
411
/*
412
* Cache line size is represented as Log2(words) in CTR_EL0.
413
* Log2(bytes) can be derived with the following:
414
*
415
* Log2(words) + 2 = Log2(bytes / 4) + 2
416
* = Log2(bytes) - 2 + 2
417
* = Log2(bytes)
418
*/
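/* e.g. DminLine == 4 means 2^4 words (64 bytes), so this returns 4 + 2 = 6 */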
419
return field + 2;
420
}
421
422
/* Which cache CCSIDR represents depends on CSSELR value. */
423
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
424
{
425
u8 line_size;
426
427
if (vcpu->arch.ccsidr)
428
return vcpu->arch.ccsidr[csselr];
429
430
line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);
431
432
/*
433
* Fabricate a CCSIDR value as the overriding value does not exist.
434
* The real CCSIDR value will not be used as it can vary by the
435
* physical CPU which the vcpu currently resides in.
436
*
437
* The line size is determined with get_min_cache_line_size(), which
438
* should be valid for all CPUs even if they have different cache
439
* configuration.
440
*
441
* The associativity bits are cleared, meaning the geometry of all data
442
* and unified caches (which are guaranteed to be PIPT and thus
443
* non-aliasing) are 1 set and 1 way.
444
* Guests should not be doing cache operations by set/way at all, and
445
* for this reason, we trap them and attempt to infer the intent, so
446
* that we can flush the entire guest's address space at the appropriate
447
* time. The exposed geometry minimizes the number of traps.
448
* [If guests should attempt to infer aliasing properties from the
449
* geometry (which is not permitted by the architecture), they would
450
* only do so for virtually indexed caches.]
451
*
452
* We don't check if the cache level exists as it is allowed to return
453
* an UNKNOWN value if not.
454
*/
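/*
 * Example: a 64-byte minimum line size gives line_size == 6, encoded as
 * CCSIDR_EL1.LineSize = 6 - 4 = 2; Associativity and NumSets are left as 0,
 * i.e. a 1-set, 1-way cache.
 */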
455
return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
456
}
457
458
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
459
{
460
u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
461
u32 *ccsidr = vcpu->arch.ccsidr;
462
u32 i;
463
464
if ((val & CCSIDR_EL1_RES0) ||
465
line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
466
return -EINVAL;
467
468
if (!ccsidr) {
469
if (val == get_ccsidr(vcpu, csselr))
470
return 0;
471
472
ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
473
if (!ccsidr)
474
return -ENOMEM;
475
476
for (i = 0; i < CSSELR_MAX; i++)
477
ccsidr[i] = get_ccsidr(vcpu, i);
478
479
vcpu->arch.ccsidr = ccsidr;
480
}
481
482
ccsidr[csselr] = val;
483
484
return 0;
485
}
486
487
static bool access_rw(struct kvm_vcpu *vcpu,
488
struct sys_reg_params *p,
489
const struct sys_reg_desc *r)
490
{
491
if (p->is_write)
492
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
493
else
494
p->regval = vcpu_read_sys_reg(vcpu, r->reg);
495
496
return true;
497
}
498
499
/*
500
* See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
501
*/
502
static bool access_dcsw(struct kvm_vcpu *vcpu,
503
struct sys_reg_params *p,
504
const struct sys_reg_desc *r)
505
{
506
if (!p->is_write)
507
return read_from_write_only(vcpu, p, r);
508
509
/*
510
* Only track S/W ops if we don't have FWB. It still indicates
511
* that the guest is a bit broken (S/W operations should only
512
* be done by firmware, knowing that there is only a single
513
* CPU left in the system, and certainly not from non-secure
514
* software).
515
*/
516
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
517
kvm_set_way_flush(vcpu);
518
519
return true;
520
}
521
522
static bool access_dcgsw(struct kvm_vcpu *vcpu,
523
struct sys_reg_params *p,
524
const struct sys_reg_desc *r)
525
{
526
if (!kvm_has_mte(vcpu->kvm))
527
return undef_access(vcpu, p, r);
528
529
/* Treat MTE S/W ops as we treat the classic ones: with contempt */
530
return access_dcsw(vcpu, p, r);
531
}
532
533
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
534
{
535
switch (r->aarch32_map) {
536
case AA32_LO:
537
*mask = GENMASK_ULL(31, 0);
538
*shift = 0;
539
break;
540
case AA32_HI:
541
*mask = GENMASK_ULL(63, 32);
542
*shift = 32;
543
break;
544
default:
545
*mask = GENMASK_ULL(63, 0);
546
*shift = 0;
547
break;
548
}
549
}
550
551
/*
552
* Generic accessor for VM registers. Only called as long as HCR_TVM
553
* is set. If the guest enables the MMU, we stop trapping the VM
554
* sys_regs and leave it in complete control of the caches.
555
*/
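/*
 * For an AArch32 alias (AA32_LO/AA32_HI) only the matching 32-bit half of
 * the 64-bit shadow register is updated: e.g. an AA32_HI write merges
 * p->regval into bits [63:32] and leaves bits [31:0] untouched.
 */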
556
static bool access_vm_reg(struct kvm_vcpu *vcpu,
557
struct sys_reg_params *p,
558
const struct sys_reg_desc *r)
559
{
560
bool was_enabled = vcpu_has_cache_enabled(vcpu);
561
u64 val, mask, shift;
562
563
BUG_ON(!p->is_write);
564
565
get_access_mask(r, &mask, &shift);
566
567
if (~mask) {
568
val = vcpu_read_sys_reg(vcpu, r->reg);
569
val &= ~mask;
570
} else {
571
val = 0;
572
}
573
574
val |= (p->regval & (mask >> shift)) << shift;
575
vcpu_write_sys_reg(vcpu, val, r->reg);
576
577
kvm_toggle_cache(vcpu, was_enabled);
578
return true;
579
}
580
581
static bool access_actlr(struct kvm_vcpu *vcpu,
582
struct sys_reg_params *p,
583
const struct sys_reg_desc *r)
584
{
585
u64 mask, shift;
586
587
if (p->is_write)
588
return ignore_write(vcpu, p);
589
590
get_access_mask(r, &mask, &shift);
591
p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
592
593
return true;
594
}
595
596
/*
597
* Trap handler for the GICv3 SGI generation system register.
598
* Forward the request to the VGIC emulation.
599
* The cp15_64 code makes sure this automatically works
600
* for both AArch64 and AArch32 accesses.
601
*/
602
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
603
struct sys_reg_params *p,
604
const struct sys_reg_desc *r)
605
{
606
bool g1;
607
608
if (!kvm_has_gicv3(vcpu->kvm))
609
return undef_access(vcpu, p, r);
610
611
if (!p->is_write)
612
return read_from_write_only(vcpu, p, r);
613
614
/*
615
* In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
616
* Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
617
* depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
618
* equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
619
* group.
620
*/
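/* e.g. an AArch64 write to ICC_ASGI1R_EL1 (Op2 == 6) is dispatched as a Group0 SGI */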
621
if (p->Op0 == 0) { /* AArch32 */
622
switch (p->Op1) {
623
default: /* Keep GCC quiet */
624
case 0: /* ICC_SGI1R */
625
g1 = true;
626
break;
627
case 1: /* ICC_ASGI1R */
628
case 2: /* ICC_SGI0R */
629
g1 = false;
630
break;
631
}
632
} else { /* AArch64 */
633
switch (p->Op2) {
634
default: /* Keep GCC quiet */
635
case 5: /* ICC_SGI1R_EL1 */
636
g1 = true;
637
break;
638
case 6: /* ICC_ASGI1R_EL1 */
639
case 7: /* ICC_SGI0R_EL1 */
640
g1 = false;
641
break;
642
}
643
}
644
645
vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
646
647
return true;
648
}
649
650
static bool access_gic_sre(struct kvm_vcpu *vcpu,
651
struct sys_reg_params *p,
652
const struct sys_reg_desc *r)
653
{
654
if (!kvm_has_gicv3(vcpu->kvm))
655
return undef_access(vcpu, p, r);
656
657
if (p->is_write)
658
return ignore_write(vcpu, p);
659
660
if (p->Op1 == 4) { /* ICC_SRE_EL2 */
661
p->regval = KVM_ICC_SRE_EL2;
662
} else { /* ICC_SRE_EL1 */
663
p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
664
}
665
666
return true;
667
}
668
669
static bool access_gic_dir(struct kvm_vcpu *vcpu,
670
struct sys_reg_params *p,
671
const struct sys_reg_desc *r)
672
{
673
if (!kvm_has_gicv3(vcpu->kvm))
674
return undef_access(vcpu, p, r);
675
676
if (!p->is_write)
677
return undef_access(vcpu, p, r);
678
679
vgic_v3_deactivate(vcpu, p->regval);
680
681
return true;
682
}
683
684
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
685
struct sys_reg_params *p,
686
const struct sys_reg_desc *r)
687
{
688
if (p->is_write)
689
return ignore_write(vcpu, p);
690
else
691
return read_zero(vcpu, p);
692
}
693
694
/*
695
* ARMv8.1 mandates at least a trivial LORegion implementation, where all the
696
* RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
697
* system, these registers should UNDEF. LORID_EL1 being a RO register, we
698
* treat it separately.
699
*/
700
static bool trap_loregion(struct kvm_vcpu *vcpu,
701
struct sys_reg_params *p,
702
const struct sys_reg_desc *r)
703
{
704
u32 sr = reg_to_encoding(r);
705
706
if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
707
return undef_access(vcpu, p, r);
708
709
if (p->is_write && sr == SYS_LORID_EL1)
710
return write_to_read_only(vcpu, p, r);
711
712
return trap_raz_wi(vcpu, p, r);
713
}
714
715
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
716
struct sys_reg_params *p,
717
const struct sys_reg_desc *r)
718
{
719
if (!p->is_write)
720
return read_from_write_only(vcpu, p, r);
721
722
kvm_debug_handle_oslar(vcpu, p->regval);
723
return true;
724
}
725
726
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
727
struct sys_reg_params *p,
728
const struct sys_reg_desc *r)
729
{
730
if (p->is_write)
731
return write_to_read_only(vcpu, p, r);
732
733
p->regval = __vcpu_sys_reg(vcpu, r->reg);
734
return true;
735
}
736
737
static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
738
u64 val)
739
{
740
/*
741
* The only modifiable bit is the OSLK bit. Refuse the write if
742
* userspace attempts to change any other bit in the register.
743
*/
744
if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
745
return -EINVAL;
746
747
__vcpu_assign_sys_reg(vcpu, rd->reg, val);
748
return 0;
749
}
750
751
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
752
struct sys_reg_params *p,
753
const struct sys_reg_desc *r)
754
{
755
if (p->is_write) {
756
return ignore_write(vcpu, p);
757
} else {
758
p->regval = read_sysreg(dbgauthstatus_el1);
759
return true;
760
}
761
}
762
763
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
764
struct sys_reg_params *p,
765
const struct sys_reg_desc *r)
766
{
767
access_rw(vcpu, p, r);
768
769
kvm_debug_set_guest_ownership(vcpu);
770
return true;
771
}
772
773
/*
774
* reg_to_dbg/dbg_to_reg
775
*
776
* A 32 bit write to a debug register leaves the top bits alone
777
* A 32 bit read from a debug register only returns the bottom bits
778
*/
779
static void reg_to_dbg(struct kvm_vcpu *vcpu,
780
struct sys_reg_params *p,
781
const struct sys_reg_desc *rd,
782
u64 *dbg_reg)
783
{
784
u64 mask, shift, val;
785
786
get_access_mask(rd, &mask, &shift);
787
788
val = *dbg_reg;
789
val &= ~mask;
790
val |= (p->regval & (mask >> shift)) << shift;
791
*dbg_reg = val;
792
}
793
794
static void dbg_to_reg(struct kvm_vcpu *vcpu,
795
struct sys_reg_params *p,
796
const struct sys_reg_desc *rd,
797
u64 *dbg_reg)
798
{
799
u64 mask, shift;
800
801
get_access_mask(rd, &mask, &shift);
802
p->regval = (*dbg_reg & mask) >> shift;
803
}
804
805
static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
806
{
807
struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;
808
809
switch (rd->Op2) {
810
case 0b100:
811
return &dbg->dbg_bvr[rd->CRm];
812
case 0b101:
813
return &dbg->dbg_bcr[rd->CRm];
814
case 0b110:
815
return &dbg->dbg_wvr[rd->CRm];
816
case 0b111:
817
return &dbg->dbg_wcr[rd->CRm];
818
default:
819
KVM_BUG_ON(1, vcpu->kvm);
820
return NULL;
821
}
822
}
823
824
static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
825
const struct sys_reg_desc *rd)
826
{
827
u64 *reg = demux_wb_reg(vcpu, rd);
828
829
if (!reg)
830
return false;
831
832
if (p->is_write)
833
reg_to_dbg(vcpu, p, rd, reg);
834
else
835
dbg_to_reg(vcpu, p, rd, reg);
836
837
kvm_debug_set_guest_ownership(vcpu);
838
return true;
839
}
840
841
static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
842
u64 val)
843
{
844
u64 *reg = demux_wb_reg(vcpu, rd);
845
846
if (!reg)
847
return -EINVAL;
848
849
*reg = val;
850
return 0;
851
}
852
853
static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
854
u64 *val)
855
{
856
u64 *reg = demux_wb_reg(vcpu, rd);
857
858
if (!reg)
859
return -EINVAL;
860
861
*val = *reg;
862
return 0;
863
}
864
865
static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
866
{
867
u64 *reg = demux_wb_reg(vcpu, rd);
868
869
/*
870
* Bail early if we couldn't find storage for the register, the
871
* KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
872
* being run.
873
*/
874
if (!reg)
875
return 0;
876
877
*reg = rd->val;
878
return rd->val;
879
}
880
881
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
882
{
883
u64 amair = read_sysreg(amair_el1);
884
vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
885
return amair;
886
}
887
888
static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
889
{
890
u64 actlr = read_sysreg(actlr_el1);
891
vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
892
return actlr;
893
}
894
895
static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
896
{
897
u64 mpidr;
898
899
/*
900
* Map the vcpu_id into the first three affinity level fields of
901
* the MPIDR. We limit the number of VCPUs in level 0 due to a
902
* limitation to 16 CPUs in that level in the ICC_SGIxR registers
903
* of the GICv3 to be able to address each CPU directly when
904
* sending IPIs.
905
*/
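/* e.g. vcpu_id 21 becomes Aff0 = 5, Aff1 = 1, Aff2 = 0, with bit 31 (RES1) set */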
906
mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
907
mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
908
mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
909
mpidr |= (1ULL << 31);
910
vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);
911
912
return mpidr;
913
}
914
915
static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu,
916
const struct sys_reg_desc *r)
917
{
918
return REG_HIDDEN;
919
}
920
921
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
922
const struct sys_reg_desc *r)
923
{
924
if (kvm_vcpu_has_pmu(vcpu))
925
return 0;
926
927
return REG_HIDDEN;
928
}
929
930
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
931
{
932
u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
933
u8 n = vcpu->kvm->arch.nr_pmu_counters;
934
935
if (n)
936
mask |= GENMASK(n - 1, 0);
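/* e.g. n == 6 selects the cycle counter bit plus event counter bits [5:0] */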
937
938
reset_unknown(vcpu, r);
939
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
940
941
return __vcpu_sys_reg(vcpu, r->reg);
942
}
943
944
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
945
{
946
reset_unknown(vcpu, r);
947
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
948
949
return __vcpu_sys_reg(vcpu, r->reg);
950
}
951
952
static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
953
{
954
/* This thing will UNDEF, who cares about the reset value? */
955
if (!kvm_vcpu_has_pmu(vcpu))
956
return 0;
957
958
reset_unknown(vcpu, r);
959
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
960
961
return __vcpu_sys_reg(vcpu, r->reg);
962
}
963
964
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
965
{
966
reset_unknown(vcpu, r);
967
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
968
969
return __vcpu_sys_reg(vcpu, r->reg);
970
}
971
972
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
973
{
974
u64 pmcr = 0;
975
976
if (!kvm_supports_32bit_el0())
977
pmcr |= ARMV8_PMU_PMCR_LC;
978
979
/*
980
* The value of PMCR.N field is included when the
981
* vCPU register is read via kvm_vcpu_read_pmcr().
982
*/
983
__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
984
985
return __vcpu_sys_reg(vcpu, r->reg);
986
}
987
988
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
989
{
990
u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
991
bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
992
993
if (!enabled)
994
kvm_inject_undefined(vcpu);
995
996
return !enabled;
997
}
998
999
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
1000
{
1001
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
1002
}
1003
1004
static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
1005
{
1006
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
1007
}
1008
1009
static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
1010
{
1011
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
1012
}
1013
1014
static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
1015
{
1016
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
1017
}
1018
1019
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1020
const struct sys_reg_desc *r)
1021
{
1022
u64 val;
1023
1024
if (pmu_access_el0_disabled(vcpu))
1025
return false;
1026
1027
if (p->is_write) {
1028
/*
1029
* Only update writeable bits of PMCR (continuing into
1030
* kvm_pmu_handle_pmcr() as well)
1031
*/
1032
val = kvm_vcpu_read_pmcr(vcpu);
1033
val &= ~ARMV8_PMU_PMCR_MASK;
1034
val |= p->regval & ARMV8_PMU_PMCR_MASK;
1035
if (!kvm_supports_32bit_el0())
1036
val |= ARMV8_PMU_PMCR_LC;
1037
kvm_pmu_handle_pmcr(vcpu, val);
1038
} else {
1039
/* PMCR.P & PMCR.C are RAZ */
1040
val = kvm_vcpu_read_pmcr(vcpu)
1041
& ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
1042
p->regval = val;
1043
}
1044
1045
return true;
1046
}
1047
1048
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1049
const struct sys_reg_desc *r)
1050
{
1051
if (pmu_access_event_counter_el0_disabled(vcpu))
1052
return false;
1053
1054
if (p->is_write)
1055
__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
1056
else
1057
/* return PMSELR.SEL field */
1058
p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
1059
& PMSELR_EL0_SEL_MASK;
1060
1061
return true;
1062
}
1063
1064
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1065
const struct sys_reg_desc *r)
1066
{
1067
u64 pmceid, mask, shift;
1068
1069
BUG_ON(p->is_write);
1070
1071
if (pmu_access_el0_disabled(vcpu))
1072
return false;
1073
1074
get_access_mask(r, &mask, &shift);
1075
1076
pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
1077
pmceid &= mask;
1078
pmceid >>= shift;
1079
1080
p->regval = pmceid;
1081
1082
return true;
1083
}
1084
1085
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
1086
{
1087
u64 pmcr, val;
1088
1089
pmcr = kvm_vcpu_read_pmcr(vcpu);
1090
val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
1091
if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
1092
kvm_inject_undefined(vcpu);
1093
return false;
1094
}
1095
1096
return true;
1097
}
1098
1099
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1100
u64 *val)
1101
{
1102
u64 idx;
1103
1104
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1105
/* PMCCNTR_EL0 */
1106
idx = ARMV8_PMU_CYCLE_IDX;
1107
else
1108
/* PMEVCNTRn_EL0 */
1109
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
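/* e.g. PMEVCNTR10_EL0 has CRm = 0b1001, Op2 = 0b010, giving idx = (1 << 3) | 2 = 10 */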
1110
1111
*val = kvm_pmu_get_counter_value(vcpu, idx);
1112
return 0;
1113
}
1114
1115
static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1116
u64 val)
1117
{
1118
u64 idx;
1119
1120
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1121
/* PMCCNTR_EL0 */
1122
idx = ARMV8_PMU_CYCLE_IDX;
1123
else
1124
/* PMEVCNTRn_EL0 */
1125
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1126
1127
kvm_pmu_set_counter_value_user(vcpu, idx, val);
1128
return 0;
1129
}
1130
1131
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
1132
struct sys_reg_params *p,
1133
const struct sys_reg_desc *r)
1134
{
1135
u64 idx = ~0UL;
1136
1137
if (r->CRn == 9 && r->CRm == 13) {
1138
if (r->Op2 == 2) {
1139
/* PMXEVCNTR_EL0 */
1140
if (pmu_access_event_counter_el0_disabled(vcpu))
1141
return false;
1142
1143
idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
1144
__vcpu_sys_reg(vcpu, PMSELR_EL0));
1145
} else if (r->Op2 == 0) {
1146
/* PMCCNTR_EL0 */
1147
if (pmu_access_cycle_counter_el0_disabled(vcpu))
1148
return false;
1149
1150
idx = ARMV8_PMU_CYCLE_IDX;
1151
}
1152
} else if (r->CRn == 0 && r->CRm == 9) {
1153
/* PMCCNTR */
1154
if (pmu_access_event_counter_el0_disabled(vcpu))
1155
return false;
1156
1157
idx = ARMV8_PMU_CYCLE_IDX;
1158
} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
1159
/* PMEVCNTRn_EL0 */
1160
if (pmu_access_event_counter_el0_disabled(vcpu))
1161
return false;
1162
1163
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1164
}
1165
1166
/* Catch any decoding mistake */
1167
WARN_ON(idx == ~0UL);
1168
1169
if (!pmu_counter_idx_valid(vcpu, idx))
1170
return false;
1171
1172
if (p->is_write) {
1173
if (pmu_access_el0_disabled(vcpu))
1174
return false;
1175
1176
kvm_pmu_set_counter_value(vcpu, idx, p->regval);
1177
} else {
1178
p->regval = kvm_pmu_get_counter_value(vcpu, idx);
1179
}
1180
1181
return true;
1182
}
1183
1184
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1185
const struct sys_reg_desc *r)
1186
{
1187
u64 idx, reg;
1188
1189
if (pmu_access_el0_disabled(vcpu))
1190
return false;
1191
1192
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
1193
/* PMXEVTYPER_EL0 */
1194
idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
1195
reg = PMEVTYPER0_EL0 + idx;
1196
} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
1197
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1198
if (idx == ARMV8_PMU_CYCLE_IDX)
1199
reg = PMCCFILTR_EL0;
1200
else
1201
/* PMEVTYPERn_EL0 */
1202
reg = PMEVTYPER0_EL0 + idx;
1203
} else {
1204
BUG();
1205
}
1206
1207
if (!pmu_counter_idx_valid(vcpu, idx))
1208
return false;
1209
1210
if (p->is_write) {
1211
kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
1212
kvm_vcpu_pmu_restore_guest(vcpu);
1213
} else {
1214
p->regval = __vcpu_sys_reg(vcpu, reg);
1215
}
1216
1217
return true;
1218
}
1219
1220
static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
1221
{
1222
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1223
1224
__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
1225
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1226
1227
return 0;
1228
}
1229
1230
static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
1231
{
1232
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1233
1234
*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
1235
return 0;
1236
}
1237
1238
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1239
const struct sys_reg_desc *r)
1240
{
1241
u64 val, mask;
1242
1243
if (pmu_access_el0_disabled(vcpu))
1244
return false;
1245
1246
mask = kvm_pmu_accessible_counter_mask(vcpu);
1247
if (p->is_write) {
1248
val = p->regval & mask;
1249
if (r->Op2 & 0x1)
1250
/* accessing PMCNTENSET_EL0 */
1251
__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
1252
else
1253
/* accessing PMCNTENCLR_EL0 */
1254
__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
1255
1256
kvm_pmu_reprogram_counter_mask(vcpu, val);
1257
} else {
1258
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
1259
}
1260
1261
return true;
1262
}
1263
1264
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1265
const struct sys_reg_desc *r)
1266
{
1267
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1268
1269
if (check_pmu_access_disabled(vcpu, 0))
1270
return false;
1271
1272
if (p->is_write) {
1273
u64 val = p->regval & mask;
1274
1275
if (r->Op2 & 0x1)
1276
/* accessing PMINTENSET_EL1 */
1277
__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
1278
else
1279
/* accessing PMINTENCLR_EL1 */
1280
__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
1281
} else {
1282
p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
1283
}
1284
1285
return true;
1286
}
1287
1288
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1289
const struct sys_reg_desc *r)
1290
{
1291
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1292
1293
if (pmu_access_el0_disabled(vcpu))
1294
return false;
1295
1296
if (p->is_write) {
1297
if (r->CRm & 0x2)
1298
/* accessing PMOVSSET_EL0 */
1299
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
1300
else
1301
/* accessing PMOVSCLR_EL0 */
1302
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
1303
} else {
1304
p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
1305
}
1306
1307
return true;
1308
}
1309
1310
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1311
const struct sys_reg_desc *r)
1312
{
1313
u64 mask;
1314
1315
if (!p->is_write)
1316
return read_from_write_only(vcpu, p, r);
1317
1318
if (pmu_write_swinc_el0_disabled(vcpu))
1319
return false;
1320
1321
mask = kvm_pmu_accessible_counter_mask(vcpu);
1322
kvm_pmu_software_increment(vcpu, p->regval & mask);
1323
return true;
1324
}
1325
1326
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1327
const struct sys_reg_desc *r)
1328
{
1329
if (p->is_write) {
1330
if (!vcpu_mode_priv(vcpu))
1331
return undef_access(vcpu, p, r);
1332
1333
__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
1334
(p->regval & ARMV8_PMU_USERENR_MASK));
1335
} else {
1336
p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
1337
& ARMV8_PMU_USERENR_MASK;
1338
}
1339
1340
return true;
1341
}
1342
1343
static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1344
u64 *val)
1345
{
1346
*val = kvm_vcpu_read_pmcr(vcpu);
1347
return 0;
1348
}
1349
1350
static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1351
u64 val)
1352
{
1353
u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
1354
struct kvm *kvm = vcpu->kvm;
1355
1356
mutex_lock(&kvm->arch.config_lock);
1357
1358
/*
1359
* The vCPU can't have more counters than the PMU hardware
1360
* implements. Ignore this error to maintain compatibility
1361
* with the existing KVM behavior.
1362
*/
1363
if (!kvm_vm_has_ran_once(kvm) &&
1364
!vcpu_has_nv(vcpu) &&
1365
new_n <= kvm_arm_pmu_get_max_counters(kvm))
1366
kvm->arch.nr_pmu_counters = new_n;
1367
1368
mutex_unlock(&kvm->arch.config_lock);
1369
1370
/*
1371
* Ignore writes to RES0 bits, read only bits that are cleared on
1372
* vCPU reset, and writable bits that KVM doesn't support yet.
1373
* (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
1374
* The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
1375
* But, we leave the bit as it is here, as the vCPU's PMUver might
1376
* be changed later (NOTE: the bit will be cleared on first vCPU run
1377
* if necessary).
1378
*/
1379
val &= ARMV8_PMU_PMCR_MASK;
1380
1381
/* The LC bit is RES1 when AArch32 is not supported */
1382
if (!kvm_supports_32bit_el0())
1383
val |= ARMV8_PMU_PMCR_LC;
1384
1385
__vcpu_assign_sys_reg(vcpu, r->reg, val);
1386
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1387
1388
return 0;
1389
}
1390
1391
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
1392
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
1393
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
1394
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1395
get_dbg_wb_reg, set_dbg_wb_reg }, \
1396
{ SYS_DESC(SYS_DBGBCRn_EL1(n)), \
1397
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1398
get_dbg_wb_reg, set_dbg_wb_reg }, \
1399
{ SYS_DESC(SYS_DBGWVRn_EL1(n)), \
1400
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1401
get_dbg_wb_reg, set_dbg_wb_reg }, \
1402
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
1403
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1404
get_dbg_wb_reg, set_dbg_wb_reg }
1405
1406
#define PMU_SYS_REG(name) \
1407
SYS_DESC(SYS_##name), .reset = reset_pmu_reg, \
1408
.visibility = pmu_visibility
1409
1410
/* Macro to expand the PMEVCNTRn_EL0 register */
1411
#define PMU_PMEVCNTR_EL0(n) \
1412
{ PMU_SYS_REG(PMEVCNTRn_EL0(n)), \
1413
.reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
1414
.set_user = set_pmu_evcntr, \
1415
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
1416
1417
/* Macro to expand the PMEVTYPERn_EL0 register */
1418
#define PMU_PMEVTYPER_EL0(n) \
1419
{ PMU_SYS_REG(PMEVTYPERn_EL0(n)), \
1420
.reset = reset_pmevtyper, \
1421
.access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
1422
1423
/* Macro to expand the AMU counter and type registers*/
1424
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
1425
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
1426
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
1427
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1428
1429
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1430
const struct sys_reg_desc *rd)
1431
{
1432
return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1433
}
1434
1435
/*
1436
* If we land here on a PtrAuth access, that is because we didn't
1437
* fixup the access on exit by allowing the PtrAuth sysregs. The only
1438
* way this happens is when the guest does not have PtrAuth support
1439
* enabled.
1440
*/
1441
#define __PTRAUTH_KEY(k) \
1442
{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k, \
1443
.visibility = ptrauth_visibility}
1444
1445
#define PTRAUTH_KEY(k) \
1446
__PTRAUTH_KEY(k ## KEYLO_EL1), \
1447
__PTRAUTH_KEY(k ## KEYHI_EL1)
1448
1449
static bool access_arch_timer(struct kvm_vcpu *vcpu,
1450
struct sys_reg_params *p,
1451
const struct sys_reg_desc *r)
1452
{
1453
enum kvm_arch_timers tmr;
1454
enum kvm_arch_timer_regs treg;
1455
u64 reg = reg_to_encoding(r);
1456
1457
switch (reg) {
1458
case SYS_CNTP_TVAL_EL0:
1459
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1460
tmr = TIMER_HPTIMER;
1461
else
1462
tmr = TIMER_PTIMER;
1463
treg = TIMER_REG_TVAL;
1464
break;
1465
1466
case SYS_CNTV_TVAL_EL0:
1467
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1468
tmr = TIMER_HVTIMER;
1469
else
1470
tmr = TIMER_VTIMER;
1471
treg = TIMER_REG_TVAL;
1472
break;
1473
1474
case SYS_AARCH32_CNTP_TVAL:
1475
case SYS_CNTP_TVAL_EL02:
1476
tmr = TIMER_PTIMER;
1477
treg = TIMER_REG_TVAL;
1478
break;
1479
1480
case SYS_CNTV_TVAL_EL02:
1481
tmr = TIMER_VTIMER;
1482
treg = TIMER_REG_TVAL;
1483
break;
1484
1485
case SYS_CNTHP_TVAL_EL2:
1486
tmr = TIMER_HPTIMER;
1487
treg = TIMER_REG_TVAL;
1488
break;
1489
1490
case SYS_CNTHV_TVAL_EL2:
1491
tmr = TIMER_HVTIMER;
1492
treg = TIMER_REG_TVAL;
1493
break;
1494
1495
case SYS_CNTP_CTL_EL0:
1496
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1497
tmr = TIMER_HPTIMER;
1498
else
1499
tmr = TIMER_PTIMER;
1500
treg = TIMER_REG_CTL;
1501
break;
1502
1503
case SYS_CNTV_CTL_EL0:
1504
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1505
tmr = TIMER_HVTIMER;
1506
else
1507
tmr = TIMER_VTIMER;
1508
treg = TIMER_REG_CTL;
1509
break;
1510
1511
case SYS_AARCH32_CNTP_CTL:
1512
case SYS_CNTP_CTL_EL02:
1513
tmr = TIMER_PTIMER;
1514
treg = TIMER_REG_CTL;
1515
break;
1516
1517
case SYS_CNTV_CTL_EL02:
1518
tmr = TIMER_VTIMER;
1519
treg = TIMER_REG_CTL;
1520
break;
1521
1522
case SYS_CNTHP_CTL_EL2:
1523
tmr = TIMER_HPTIMER;
1524
treg = TIMER_REG_CTL;
1525
break;
1526
1527
case SYS_CNTHV_CTL_EL2:
1528
tmr = TIMER_HVTIMER;
1529
treg = TIMER_REG_CTL;
1530
break;
1531
1532
case SYS_CNTP_CVAL_EL0:
1533
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1534
tmr = TIMER_HPTIMER;
1535
else
1536
tmr = TIMER_PTIMER;
1537
treg = TIMER_REG_CVAL;
1538
break;
1539
1540
case SYS_CNTV_CVAL_EL0:
1541
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1542
tmr = TIMER_HVTIMER;
1543
else
1544
tmr = TIMER_VTIMER;
1545
treg = TIMER_REG_CVAL;
1546
break;
1547
1548
case SYS_AARCH32_CNTP_CVAL:
1549
case SYS_CNTP_CVAL_EL02:
1550
tmr = TIMER_PTIMER;
1551
treg = TIMER_REG_CVAL;
1552
break;
1553
1554
case SYS_CNTV_CVAL_EL02:
1555
tmr = TIMER_VTIMER;
1556
treg = TIMER_REG_CVAL;
1557
break;
1558
1559
case SYS_CNTHP_CVAL_EL2:
1560
tmr = TIMER_HPTIMER;
1561
treg = TIMER_REG_CVAL;
1562
break;
1563
1564
case SYS_CNTHV_CVAL_EL2:
1565
tmr = TIMER_HVTIMER;
1566
treg = TIMER_REG_CVAL;
1567
break;
1568
1569
case SYS_CNTPCT_EL0:
1570
case SYS_CNTPCTSS_EL0:
1571
if (is_hyp_ctxt(vcpu))
1572
tmr = TIMER_HPTIMER;
1573
else
1574
tmr = TIMER_PTIMER;
1575
treg = TIMER_REG_CNT;
1576
break;
1577
1578
case SYS_AARCH32_CNTPCT:
1579
case SYS_AARCH32_CNTPCTSS:
1580
tmr = TIMER_PTIMER;
1581
treg = TIMER_REG_CNT;
1582
break;
1583
1584
case SYS_CNTVCT_EL0:
1585
case SYS_CNTVCTSS_EL0:
1586
if (is_hyp_ctxt(vcpu))
1587
tmr = TIMER_HVTIMER;
1588
else
1589
tmr = TIMER_VTIMER;
1590
treg = TIMER_REG_CNT;
1591
break;
1592
1593
case SYS_AARCH32_CNTVCT:
1594
case SYS_AARCH32_CNTVCTSS:
1595
tmr = TIMER_VTIMER;
1596
treg = TIMER_REG_CNT;
1597
break;
1598
1599
default:
1600
print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
1601
return undef_access(vcpu, p, r);
1602
}
1603
1604
if (p->is_write)
1605
kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1606
else
1607
p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1608
1609
return true;
1610
}
1611
1612
static int arch_timer_set_user(struct kvm_vcpu *vcpu,
1613
const struct sys_reg_desc *rd,
1614
u64 val)
1615
{
1616
switch (reg_to_encoding(rd)) {
1617
case SYS_CNTV_CTL_EL0:
1618
case SYS_CNTP_CTL_EL0:
1619
case SYS_CNTHV_CTL_EL2:
1620
case SYS_CNTHP_CTL_EL2:
1621
val &= ~ARCH_TIMER_CTRL_IT_STAT;
1622
break;
1623
case SYS_CNTVCT_EL0:
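/*
 * Userspace writes to the counters are handled by adjusting the per-timer
 * offset so that (physical counter - offset) reads back as the requested
 * value, unless a VM-wide counter offset is already in use.
 */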
1624
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
1625
timer_set_offset(vcpu_vtimer(vcpu), kvm_phys_timer_read() - val);
1626
return 0;
1627
case SYS_CNTPCT_EL0:
1628
if (!test_bit(KVM_ARCH_FLAG_VM_COUNTER_OFFSET, &vcpu->kvm->arch.flags))
1629
timer_set_offset(vcpu_ptimer(vcpu), kvm_phys_timer_read() - val);
1630
return 0;
1631
}
1632
1633
__vcpu_assign_sys_reg(vcpu, rd->reg, val);
1634
return 0;
1635
}
1636
1637
static int arch_timer_get_user(struct kvm_vcpu *vcpu,
1638
const struct sys_reg_desc *rd,
1639
u64 *val)
1640
{
1641
switch (reg_to_encoding(rd)) {
1642
case SYS_CNTVCT_EL0:
1643
*val = kvm_phys_timer_read() - timer_get_offset(vcpu_vtimer(vcpu));
1644
break;
1645
case SYS_CNTPCT_EL0:
1646
*val = kvm_phys_timer_read() - timer_get_offset(vcpu_ptimer(vcpu));
1647
break;
1648
default:
1649
*val = __vcpu_sys_reg(vcpu, rd->reg);
1650
}
1651
1652
return 0;
1653
}
1654
1655
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
1656
s64 new, s64 cur)
1657
{
1658
struct arm64_ftr_bits kvm_ftr = *ftrp;
1659
1660
/* Some features have different safe value type in KVM than host features */
1661
switch (id) {
1662
case SYS_ID_AA64DFR0_EL1:
1663
switch (kvm_ftr.shift) {
1664
case ID_AA64DFR0_EL1_PMUVer_SHIFT:
1665
kvm_ftr.type = FTR_LOWER_SAFE;
1666
break;
1667
case ID_AA64DFR0_EL1_DebugVer_SHIFT:
1668
kvm_ftr.type = FTR_LOWER_SAFE;
1669
break;
1670
}
1671
break;
1672
case SYS_ID_DFR0_EL1:
1673
if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
1674
kvm_ftr.type = FTR_LOWER_SAFE;
1675
break;
1676
}
1677
1678
return arm64_ftr_safe_value(&kvm_ftr, new, cur);
1679
}
1680
1681
/*
1682
* arm64_check_features() - Check if a feature register value constitutes
1683
* a subset of features indicated by the idreg's KVM sanitised limit.
1684
*
1685
* This function checks whether each feature field of @val is a "safe" value
* with respect to the idreg's KVM sanitised limit returned from the reset()
* callback. If a field value in @val is the same as the one in the limit, it
* is always considered the safe value, regardless of the field type. For
* register fields that are not writable, only the value in the limit is
* considered the safe value.
1690
*
1691
* Return: 0 if all the fields are safe. Otherwise, return negative errno.
1692
*/
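/*
 * In other words, a writable field is accepted if it equals the limit, or if
 * the KVM-adjusted safe-value policy still selects the userspace value: a
 * FTR_LOWER_SAFE field, for example, may be lowered below the limit but
 * never raised above it.
 */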
1693
static int arm64_check_features(struct kvm_vcpu *vcpu,
1694
const struct sys_reg_desc *rd,
1695
u64 val)
1696
{
1697
const struct arm64_ftr_reg *ftr_reg;
1698
const struct arm64_ftr_bits *ftrp = NULL;
1699
u32 id = reg_to_encoding(rd);
1700
u64 writable_mask = rd->val;
1701
u64 limit = rd->reset(vcpu, rd);
1702
u64 mask = 0;
1703
1704
/*
1705
* Hidden and unallocated ID registers may not have a corresponding
1706
* struct arm64_ftr_reg. Of course, if the register is RAZ we know the
1707
* only safe value is 0.
1708
*/
1709
if (sysreg_visible_as_raz(vcpu, rd))
1710
return val ? -E2BIG : 0;
1711
1712
ftr_reg = get_arm64_ftr_reg(id);
1713
if (!ftr_reg)
1714
return -EINVAL;
1715
1716
ftrp = ftr_reg->ftr_bits;
1717
1718
for (; ftrp && ftrp->width; ftrp++) {
1719
s64 f_val, f_lim, safe_val;
1720
u64 ftr_mask;
1721
1722
ftr_mask = arm64_ftr_mask(ftrp);
1723
if ((ftr_mask & writable_mask) != ftr_mask)
1724
continue;
1725
1726
f_val = arm64_ftr_value(ftrp, val);
1727
f_lim = arm64_ftr_value(ftrp, limit);
1728
mask |= ftr_mask;
1729
1730
if (f_val == f_lim)
1731
safe_val = f_val;
1732
else
1733
safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);
1734
1735
if (safe_val != f_val)
1736
return -E2BIG;
1737
}
1738
1739
/* For fields that are not writable, values in limit are the safe values. */
1740
if ((val & ~mask) != (limit & ~mask))
1741
return -E2BIG;
1742
1743
return 0;
1744
}
1745
1746
static u8 pmuver_to_perfmon(u8 pmuver)
1747
{
1748
switch (pmuver) {
1749
case ID_AA64DFR0_EL1_PMUVer_IMP:
1750
return ID_DFR0_EL1_PerfMon_PMUv3;
1751
case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
1752
return ID_DFR0_EL1_PerfMon_IMPDEF;
1753
default:
1754
/* Anything ARMv8.1+ and NI have the same value. For now. */
1755
return pmuver;
1756
}
1757
}
1758
1759
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1760
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
1761
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1762
1763
/* Read a sanitised cpufeature ID register by sys_reg_desc */
1764
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
1765
const struct sys_reg_desc *r)
1766
{
1767
u32 id = reg_to_encoding(r);
1768
u64 val;
1769
1770
if (sysreg_visible_as_raz(vcpu, r))
1771
return 0;
1772
1773
val = read_sanitised_ftr_reg(id);
1774
1775
switch (id) {
1776
case SYS_ID_AA64DFR0_EL1:
1777
val = sanitise_id_aa64dfr0_el1(vcpu, val);
1778
break;
1779
case SYS_ID_AA64PFR0_EL1:
1780
val = sanitise_id_aa64pfr0_el1(vcpu, val);
1781
break;
1782
case SYS_ID_AA64PFR1_EL1:
1783
val = sanitise_id_aa64pfr1_el1(vcpu, val);
1784
break;
1785
case SYS_ID_AA64PFR2_EL1:
1786
val &= ID_AA64PFR2_EL1_FPMR |
1787
(kvm_has_mte(vcpu->kvm) ?
1788
ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY :
1789
0);
1790
break;
1791
case SYS_ID_AA64ISAR1_EL1:
1792
if (!vcpu_has_ptrauth(vcpu))
1793
val &= ~(ID_AA64ISAR1_EL1_APA |
1794
ID_AA64ISAR1_EL1_API |
1795
ID_AA64ISAR1_EL1_GPA |
1796
ID_AA64ISAR1_EL1_GPI);
1797
break;
1798
case SYS_ID_AA64ISAR2_EL1:
1799
if (!vcpu_has_ptrauth(vcpu))
1800
val &= ~(ID_AA64ISAR2_EL1_APA3 |
1801
ID_AA64ISAR2_EL1_GPA3);
1802
if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
1803
has_broken_cntvoff())
1804
val &= ~ID_AA64ISAR2_EL1_WFxT;
1805
break;
1806
case SYS_ID_AA64ISAR3_EL1:
1807
val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
1808
ID_AA64ISAR3_EL1_FAMINMAX;
1809
break;
1810
case SYS_ID_AA64MMFR2_EL1:
1811
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
1812
val &= ~ID_AA64MMFR2_EL1_NV;
1813
break;
1814
case SYS_ID_AA64MMFR3_EL1:
1815
val &= ID_AA64MMFR3_EL1_TCRX |
1816
ID_AA64MMFR3_EL1_SCTLRX |
1817
ID_AA64MMFR3_EL1_S1POE |
1818
ID_AA64MMFR3_EL1_S1PIE;
1819
break;
1820
case SYS_ID_MMFR4_EL1:
1821
val &= ~ID_MMFR4_EL1_CCIDX;
1822
break;
1823
}
1824
1825
if (vcpu_has_nv(vcpu))
1826
val = limit_nv_id_reg(vcpu->kvm, id, val);
1827
1828
return val;
1829
}
1830
1831
static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
1832
const struct sys_reg_desc *r)
1833
{
1834
return __kvm_read_sanitised_id_reg(vcpu, r);
1835
}
1836
1837
static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1838
{
1839
return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
1840
}
1841
1842
static bool is_feature_id_reg(u32 encoding)
1843
{
1844
return (sys_reg_Op0(encoding) == 3 &&
1845
(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
1846
sys_reg_CRn(encoding) == 0 &&
1847
sys_reg_CRm(encoding) <= 7);
1848
}
1849
1850
/*
1851
* Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
1852
* (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
1853
* registers KVM maintains on a per-VM basis.
1854
*
1855
* Additionally, the implementation ID registers and CTR_EL0 are handled as
1856
* per-VM registers.
1857
*/
1858
static inline bool is_vm_ftr_id_reg(u32 id)
1859
{
1860
switch (id) {
1861
case SYS_CTR_EL0:
1862
case SYS_MIDR_EL1:
1863
case SYS_REVIDR_EL1:
1864
case SYS_AIDR_EL1:
1865
return true;
1866
default:
1867
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1868
sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1869
sys_reg_CRm(id) < 8);
1870
1871
}
1872
}
1873
1874
static inline bool is_vcpu_ftr_id_reg(u32 id)
1875
{
1876
return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
1877
}
1878
1879
static inline bool is_aa32_id_reg(u32 id)
1880
{
1881
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1882
sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1883
sys_reg_CRm(id) <= 3);
1884
}
1885
1886
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1887
const struct sys_reg_desc *r)
1888
{
1889
u32 id = reg_to_encoding(r);
1890
1891
switch (id) {
1892
case SYS_ID_AA64ZFR0_EL1:
1893
if (!vcpu_has_sve(vcpu))
1894
return REG_RAZ;
1895
break;
1896
}
1897
1898
return 0;
1899
}
1900
1901
static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1902
const struct sys_reg_desc *r)
1903
{
1904
/*
1905
* AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1906
* EL. Promote to RAZ/WI in order to guarantee consistency between
1907
* systems.
1908
*/
1909
if (!kvm_supports_32bit_el0())
1910
return REG_RAZ | REG_USER_WI;
1911
1912
return id_visibility(vcpu, r);
1913
}
1914
1915
static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
1916
const struct sys_reg_desc *r)
1917
{
1918
return REG_RAZ;
1919
}
1920
1921
/* cpufeature ID register access trap handlers */
1922
1923
static bool access_id_reg(struct kvm_vcpu *vcpu,
1924
struct sys_reg_params *p,
1925
const struct sys_reg_desc *r)
1926
{
1927
if (p->is_write)
1928
return write_to_read_only(vcpu, p, r);
1929
1930
p->regval = read_id_reg(vcpu, r);
1931
1932
return true;
1933
}
1934
1935
/* Visibility overrides for SVE-specific control registers */
1936
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1937
const struct sys_reg_desc *rd)
1938
{
1939
if (vcpu_has_sve(vcpu))
1940
return 0;
1941
1942
return REG_HIDDEN;
1943
}
1944
1945
static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
1946
const struct sys_reg_desc *rd)
1947
{
1948
if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
1949
return 0;
1950
1951
return REG_HIDDEN;
1952
}
1953
1954
static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
1955
const struct sys_reg_desc *rd)
1956
{
1957
if (kvm_has_fpmr(vcpu->kvm))
1958
return 0;
1959
1960
return REG_HIDDEN;
1961
}
1962
1963
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
1964
{
1965
if (!vcpu_has_sve(vcpu))
1966
val &= ~ID_AA64PFR0_EL1_SVE_MASK;
1967
1968
/*
1969
* The default is to expose CSV2 == 1 if the HW isn't affected.
1970
* Although this is a per-CPU feature, we make it global because
1971
* asymmetric systems are just a nuisance.
1972
*
1973
* Userspace can override this as long as it doesn't promise
1974
* the impossible.
1975
*/
1976
if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
1977
val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
1978
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
1979
}
1980
if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
1981
val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
1982
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
1983
}
1984
1985
if (vgic_is_v3(vcpu->kvm)) {
1986
val &= ~ID_AA64PFR0_EL1_GIC_MASK;
1987
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
1988
}
1989
1990
val &= ~ID_AA64PFR0_EL1_AMU_MASK;
1991
1992
/*
1993
* MPAM is disabled by default as KVM also needs a set of PARTID to
1994
* program the MPAMVPMx_EL2 PARTID remapping registers with. But some
1995
* older kernels let the guest see the ID bit.
1996
*/
1997
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
1998
1999
return val;
2000
}
2001
2002
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
2003
{
2004
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2005
2006
if (!kvm_has_mte(vcpu->kvm)) {
2007
val &= ~ID_AA64PFR1_EL1_MTE;
2008
val &= ~ID_AA64PFR1_EL1_MTE_frac;
2009
}
2010
2011
if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
2012
SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
2013
val &= ~ID_AA64PFR1_EL1_RAS_frac;
2014
2015
val &= ~ID_AA64PFR1_EL1_SME;
2016
val &= ~ID_AA64PFR1_EL1_RNDR_trap;
2017
val &= ~ID_AA64PFR1_EL1_NMI;
2018
val &= ~ID_AA64PFR1_EL1_GCS;
2019
val &= ~ID_AA64PFR1_EL1_THE;
2020
val &= ~ID_AA64PFR1_EL1_MTEX;
2021
val &= ~ID_AA64PFR1_EL1_PFAR;
2022
val &= ~ID_AA64PFR1_EL1_MPAM_frac;
2023
2024
return val;
2025
}
2026
2027
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
2028
{
2029
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
2030
2031
/*
2032
* Only initialize the PMU version if the vCPU was configured with one.
2033
*/
2034
val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
2035
if (kvm_vcpu_has_pmu(vcpu))
2036
val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
2037
kvm_arm_pmu_get_pmuver_limit());
2038
2039
/* Hide SPE from guests */
2040
val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
2041
2042
/* Hide BRBE from guests */
2043
val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
2044
2045
return val;
2046
}
2047
2048
/*
2049
* Older versions of KVM erroneously claimed support for FEAT_DoubleLock with
2050
* NV-enabled VMs on hardware that doesn't implement it. Silently ignore the
2051
* incorrect value if it is consistent with that bug.
2052
*/
2053
static bool ignore_feat_doublelock(struct kvm_vcpu *vcpu, u64 val)
2054
{
2055
u8 host, user;
2056
2057
if (!vcpu_has_nv(vcpu))
2058
return false;
2059
2060
host = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock,
2061
read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1));
2062
user = SYS_FIELD_GET(ID_AA64DFR0_EL1, DoubleLock, val);
2063
2064
return host == ID_AA64DFR0_EL1_DoubleLock_NI &&
2065
user == ID_AA64DFR0_EL1_DoubleLock_IMP;
2066
}
2067
2068
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
2069
const struct sys_reg_desc *rd,
2070
u64 val)
2071
{
2072
u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
2073
u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
2074
2075
/*
2076
* Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
2077
* ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
2078
* exposed an IMP_DEF PMU to userspace and the guest on systems w/
2079
* non-architectural PMUs. Of course, PMUv3 is the only game in town for
2080
* PMU virtualization, so the IMP_DEF value was rather user-hostile.
2081
*
2082
* At minimum, we're on the hook to allow values that were given to
2083
* userspace by KVM. Cover our tracks here and replace the IMP_DEF value
2084
* with a more sensible NI. The value of an ID register changing under
2085
* the nose of the guest is unfortunate, but is certainly no more
2086
* surprising than an ill-guided PMU driver poking at impdef system
2087
* registers, which ends in an UNDEF...
2088
*/
2089
if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
2090
val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
2091
2092
/*
2093
* ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
2094
* nonzero minimum safe value.
2095
*/
2096
if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
2097
return -EINVAL;
2098
2099
if (ignore_feat_doublelock(vcpu, val)) {
2100
val &= ~ID_AA64DFR0_EL1_DoubleLock;
2101
val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DoubleLock, NI);
2102
}
2103
2104
return set_id_reg(vcpu, rd, val);
2105
}
2106
2107
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
2108
const struct sys_reg_desc *rd)
2109
{
2110
u8 perfmon;
2111
u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);
2112
2113
val &= ~ID_DFR0_EL1_PerfMon_MASK;
2114
if (kvm_vcpu_has_pmu(vcpu)) {
2115
perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
2116
val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
2117
}
2118
2119
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);
2120
2121
return val;
2122
}
2123
2124
static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
2125
const struct sys_reg_desc *rd,
2126
u64 val)
2127
{
2128
u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
2129
u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);
2130
2131
if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
2132
val &= ~ID_DFR0_EL1_PerfMon_MASK;
2133
perfmon = 0;
2134
}
2135
2136
/*
2137
* Allow DFR0_EL1.PerfMon to be set from userspace as long as
2138
* it doesn't promise more than what the HW gives us on the
2139
* AArch64 side (as everything is emulated with that), and that
2140
* it describes a PMUv3.
2141
*/
2142
if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
2143
return -EINVAL;
2144
2145
if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
2146
return -EINVAL;
2147
2148
return set_id_reg(vcpu, rd, val);
2149
}
2150
2151
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
2152
const struct sys_reg_desc *rd, u64 user_val)
2153
{
2154
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
2155
u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
2156
2157
/*
2158
* Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
2159
* in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
2160
* guests, but didn't add trap handling. KVM doesn't support MPAM and
2161
* always returns an UNDEF for these registers. The guest must see 0
2162
* for this field.
2163
*
2164
* But KVM must also accept values from user-space that were provided
2165
* by KVM. On CPUs that support MPAM, permit user-space to write
2166
* the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
2167
*/
2168
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2169
user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
2170
2171
/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
2172
if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
2173
!FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
2174
(vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
2175
return -EINVAL;
2176
2177
/*
2178
* If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then
2179
* we also support GICv3. Fail attempts to set the GIC field to anything but IMP.
2180
*/
2181
if (vgic_is_v3_compat(vcpu->kvm) &&
2182
FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP)
2183
return -EINVAL;
2184
2185
return set_id_reg(vcpu, rd, user_val);
2186
}
2187
2188
static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
2189
const struct sys_reg_desc *rd, u64 user_val)
2190
{
2191
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
2192
u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
2193
u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
2194
u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
2195
u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);
2196
2197
/* See set_id_aa64pfr0_el1 for comment about MPAM */
2198
if ((hw_val & mpam_mask) == (user_val & mpam_mask))
2199
user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
2200
2201
/*
2202
* Previously MTE_frac was hidden from the guest. However, if the
2203
* hardware supports MTE2 but not MTE_ASYM_FAULT then a value
2204
* of 0 for this field indicates that the hardware supports
2205
* MTE_ASYNC, whereas 0xf indicates that MTE_ASYNC is not supported.
2206
*
2207
* As KVM must accept values from user-space that were previously
2208
* provided by KVM, when ID_AA64PFR1_EL1.MTE is 2 allow user-space to
2209
* set ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to avoid
2210
* incorrectly claiming hardware support for MTE_ASYNC in the
2211
* guest.
2212
*/
2213
2214
if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
2215
hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
2216
user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
2217
user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
2218
user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
2219
}
2220
2221
return set_id_reg(vcpu, rd, user_val);
2222
}
2223
2224
/*
2225
* Allow userspace to de-feature a stage-2 translation granule but prevent it
2226
* from claiming the impossible.
2227
*/
2228
#define tgran2_val_allowed(tg, safe, user) \
2229
({ \
2230
u8 __s = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, safe); \
2231
u8 __u = SYS_FIELD_GET(ID_AA64MMFR0_EL1, tg, user); \
2232
\
2233
__s == __u || __u == ID_AA64MMFR0_EL1_##tg##_NI; \
2234
})
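/*
 * For example: if the sanitised register reports TGRAN4_2 == IMP,
 * userspace may write back either IMP (matching the host) or NI
 * (de-featuring the granule); any other value fails the check above
 * and the write is rejected with -EINVAL.
 */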
2235
2236
static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
2237
const struct sys_reg_desc *rd, u64 user_val)
2238
{
2239
u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
2240
2241
if (!vcpu_has_nv(vcpu))
2242
return set_id_reg(vcpu, rd, user_val);
2243
2244
if (!tgran2_val_allowed(TGRAN4_2, sanitized_val, user_val) ||
2245
!tgran2_val_allowed(TGRAN16_2, sanitized_val, user_val) ||
2246
!tgran2_val_allowed(TGRAN64_2, sanitized_val, user_val))
2247
return -EINVAL;
2248
2249
return set_id_reg(vcpu, rd, user_val);
2250
}
2251
2252
static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
2253
const struct sys_reg_desc *rd, u64 user_val)
2254
{
2255
u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
2256
u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;
2257
2258
/*
2259
* We made the mistake of exposing the now deprecated NV field,
2260
* so allow userspace to write it, but silently ignore it.
2261
*/
2262
if ((hw_val & nv_mask) == (user_val & nv_mask))
2263
user_val &= ~nv_mask;
2264
2265
return set_id_reg(vcpu, rd, user_val);
2266
}
2267
2268
static int set_ctr_el0(struct kvm_vcpu *vcpu,
2269
const struct sys_reg_desc *rd, u64 user_val)
2270
{
2271
u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);
2272
2273
/*
2274
* Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
2275
* Hence only allow setting VIPT (0b10) or PIPT (0b11) for L1Ip, based
2276
* on what the hardware reports.
2277
*
2278
* Using a VIPT software model on PIPT hardware leads to over-invalidation,
2279
* but is still correct. Hence, we can allow downgrading PIPT to VIPT,
2280
* but not the other way around. This is handled via arm64_ftr_safe_value()
2281
* as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value
2282
* set as VIPT.
2283
*/
2284
switch (user_L1Ip) {
2285
case CTR_EL0_L1Ip_RESERVED_VPIPT:
2286
case CTR_EL0_L1Ip_RESERVED_AIVIVT:
2287
return -EINVAL;
2288
case CTR_EL0_L1Ip_VIPT:
2289
case CTR_EL0_L1Ip_PIPT:
2290
return set_id_reg(vcpu, rd, user_val);
2291
default:
2292
return -ENOENT;
2293
}
2294
}
2295
2296
/*
2297
* cpufeature ID register user accessors
2298
*
2299
* For now, these registers are immutable for userspace, so no values
2300
* are stored, and for set_id_reg() we don't allow the effective value
2301
* to be changed.
2302
*/
2303
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2304
u64 *val)
2305
{
2306
/*
2307
* Avoid locking if the VM has already started, as the ID registers are
2308
* guaranteed to be invariant at that point.
2309
*/
2310
if (kvm_vm_has_ran_once(vcpu->kvm)) {
2311
*val = read_id_reg(vcpu, rd);
2312
return 0;
2313
}
2314
2315
mutex_lock(&vcpu->kvm->arch.config_lock);
2316
*val = read_id_reg(vcpu, rd);
2317
mutex_unlock(&vcpu->kvm->arch.config_lock);
2318
2319
return 0;
2320
}
2321
2322
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2323
u64 val)
2324
{
2325
u32 id = reg_to_encoding(rd);
2326
int ret;
2327
2328
mutex_lock(&vcpu->kvm->arch.config_lock);
2329
2330
/*
2331
* Once the VM has started the ID registers are immutable. Reject any
2332
* write that does not match the final register value.
2333
*/
2334
if (kvm_vm_has_ran_once(vcpu->kvm)) {
2335
if (val != read_id_reg(vcpu, rd))
2336
ret = -EBUSY;
2337
else
2338
ret = 0;
2339
2340
mutex_unlock(&vcpu->kvm->arch.config_lock);
2341
return ret;
2342
}
2343
2344
ret = arm64_check_features(vcpu, rd, val);
2345
if (!ret)
2346
kvm_set_vm_id_reg(vcpu->kvm, id, val);
2347
2348
mutex_unlock(&vcpu->kvm->arch.config_lock);
2349
2350
/*
2351
* arm64_check_features() returns -E2BIG to indicate the register's
2352
* feature set is a superset of the maximally-allowed register value.
2353
* While it would be nice to precisely describe this to userspace, the
2354
* existing UAPI for KVM_SET_ONE_REG has it that invalid register
2355
* writes return -EINVAL.
2356
*/
2357
if (ret == -E2BIG)
2358
ret = -EINVAL;
2359
return ret;
2360
}
2361
2362
void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
2363
{
2364
u64 *p = __vm_id_reg(&kvm->arch, reg);
2365
2366
lockdep_assert_held(&kvm->arch.config_lock);
2367
2368
if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
2369
return;
2370
2371
*p = val;
2372
}
2373
2374
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2375
u64 *val)
2376
{
2377
*val = 0;
2378
return 0;
2379
}
2380
2381
static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2382
u64 val)
2383
{
2384
return 0;
2385
}
2386
2387
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2388
const struct sys_reg_desc *r)
2389
{
2390
if (p->is_write)
2391
return write_to_read_only(vcpu, p, r);
2392
2393
p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
2394
return true;
2395
}
2396
2397
static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2398
const struct sys_reg_desc *r)
2399
{
2400
if (p->is_write)
2401
return write_to_read_only(vcpu, p, r);
2402
2403
p->regval = __vcpu_sys_reg(vcpu, r->reg);
2404
return true;
2405
}
2406
2407
/*
2408
* Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
2409
* by the physical CPU which the vcpu currently resides in.
2410
*/
2411
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2412
{
2413
u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2414
u64 clidr;
2415
u8 loc;
2416
2417
if ((ctr_el0 & CTR_EL0_IDC)) {
2418
/*
2419
* Data cache clean to the PoU is not required so LoUU and LoUIS
2420
* will not be set and a unified cache, which will be marked as
2421
* LoC, will be added.
2422
*
2423
* If not DIC, make the unified cache L2 so that an instruction
2424
* cache can be added as L1 later.
2425
*/
2426
loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
2427
clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
2428
} else {
2429
/*
2430
* Data cache clean to the PoU is required so let L1 have a data
2431
* cache and mark it as LoUU and LoUIS. As L1 has a data cache,
2432
* it can be marked as LoC too.
2433
*/
2434
loc = 1;
2435
clidr = 1 << CLIDR_LOUU_SHIFT;
2436
clidr |= 1 << CLIDR_LOUIS_SHIFT;
2437
clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
2438
}
2439
2440
/*
2441
* Instruction cache invalidation to the PoU is required so let L1 have
2442
* an instruction cache. If L1 already has a data cache, it will be
2443
* CACHE_TYPE_SEPARATE.
2444
*/
2445
if (!(ctr_el0 & CTR_EL0_DIC))
2446
clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);
2447
2448
clidr |= loc << CLIDR_LOC_SHIFT;
2449
2450
/*
2451
* Add a tag cache unified with the data cache. Allocation tags and data
2452
* are unified in a cache line, so this looks valid even if there is only
2453
* one cache line.
2454
*/
2455
if (kvm_has_mte(vcpu->kvm))
2456
clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
2457
2458
__vcpu_assign_sys_reg(vcpu, r->reg, clidr);
2459
2460
return __vcpu_sys_reg(vcpu, r->reg);
2461
}
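/*
 * Resulting topologies, for reference: with both IDC and DIC set, a
 * single unified L1 cache with LoC == 1; with IDC but not DIC, a
 * unified L2 plus an instruction-only L1 with LoC == 2; without IDC,
 * separate (or data-only, if DIC) L1 caches with LoUU/LoUIS/LoC == 1.
 */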
2462
2463
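/*
 * Userspace-provided CLIDR_EL1 values must not set RES0 bits, and must
 * not claim that no cache maintenance to the PoU is needed (IDC-like
 * behaviour) when CTR_EL0.IDC is clear on the host.
 */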
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
2464
u64 val)
2465
{
2466
u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
2467
u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));
2468
2469
if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
2470
return -EINVAL;
2471
2472
__vcpu_assign_sys_reg(vcpu, rd->reg, val);
2473
2474
return 0;
2475
}
2476
2477
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2478
const struct sys_reg_desc *r)
2479
{
2480
int reg = r->reg;
2481
2482
if (p->is_write)
2483
vcpu_write_sys_reg(vcpu, p->regval, reg);
2484
else
2485
p->regval = vcpu_read_sys_reg(vcpu, reg);
2486
return true;
2487
}
2488
2489
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
2490
const struct sys_reg_desc *r)
2491
{
2492
u32 csselr;
2493
2494
if (p->is_write)
2495
return write_to_read_only(vcpu, p, r);
2496
2497
csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
2498
csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
2499
if (csselr < CSSELR_MAX)
2500
p->regval = get_ccsidr(vcpu, csselr);
2501
2502
return true;
2503
}
2504
2505
static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
2506
const struct sys_reg_desc *rd)
2507
{
2508
if (kvm_has_mte(vcpu->kvm))
2509
return 0;
2510
2511
return REG_HIDDEN;
2512
}
2513
2514
#define MTE_REG(name) { \
2515
SYS_DESC(SYS_##name), \
2516
.access = undef_access, \
2517
.reset = reset_unknown, \
2518
.reg = name, \
2519
.visibility = mte_visibility, \
2520
}
2521
2522
static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
2523
const struct sys_reg_desc *rd)
2524
{
2525
if (vcpu_has_nv(vcpu))
2526
return 0;
2527
2528
return REG_HIDDEN;
2529
}
2530
2531
static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
2532
struct sys_reg_params *p,
2533
const struct sys_reg_desc *r)
2534
{
2535
/*
2536
* We really shouldn't be here, and this is likely the result
2537
* of a misconfigured trap, as this register should target the
2538
* VNCR page, and nothing else.
2539
*/
2540
return bad_trap(vcpu, p, r,
2541
"trap of VNCR-backed register");
2542
}
2543
2544
static bool bad_redir_trap(struct kvm_vcpu *vcpu,
2545
struct sys_reg_params *p,
2546
const struct sys_reg_desc *r)
2547
{
2548
/*
2549
* We really shouldn't be here, and this is likely the result
2550
* of a misconfigured trap, as this register should target the
2551
* corresponding EL1, and nothing else.
2552
*/
2553
return bad_trap(vcpu, p, r,
2554
"trap of EL2 register redirected to EL1");
2555
}
2556
2557
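/*
 * Generic descriptor initialiser: fills in the access, reset, user
 * accessor and visibility callbacks, along with the val field, in one
 * go. Used by the EL2 and timer register macros below.
 */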
#define SYS_REG_USER_FILTER(name, acc, rst, v, gu, su, filter) { \
2558
SYS_DESC(SYS_##name), \
2559
.access = acc, \
2560
.reset = rst, \
2561
.reg = name, \
2562
.get_user = gu, \
2563
.set_user = su, \
2564
.visibility = filter, \
2565
.val = v, \
2566
}
2567
2568
#define EL2_REG_FILTERED(name, acc, rst, v, filter) \
2569
SYS_REG_USER_FILTER(name, acc, rst, v, NULL, NULL, filter)
2570
2571
#define EL2_REG(name, acc, rst, v) \
2572
EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)
2573
2574
#define EL2_REG_VNCR(name, rst, v) EL2_REG(name, bad_vncr_trap, rst, v)
2575
#define EL2_REG_VNCR_FILT(name, vis) \
2576
EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis)
2577
#define EL2_REG_VNCR_GICv3(name) \
2578
EL2_REG_VNCR_FILT(name, hidden_visibility)
2579
#define EL2_REG_REDIR(name, rst, v) EL2_REG(name, bad_redir_trap, rst, v)
2580
2581
#define TIMER_REG(name, vis) \
2582
SYS_REG_USER_FILTER(name, access_arch_timer, reset_val, 0, \
2583
arch_timer_get_user, arch_timer_set_user, vis)
2584
2585
/*
2586
* Since the reset() callback and the val field are not otherwise used for
2587
* idregs, they are repurposed for idreg-specific duties:
2588
* The reset() callback returns the KVM-sanitised register value, which is
2589
* the same as the host kernel's sanitised value if KVM adds no sanitisation.
2590
* The val field is used as a mask indicating the writable fields of the idreg.
2591
* Only bits set to 1 are writable from userspace. This mask might become
2592
* unnecessary in the future, once all ID registers are enabled as writable
2593
* from userspace.
2594
*/
2595
2596
#define ID_DESC_DEFAULT_CALLBACKS \
2597
.access = access_id_reg, \
2598
.get_user = get_id_reg, \
2599
.set_user = set_id_reg, \
2600
.visibility = id_visibility, \
2601
.reset = kvm_read_sanitised_id_reg
2602
2603
#define ID_DESC(name) \
2604
SYS_DESC(SYS_##name), \
2605
ID_DESC_DEFAULT_CALLBACKS
2606
2607
/* sys_reg_desc initialiser for known cpufeature ID registers */
2608
#define ID_SANITISED(name) { \
2609
ID_DESC(name), \
2610
.val = 0, \
2611
}
2612
2613
/* sys_reg_desc initialiser for writable ID registers */
2614
#define ID_WRITABLE(name, mask) { \
2615
ID_DESC(name), \
2616
.val = mask, \
2617
}
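/*
 * Illustrative example only (ID_FOO_EL1 and its BAR field are made-up
 * names): an entry such as
 *	ID_WRITABLE(ID_FOO_EL1, ID_FOO_EL1_BAR_MASK)
 * makes only the BAR field writable from userspace; writes that change
 * any other field are rejected by arm64_check_features() via set_id_reg().
 */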
2618
2619
/*
2620
* 32bit ID regs are fully writable when the guest is 32bit
2621
* capable. Nothing in the KVM code should rely on 32bit features
2622
* anyway, only 64bit, so let the VMM do its worst.
2623
*/
2624
#define AA32_ID_WRITABLE(name) { \
2625
ID_DESC(name), \
2626
.visibility = aa32_id_visibility, \
2627
.val = GENMASK(31, 0), \
2628
}
2629
2630
/* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
2631
#define ID_FILTERED(sysreg, name, mask) { \
2632
ID_DESC(sysreg), \
2633
.set_user = set_##name, \
2634
.val = (mask), \
2635
}
2636
2637
/*
2638
* sys_reg_desc initialiser for architecturally unallocated cpufeature ID
2639
* register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
2640
* (1 <= crm < 8, 0 <= Op2 < 8).
2641
*/
2642
#define ID_UNALLOCATED(crm, op2) { \
2643
.name = "S3_0_0_" #crm "_" #op2, \
2644
Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
2645
ID_DESC_DEFAULT_CALLBACKS, \
2646
.visibility = raz_visibility, \
2647
.val = 0, \
2648
}
2649
2650
/*
2651
* sys_reg_desc initialiser for known ID registers that we hide from guests.
2652
* For now, these are exposed just like unallocated ID regs: they appear
2653
* RAZ for the guest.
2654
*/
2655
#define ID_HIDDEN(name) { \
2656
ID_DESC(name), \
2657
.visibility = raz_visibility, \
2658
.val = 0, \
2659
}
2660
2661
static bool access_sp_el1(struct kvm_vcpu *vcpu,
2662
struct sys_reg_params *p,
2663
const struct sys_reg_desc *r)
2664
{
2665
if (p->is_write)
2666
__vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
2667
else
2668
p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
2669
2670
return true;
2671
}
2672
2673
static bool access_elr(struct kvm_vcpu *vcpu,
2674
struct sys_reg_params *p,
2675
const struct sys_reg_desc *r)
2676
{
2677
if (p->is_write)
2678
vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
2679
else
2680
p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
2681
2682
return true;
2683
}
2684
2685
static bool access_spsr(struct kvm_vcpu *vcpu,
2686
struct sys_reg_params *p,
2687
const struct sys_reg_desc *r)
2688
{
2689
if (p->is_write)
2690
__vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
2691
else
2692
p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
2693
2694
return true;
2695
}
2696
2697
static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
2698
struct sys_reg_params *p,
2699
const struct sys_reg_desc *r)
2700
{
2701
if (p->is_write)
2702
__vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
2703
else
2704
p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
2705
2706
return true;
2707
}
2708
2709
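/*
 * Reset handler for HCR_EL2: start from the descriptor's reset value
 * and additionally force E2H when the host lacks the ARM64_HAS_HCR_NV1
 * capability.
 */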
static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2710
{
2711
u64 val = r->val;
2712
2713
if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
2714
val |= HCR_E2H;
2715
2716
__vcpu_assign_sys_reg(vcpu, r->reg, val);
2717
2718
return __vcpu_sys_reg(vcpu, r->reg);
2719
}
2720
2721
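/*
 * Combine the EL2 visibility check with a feature-specific one: the
 * register is only visible if the vCPU has NV and the supplied
 * predicate also reports it as visible.
 */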
static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
2722
const struct sys_reg_desc *rd,
2723
unsigned int (*fn)(const struct kvm_vcpu *,
2724
const struct sys_reg_desc *))
2725
{
2726
return el2_visibility(vcpu, rd) ?: fn(vcpu, rd);
2727
}
2728
2729
static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
2730
const struct sys_reg_desc *rd)
2731
{
2732
return __el2_visibility(vcpu, rd, sve_visibility);
2733
}
2734
2735
static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
2736
const struct sys_reg_desc *rd)
2737
{
2738
if (el2_visibility(vcpu, rd) == 0 &&
2739
kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
2740
return 0;
2741
2742
return REG_HIDDEN;
2743
}
2744
2745
static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu,
2746
const struct sys_reg_desc *rd)
2747
{
2748
if (kvm_has_sctlr2(vcpu->kvm))
2749
return 0;
2750
2751
return REG_HIDDEN;
2752
}
2753
2754
static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu,
2755
const struct sys_reg_desc *rd)
2756
{
2757
return __el2_visibility(vcpu, rd, sctlr2_visibility);
2758
}
2759
2760
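/*
 * ZCR_EL2 trap handler: forward the trap if the guest hypervisor has
 * trapped SVE, return the stored value on reads, and on writes clamp
 * the requested vector length to the vCPU's maximum before storing it.
 */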
static bool access_zcr_el2(struct kvm_vcpu *vcpu,
2761
struct sys_reg_params *p,
2762
const struct sys_reg_desc *r)
2763
{
2764
unsigned int vq;
2765
2766
if (guest_hyp_sve_traps_enabled(vcpu)) {
2767
kvm_inject_nested_sve_trap(vcpu);
2768
return false;
2769
}
2770
2771
if (!p->is_write) {
2772
p->regval = __vcpu_sys_reg(vcpu, ZCR_EL2);
2773
return true;
2774
}
2775
2776
vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
2777
vq = min(vq, vcpu_sve_max_vq(vcpu));
2778
__vcpu_assign_sys_reg(vcpu, ZCR_EL2, vq - 1);
2779
return true;
2780
}
2781
2782
static bool access_gic_vtr(struct kvm_vcpu *vcpu,
2783
struct sys_reg_params *p,
2784
const struct sys_reg_desc *r)
2785
{
2786
if (p->is_write)
2787
return write_to_read_only(vcpu, p, r);
2788
2789
p->regval = kvm_get_guest_vtr_el2();
2790
2791
return true;
2792
}
2793
2794
static bool access_gic_misr(struct kvm_vcpu *vcpu,
2795
struct sys_reg_params *p,
2796
const struct sys_reg_desc *r)
2797
{
2798
if (p->is_write)
2799
return write_to_read_only(vcpu, p, r);
2800
2801
p->regval = vgic_v3_get_misr(vcpu);
2802
2803
return true;
2804
}
2805
2806
static bool access_gic_eisr(struct kvm_vcpu *vcpu,
2807
struct sys_reg_params *p,
2808
const struct sys_reg_desc *r)
2809
{
2810
if (p->is_write)
2811
return write_to_read_only(vcpu, p, r);
2812
2813
p->regval = vgic_v3_get_eisr(vcpu);
2814
2815
return true;
2816
}
2817
2818
static bool access_gic_elrsr(struct kvm_vcpu *vcpu,
2819
struct sys_reg_params *p,
2820
const struct sys_reg_desc *r)
2821
{
2822
if (p->is_write)
2823
return write_to_read_only(vcpu, p, r);
2824
2825
p->regval = vgic_v3_get_elrsr(vcpu);
2826
2827
return true;
2828
}
2829
2830
static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
2831
const struct sys_reg_desc *rd)
2832
{
2833
if (kvm_has_s1poe(vcpu->kvm))
2834
return 0;
2835
2836
return REG_HIDDEN;
2837
}
2838
2839
static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu,
2840
const struct sys_reg_desc *rd)
2841
{
2842
return __el2_visibility(vcpu, rd, s1poe_visibility);
2843
}
2844
2845
static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu,
2846
const struct sys_reg_desc *rd)
2847
{
2848
if (kvm_has_tcr2(vcpu->kvm))
2849
return 0;
2850
2851
return REG_HIDDEN;
2852
}
2853
2854
static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
2855
const struct sys_reg_desc *rd)
2856
{
2857
return __el2_visibility(vcpu, rd, tcr2_visibility);
2858
}
2859
2860
static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu,
2861
const struct sys_reg_desc *rd)
2862
{
2863
if (el2_visibility(vcpu, rd) == 0 &&
2864
kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2))
2865
return 0;
2866
2867
return REG_HIDDEN;
2868
}
2869
2870
static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu,
2871
const struct sys_reg_desc *rd)
2872
{
2873
if (el2_visibility(vcpu, rd) == 0 &&
2874
kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP))
2875
return 0;
2876
2877
return REG_HIDDEN;
2878
}
2879
2880
static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
2881
const struct sys_reg_desc *rd)
2882
{
2883
if (kvm_has_s1pie(vcpu->kvm))
2884
return 0;
2885
2886
return REG_HIDDEN;
2887
}
2888
2889
static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
2890
const struct sys_reg_desc *rd)
2891
{
2892
return __el2_visibility(vcpu, rd, s1pie_visibility);
2893
}
2894
2895
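/*
 * The EL2 virtual timer registers only make sense for a VHE-capable
 * guest hypervisor: hide them unless NV is enabled and the vCPU was
 * not restricted to a non-VHE (E2H == 0) EL2.
 */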
static unsigned int cnthv_visibility(const struct kvm_vcpu *vcpu,
2896
const struct sys_reg_desc *rd)
2897
{
2898
if (vcpu_has_nv(vcpu) &&
2899
!vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2_E2H0))
2900
return 0;
2901
2902
return REG_HIDDEN;
2903
}
2904
2905
static bool access_mdcr(struct kvm_vcpu *vcpu,
2906
struct sys_reg_params *p,
2907
const struct sys_reg_desc *r)
2908
{
2909
u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2);
2910
2911
if (!p->is_write) {
2912
p->regval = old;
2913
return true;
2914
}
2915
2916
val = p->regval;
2917
hpmn = FIELD_GET(MDCR_EL2_HPMN, val);
2918
2919
/*
2920
* If HPMN is out of bounds, limit it to what we actually
2921
* support. This matches the UNKNOWN definition of the field
2922
* in that case, and keeps the emulation simple. Sort of.
2923
*/
2924
if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
2925
hpmn = vcpu->kvm->arch.nr_pmu_counters;
2926
u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN);
2927
}
2928
2929
__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
2930
2931
/*
2932
* Request a reload of the PMU to enable/disable the counters
2933
* affected by HPME.
2934
*/
2935
if ((old ^ val) & MDCR_EL2_HPME)
2936
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
2937
2938
return true;
2939
}
2940
2941
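/*
 * RAS register accesses are emulated as RAZ/WI, but only when the VM
 * advertises the required level of RAS support (RASv1p1 for the
 * ERXPFG* and ERXMISC{2,3} registers); otherwise inject an UNDEF.
 */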
static bool access_ras(struct kvm_vcpu *vcpu,
2942
struct sys_reg_params *p,
2943
const struct sys_reg_desc *r)
2944
{
2945
struct kvm *kvm = vcpu->kvm;
2946
2947
switch(reg_to_encoding(r)) {
2948
case SYS_ERXPFGCDN_EL1:
2949
case SYS_ERXPFGCTL_EL1:
2950
case SYS_ERXPFGF_EL1:
2951
case SYS_ERXMISC2_EL1:
2952
case SYS_ERXMISC3_EL1:
2953
if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
2954
(kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
2955
kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
2956
kvm_inject_undefined(vcpu);
2957
return false;
2958
}
2959
break;
2960
default:
2961
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
2962
kvm_inject_undefined(vcpu);
2963
return false;
2964
}
2965
}
2966
2967
return trap_raz_wi(vcpu, p, r);
2968
}
2969
2970
/*
2971
* For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
2972
* AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
2973
* The values made visible to userspace were the register values of the boot
2974
* CPU.
2975
*
2976
* At the same time, reads from these registers at EL1 previously were not
2977
* trapped, allowing the guest to read the actual hardware value. On big.LITTLE
2978
* machines, this means the VM can see different values depending on where a
2979
* given vCPU got scheduled.
2980
*
2981
* These registers are now trapped as collateral damage from SME, and what
2982
* follows attempts to give a user / guest view consistent with the existing
2983
* ABI.
2984
*/
2985
static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
2986
struct sys_reg_params *p,
2987
const struct sys_reg_desc *r)
2988
{
2989
if (p->is_write)
2990
return write_to_read_only(vcpu, p, r);
2991
2992
/*
2993
* Return the VM-scoped implementation ID register values if userspace
2994
* has made them writable.
2995
*/
2996
if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
2997
return access_id_reg(vcpu, p, r);
2998
2999
/*
3000
* Otherwise, fall back to the old behavior of returning the value of
3001
* the current CPU.
3002
*/
3003
switch (reg_to_encoding(r)) {
3004
case SYS_REVIDR_EL1:
3005
p->regval = read_sysreg(revidr_el1);
3006
break;
3007
case SYS_AIDR_EL1:
3008
p->regval = read_sysreg(aidr_el1);
3009
break;
3010
default:
3011
WARN_ON_ONCE(1);
3012
}
3013
3014
return true;
3015
}
3016
3017
static u64 __ro_after_init boot_cpu_midr_val;
3018
static u64 __ro_after_init boot_cpu_revidr_val;
3019
static u64 __ro_after_init boot_cpu_aidr_val;
3020
3021
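/*
 * Snapshot the boot CPU's MIDR/REVIDR/AIDR so they can serve as the
 * default values of the implementation ID registers.
 */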
static void init_imp_id_regs(void)
3022
{
3023
boot_cpu_midr_val = read_sysreg(midr_el1);
3024
boot_cpu_revidr_val = read_sysreg(revidr_el1);
3025
boot_cpu_aidr_val = read_sysreg(aidr_el1);
3026
}
3027
3028
static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
3029
{
3030
switch (reg_to_encoding(r)) {
3031
case SYS_MIDR_EL1:
3032
return boot_cpu_midr_val;
3033
case SYS_REVIDR_EL1:
3034
return boot_cpu_revidr_val;
3035
case SYS_AIDR_EL1:
3036
return boot_cpu_aidr_val;
3037
default:
3038
KVM_BUG_ON(1, vcpu->kvm);
3039
return 0;
3040
}
3041
}
3042
3043
static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
3044
u64 val)
3045
{
3046
struct kvm *kvm = vcpu->kvm;
3047
u64 expected;
3048
3049
guard(mutex)(&kvm->arch.config_lock);
3050
3051
expected = read_id_reg(vcpu, r);
3052
if (expected == val)
3053
return 0;
3054
3055
if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
3056
return -EINVAL;
3057
3058
/*
3059
* Once the VM has started the ID registers are immutable. Reject the
3060
* write if userspace tries to change it.
3061
*/
3062
if (kvm_vm_has_ran_once(kvm))
3063
return -EBUSY;
3064
3065
/*
3066
* Any value is allowed for the implementation ID registers so long as
3067
* it is within the writable mask.
3068
*/
3069
if ((val & r->val) != val)
3070
return -EINVAL;
3071
3072
kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
3073
return 0;
3074
}
3075
3076
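/*
 * Descriptor for the implementation ID registers (MIDR_EL1, REVIDR_EL1,
 * AIDR_EL1): reset to the boot CPU's value, and writable by userspace
 * within 'mask' only if the VM opted in to writable implementation ID
 * registers before it first ran.
 */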
#define IMPLEMENTATION_ID(reg, mask) { \
3077
SYS_DESC(SYS_##reg), \
3078
.access = access_imp_id_reg, \
3079
.get_user = get_id_reg, \
3080
.set_user = set_imp_id_reg, \
3081
.reset = reset_imp_id_reg, \
3082
.val = mask, \
3083
}
3084
3085
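/*
 * MDCR_EL2 resets with HPMN set to the number of PMU counters exposed
 * to the guest and all other fields zero.
 */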
static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
3086
{
3087
__vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
3088
return vcpu->kvm->arch.nr_pmu_counters;
3089
}
3090
3091
/*
3092
* Architected system registers.
3093
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
3094
*
3095
* Debug handling: We do trap most, if not all, debug-related system
3096
* registers. The implementation is good enough to ensure that a guest
3097
* can use these with minimal performance degradation. The drawback is
3098
* that we don't implement any of the external debug architecture.
3099
* This should be revisited if we ever encounter a more demanding
3100
* guest...
3101
*/
3102
static const struct sys_reg_desc sys_reg_descs[] = {
3103
DBG_BCR_BVR_WCR_WVR_EL1(0),
3104
DBG_BCR_BVR_WCR_WVR_EL1(1),
3105
{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
3106
{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
3107
DBG_BCR_BVR_WCR_WVR_EL1(2),
3108
DBG_BCR_BVR_WCR_WVR_EL1(3),
3109
DBG_BCR_BVR_WCR_WVR_EL1(4),
3110
DBG_BCR_BVR_WCR_WVR_EL1(5),
3111
DBG_BCR_BVR_WCR_WVR_EL1(6),
3112
DBG_BCR_BVR_WCR_WVR_EL1(7),
3113
DBG_BCR_BVR_WCR_WVR_EL1(8),
3114
DBG_BCR_BVR_WCR_WVR_EL1(9),
3115
DBG_BCR_BVR_WCR_WVR_EL1(10),
3116
DBG_BCR_BVR_WCR_WVR_EL1(11),
3117
DBG_BCR_BVR_WCR_WVR_EL1(12),
3118
DBG_BCR_BVR_WCR_WVR_EL1(13),
3119
DBG_BCR_BVR_WCR_WVR_EL1(14),
3120
DBG_BCR_BVR_WCR_WVR_EL1(15),
3121
3122
{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
3123
{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
3124
{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
3125
OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
3126
{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
3127
{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
3128
{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
3129
{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
3130
{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
3131
3132
{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
3133
{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
3134
// DBGDTR[TR]X_EL0 share the same encoding
3135
{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
3136
3137
{ SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
3138
3139
IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
3140
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
3141
IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
3142
3143
/*
3144
* ID regs: all ID_SANITISED() entries here must have corresponding
3145
* entries in arm64_ftr_regs[].
3146
*/
3147
3148
/* AArch64 mappings of the AArch32 ID registers */
3149
/* CRm=1 */
3150
AA32_ID_WRITABLE(ID_PFR0_EL1),
3151
AA32_ID_WRITABLE(ID_PFR1_EL1),
3152
{ SYS_DESC(SYS_ID_DFR0_EL1),
3153
.access = access_id_reg,
3154
.get_user = get_id_reg,
3155
.set_user = set_id_dfr0_el1,
3156
.visibility = aa32_id_visibility,
3157
.reset = read_sanitised_id_dfr0_el1,
3158
.val = GENMASK(31, 0) },
3159
ID_HIDDEN(ID_AFR0_EL1),
3160
AA32_ID_WRITABLE(ID_MMFR0_EL1),
3161
AA32_ID_WRITABLE(ID_MMFR1_EL1),
3162
AA32_ID_WRITABLE(ID_MMFR2_EL1),
3163
AA32_ID_WRITABLE(ID_MMFR3_EL1),
3164
3165
/* CRm=2 */
3166
AA32_ID_WRITABLE(ID_ISAR0_EL1),
3167
AA32_ID_WRITABLE(ID_ISAR1_EL1),
3168
AA32_ID_WRITABLE(ID_ISAR2_EL1),
3169
AA32_ID_WRITABLE(ID_ISAR3_EL1),
3170
AA32_ID_WRITABLE(ID_ISAR4_EL1),
3171
AA32_ID_WRITABLE(ID_ISAR5_EL1),
3172
AA32_ID_WRITABLE(ID_MMFR4_EL1),
3173
AA32_ID_WRITABLE(ID_ISAR6_EL1),
3174
3175
/* CRm=3 */
3176
AA32_ID_WRITABLE(MVFR0_EL1),
3177
AA32_ID_WRITABLE(MVFR1_EL1),
3178
AA32_ID_WRITABLE(MVFR2_EL1),
3179
ID_UNALLOCATED(3,3),
3180
AA32_ID_WRITABLE(ID_PFR2_EL1),
3181
ID_HIDDEN(ID_DFR1_EL1),
3182
AA32_ID_WRITABLE(ID_MMFR5_EL1),
3183
ID_UNALLOCATED(3,7),
3184
3185
/* AArch64 ID registers */
3186
/* CRm=4 */
3187
ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
3188
~(ID_AA64PFR0_EL1_AMU |
3189
ID_AA64PFR0_EL1_MPAM |
3190
ID_AA64PFR0_EL1_SVE |
3191
ID_AA64PFR0_EL1_AdvSIMD |
3192
ID_AA64PFR0_EL1_FP)),
3193
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
3194
~(ID_AA64PFR1_EL1_PFAR |
3195
ID_AA64PFR1_EL1_MTEX |
3196
ID_AA64PFR1_EL1_THE |
3197
ID_AA64PFR1_EL1_GCS |
3198
ID_AA64PFR1_EL1_MTE_frac |
3199
ID_AA64PFR1_EL1_NMI |
3200
ID_AA64PFR1_EL1_RNDR_trap |
3201
ID_AA64PFR1_EL1_SME |
3202
ID_AA64PFR1_EL1_RES0 |
3203
ID_AA64PFR1_EL1_MPAM_frac |
3204
ID_AA64PFR1_EL1_MTE)),
3205
ID_WRITABLE(ID_AA64PFR2_EL1,
3206
ID_AA64PFR2_EL1_FPMR |
3207
ID_AA64PFR2_EL1_MTEFAR |
3208
ID_AA64PFR2_EL1_MTESTOREONLY),
3209
ID_UNALLOCATED(4,3),
3210
ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
3211
ID_HIDDEN(ID_AA64SMFR0_EL1),
3212
ID_UNALLOCATED(4,6),
3213
ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
3214
3215
/* CRm=5 */
3216
/*
3217
* Prior to FEAT_Debugv8.9, the architecture defines context-aware
3218
* breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
3219
* KVM does not trap + emulate the breakpoint registers, and as such
3220
* cannot support a layout that misaligns with the underlying hardware.
3221
* While it may be possible to describe a subset that aligns with
3222
* hardware, just prevent changes to BRPs and CTX_CMPs altogether for
3223
* simplicity.
3224
*
3225
* See DDI0487K.a, section D2.8.3 Breakpoint types and linking
3226
* of breakpoints for more details.
3227
*/
3228
ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
3229
ID_AA64DFR0_EL1_DoubleLock_MASK |
3230
ID_AA64DFR0_EL1_WRPs_MASK |
3231
ID_AA64DFR0_EL1_PMUVer_MASK |
3232
ID_AA64DFR0_EL1_DebugVer_MASK),
3233
ID_SANITISED(ID_AA64DFR1_EL1),
3234
ID_UNALLOCATED(5,2),
3235
ID_UNALLOCATED(5,3),
3236
ID_HIDDEN(ID_AA64AFR0_EL1),
3237
ID_HIDDEN(ID_AA64AFR1_EL1),
3238
ID_UNALLOCATED(5,6),
3239
ID_UNALLOCATED(5,7),
3240
3241
/* CRm=6 */
3242
ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
3243
ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
3244
ID_AA64ISAR1_EL1_GPA |
3245
ID_AA64ISAR1_EL1_API |
3246
ID_AA64ISAR1_EL1_APA)),
3247
ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
3248
ID_AA64ISAR2_EL1_APA3 |
3249
ID_AA64ISAR2_EL1_GPA3)),
3250
ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
3251
ID_AA64ISAR3_EL1_LSFE |
3252
ID_AA64ISAR3_EL1_FAMINMAX)),
3253
ID_UNALLOCATED(6,4),
3254
ID_UNALLOCATED(6,5),
3255
ID_UNALLOCATED(6,6),
3256
ID_UNALLOCATED(6,7),
3257
3258
/* CRm=7 */
3259
ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1,
3260
~(ID_AA64MMFR0_EL1_RES0 |
3261
ID_AA64MMFR0_EL1_ASIDBITS)),
3262
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
3263
ID_AA64MMFR1_EL1_XNX |
3264
ID_AA64MMFR1_EL1_VH |
3265
ID_AA64MMFR1_EL1_VMIDBits)),
3266
ID_FILTERED(ID_AA64MMFR2_EL1,
3267
id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
3268
ID_AA64MMFR2_EL1_EVT |
3269
ID_AA64MMFR2_EL1_FWB |
3270
ID_AA64MMFR2_EL1_IDS |
3271
ID_AA64MMFR2_EL1_NV |
3272
ID_AA64MMFR2_EL1_CCIDX)),
3273
ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
3274
ID_AA64MMFR3_EL1_SCTLRX |
3275
ID_AA64MMFR3_EL1_S1PIE |
3276
ID_AA64MMFR3_EL1_S1POE)),
3277
ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
3278
ID_UNALLOCATED(7,5),
3279
ID_UNALLOCATED(7,6),
3280
ID_UNALLOCATED(7,7),
3281
3282
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
3283
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
3284
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
3285
{ SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0,
3286
.visibility = sctlr2_visibility },
3287
3288
MTE_REG(RGSR_EL1),
3289
MTE_REG(GCR_EL1),
3290
3291
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
3292
{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
3293
{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
3294
{ SYS_DESC(SYS_SMCR_EL1), undef_access },
3295
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
3296
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
3297
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
3298
{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0,
3299
.visibility = tcr2_visibility },
3300
3301
PTRAUTH_KEY(APIA),
3302
PTRAUTH_KEY(APIB),
3303
PTRAUTH_KEY(APDA),
3304
PTRAUTH_KEY(APDB),
3305
PTRAUTH_KEY(APGA),
3306
3307
{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
3308
{ SYS_DESC(SYS_ELR_EL1), access_elr},
3309
3310
{ SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
3311
3312
{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
3313
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
3314
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
3315
3316
{ SYS_DESC(SYS_ERRIDR_EL1), access_ras },
3317
{ SYS_DESC(SYS_ERRSELR_EL1), access_ras },
3318
{ SYS_DESC(SYS_ERXFR_EL1), access_ras },
3319
{ SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
3320
{ SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
3321
{ SYS_DESC(SYS_ERXADDR_EL1), access_ras },
3322
{ SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
3323
{ SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
3324
{ SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
3325
{ SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
3326
{ SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
3327
{ SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
3328
{ SYS_DESC(SYS_ERXMISC3_EL1), access_ras },
3329
3330
MTE_REG(TFSR_EL1),
3331
MTE_REG(TFSRE0_EL1),
3332
3333
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
3334
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
3335
3336
{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
3337
{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
3338
{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
3339
{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
3340
{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
3341
{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
3342
{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
3343
{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
3344
{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
3345
{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
3346
{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
3347
{ SYS_DESC(SYS_PMSDSFR_EL1), undef_access },
3348
/* PMBIDR_EL1 is not trapped */
3349
3350
{ PMU_SYS_REG(PMINTENSET_EL1),
3351
.access = access_pminten, .reg = PMINTENSET_EL1,
3352
.get_user = get_pmreg, .set_user = set_pmreg },
3353
{ PMU_SYS_REG(PMINTENCLR_EL1),
3354
.access = access_pminten, .reg = PMINTENSET_EL1,
3355
.get_user = get_pmreg, .set_user = set_pmreg },
3356
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
3357
3358
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
3359
{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1,
3360
.visibility = s1pie_visibility },
3361
{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1,
3362
.visibility = s1pie_visibility },
3363
{ SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
3364
.visibility = s1poe_visibility },
3365
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
3366
3367
{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
3368
{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
3369
{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
3370
{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
3371
{ SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
3372
{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
3373
3374
{ SYS_DESC(SYS_MPAM1_EL1), undef_access },
3375
{ SYS_DESC(SYS_MPAM0_EL1), undef_access },
3376
{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
3377
{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
3378
3379
{ SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
3380
{ SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
3381
{ SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
3382
{ SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
3383
{ SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
3384
{ SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
3385
{ SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
3386
{ SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
3387
{ SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
3388
{ SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
3389
{ SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
3390
{ SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
3391
{ SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
3392
{ SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
3393
{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
3394
{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
3395
{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
3396
{ SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
3397
{ SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
3398
{ SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
3399
{ SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
3400
{ SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
3401
{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
3402
{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
3403
{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
3404
3405
{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
3406
{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
3407
3408
{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },
3409
3410
{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
3411
3412
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
3413
3414
{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
3415
{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
3416
.set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
3417
{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
3418
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
3419
IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
3420
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
3421
ID_FILTERED(CTR_EL0, ctr_el0,
3422
CTR_EL0_DIC_MASK |
3423
CTR_EL0_IDC_MASK |
3424
CTR_EL0_DminLine_MASK |
3425
CTR_EL0_L1Ip_MASK |
3426
CTR_EL0_IminLine_MASK),
3427
{ SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
3428
{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
3429
3430
{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
3431
.reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
3432
{ PMU_SYS_REG(PMCNTENSET_EL0),
3433
.access = access_pmcnten, .reg = PMCNTENSET_EL0,
3434
.get_user = get_pmreg, .set_user = set_pmreg },
3435
{ PMU_SYS_REG(PMCNTENCLR_EL0),
3436
.access = access_pmcnten, .reg = PMCNTENSET_EL0,
3437
.get_user = get_pmreg, .set_user = set_pmreg },
3438
{ PMU_SYS_REG(PMOVSCLR_EL0),
3439
.access = access_pmovs, .reg = PMOVSSET_EL0,
3440
.get_user = get_pmreg, .set_user = set_pmreg },
3441
/*
3442
* PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
3443
* (pointlessly) advertised in the past...
3444
*/
3445
{ PMU_SYS_REG(PMSWINC_EL0),
3446
.get_user = get_raz_reg, .set_user = set_wi_reg,
3447
.access = access_pmswinc, .reset = NULL },
3448
{ PMU_SYS_REG(PMSELR_EL0),
3449
.access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
3450
{ PMU_SYS_REG(PMCEID0_EL0),
3451
.access = access_pmceid, .reset = NULL },
3452
{ PMU_SYS_REG(PMCEID1_EL0),
3453
.access = access_pmceid, .reset = NULL },
3454
{ PMU_SYS_REG(PMCCNTR_EL0),
3455
.access = access_pmu_evcntr, .reset = reset_unknown,
3456
.reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
3457
.set_user = set_pmu_evcntr },
3458
{ PMU_SYS_REG(PMXEVTYPER_EL0),
3459
.access = access_pmu_evtyper, .reset = NULL },
3460
{ PMU_SYS_REG(PMXEVCNTR_EL0),
3461
.access = access_pmu_evcntr, .reset = NULL },
3462
/*
3463
* PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
3464
* in 32bit mode. Here we choose to reset it as zero for consistency.
3465
*/
3466
{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
3467
.reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
3468
{ PMU_SYS_REG(PMOVSSET_EL0),
3469
.access = access_pmovs, .reg = PMOVSSET_EL0,
3470
.get_user = get_pmreg, .set_user = set_pmreg },
3471
3472
{ SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
3473
.visibility = s1poe_visibility },
3474
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
3475
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
3476
{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
3477
3478
{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
3479
3480
{ SYS_DESC(SYS_AMCR_EL0), undef_access },
3481
{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
3482
{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
3483
{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
3484
{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
3485
{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
3486
{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
3487
{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
3488
AMU_AMEVCNTR0_EL0(0),
3489
AMU_AMEVCNTR0_EL0(1),
3490
AMU_AMEVCNTR0_EL0(2),
3491
AMU_AMEVCNTR0_EL0(3),
3492
AMU_AMEVCNTR0_EL0(4),
3493
AMU_AMEVCNTR0_EL0(5),
3494
AMU_AMEVCNTR0_EL0(6),
3495
AMU_AMEVCNTR0_EL0(7),
3496
AMU_AMEVCNTR0_EL0(8),
3497
AMU_AMEVCNTR0_EL0(9),
3498
AMU_AMEVCNTR0_EL0(10),
3499
AMU_AMEVCNTR0_EL0(11),
3500
AMU_AMEVCNTR0_EL0(12),
3501
AMU_AMEVCNTR0_EL0(13),
3502
AMU_AMEVCNTR0_EL0(14),
3503
AMU_AMEVCNTR0_EL0(15),
3504
AMU_AMEVTYPER0_EL0(0),
3505
AMU_AMEVTYPER0_EL0(1),
3506
AMU_AMEVTYPER0_EL0(2),
3507
AMU_AMEVTYPER0_EL0(3),
3508
AMU_AMEVTYPER0_EL0(4),
3509
AMU_AMEVTYPER0_EL0(5),
3510
AMU_AMEVTYPER0_EL0(6),
3511
AMU_AMEVTYPER0_EL0(7),
3512
AMU_AMEVTYPER0_EL0(8),
3513
AMU_AMEVTYPER0_EL0(9),
3514
AMU_AMEVTYPER0_EL0(10),
3515
AMU_AMEVTYPER0_EL0(11),
3516
AMU_AMEVTYPER0_EL0(12),
3517
AMU_AMEVTYPER0_EL0(13),
3518
AMU_AMEVTYPER0_EL0(14),
3519
AMU_AMEVTYPER0_EL0(15),
3520
AMU_AMEVCNTR1_EL0(0),
3521
AMU_AMEVCNTR1_EL0(1),
3522
AMU_AMEVCNTR1_EL0(2),
3523
AMU_AMEVCNTR1_EL0(3),
3524
AMU_AMEVCNTR1_EL0(4),
3525
AMU_AMEVCNTR1_EL0(5),
3526
AMU_AMEVCNTR1_EL0(6),
3527
AMU_AMEVCNTR1_EL0(7),
3528
AMU_AMEVCNTR1_EL0(8),
3529
AMU_AMEVCNTR1_EL0(9),
3530
AMU_AMEVCNTR1_EL0(10),
3531
AMU_AMEVCNTR1_EL0(11),
3532
AMU_AMEVCNTR1_EL0(12),
3533
AMU_AMEVCNTR1_EL0(13),
3534
AMU_AMEVCNTR1_EL0(14),
3535
AMU_AMEVCNTR1_EL0(15),
3536
AMU_AMEVTYPER1_EL0(0),
3537
AMU_AMEVTYPER1_EL0(1),
3538
AMU_AMEVTYPER1_EL0(2),
3539
AMU_AMEVTYPER1_EL0(3),
3540
AMU_AMEVTYPER1_EL0(4),
3541
AMU_AMEVTYPER1_EL0(5),
3542
AMU_AMEVTYPER1_EL0(6),
3543
AMU_AMEVTYPER1_EL0(7),
3544
AMU_AMEVTYPER1_EL0(8),
3545
AMU_AMEVTYPER1_EL0(9),
3546
AMU_AMEVTYPER1_EL0(10),
3547
AMU_AMEVTYPER1_EL0(11),
3548
AMU_AMEVTYPER1_EL0(12),
3549
AMU_AMEVTYPER1_EL0(13),
3550
AMU_AMEVTYPER1_EL0(14),
3551
AMU_AMEVTYPER1_EL0(15),
3552
3553
{ SYS_DESC(SYS_CNTPCT_EL0), .access = access_arch_timer,
3554
.get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
3555
{ SYS_DESC(SYS_CNTVCT_EL0), .access = access_arch_timer,
3556
.get_user = arch_timer_get_user, .set_user = arch_timer_set_user },
3557
{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
3558
{ SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
3559
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
3560
TIMER_REG(CNTP_CTL_EL0, NULL),
3561
TIMER_REG(CNTP_CVAL_EL0, NULL),
3562
3563
{ SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
3564
TIMER_REG(CNTV_CTL_EL0, NULL),
3565
TIMER_REG(CNTV_CVAL_EL0, NULL),
3566
3567
/* PMEVCNTRn_EL0 */
3568
PMU_PMEVCNTR_EL0(0),
3569
PMU_PMEVCNTR_EL0(1),
3570
PMU_PMEVCNTR_EL0(2),
3571
PMU_PMEVCNTR_EL0(3),
3572
PMU_PMEVCNTR_EL0(4),
3573
PMU_PMEVCNTR_EL0(5),
3574
PMU_PMEVCNTR_EL0(6),
3575
PMU_PMEVCNTR_EL0(7),
3576
PMU_PMEVCNTR_EL0(8),
3577
PMU_PMEVCNTR_EL0(9),
3578
PMU_PMEVCNTR_EL0(10),
3579
PMU_PMEVCNTR_EL0(11),
3580
PMU_PMEVCNTR_EL0(12),
3581
PMU_PMEVCNTR_EL0(13),
3582
PMU_PMEVCNTR_EL0(14),
3583
PMU_PMEVCNTR_EL0(15),
3584
PMU_PMEVCNTR_EL0(16),
3585
PMU_PMEVCNTR_EL0(17),
3586
PMU_PMEVCNTR_EL0(18),
3587
PMU_PMEVCNTR_EL0(19),
3588
PMU_PMEVCNTR_EL0(20),
3589
PMU_PMEVCNTR_EL0(21),
3590
PMU_PMEVCNTR_EL0(22),
3591
PMU_PMEVCNTR_EL0(23),
3592
PMU_PMEVCNTR_EL0(24),
3593
PMU_PMEVCNTR_EL0(25),
3594
PMU_PMEVCNTR_EL0(26),
3595
PMU_PMEVCNTR_EL0(27),
3596
PMU_PMEVCNTR_EL0(28),
3597
PMU_PMEVCNTR_EL0(29),
3598
PMU_PMEVCNTR_EL0(30),
3599
/* PMEVTYPERn_EL0 */
3600
PMU_PMEVTYPER_EL0(0),
3601
PMU_PMEVTYPER_EL0(1),
3602
PMU_PMEVTYPER_EL0(2),
3603
PMU_PMEVTYPER_EL0(3),
3604
PMU_PMEVTYPER_EL0(4),
3605
PMU_PMEVTYPER_EL0(5),
3606
PMU_PMEVTYPER_EL0(6),
3607
PMU_PMEVTYPER_EL0(7),
3608
PMU_PMEVTYPER_EL0(8),
3609
PMU_PMEVTYPER_EL0(9),
3610
PMU_PMEVTYPER_EL0(10),
3611
PMU_PMEVTYPER_EL0(11),
3612
PMU_PMEVTYPER_EL0(12),
3613
PMU_PMEVTYPER_EL0(13),
3614
PMU_PMEVTYPER_EL0(14),
3615
PMU_PMEVTYPER_EL0(15),
3616
PMU_PMEVTYPER_EL0(16),
3617
PMU_PMEVTYPER_EL0(17),
3618
PMU_PMEVTYPER_EL0(18),
3619
PMU_PMEVTYPER_EL0(19),
3620
PMU_PMEVTYPER_EL0(20),
3621
PMU_PMEVTYPER_EL0(21),
3622
PMU_PMEVTYPER_EL0(22),
3623
PMU_PMEVTYPER_EL0(23),
3624
PMU_PMEVTYPER_EL0(24),
3625
PMU_PMEVTYPER_EL0(25),
3626
PMU_PMEVTYPER_EL0(26),
3627
PMU_PMEVTYPER_EL0(27),
3628
PMU_PMEVTYPER_EL0(28),
3629
PMU_PMEVTYPER_EL0(29),
3630
PMU_PMEVTYPER_EL0(30),
3631
/*
3632
* PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
3633
* in 32bit mode. Here we choose to reset it as zero for consistency.
3634
*/
3635
{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
3636
.reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
3637
3638
EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
3639
EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
3640
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
3641
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
3642
EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0,
3643
sctlr2_el2_visibility),
3644
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
3645
EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
3646
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
3647
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
3648
EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility),
3649
EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility),
3650
EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
3651
EL2_REG_VNCR(HACR_EL2, reset_val, 0),
3652
3653
EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0,
3654
sve_el2_visibility),
3655
3656
EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
3657
3658
EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
3659
EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
3660
EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
3661
EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1,
3662
tcr2_el2_visibility),
3663
EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
3664
EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
3665
EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0,
3666
vncr_el2_visibility),
3667
3668
{ SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
3669
EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility),
3670
EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility),
3671
EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility),
3672
EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility),
3673
EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility),
3674
EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility),
3675
EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility),
3676
EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility),
3677
EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
3678
EL2_REG_REDIR(ELR_EL2, reset_val, 0),
3679
{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
3680
3681
/* AArch32 SPSR_* are RES0 if trapped from a NV guest */
3682
{ SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
3683
{ SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
3684
{ SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
3685
{ SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
3686
3687
{ SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
3688
EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
3689
EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
3690
EL2_REG_REDIR(ESR_EL2, reset_val, 0),
3691
EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0),
3692
{ SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
3693
3694
EL2_REG_REDIR(FAR_EL2, reset_val, 0),
3695
EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
3696
3697
EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
3698
EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0,
3699
s1pie_el2_visibility),
3700
EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0,
3701
s1pie_el2_visibility),
3702
EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
3703
s1poe_el2_visibility),
3704
EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
3705
{ SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
3706
{ SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
3707
{ SYS_DESC(SYS_MPAM2_EL2), undef_access },
3708
{ SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
3709
{ SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
3710
{ SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
3711
{ SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
3712
{ SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
3713
{ SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
3714
{ SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
3715
{ SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
3716
3717
EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
3718
{ SYS_DESC(SYS_RVBAR_EL2), undef_access },
3719
{ SYS_DESC(SYS_RMR_EL2), undef_access },
3720
EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0),
3721
3722
EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2),
3723
EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2),
3724
EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2),
3725
EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2),
3726
EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2),
3727
EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2),
3728
EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2),
3729
EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2),
3730
3731
{ SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
3732
3733
EL2_REG_VNCR_GICv3(ICH_HCR_EL2),
3734
{ SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
3735
{ SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
3736
{ SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
3737
{ SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
3738
EL2_REG_VNCR_GICv3(ICH_VMCR_EL2),
3739
3740
EL2_REG_VNCR_GICv3(ICH_LR0_EL2),
3741
EL2_REG_VNCR_GICv3(ICH_LR1_EL2),
3742
EL2_REG_VNCR_GICv3(ICH_LR2_EL2),
3743
EL2_REG_VNCR_GICv3(ICH_LR3_EL2),
3744
EL2_REG_VNCR_GICv3(ICH_LR4_EL2),
3745
EL2_REG_VNCR_GICv3(ICH_LR5_EL2),
3746
EL2_REG_VNCR_GICv3(ICH_LR6_EL2),
3747
EL2_REG_VNCR_GICv3(ICH_LR7_EL2),
3748
EL2_REG_VNCR_GICv3(ICH_LR8_EL2),
3749
EL2_REG_VNCR_GICv3(ICH_LR9_EL2),
3750
EL2_REG_VNCR_GICv3(ICH_LR10_EL2),
3751
EL2_REG_VNCR_GICv3(ICH_LR11_EL2),
3752
EL2_REG_VNCR_GICv3(ICH_LR12_EL2),
3753
EL2_REG_VNCR_GICv3(ICH_LR13_EL2),
3754
EL2_REG_VNCR_GICv3(ICH_LR14_EL2),
3755
EL2_REG_VNCR_GICv3(ICH_LR15_EL2),
3756
3757
EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
3758
EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
3759
3760
EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
3761
EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
3762
{ SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
3763
TIMER_REG(CNTHP_CTL_EL2, el2_visibility),
3764
TIMER_REG(CNTHP_CVAL_EL2, el2_visibility),
3765
3766
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer, .visibility = cnthv_visibility },
3767
TIMER_REG(CNTHV_CTL_EL2, cnthv_visibility),
3768
TIMER_REG(CNTHV_CVAL_EL2, cnthv_visibility),
3769
3770
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
3771
3772
{ SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer },
3773
{ SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer },
3774
{ SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer },
3775
3776
{ SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer },
3777
{ SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer },
3778
{ SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer },
3779
3780
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
3781
};
3782
3783
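/*
* Emulation of the AT S1E1{R,W,RP,WP} and AT S1E0{R,W} address
* translation instructions trapped from a vEL2 guest: rebuild the
* instruction encoding from the trap parameters and hand it, together
* with the input address in p->regval, to the EL1/EL0 AT walker.
* Returning false skips the PC increment done in perform_access().
*/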
static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3784
const struct sys_reg_desc *r)
3785
{
3786
u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3787
3788
if (__kvm_at_s1e01(vcpu, op, p->regval))
3789
return false;
3790
3791
return true;
3792
}
3793
3794
static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3795
const struct sys_reg_desc *r)
3796
{
3797
u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3798
3799
/* There is no FGT associated with AT S1E2A :-( */
3800
if (op == OP_AT_S1E2A &&
3801
!kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
3802
kvm_inject_undefined(vcpu);
3803
return false;
3804
}
3805
3806
if (__kvm_at_s1e2(vcpu, op, p->regval))
3807
return false;
3808
3809
return true;
3810
}
3811
3812
static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3813
const struct sys_reg_desc *r)
3814
{
3815
u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3816
3817
if (__kvm_at_s12(vcpu, op, p->regval))
3818
return false;
3819
3820
return true;
3821
}
3822
3823
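/*
* Check whether a trapped stage-1/stage-2 TLBI encoding is actually
* implemented for this guest: the nXS variants require FEAT_XS
* (ID_AA64ISAR1_EL1.XS) and the Outer Shareable variants require
* ID_AA64ISAR0_EL1.TLB to advertise OS support. Callers turn
* unsupported encodings into an UNDEF.
*/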
static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
3824
{
3825
struct kvm *kvm = vcpu->kvm;
3826
u8 CRm = sys_reg_CRm(instr);
3827
3828
if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3829
!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3830
return false;
3831
3832
if (CRm == TLBI_CRm_nROS &&
3833
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3834
return false;
3835
3836
return true;
3837
}
3838
3839
static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3840
const struct sys_reg_desc *r)
3841
{
3842
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3843
3844
if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3845
return undef_access(vcpu, p, r);
3846
3847
write_lock(&vcpu->kvm->mmu_lock);
3848
3849
/*
3850
* Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
3851
* corresponding VMIDs.
3852
*/
3853
kvm_nested_s2_unmap(vcpu->kvm, true);
3854
3855
write_unlock(&vcpu->kvm->mmu_lock);
3856
3857
return true;
3858
}
3859
3860
static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
3861
{
3862
struct kvm *kvm = vcpu->kvm;
3863
u8 CRm = sys_reg_CRm(instr);
3864
u8 Op2 = sys_reg_Op2(instr);
3865
3866
if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3867
!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3868
return false;
3869
3870
if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
3871
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3872
return false;
3873
3874
if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
3875
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3876
return false;
3877
3878
if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
3879
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3880
return false;
3881
3882
return true;
3883
}
3884
3885
/* Only defined here as this is an internal "abstraction" */
3886
union tlbi_info {
3887
struct {
3888
u64 start;
3889
u64 size;
3890
} range;
3891
3892
struct {
3893
u64 addr;
3894
} ipa;
3895
3896
struct {
3897
u64 addr;
3898
u32 encoding;
3899
} va;
3900
};
3901
3902
static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
3903
const union tlbi_info *info)
3904
{
3905
/*
3906
* The unmap operation is allowed to drop the MMU lock and block, which
3907
* means that @mmu could be used for a different context than the one
3908
* currently being invalidated.
3909
*
3910
* This behavior is still safe, as:
3911
*
3912
* 1) The vCPU(s) that recycled the MMU are responsible for invalidating
3913
* the entire MMU before reusing it, which still honors the intent
3914
* of a TLBI.
3915
*
3916
* 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC
3917
* and ERET to the guest), other vCPUs are allowed to use stale
3918
* translations.
3919
*
3920
* 3) Accidentally unmapping an unrelated MMU context is nonfatal, and
3921
* at worst may cause more aborts for shadow stage-2 fills.
3922
*
3923
* Dropping the MMU lock also implies that shadow stage-2 fills could
3924
* happen behind the back of the TLBI. This is still safe, though, as
3925
* the L1 needs to put its stage-2 in a consistent state before doing
3926
* the TLBI.
3927
*/
3928
kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
3929
}
3930
3931
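/*
* TLBI VMALLS12E1(IS/OS): tear down every shadow stage-2 matching the
* VMID currently programmed in the guest's VTTBR_EL2, covering the
* whole IPA space up to the VM's PA limit.
*/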
static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3932
const struct sys_reg_desc *r)
3933
{
3934
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3935
u64 limit, vttbr;
3936
3937
if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3938
return undef_access(vcpu, p, r);
3939
3940
vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3941
limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
3942
3943
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3944
&(union tlbi_info) {
3945
.range = {
3946
.start = 0,
3947
.size = limit,
3948
},
3949
},
3950
s2_mmu_unmap_range);
3951
3952
return true;
3953
}
3954
3955
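/*
* Range-based IPA invalidation (TLBI RIPAS2(L)E1*): decode the range
* descriptor from the register value and unmap the corresponding IPA
* window in every shadow stage-2 tagged with the guest's VMID. The
* TTL hint is deliberately ignored, as explained below.
*/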
static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3956
const struct sys_reg_desc *r)
3957
{
3958
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3959
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3960
u64 base, range;
3961
3962
if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3963
return undef_access(vcpu, p, r);
3964
3965
/*
3966
* Because the shadow S2 structure doesn't necessarily reflect that
3967
* of the guest's S2 (different base granule size, for example), we
3968
* decide to ignore TTL and only use the described range.
3969
*/
3970
base = decode_range_tlbi(p->regval, &range, NULL);
3971
3972
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3973
&(union tlbi_info) {
3974
.range = {
3975
.start = base,
3976
.size = range,
3977
},
3978
},
3979
s2_mmu_unmap_range);
3980
3981
return true;
3982
}
3983
3984
static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
3985
const union tlbi_info *info)
3986
{
3987
unsigned long max_size;
3988
u64 base_addr;
3989
3990
/*
3991
* We drop a number of things from the supplied value:
3992
*
3993
* - NS bit: we're non-secure only.
3994
*
3995
* - IPA[51:48]: We don't support 52bit IPA just yet...
3996
*
3997
* And of course, adjust the IPA to be on an actual address.
3998
*/
3999
base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
4000
max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
4001
base_addr &= ~(max_size - 1);
4002
4003
/*
4004
* See comment in s2_mmu_unmap_range() for why this is allowed to
4005
* reschedule.
4006
*/
4007
kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
4008
}
4009
4010
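/*
* Single-address IPA invalidation (TLBI IPAS2(L)E1*): the supplied
* value is converted to an IPA by s2_mmu_unmap_ipa() and the covering
* shadow stage-2 range is unmapped for the guest's current VMID.
*/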
static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4011
const struct sys_reg_desc *r)
4012
{
4013
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4014
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
4015
4016
if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
4017
return undef_access(vcpu, p, r);
4018
4019
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
4020
&(union tlbi_info) {
4021
.ipa = {
4022
.addr = p->regval,
4023
},
4024
},
4025
s2_mmu_unmap_ipa);
4026
4027
return true;
4028
}
4029
4030
static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
4031
const union tlbi_info *info)
4032
{
4033
WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
4034
}
4035
4036
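/*
* TLBIs targeting the EL2 translation regime are checked against the
* guest's feature set and then forwarded to the common S1E2 TLBI
* handling (see also the comment in handle_tlbi_el1() below).
*/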
static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4037
const struct sys_reg_desc *r)
4038
{
4039
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4040
4041
if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding))
4042
return undef_access(vcpu, p, r);
4043
4044
kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
4045
return true;
4046
}
4047
4048
static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
4049
const struct sys_reg_desc *r)
4050
{
4051
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
4052
4053
/*
4054
* If we're here, this is because we've trapped on an EL1 TLBI
4055
* instruction that affects the EL1 translation regime while
4056
* we're running in a context that doesn't allow us to let the
4057
* HW do its thing (aka vEL2):
4058
*
4059
* - HCR_EL2.E2H == 0 : a non-VHE guest
4060
* - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
4061
*
4062
* Another possibility is that we are invalidating the EL2 context
4063
* using EL1 instructions, but that we landed here because we need
4064
* additional invalidation for structures that are not held in the
4065
* CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In
4066
* that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 }
4067
* as we don't allow an NV-capable L1 in a nVHE configuration.
4068
*
4069
* We don't expect these helpers to ever be called when running
4070
* in a vEL1 context.
4071
*/
4072
4073
WARN_ON(!vcpu_is_el2(vcpu));
4074
4075
if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
4076
return undef_access(vcpu, p, r);
4077
4078
if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) {
4079
kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
4080
return true;
4081
}
4082
4083
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm,
4084
get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)),
4085
&(union tlbi_info) {
4086
.va = {
4087
.addr = p->regval,
4088
.encoding = sys_encoding,
4089
},
4090
},
4091
s2_mmu_tlbi_s1e1);
4092
4093
return true;
4094
}
4095
4096
#define SYS_INSN(insn, access_fn) \
4097
{ \
4098
SYS_DESC(OP_##insn), \
4099
.access = (access_fn), \
4100
}
4101
4102
static struct sys_reg_desc sys_insn_descs[] = {
4103
{ SYS_DESC(SYS_DC_ISW), access_dcsw },
4104
{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
4105
{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
4106
4107
SYS_INSN(AT_S1E1R, handle_at_s1e01),
4108
SYS_INSN(AT_S1E1W, handle_at_s1e01),
4109
SYS_INSN(AT_S1E0R, handle_at_s1e01),
4110
SYS_INSN(AT_S1E0W, handle_at_s1e01),
4111
SYS_INSN(AT_S1E1RP, handle_at_s1e01),
4112
SYS_INSN(AT_S1E1WP, handle_at_s1e01),
4113
4114
{ SYS_DESC(SYS_DC_CSW), access_dcsw },
4115
{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
4116
{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
4117
{ SYS_DESC(SYS_DC_CISW), access_dcsw },
4118
{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
4119
{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
4120
4121
SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
4122
SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
4123
SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
4124
SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
4125
SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
4126
SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
4127
4128
SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
4129
SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
4130
SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
4131
SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
4132
4133
SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
4134
SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
4135
SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
4136
SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
4137
SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
4138
SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
4139
4140
SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
4141
SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
4142
SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
4143
SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
4144
4145
SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
4146
SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
4147
SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
4148
SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
4149
4150
SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
4151
SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
4152
SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
4153
SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
4154
SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
4155
SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
4156
4157
SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
4158
SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
4159
SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
4160
SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
4161
SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
4162
SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
4163
4164
SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
4165
SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
4166
SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
4167
SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
4168
4169
SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
4170
SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
4171
SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
4172
SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
4173
SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
4174
SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
4175
4176
SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
4177
SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
4178
SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
4179
SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
4180
4181
SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
4182
SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
4183
SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
4184
SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
4185
4186
SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
4187
SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
4188
SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
4189
SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
4190
SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
4191
SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
4192
4193
SYS_INSN(AT_S1E2R, handle_at_s1e2),
4194
SYS_INSN(AT_S1E2W, handle_at_s1e2),
4195
SYS_INSN(AT_S12E1R, handle_at_s12),
4196
SYS_INSN(AT_S12E1W, handle_at_s12),
4197
SYS_INSN(AT_S12E0R, handle_at_s12),
4198
SYS_INSN(AT_S12E0W, handle_at_s12),
4199
SYS_INSN(AT_S1E2A, handle_at_s1e2),
4200
4201
SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
4202
SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
4203
SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
4204
SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
4205
4206
SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2),
4207
SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2),
4208
SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
4209
SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2),
4210
SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
4211
4212
SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2),
4213
SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2),
4214
SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2),
4215
SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2),
4216
4217
SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
4218
4219
SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2),
4220
4221
SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
4222
SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
4223
SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
4224
SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
4225
SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
4226
SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
4227
SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
4228
SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
4229
SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
4230
SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2),
4231
SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2),
4232
SYS_INSN(TLBI_RVAE2, handle_tlbi_el2),
4233
SYS_INSN(TLBI_RVALE2, handle_tlbi_el2),
4234
SYS_INSN(TLBI_ALLE2, handle_tlbi_el2),
4235
SYS_INSN(TLBI_VAE2, handle_tlbi_el2),
4236
4237
SYS_INSN(TLBI_ALLE1, handle_alle1is),
4238
4239
SYS_INSN(TLBI_VALE2, handle_tlbi_el2),
4240
4241
SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
4242
4243
SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
4244
SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
4245
SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
4246
SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
4247
4248
SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2),
4249
SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2),
4250
SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
4251
SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2),
4252
SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
4253
4254
SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2),
4255
SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2),
4256
SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2),
4257
SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2),
4258
4259
SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
4260
SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2),
4261
SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
4262
SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
4263
SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
4264
SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
4265
SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
4266
SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
4267
SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
4268
SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
4269
SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
4270
SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2),
4271
SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2),
4272
SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2),
4273
SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2),
4274
SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2),
4275
SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2),
4276
SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
4277
SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2),
4278
SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
4279
};
4280
4281
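/*
* Synthesize a 32-bit DBGDIDR value for AArch32 guests from the
* AArch64 ID_AA64DFR0_EL1 view: breakpoint/watchpoint/context-compare
* counts and the debug version, plus feature bits that depend on
* whether EL3 is advertised to the guest. Writes are ignored.
*/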
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
4282
struct sys_reg_params *p,
4283
const struct sys_reg_desc *r)
4284
{
4285
if (p->is_write) {
4286
return ignore_write(vcpu, p);
4287
} else {
4288
u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
4289
u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
4290
4291
p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
4292
(SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
4293
(SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
4294
(SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
4295
(1 << 15) | (el3 << 14) | (el3 << 12));
4296
return true;
4297
}
4298
}
4299
4300
/*
4301
* AArch32 debug register mappings
4302
*
4303
* AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
4304
* AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
4305
*
4306
* None of the other registers share their location, so treat them as
4307
* if they were 64bit.
4308
*/
4309
#define DBG_BCR_BVR_WCR_WVR(n) \
4310
/* DBGBVRn */ \
4311
{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \
4312
trap_dbg_wb_reg, NULL, n }, \
4313
/* DBGBCRn */ \
4314
{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \
4315
/* DBGWVRn */ \
4316
{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \
4317
/* DBGWCRn */ \
4318
{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n }
4319
4320
#define DBGBXVR(n) \
4321
{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \
4322
trap_dbg_wb_reg, NULL, n }
4323
4324
/*
4325
* Trapped cp14 registers. We generally ignore most of the external
4326
* debug, on the principle that they don't really make sense to a
4327
* guest. Revisit this one day, would this principle change.
4328
*/
4329
static const struct sys_reg_desc cp14_regs[] = {
4330
/* DBGDIDR */
4331
{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
4332
/* DBGDTRRXext */
4333
{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
4334
4335
DBG_BCR_BVR_WCR_WVR(0),
4336
/* DBGDSCRint */
4337
{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
4338
DBG_BCR_BVR_WCR_WVR(1),
4339
/* DBGDCCINT */
4340
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
4341
/* DBGDSCRext */
4342
{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
4343
DBG_BCR_BVR_WCR_WVR(2),
4344
/* DBGDTR[RT]Xint */
4345
{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
4346
/* DBGDTR[RT]Xext */
4347
{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
4348
DBG_BCR_BVR_WCR_WVR(3),
4349
DBG_BCR_BVR_WCR_WVR(4),
4350
DBG_BCR_BVR_WCR_WVR(5),
4351
/* DBGWFAR */
4352
{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
4353
/* DBGOSECCR */
4354
{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
4355
DBG_BCR_BVR_WCR_WVR(6),
4356
/* DBGVCR */
4357
{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
4358
DBG_BCR_BVR_WCR_WVR(7),
4359
DBG_BCR_BVR_WCR_WVR(8),
4360
DBG_BCR_BVR_WCR_WVR(9),
4361
DBG_BCR_BVR_WCR_WVR(10),
4362
DBG_BCR_BVR_WCR_WVR(11),
4363
DBG_BCR_BVR_WCR_WVR(12),
4364
DBG_BCR_BVR_WCR_WVR(13),
4365
DBG_BCR_BVR_WCR_WVR(14),
4366
DBG_BCR_BVR_WCR_WVR(15),
4367
4368
/* DBGDRAR (32bit) */
4369
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
4370
4371
DBGBXVR(0),
4372
/* DBGOSLAR */
4373
{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
4374
DBGBXVR(1),
4375
/* DBGOSLSR */
4376
{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
4377
DBGBXVR(2),
4378
DBGBXVR(3),
4379
/* DBGOSDLR */
4380
{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
4381
DBGBXVR(4),
4382
/* DBGPRCR */
4383
{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
4384
DBGBXVR(5),
4385
DBGBXVR(6),
4386
DBGBXVR(7),
4387
DBGBXVR(8),
4388
DBGBXVR(9),
4389
DBGBXVR(10),
4390
DBGBXVR(11),
4391
DBGBXVR(12),
4392
DBGBXVR(13),
4393
DBGBXVR(14),
4394
DBGBXVR(15),
4395
4396
/* DBGDSAR (32bit) */
4397
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
4398
4399
/* DBGDEVID2 */
4400
{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
4401
/* DBGDEVID1 */
4402
{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
4403
/* DBGDEVID */
4404
{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
4405
/* DBGCLAIMSET */
4406
{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
4407
/* DBGCLAIMCLR */
4408
{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
4409
/* DBGAUTHSTATUS */
4410
{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
4411
};
4412
4413
/* Trapped cp14 64bit registers */
4414
static const struct sys_reg_desc cp14_64_regs[] = {
4415
/* DBGDRAR (64bit) */
4416
{ Op1( 0), CRm( 1), .access = trap_raz_wi },
4417
4418
/* DBGDSAR (64bit) */
4419
{ Op1( 0), CRm( 2), .access = trap_raz_wi },
4420
};
4421
4422
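/*
* Helpers for the trapped CP15 PMU registers: all of them share the
* pmu_visibility() gating, and the per-counter PMEVCNTRn/PMEVTYPERn
* encodings are rebuilt from the counter index, with the low bits of
* CRm carrying n[4:3] and Op2 carrying n[2:0].
*/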
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
4423
AA32(_map), \
4424
Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
4425
.visibility = pmu_visibility
4426
4427
/* Macro to expand the PMEVCNTRn register */
4428
#define PMU_PMEVCNTR(n) \
4429
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4430
(0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4431
.access = access_pmu_evcntr }
4432
4433
/* Macro to expand the PMEVTYPERn register */
4434
#define PMU_PMEVTYPER(n) \
4435
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4436
(0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4437
.access = access_pmu_evtyper }
4438
/*
4439
* Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
4440
* depending on the way they are accessed (as a 32bit or a 64bit
4441
* register).
4442
*/
4443
static const struct sys_reg_desc cp15_regs[] = {
4444
{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
4445
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
4446
/* ACTLR */
4447
{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
4448
/* ACTLR2 */
4449
{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
4450
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4451
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
4452
/* TTBCR */
4453
{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
4454
/* TTBCR2 */
4455
{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
4456
{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
4457
{ CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
4458
/* DFSR */
4459
{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
4460
{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
4461
/* ADFSR */
4462
{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
4463
/* AIFSR */
4464
{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
4465
/* DFAR */
4466
{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
4467
/* IFAR */
4468
{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
4469
4470
/*
4471
* DC{C,I,CI}SW operations:
4472
*/
4473
{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
4474
{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
4475
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
4476
4477
/* PMU */
4478
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
4479
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
4480
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
4481
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
4482
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
4483
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
4484
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
4485
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
4486
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
4487
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
4488
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
4489
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
4490
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
4491
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
4492
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
4493
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
4494
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
4495
/* PMMIR */
4496
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
4497
4498
/* PRRR/MAIR0 */
4499
{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
4500
/* NMRR/MAIR1 */
4501
{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
4502
/* AMAIR0 */
4503
{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
4504
/* AMAIR1 */
4505
{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
4506
4507
{ CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
4508
{ CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
4509
{ CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
4510
{ CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
4511
{ CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
4512
{ CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
4513
{ CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
4514
{ CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
4515
{ CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
4516
{ CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
4517
{ CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
4518
{ CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
4519
{ CP15_SYS_DESC(SYS_ICC_DIR_EL1), access_gic_dir },
4520
{ CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
4521
{ CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
4522
{ CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
4523
{ CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
4524
{ CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
4525
{ CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
4526
{ CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
4527
{ CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
4528
{ CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
4529
4530
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
4531
4532
/* Arch Timers */
4533
{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
4534
{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
4535
4536
/* PMEVCNTRn */
4537
PMU_PMEVCNTR(0),
4538
PMU_PMEVCNTR(1),
4539
PMU_PMEVCNTR(2),
4540
PMU_PMEVCNTR(3),
4541
PMU_PMEVCNTR(4),
4542
PMU_PMEVCNTR(5),
4543
PMU_PMEVCNTR(6),
4544
PMU_PMEVCNTR(7),
4545
PMU_PMEVCNTR(8),
4546
PMU_PMEVCNTR(9),
4547
PMU_PMEVCNTR(10),
4548
PMU_PMEVCNTR(11),
4549
PMU_PMEVCNTR(12),
4550
PMU_PMEVCNTR(13),
4551
PMU_PMEVCNTR(14),
4552
PMU_PMEVCNTR(15),
4553
PMU_PMEVCNTR(16),
4554
PMU_PMEVCNTR(17),
4555
PMU_PMEVCNTR(18),
4556
PMU_PMEVCNTR(19),
4557
PMU_PMEVCNTR(20),
4558
PMU_PMEVCNTR(21),
4559
PMU_PMEVCNTR(22),
4560
PMU_PMEVCNTR(23),
4561
PMU_PMEVCNTR(24),
4562
PMU_PMEVCNTR(25),
4563
PMU_PMEVCNTR(26),
4564
PMU_PMEVCNTR(27),
4565
PMU_PMEVCNTR(28),
4566
PMU_PMEVCNTR(29),
4567
PMU_PMEVCNTR(30),
4568
/* PMEVTYPERn */
4569
PMU_PMEVTYPER(0),
4570
PMU_PMEVTYPER(1),
4571
PMU_PMEVTYPER(2),
4572
PMU_PMEVTYPER(3),
4573
PMU_PMEVTYPER(4),
4574
PMU_PMEVTYPER(5),
4575
PMU_PMEVTYPER(6),
4576
PMU_PMEVTYPER(7),
4577
PMU_PMEVTYPER(8),
4578
PMU_PMEVTYPER(9),
4579
PMU_PMEVTYPER(10),
4580
PMU_PMEVTYPER(11),
4581
PMU_PMEVTYPER(12),
4582
PMU_PMEVTYPER(13),
4583
PMU_PMEVTYPER(14),
4584
PMU_PMEVTYPER(15),
4585
PMU_PMEVTYPER(16),
4586
PMU_PMEVTYPER(17),
4587
PMU_PMEVTYPER(18),
4588
PMU_PMEVTYPER(19),
4589
PMU_PMEVTYPER(20),
4590
PMU_PMEVTYPER(21),
4591
PMU_PMEVTYPER(22),
4592
PMU_PMEVTYPER(23),
4593
PMU_PMEVTYPER(24),
4594
PMU_PMEVTYPER(25),
4595
PMU_PMEVTYPER(26),
4596
PMU_PMEVTYPER(27),
4597
PMU_PMEVTYPER(28),
4598
PMU_PMEVTYPER(29),
4599
PMU_PMEVTYPER(30),
4600
/* PMCCFILTR */
4601
{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
4602
4603
{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
4604
{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
4605
4606
/* CCSIDR2 */
4607
{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
4608
4609
{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
4610
};
4611
4612
static const struct sys_reg_desc cp15_64_regs[] = {
4613
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4614
{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
4615
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
4616
{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
4617
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
4618
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
4619
{ SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer },
4620
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
4621
{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
4622
{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
4623
{ SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer },
4624
};
4625
4626
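/*
* Sanity check run over each table at init time: every entry backed by
* a vcpu sysreg must have a reset method (when requested by the
* caller), and entries must be sorted by encoding, which the lookup
* code relies on.
*/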
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
4627
bool reset_check)
4628
{
4629
unsigned int i;
4630
4631
for (i = 0; i < n; i++) {
4632
if (reset_check && table[i].reg && !table[i].reset) {
4633
kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
4634
&table[i], i, table[i].name);
4635
return false;
4636
}
4637
4638
if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
4639
kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
4640
&table[i], i, table[i - 1].name, table[i].name);
4641
return false;
4642
}
4643
}
4644
4645
return true;
4646
}
4647
4648
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
4649
{
4650
kvm_inject_undefined(vcpu);
4651
return 1;
4652
}
4653
4654
static void perform_access(struct kvm_vcpu *vcpu,
4655
struct sys_reg_params *params,
4656
const struct sys_reg_desc *r)
4657
{
4658
trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
4659
4660
/* Check for regs disabled by runtime config */
4661
if (sysreg_hidden(vcpu, r)) {
4662
kvm_inject_undefined(vcpu);
4663
return;
4664
}
4665
4666
/*
4667
* Not having an accessor means that we have configured a trap
4668
* that we don't know how to handle. This certainly qualifies
4669
* as a gross bug that should be fixed right away.
4670
*/
4671
if (!r->access) {
4672
bad_trap(vcpu, params, r, "register access");
4673
return;
4674
}
4675
4676
/* Skip instruction if instructed so */
4677
if (likely(r->access(vcpu, params, r)))
4678
kvm_incr_pc(vcpu);
4679
}
4680
4681
/*
4682
* emulate_cp -- tries to match a sys_reg access in a handling table, and
4683
* call the corresponding trap handler.
4684
*
4685
* @params: pointer to the descriptor of the access
4686
* @table: array of trap descriptors
4687
* @num: size of the trap descriptor array
4688
*
4689
* Return true if the access has been handled, false if not.
4690
*/
4691
static bool emulate_cp(struct kvm_vcpu *vcpu,
4692
struct sys_reg_params *params,
4693
const struct sys_reg_desc *table,
4694
size_t num)
4695
{
4696
const struct sys_reg_desc *r;
4697
4698
if (!table)
4699
return false; /* Not handled */
4700
4701
r = find_reg(params, table, num);
4702
4703
if (r) {
4704
perform_access(vcpu, params, r);
4705
return true;
4706
}
4707
4708
/* Not handled */
4709
return false;
4710
}
4711
4712
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
4713
struct sys_reg_params *params)
4714
{
4715
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
4716
int cp = -1;
4717
4718
switch (esr_ec) {
4719
case ESR_ELx_EC_CP15_32:
4720
case ESR_ELx_EC_CP15_64:
4721
cp = 15;
4722
break;
4723
case ESR_ELx_EC_CP14_MR:
4724
case ESR_ELx_EC_CP14_64:
4725
cp = 14;
4726
break;
4727
default:
4728
WARN_ON(1);
4729
}
4730
4731
print_sys_reg_msg(params,
4732
"Unsupported guest CP%d access at: %08lx [%08lx]\n",
4733
cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4734
kvm_inject_undefined(vcpu);
4735
}
4736
4737
/**
4738
* kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
4739
* @vcpu: The VCPU pointer
4740
* @global: &struct sys_reg_desc
4741
* @nr_global: size of the @global array
4742
*/
4743
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
4744
const struct sys_reg_desc *global,
4745
size_t nr_global)
4746
{
4747
struct sys_reg_params params;
4748
u64 esr = kvm_vcpu_get_esr(vcpu);
4749
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4750
int Rt2 = (esr >> 10) & 0x1f;
4751
4752
params.CRm = (esr >> 1) & 0xf;
4753
params.is_write = ((esr & 1) == 0);
4754
4755
params.Op0 = 0;
4756
params.Op1 = (esr >> 16) & 0xf;
4757
params.Op2 = 0;
4758
params.CRn = 0;
4759
4760
/*
4761
* Make a 64-bit value out of Rt and Rt2. As we use the same trap
4762
* backends between AArch32 and AArch64, we get away with it.
4763
*/
4764
if (params.is_write) {
4765
params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
4766
params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
4767
}
4768
4769
/*
4770
* If the table contains a handler, handle the
4771
* potential register operation in the case of a read and return
4772
* with success.
4773
*/
4774
if (emulate_cp(vcpu, &params, global, nr_global)) {
4775
/* Split up the value between registers for the read side */
4776
if (!params.is_write) {
4777
vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
4778
vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
4779
}
4780
4781
return 1;
4782
}
4783
4784
unhandled_cp_access(vcpu, &params);
4785
return 1;
4786
}
4787
4788
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
4789
4790
/*
4791
* The CP10 ID registers are architecturally mapped to AArch64 feature
4792
* registers. Abuse that fact so we can rely on the AArch64 handler for accesses
4793
* from AArch32.
4794
*/
4795
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
4796
{
4797
u8 reg_id = (esr >> 10) & 0xf;
4798
bool valid;
4799
4800
params->is_write = ((esr & 1) == 0);
4801
params->Op0 = 3;
4802
params->Op1 = 0;
4803
params->CRn = 0;
4804
params->CRm = 3;
4805
4806
/* CP10 ID registers are read-only */
4807
valid = !params->is_write;
4808
4809
switch (reg_id) {
4810
/* MVFR0 */
4811
case 0b0111:
4812
params->Op2 = 0;
4813
break;
4814
/* MVFR1 */
4815
case 0b0110:
4816
params->Op2 = 1;
4817
break;
4818
/* MVFR2 */
4819
case 0b0101:
4820
params->Op2 = 2;
4821
break;
4822
default:
4823
valid = false;
4824
}
4825
4826
if (valid)
4827
return true;
4828
4829
kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
4830
str_write_read(params->is_write), reg_id);
4831
return false;
4832
}
4833
4834
/**
4835
* kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
4836
* VFP Register' from AArch32.
4837
* @vcpu: The vCPU pointer
4838
*
4839
* MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
4840
* Work out the correct AArch64 system register encoding and reroute to the
4841
* AArch64 system register emulation.
4842
*/
4843
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
4844
{
4845
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4846
u64 esr = kvm_vcpu_get_esr(vcpu);
4847
struct sys_reg_params params;
4848
4849
/* UNDEF on any unhandled register access */
4850
if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
4851
kvm_inject_undefined(vcpu);
4852
return 1;
4853
}
4854
4855
if (emulate_sys_reg(vcpu, &params))
4856
vcpu_set_reg(vcpu, Rt, params.regval);
4857
4858
return 1;
4859
}
4860
4861
/**
4862
* kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
4863
* CRn=0, which corresponds to the AArch32 feature
4864
* registers.
4865
* @vcpu: the vCPU pointer
4866
* @params: the system register access parameters.
4867
*
4868
* Our cp15 system register tables do not enumerate the AArch32 feature
4869
* registers. Conveniently, our AArch64 table does, and the AArch32 system
4870
* register encoding can be trivially remapped into the AArch64 for the feature
4871
* registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
4872
*
4873
* According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
4874
* System registers with (coproc=0b1111, CRn==c0)", read accesses from this
4875
* range are either UNKNOWN or RES0. Rerouting remains architectural as we
4876
* treat undefined registers in this range as RAZ.
4877
*/
4878
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
4879
struct sys_reg_params *params)
4880
{
4881
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4882
4883
/* Treat impossible writes to RO registers as UNDEFINED */
4884
if (params->is_write) {
4885
unhandled_cp_access(vcpu, params);
4886
return 1;
4887
}
4888
4889
params->Op0 = 3;
4890
4891
/*
4892
* All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
4893
* Avoid conflicting with future expansion of AArch64 feature registers
4894
* and simply treat them as RAZ here.
4895
*/
4896
if (params->CRm > 3)
4897
params->regval = 0;
4898
else if (!emulate_sys_reg(vcpu, params))
4899
return 1;
4900
4901
vcpu_set_reg(vcpu, Rt, params->regval);
4902
return 1;
4903
}
4904
4905
/**
4906
* kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
4907
* @vcpu: The VCPU pointer
4908
* @params: &struct sys_reg_params
4909
* @global: &struct sys_reg_desc
4910
* @nr_global: size of the @global array
4911
*/
4912
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
4913
struct sys_reg_params *params,
4914
const struct sys_reg_desc *global,
4915
size_t nr_global)
4916
{
4917
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4918
4919
params->regval = vcpu_get_reg(vcpu, Rt);
4920
4921
if (emulate_cp(vcpu, params, global, nr_global)) {
4922
if (!params->is_write)
4923
vcpu_set_reg(vcpu, Rt, params->regval);
4924
return 1;
4925
}
4926
4927
unhandled_cp_access(vcpu, params);
4928
return 1;
4929
}
4930
4931
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
4932
{
4933
return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
4934
}
4935
4936
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
4937
{
4938
struct sys_reg_params params;
4939
4940
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4941
4942
/*
4943
* Certain AArch32 ID registers are handled by rerouting to the AArch64
4944
* system register table. Registers in the ID range where CRm=0 are
4945
* excluded from this scheme as they do not trivially map into AArch64
4946
* system register encodings, except for AIDR/REVIDR.
4947
*/
4948
if (params.Op1 == 0 && params.CRn == 0 &&
4949
(params.CRm || params.Op2 == 6 /* REVIDR */))
4950
return kvm_emulate_cp15_id_reg(vcpu, &params);
4951
if (params.Op1 == 1 && params.CRn == 0 &&
4952
params.CRm == 0 && params.Op2 == 7 /* AIDR */)
4953
return kvm_emulate_cp15_id_reg(vcpu, &params);
4954
4955
return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
4956
}
4957
4958
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
4959
{
4960
return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
4961
}
4962
4963
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
4964
{
4965
struct sys_reg_params params;
4966
4967
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4968
4969
return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
4970
}
4971
4972
/**
4973
* emulate_sys_reg - Emulate a guest access to an AArch64 system register
4974
* @vcpu: The VCPU pointer
4975
* @params: Decoded system register parameters
4976
*
4977
* Return: true if the system register access was successful, false otherwise.
4978
*/
4979
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
4980
struct sys_reg_params *params)
4981
{
4982
const struct sys_reg_desc *r;
4983
4984
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4985
if (likely(r)) {
4986
perform_access(vcpu, params, r);
4987
return true;
4988
}
4989
4990
print_sys_reg_msg(params,
4991
"Unsupported guest sys_reg access at: %lx [%08lx]\n",
4992
*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4993
kvm_inject_undefined(vcpu);
4994
4995
return false;
4996
}
4997
4998
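/*
* debugfs dump of the VM-wide feature ID registers ("idregs"): the
* seq_file iterator below walks sys_reg_descs[] and maps an ordinal
* position onto the n-th VM feature ID register, using
* kvm->arch.idreg_debugfs_iter as the cursor.
*/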
static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
4999
{
5000
unsigned long i, idreg_idx = 0;
5001
5002
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5003
const struct sys_reg_desc *r = &sys_reg_descs[i];
5004
5005
if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
5006
continue;
5007
5008
if (idreg_idx == pos)
5009
return r;
5010
5011
idreg_idx++;
5012
}
5013
5014
return NULL;
5015
}
5016
5017
static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
5018
{
5019
struct kvm *kvm = s->private;
5020
u8 *iter;
5021
5022
mutex_lock(&kvm->arch.config_lock);
5023
5024
iter = &kvm->arch.idreg_debugfs_iter;
5025
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
5026
*iter == (u8)~0) {
5027
*iter = *pos;
5028
if (!idregs_debug_find(kvm, *iter))
5029
iter = NULL;
5030
} else {
5031
iter = ERR_PTR(-EBUSY);
5032
}
5033
5034
mutex_unlock(&kvm->arch.config_lock);
5035
5036
return iter;
5037
}
5038
5039
static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
5040
{
5041
struct kvm *kvm = s->private;
5042
5043
(*pos)++;
5044
5045
if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
5046
kvm->arch.idreg_debugfs_iter++;
5047
5048
return &kvm->arch.idreg_debugfs_iter;
5049
}
5050
5051
return NULL;
5052
}
5053
5054
static void idregs_debug_stop(struct seq_file *s, void *v)
5055
{
5056
struct kvm *kvm = s->private;
5057
5058
if (IS_ERR(v))
5059
return;
5060
5061
mutex_lock(&kvm->arch.config_lock);
5062
5063
kvm->arch.idreg_debugfs_iter = ~0;
5064
5065
mutex_unlock(&kvm->arch.config_lock);
5066
}
5067
5068
static int idregs_debug_show(struct seq_file *s, void *v)
5069
{
5070
const struct sys_reg_desc *desc;
5071
struct kvm *kvm = s->private;
5072
5073
desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);
5074
5075
if (!desc->name)
5076
return 0;
5077
5078
seq_printf(s, "%20s:\t%016llx\n",
5079
desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));
5080
5081
return 0;
5082
}
5083
5084
static const struct seq_operations idregs_debug_sops = {
5085
.start = idregs_debug_start,
5086
.next = idregs_debug_next,
5087
.stop = idregs_debug_stop,
5088
.show = idregs_debug_show,
5089
};
5090
5091
DEFINE_SEQ_ATTRIBUTE(idregs_debug);
5092
5093
void kvm_sys_regs_create_debugfs(struct kvm *kvm)
5094
{
5095
kvm->arch.idreg_debugfs_iter = ~0;
5096
5097
debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
5098
&idregs_debug_fops);
5099
}
5100
5101
static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
5102
{
5103
u32 id = reg_to_encoding(reg);
5104
struct kvm *kvm = vcpu->kvm;
5105
5106
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
5107
return;
5108
5109
kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
5110
}
5111
5112
static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
5113
const struct sys_reg_desc *reg)
5114
{
5115
if (kvm_vcpu_initialized(vcpu))
5116
return;
5117
5118
reg->reset(vcpu, reg);
5119
}
5120
5121
/**
5122
* kvm_reset_sys_regs - sets system registers to reset value
5123
* @vcpu: The VCPU pointer
5124
*
5125
* This function finds the right table above and sets the registers on the
5126
* virtual CPU struct to their architecturally defined reset values.
5127
*/
5128
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
5129
{
5130
struct kvm *kvm = vcpu->kvm;
5131
unsigned long i;
5132
5133
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5134
const struct sys_reg_desc *r = &sys_reg_descs[i];
5135
5136
if (!r->reset)
5137
continue;
5138
5139
if (is_vm_ftr_id_reg(reg_to_encoding(r)))
5140
reset_vm_ftr_id_reg(vcpu, r);
5141
else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
5142
reset_vcpu_ftr_id_reg(vcpu, r);
5143
else
5144
r->reset(vcpu, r);
5145
5146
if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
5147
__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
5148
}
5149
5150
set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
5151
5152
if (kvm_vcpu_has_pmu(vcpu))
5153
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
5154
}
5155
5156
/**
5157
* kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
5158
* trap on a guest execution
5159
* @vcpu: The VCPU pointer
5160
*/
5161
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
5162
{
5163
const struct sys_reg_desc *desc = NULL;
5164
struct sys_reg_params params;
5165
unsigned long esr = kvm_vcpu_get_esr(vcpu);
5166
int Rt = kvm_vcpu_sys_get_rt(vcpu);
5167
int sr_idx;
5168
5169
trace_kvm_handle_sys_reg(esr);
5170
5171
if (triage_sysreg_trap(vcpu, &sr_idx))
5172
return 1;
5173
5174
params = esr_sys64_to_params(esr);
5175
params.regval = vcpu_get_reg(vcpu, Rt);
5176
5177
/* System registers have Op0=={2,3}, as per DDI0487 J.a C5.1.2 */
5178
if (params.Op0 == 2 || params.Op0 == 3)
5179
desc = &sys_reg_descs[sr_idx];
5180
else
5181
desc = &sys_insn_descs[sr_idx];
5182
5183
perform_access(vcpu, &params, desc);
5184
5185
/* Read from system register? */
5186
if (!params.is_write &&
5187
(params.Op0 == 2 || params.Op0 == 3))
5188
vcpu_set_reg(vcpu, Rt, params.regval);
5189
5190
return 1;
5191
}
5192
5193
/******************************************************************************
5194
* Userspace API
5195
*****************************************************************************/
5196
5197
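/*
* Decode a KVM_REG_ARM64_SYSREG index from userspace back into its
* Op0/Op1/CRn/CRm/Op2 fields. Only 64-bit sized indices are accepted,
* and any stray bits outside the defined masks make the index invalid.
*/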
static bool index_to_params(u64 id, struct sys_reg_params *params)
5198
{
5199
switch (id & KVM_REG_SIZE_MASK) {
5200
case KVM_REG_SIZE_U64:
5201
/* Any unused index bits mean it's not valid. */
5202
if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
5203
| KVM_REG_ARM_COPROC_MASK
5204
| KVM_REG_ARM64_SYSREG_OP0_MASK
5205
| KVM_REG_ARM64_SYSREG_OP1_MASK
5206
| KVM_REG_ARM64_SYSREG_CRN_MASK
5207
| KVM_REG_ARM64_SYSREG_CRM_MASK
5208
| KVM_REG_ARM64_SYSREG_OP2_MASK))
5209
return false;
5210
params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
5211
>> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
5212
params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
5213
>> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
5214
params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
5215
>> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
5216
params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
5217
>> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
5218
params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
5219
>> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
5220
return true;
5221
default:
5222
return false;
5223
}
5224
}
5225
5226
const struct sys_reg_desc *get_reg_by_id(u64 id,
5227
const struct sys_reg_desc table[],
5228
unsigned int num)
5229
{
5230
struct sys_reg_params params;
5231
5232
if (!index_to_params(id, &params))
5233
return NULL;
5234
5235
return find_reg(&params, table, num);
5236
}
5237
5238
/* Decode an index value, and find the sys_reg_desc entry. */
5239
static const struct sys_reg_desc *
5240
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
5241
const struct sys_reg_desc table[], unsigned int num)
5242
5243
{
5244
const struct sys_reg_desc *r;
5245
5246
/* We only do sys_reg for now. */
5247
if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
5248
return NULL;
5249
5250
r = get_reg_by_id(id, table, num);
5251
5252
/* Not saved in the sys_reg array and not otherwise accessible? */
5253
if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
5254
r = NULL;
5255
5256
return r;
5257
}
5258
5259
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5260
{
5261
u32 val;
5262
u32 __user *uval = uaddr;
5263
5264
/* Fail if we have unknown bits set. */
5265
if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
5266
| ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
5267
return -ENOENT;
5268
5269
switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5270
case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5271
if (KVM_REG_SIZE(id) != 4)
5272
return -ENOENT;
5273
val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5274
>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5275
if (val >= CSSELR_MAX)
5276
return -ENOENT;
5277
5278
return put_user(get_ccsidr(vcpu, val), uval);
5279
default:
5280
return -ENOENT;
5281
}
5282
}
5283
5284
static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5285
{
5286
u32 val, newval;
5287
u32 __user *uval = uaddr;
5288
5289
/* Fail if we have unknown bits set. */
5290
if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
5291
| ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
5292
return -ENOENT;
5293
5294
switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5295
case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5296
if (KVM_REG_SIZE(id) != 4)
5297
return -ENOENT;
5298
val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5299
>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5300
if (val >= CSSELR_MAX)
5301
return -ENOENT;
5302
5303
if (get_user(newval, uval))
5304
return -EFAULT;
5305
5306
return set_ccsidr(vcpu, val, newval);
5307
default:
5308
return -ENOENT;
5309
}
5310
}
5311
5312
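/*
* The legacy KVM_REG_ARM_TIMER_{CVAL,CNT} indices are aliases for the
* architectural CNTV_CVAL_EL0 and CNTVCT_EL0 encodings; translate them
* so userspace accesses resolve through the normal sysreg tables.
*/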
static u64 kvm_one_reg_to_id(const struct kvm_one_reg *reg)
5313
{
5314
switch(reg->id) {
5315
case KVM_REG_ARM_TIMER_CVAL:
5316
return TO_ARM64_SYS_REG(CNTV_CVAL_EL0);
5317
case KVM_REG_ARM_TIMER_CNT:
5318
return TO_ARM64_SYS_REG(CNTVCT_EL0);
5319
default:
5320
return reg->id;
5321
}
5322
}
5323
5324
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5325
const struct sys_reg_desc table[], unsigned int num)
5326
{
5327
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5328
const struct sys_reg_desc *r;
5329
u64 id = kvm_one_reg_to_id(reg);
5330
u64 val;
5331
int ret;
5332
5333
r = id_to_sys_reg_desc(vcpu, id, table, num);
5334
if (!r || sysreg_hidden(vcpu, r))
5335
return -ENOENT;
5336
5337
if (r->get_user) {
5338
ret = (r->get_user)(vcpu, r, &val);
5339
} else {
5340
val = __vcpu_sys_reg(vcpu, r->reg);
5341
ret = 0;
5342
}
5343
5344
if (!ret)
5345
ret = put_user(val, uaddr);
5346
5347
return ret;
5348
}
5349
5350
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5351
{
5352
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5353
5354
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5355
return demux_c15_get(vcpu, reg->id, uaddr);
5356
5357
return kvm_sys_reg_get_user(vcpu, reg,
5358
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5359
}
5360
5361
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5362
const struct sys_reg_desc table[], unsigned int num)
5363
{
5364
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5365
const struct sys_reg_desc *r;
5366
u64 id = kvm_one_reg_to_id(reg);
5367
u64 val;
5368
int ret;
5369
5370
if (get_user(val, uaddr))
5371
return -EFAULT;
5372
5373
r = id_to_sys_reg_desc(vcpu, id, table, num);
5374
if (!r || sysreg_hidden(vcpu, r))
5375
return -ENOENT;
5376
5377
if (sysreg_user_write_ignore(vcpu, r))
5378
return 0;
5379
5380
if (r->set_user) {
5381
ret = (r->set_user)(vcpu, r, val);
5382
} else {
5383
__vcpu_assign_sys_reg(vcpu, r->reg, val);
5384
ret = 0;
5385
}
5386
5387
return ret;
5388
}
5389
5390
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5391
{
5392
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5393
5394
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5395
return demux_c15_set(vcpu, reg->id, uaddr);
5396
5397
return kvm_sys_reg_set_user(vcpu, reg,
5398
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5399
}
5400
5401
static unsigned int num_demux_regs(void)
5402
{
5403
return CSSELR_MAX;
5404
}
5405
5406
static int write_demux_regids(u64 __user *uindices)
5407
{
5408
u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
5409
unsigned int i;
5410
5411
val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
5412
for (i = 0; i < CSSELR_MAX; i++) {
5413
if (put_user(val | i, uindices))
5414
return -EFAULT;
5415
uindices++;
5416
}
5417
return 0;
5418
}
5419
5420
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
5421
{
5422
return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
5423
KVM_REG_ARM64_SYSREG |
5424
(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
5425
(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
5426
(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
5427
(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
5428
(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
5429
}
5430
5431
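/*
* When enumerating registers for userspace, the virtual timer
* registers are reported under their legacy KVM_REG_ARM_TIMER_*
* indices rather than their CNTV_CVAL_EL0/CNTVCT_EL0 encodings, to
* preserve the existing ABI. A NULL destination only counts entries.
*/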
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
5432
{
5433
u64 idx;
5434
5435
if (!*uind)
5436
return true;
5437
5438
switch (reg_to_encoding(reg)) {
5439
case SYS_CNTV_CVAL_EL0:
5440
idx = KVM_REG_ARM_TIMER_CVAL;
5441
break;
5442
case SYS_CNTVCT_EL0:
5443
idx = KVM_REG_ARM_TIMER_CNT;
5444
break;
5445
default:
5446
idx = sys_reg_to_index(reg);
5447
}
5448
5449
if (put_user(idx, *uind))
5450
return false;
5451
5452
(*uind)++;
5453
return true;
5454
}
5455
5456
static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
5457
const struct sys_reg_desc *rd,
5458
u64 __user **uind,
5459
unsigned int *total)
5460
{
5461
/*
5462
* Ignore registers we trap but don't save,
5463
* and for which no custom user accessor is provided.
5464
*/
5465
if (!(rd->reg || rd->get_user))
5466
return 0;
5467
5468
if (sysreg_hidden(vcpu, rd))
5469
return 0;
5470
5471
if (!copy_reg_to_user(rd, uind))
5472
return -EFAULT;
5473
5474
(*total)++;
5475
return 0;
5476
}
5477
5478
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
5479
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
5480
{
5481
const struct sys_reg_desc *i2, *end2;
5482
unsigned int total = 0;
5483
int err;
5484
5485
i2 = sys_reg_descs;
5486
end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
5487
5488
while (i2 != end2) {
5489
err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
5490
if (err)
5491
return err;
5492
}
5493
return total;
5494
}
5495
5496
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

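/*
 * Fill the userspace index buffer: the sys_reg indices first, followed
 * by the demux (CCSIDR) indices.
 */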
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int err;

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

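/*
 * Map a feature ID register encoding onto its slot in the writable-mask
 * array filled in by kvm_vm_ioctl_get_reg_writable_masks() below.
 */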
#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)			\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),		\
				     sys_reg_Op1(r),		\
				     sys_reg_CRn(r),		\
				     sys_reg_CRm(r),		\
				     sys_reg_Op2(r))

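/*
 * Report which bits of each feature ID register userspace may change:
 * clear the whole mask array, then publish each descriptor's ->val as
 * the writable mask for registers that have a ->set_user() accessor,
 * skipping AArch32 ID regs when 32bit EL0 isn't supported.
 */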
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	u64 __user *masks = (u64 __user *)range->addr;

	/* Only feature id range is supported, reserved[13] must be zero. */
	if (range->range ||
	    memcmp(range->reserved, zero_page, sizeof(range->reserved)))
		return -EINVAL;

	/* Wipe the whole thing first */
	if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
		return -EFAULT;

	for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *reg = &sys_reg_descs[i];
		u32 encoding = reg_to_encoding(reg);
		u64 val;

		if (!is_feature_id_reg(encoding) || !reg->set_user)
			continue;

		if (!reg->val ||
		    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
			continue;
		}
		val = reg->val;

		if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
			return -EFAULT;
	}

	return 0;
}

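/*
 * Set up the HCR_EL2 bits for this vcpu that depend only on host
 * capabilities and the VM/vcpu configuration: E2H, RAS traps, FWB,
 * cache ID register trapping, AArch32 EL1, MTE, and TLBI *OS trapping
 * when the guest isn't advertised FEAT_TLBIOS.
 */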
static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
	    kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;

	/*
	 * In the absence of FGT, we cannot independently trap TLBI
	 * Range instructions. This isn't great, but trapping all
	 * TLBIs would be far worse. Live with it...
	 */
	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		vcpu->arch.hcr_el2 |= HCR_TTLBOS;
}

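/*
 * Compute this vcpu's trap configuration (HCR_EL2, ICH_HCR_EL2,
 * HCRX_EL2) and, once per VM, the FGU state for the fine-grained trap
 * groups (guarded by KVM_ARCH_FLAG_FGU_INITIALIZED).
 */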
void kvm_calculate_traps(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);
	vcpu_set_hcr(vcpu);
	vcpu_set_ich_hcr(vcpu);
	vcpu_set_hcrx(vcpu);

	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
		goto out;

	compute_fgu(kvm, HFGRTR_GROUP);
	compute_fgu(kvm, HFGITR_GROUP);
	compute_fgu(kvm, HDFGRTR_GROUP);
	compute_fgu(kvm, HAFGRTR_GROUP);
	compute_fgu(kvm, HFGRTR2_GROUP);
	compute_fgu(kvm, HFGITR2_GROUP);
	compute_fgu(kvm, HDFGRTR2_GROUP);

	set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
out:
	mutex_unlock(&kvm->arch.config_lock);
}

/*
 * Perform last adjustments to the ID registers that are implied by the
 * configuration outside of the ID regs themselves, as well as any
 * initialisation that directly depends on these ID registers (such as
 * RES0/RES1 behaviours). This is not the place to configure traps though.
 *
 * Because this can be called once per CPU, changes must be idempotent.
 */
int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	guard(mutex)(&kvm->arch.config_lock);

	/*
	 * This hacks into the ID registers, so only perform it when the
	 * first vcpu runs, or the kvm_set_vm_id_reg() helper will scream.
	 */
	if (!irqchip_in_kernel(kvm) && !kvm_vm_has_ran_once(kvm)) {
		u64 val;

		val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
		kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, val);
		val = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
		kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, val);
	}

	if (vcpu_has_nv(vcpu)) {
		int ret = kvm_init_nv_sysregs(vcpu);
		if (ret)
			return ret;
	}

	return 0;
}

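/*
 * One-time init: check that every descriptor table is sorted and free
 * of duplicates, seed the implementation ID registers, and derive the
 * NV trap and per-sysreg configuration from the tables.
 */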
int __init kvm_sys_reg_table_init(void)
{
	const struct sys_reg_desc *gicv3_regs;
	bool valid = true;
	unsigned int i, sz;
	int ret = 0;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false);
	valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);

	gicv3_regs = vgic_v3_get_sysreg_table(&sz);
	valid &= check_sysreg_table(gicv3_regs, sz, false);

	if (!valid)
		return -EINVAL;

	init_imp_id_regs();

	ret = populate_nv_trap_config();

	check_feature_map();

	for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
		ret = populate_sysreg_config(sys_reg_descs + i, i);

	for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
		ret = populate_sysreg_config(sys_insn_descs + i, i);

	return ret;
}