Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/sys_regs.c
26451 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* Copyright (C) 2012,2013 - ARM Ltd
4
* Author: Marc Zyngier <[email protected]>
5
*
6
* Derived from arch/arm/kvm/coproc.c:
7
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
8
* Authors: Rusty Russell <[email protected]>
9
* Christoffer Dall <[email protected]>
10
*/
11
12
#include <linux/bitfield.h>
13
#include <linux/bsearch.h>
14
#include <linux/cacheinfo.h>
15
#include <linux/debugfs.h>
16
#include <linux/kvm_host.h>
17
#include <linux/mm.h>
18
#include <linux/printk.h>
19
#include <linux/uaccess.h>
20
#include <linux/irqchip/arm-gic-v3.h>
21
22
#include <asm/arm_pmuv3.h>
23
#include <asm/cacheflush.h>
24
#include <asm/cputype.h>
25
#include <asm/debug-monitors.h>
26
#include <asm/esr.h>
27
#include <asm/kvm_arm.h>
28
#include <asm/kvm_emulate.h>
29
#include <asm/kvm_hyp.h>
30
#include <asm/kvm_mmu.h>
31
#include <asm/kvm_nested.h>
32
#include <asm/perf_event.h>
33
#include <asm/sysreg.h>
34
35
#include <trace/events/kvm.h>
36
37
#include "sys_regs.h"
38
#include "vgic/vgic.h"
39
40
#include "trace.h"
41
42
/*
43
* For AArch32, we only take care of what is being trapped. Anything
44
* that has to do with init and userspace access has to go via the
45
* 64bit interface.
46
*/
47
48
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
49
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
50
u64 val);
51
52
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
53
const struct sys_reg_desc *r)
54
{
55
kvm_inject_undefined(vcpu);
56
return false;
57
}
58
59
static bool bad_trap(struct kvm_vcpu *vcpu,
60
struct sys_reg_params *params,
61
const struct sys_reg_desc *r,
62
const char *msg)
63
{
64
WARN_ONCE(1, "Unexpected %s\n", msg);
65
print_sys_reg_instr(params);
66
return undef_access(vcpu, params, r);
67
}
68
69
static bool read_from_write_only(struct kvm_vcpu *vcpu,
70
struct sys_reg_params *params,
71
const struct sys_reg_desc *r)
72
{
73
return bad_trap(vcpu, params, r,
74
"sys_reg read to write-only register");
75
}
76
77
static bool write_to_read_only(struct kvm_vcpu *vcpu,
78
struct sys_reg_params *params,
79
const struct sys_reg_desc *r)
80
{
81
return bad_trap(vcpu, params, r,
82
"sys_reg write to read-only register");
83
}
84
85
enum sr_loc_attr {
86
SR_LOC_MEMORY = 0, /* Register definitely in memory */
87
SR_LOC_LOADED = BIT(0), /* Register on CPU, unless it cannot */
88
SR_LOC_MAPPED = BIT(1), /* Register in a different CPU register */
89
SR_LOC_XLATED = BIT(2), /* Register translated to fit another reg */
90
SR_LOC_SPECIAL = BIT(3), /* Demanding register, implies loaded */
91
};
92
93
struct sr_loc {
94
enum sr_loc_attr loc;
95
enum vcpu_sysreg map_reg;
96
u64 (*xlate)(u64);
97
};
98
99
static enum sr_loc_attr locate_direct_register(const struct kvm_vcpu *vcpu,
100
enum vcpu_sysreg reg)
101
{
102
switch (reg) {
103
case SCTLR_EL1:
104
case CPACR_EL1:
105
case TTBR0_EL1:
106
case TTBR1_EL1:
107
case TCR_EL1:
108
case TCR2_EL1:
109
case PIR_EL1:
110
case PIRE0_EL1:
111
case POR_EL1:
112
case ESR_EL1:
113
case AFSR0_EL1:
114
case AFSR1_EL1:
115
case FAR_EL1:
116
case MAIR_EL1:
117
case VBAR_EL1:
118
case CONTEXTIDR_EL1:
119
case AMAIR_EL1:
120
case CNTKCTL_EL1:
121
case ELR_EL1:
122
case SPSR_EL1:
123
case ZCR_EL1:
124
case SCTLR2_EL1:
125
/*
126
* EL1 registers which have an ELx2 mapping are loaded if
127
* we're not in hypervisor context.
128
*/
129
return is_hyp_ctxt(vcpu) ? SR_LOC_MEMORY : SR_LOC_LOADED;
130
131
case TPIDR_EL0:
132
case TPIDRRO_EL0:
133
case TPIDR_EL1:
134
case PAR_EL1:
135
case DACR32_EL2:
136
case IFSR32_EL2:
137
case DBGVCR32_EL2:
138
/* These registers are always loaded, no matter what */
139
return SR_LOC_LOADED;
140
141
default:
142
/* Non-mapped EL2 registers are by definition in memory. */
143
return SR_LOC_MEMORY;
144
}
145
}
146
147
static void locate_mapped_el2_register(const struct kvm_vcpu *vcpu,
148
enum vcpu_sysreg reg,
149
enum vcpu_sysreg map_reg,
150
u64 (*xlate)(u64),
151
struct sr_loc *loc)
152
{
153
if (!is_hyp_ctxt(vcpu)) {
154
loc->loc = SR_LOC_MEMORY;
155
return;
156
}
157
158
loc->loc = SR_LOC_LOADED | SR_LOC_MAPPED;
159
loc->map_reg = map_reg;
160
161
WARN_ON(locate_direct_register(vcpu, map_reg) != SR_LOC_MEMORY);
162
163
if (xlate != NULL && !vcpu_el2_e2h_is_set(vcpu)) {
164
loc->loc |= SR_LOC_XLATED;
165
loc->xlate = xlate;
166
}
167
}
168
169
#define MAPPED_EL2_SYSREG(r, m, t) \
170
case r: { \
171
locate_mapped_el2_register(vcpu, r, m, t, loc); \
172
break; \
173
}
174
175
static void locate_register(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg,
176
struct sr_loc *loc)
177
{
178
if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU)) {
179
loc->loc = SR_LOC_MEMORY;
180
return;
181
}
182
183
switch (reg) {
184
MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1,
185
translate_sctlr_el2_to_sctlr_el1 );
186
MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1,
187
translate_cptr_el2_to_cpacr_el1 );
188
MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1,
189
translate_ttbr0_el2_to_ttbr0_el1 );
190
MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1, NULL );
191
MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1,
192
translate_tcr_el2_to_tcr_el1 );
193
MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1, NULL );
194
MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1, NULL );
195
MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1, NULL );
196
MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1, NULL );
197
MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1, NULL );
198
MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1, NULL );
199
MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1, NULL );
200
MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1, NULL );
201
MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1, NULL );
202
MAPPED_EL2_SYSREG(POR_EL2, POR_EL1, NULL );
203
MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1, NULL );
204
MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1, NULL );
205
MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1, NULL );
206
MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1, NULL );
207
MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL );
208
MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1, NULL );
209
case CNTHCTL_EL2:
210
/* CNTHCTL_EL2 is super special, until we support NV2.1 */
211
loc->loc = ((is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu)) ?
212
SR_LOC_SPECIAL : SR_LOC_MEMORY);
213
break;
214
default:
215
loc->loc = locate_direct_register(vcpu, reg);
216
}
217
}
218
219
static u64 read_sr_from_cpu(enum vcpu_sysreg reg)
220
{
221
u64 val = 0x8badf00d8badf00d;
222
223
switch (reg) {
224
case SCTLR_EL1: val = read_sysreg_s(SYS_SCTLR_EL12); break;
225
case CPACR_EL1: val = read_sysreg_s(SYS_CPACR_EL12); break;
226
case TTBR0_EL1: val = read_sysreg_s(SYS_TTBR0_EL12); break;
227
case TTBR1_EL1: val = read_sysreg_s(SYS_TTBR1_EL12); break;
228
case TCR_EL1: val = read_sysreg_s(SYS_TCR_EL12); break;
229
case TCR2_EL1: val = read_sysreg_s(SYS_TCR2_EL12); break;
230
case PIR_EL1: val = read_sysreg_s(SYS_PIR_EL12); break;
231
case PIRE0_EL1: val = read_sysreg_s(SYS_PIRE0_EL12); break;
232
case POR_EL1: val = read_sysreg_s(SYS_POR_EL12); break;
233
case ESR_EL1: val = read_sysreg_s(SYS_ESR_EL12); break;
234
case AFSR0_EL1: val = read_sysreg_s(SYS_AFSR0_EL12); break;
235
case AFSR1_EL1: val = read_sysreg_s(SYS_AFSR1_EL12); break;
236
case FAR_EL1: val = read_sysreg_s(SYS_FAR_EL12); break;
237
case MAIR_EL1: val = read_sysreg_s(SYS_MAIR_EL12); break;
238
case VBAR_EL1: val = read_sysreg_s(SYS_VBAR_EL12); break;
239
case CONTEXTIDR_EL1: val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
240
case AMAIR_EL1: val = read_sysreg_s(SYS_AMAIR_EL12); break;
241
case CNTKCTL_EL1: val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
242
case ELR_EL1: val = read_sysreg_s(SYS_ELR_EL12); break;
243
case SPSR_EL1: val = read_sysreg_s(SYS_SPSR_EL12); break;
244
case ZCR_EL1: val = read_sysreg_s(SYS_ZCR_EL12); break;
245
case SCTLR2_EL1: val = read_sysreg_s(SYS_SCTLR2_EL12); break;
246
case TPIDR_EL0: val = read_sysreg_s(SYS_TPIDR_EL0); break;
247
case TPIDRRO_EL0: val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
248
case TPIDR_EL1: val = read_sysreg_s(SYS_TPIDR_EL1); break;
249
case PAR_EL1: val = read_sysreg_par(); break;
250
case DACR32_EL2: val = read_sysreg_s(SYS_DACR32_EL2); break;
251
case IFSR32_EL2: val = read_sysreg_s(SYS_IFSR32_EL2); break;
252
case DBGVCR32_EL2: val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
253
default: WARN_ON_ONCE(1);
254
}
255
256
return val;
257
}
258
259
static void write_sr_to_cpu(enum vcpu_sysreg reg, u64 val)
260
{
261
switch (reg) {
262
case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
263
case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
264
case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
265
case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
266
case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
267
case TCR2_EL1: write_sysreg_s(val, SYS_TCR2_EL12); break;
268
case PIR_EL1: write_sysreg_s(val, SYS_PIR_EL12); break;
269
case PIRE0_EL1: write_sysreg_s(val, SYS_PIRE0_EL12); break;
270
case POR_EL1: write_sysreg_s(val, SYS_POR_EL12); break;
271
case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
272
case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
273
case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
274
case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
275
case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
276
case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
277
case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
278
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
279
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
280
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
281
case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
282
case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
283
case SCTLR2_EL1: write_sysreg_s(val, SYS_SCTLR2_EL12); break;
284
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
285
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
286
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
287
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
288
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
289
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
290
case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
291
default: WARN_ON_ONCE(1);
292
}
293
}
294
295
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg reg)
296
{
297
struct sr_loc loc = {};
298
299
locate_register(vcpu, reg, &loc);
300
301
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
302
303
if (loc.loc & SR_LOC_SPECIAL) {
304
u64 val;
305
306
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
307
308
/*
309
* CNTHCTL_EL2 requires some special treatment to account
310
* for the bits that can be set via CNTKCTL_EL1 when E2H==1.
311
*/
312
switch (reg) {
313
case CNTHCTL_EL2:
314
val = read_sysreg_el1(SYS_CNTKCTL);
315
val &= CNTKCTL_VALID_BITS;
316
val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
317
return val;
318
default:
319
WARN_ON_ONCE(1);
320
}
321
}
322
323
if (loc.loc & SR_LOC_LOADED) {
324
enum vcpu_sysreg map_reg = reg;
325
326
if (loc.loc & SR_LOC_MAPPED)
327
map_reg = loc.map_reg;
328
329
if (!(loc.loc & SR_LOC_XLATED)) {
330
u64 val = read_sr_from_cpu(map_reg);
331
332
if (reg >= __SANITISED_REG_START__)
333
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
334
335
return val;
336
}
337
}
338
339
return __vcpu_sys_reg(vcpu, reg);
340
}
341
342
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, enum vcpu_sysreg reg)
343
{
344
struct sr_loc loc = {};
345
346
locate_register(vcpu, reg, &loc);
347
348
WARN_ON_ONCE(!has_vhe() && loc.loc != SR_LOC_MEMORY);
349
350
if (loc.loc & SR_LOC_SPECIAL) {
351
352
WARN_ON_ONCE(loc.loc & ~SR_LOC_SPECIAL);
353
354
switch (reg) {
355
case CNTHCTL_EL2:
356
/*
357
* If E2H=1, some of the bits are backed by
358
* CNTKCTL_EL1, while the rest is kept in memory.
359
* Yes, this is fun stuff.
360
*/
361
write_sysreg_el1(val, SYS_CNTKCTL);
362
break;
363
default:
364
WARN_ON_ONCE(1);
365
}
366
}
367
368
if (loc.loc & SR_LOC_LOADED) {
369
enum vcpu_sysreg map_reg = reg;
370
u64 xlated_val;
371
372
if (reg >= __SANITISED_REG_START__)
373
val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);
374
375
if (loc.loc & SR_LOC_MAPPED)
376
map_reg = loc.map_reg;
377
378
if (loc.loc & SR_LOC_XLATED)
379
xlated_val = loc.xlate(val);
380
else
381
xlated_val = val;
382
383
write_sr_to_cpu(map_reg, xlated_val);
384
385
/*
386
* Fall through to write the backing store anyway, which
387
* allows translated registers to be directly read without a
388
* reverse translation.
389
*/
390
}
391
392
__vcpu_assign_sys_reg(vcpu, reg, val);
393
}
394
395
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
396
#define CSSELR_MAX 14
397
398
/*
399
* Returns the minimum line size for the selected cache, expressed as
400
* Log2(bytes).
401
*/
402
static u8 get_min_cache_line_size(bool icache)
403
{
404
u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
405
u8 field;
406
407
if (icache)
408
field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
409
else
410
field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);
411
412
/*
413
* Cache line size is represented as Log2(words) in CTR_EL0.
414
* Log2(bytes) can be derived with the following:
415
*
416
* Log2(words) + 2 = Log2(bytes / 4) + 2
417
* = Log2(bytes) - 2 + 2
418
* = Log2(bytes)
419
*/
420
return field + 2;
421
}
422
423
/* Which cache CCSIDR represents depends on CSSELR value. */
424
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
425
{
426
u8 line_size;
427
428
if (vcpu->arch.ccsidr)
429
return vcpu->arch.ccsidr[csselr];
430
431
line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);
432
433
/*
434
* Fabricate a CCSIDR value as the overriding value does not exist.
435
* The real CCSIDR value will not be used as it can vary by the
436
* physical CPU which the vcpu currently resides in.
437
*
438
* The line size is determined with get_min_cache_line_size(), which
439
* should be valid for all CPUs even if they have different cache
440
* configuration.
441
*
442
* The associativity bits are cleared, meaning the geometry of all data
443
* and unified caches (which are guaranteed to be PIPT and thus
444
* non-aliasing) are 1 set and 1 way.
445
* Guests should not be doing cache operations by set/way at all, and
446
* for this reason, we trap them and attempt to infer the intent, so
447
* that we can flush the entire guest's address space at the appropriate
448
* time. The exposed geometry minimizes the number of the traps.
449
* [If guests should attempt to infer aliasing properties from the
450
* geometry (which is not permitted by the architecture), they would
451
* only do so for virtually indexed caches.]
452
*
453
* We don't check if the cache level exists as it is allowed to return
454
* an UNKNOWN value if not.
455
*/
456
return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
457
}
458
459
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
460
{
461
u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
462
u32 *ccsidr = vcpu->arch.ccsidr;
463
u32 i;
464
465
if ((val & CCSIDR_EL1_RES0) ||
466
line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
467
return -EINVAL;
468
469
if (!ccsidr) {
470
if (val == get_ccsidr(vcpu, csselr))
471
return 0;
472
473
ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
474
if (!ccsidr)
475
return -ENOMEM;
476
477
for (i = 0; i < CSSELR_MAX; i++)
478
ccsidr[i] = get_ccsidr(vcpu, i);
479
480
vcpu->arch.ccsidr = ccsidr;
481
}
482
483
ccsidr[csselr] = val;
484
485
return 0;
486
}
487
488
static bool access_rw(struct kvm_vcpu *vcpu,
489
struct sys_reg_params *p,
490
const struct sys_reg_desc *r)
491
{
492
if (p->is_write)
493
vcpu_write_sys_reg(vcpu, p->regval, r->reg);
494
else
495
p->regval = vcpu_read_sys_reg(vcpu, r->reg);
496
497
return true;
498
}
499
500
/*
501
* See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
502
*/
503
static bool access_dcsw(struct kvm_vcpu *vcpu,
504
struct sys_reg_params *p,
505
const struct sys_reg_desc *r)
506
{
507
if (!p->is_write)
508
return read_from_write_only(vcpu, p, r);
509
510
/*
511
* Only track S/W ops if we don't have FWB. It still indicates
512
* that the guest is a bit broken (S/W operations should only
513
* be done by firmware, knowing that there is only a single
514
* CPU left in the system, and certainly not from non-secure
515
* software).
516
*/
517
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
518
kvm_set_way_flush(vcpu);
519
520
return true;
521
}
522
523
static bool access_dcgsw(struct kvm_vcpu *vcpu,
524
struct sys_reg_params *p,
525
const struct sys_reg_desc *r)
526
{
527
if (!kvm_has_mte(vcpu->kvm))
528
return undef_access(vcpu, p, r);
529
530
/* Treat MTE S/W ops as we treat the classic ones: with contempt */
531
return access_dcsw(vcpu, p, r);
532
}
533
534
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
535
{
536
switch (r->aarch32_map) {
537
case AA32_LO:
538
*mask = GENMASK_ULL(31, 0);
539
*shift = 0;
540
break;
541
case AA32_HI:
542
*mask = GENMASK_ULL(63, 32);
543
*shift = 32;
544
break;
545
default:
546
*mask = GENMASK_ULL(63, 0);
547
*shift = 0;
548
break;
549
}
550
}
551
552
/*
553
* Generic accessor for VM registers. Only called as long as HCR_TVM
554
* is set. If the guest enables the MMU, we stop trapping the VM
555
* sys_regs and leave it in complete control of the caches.
556
*/
557
static bool access_vm_reg(struct kvm_vcpu *vcpu,
558
struct sys_reg_params *p,
559
const struct sys_reg_desc *r)
560
{
561
bool was_enabled = vcpu_has_cache_enabled(vcpu);
562
u64 val, mask, shift;
563
564
BUG_ON(!p->is_write);
565
566
get_access_mask(r, &mask, &shift);
567
568
if (~mask) {
569
val = vcpu_read_sys_reg(vcpu, r->reg);
570
val &= ~mask;
571
} else {
572
val = 0;
573
}
574
575
val |= (p->regval & (mask >> shift)) << shift;
576
vcpu_write_sys_reg(vcpu, val, r->reg);
577
578
kvm_toggle_cache(vcpu, was_enabled);
579
return true;
580
}
581
582
static bool access_actlr(struct kvm_vcpu *vcpu,
583
struct sys_reg_params *p,
584
const struct sys_reg_desc *r)
585
{
586
u64 mask, shift;
587
588
if (p->is_write)
589
return ignore_write(vcpu, p);
590
591
get_access_mask(r, &mask, &shift);
592
p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;
593
594
return true;
595
}
596
597
/*
598
* Trap handler for the GICv3 SGI generation system register.
599
* Forward the request to the VGIC emulation.
600
* The cp15_64 code makes sure this automatically works
601
* for both AArch64 and AArch32 accesses.
602
*/
603
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
604
struct sys_reg_params *p,
605
const struct sys_reg_desc *r)
606
{
607
bool g1;
608
609
if (!kvm_has_gicv3(vcpu->kvm))
610
return undef_access(vcpu, p, r);
611
612
if (!p->is_write)
613
return read_from_write_only(vcpu, p, r);
614
615
/*
616
* In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
617
* Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
618
* depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
619
* equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
620
* group.
621
*/
622
if (p->Op0 == 0) { /* AArch32 */
623
switch (p->Op1) {
624
default: /* Keep GCC quiet */
625
case 0: /* ICC_SGI1R */
626
g1 = true;
627
break;
628
case 1: /* ICC_ASGI1R */
629
case 2: /* ICC_SGI0R */
630
g1 = false;
631
break;
632
}
633
} else { /* AArch64 */
634
switch (p->Op2) {
635
default: /* Keep GCC quiet */
636
case 5: /* ICC_SGI1R_EL1 */
637
g1 = true;
638
break;
639
case 6: /* ICC_ASGI1R_EL1 */
640
case 7: /* ICC_SGI0R_EL1 */
641
g1 = false;
642
break;
643
}
644
}
645
646
vgic_v3_dispatch_sgi(vcpu, p->regval, g1);
647
648
return true;
649
}
650
651
static bool access_gic_sre(struct kvm_vcpu *vcpu,
652
struct sys_reg_params *p,
653
const struct sys_reg_desc *r)
654
{
655
if (!kvm_has_gicv3(vcpu->kvm))
656
return undef_access(vcpu, p, r);
657
658
if (p->is_write)
659
return ignore_write(vcpu, p);
660
661
if (p->Op1 == 4) { /* ICC_SRE_EL2 */
662
p->regval = KVM_ICC_SRE_EL2;
663
} else { /* ICC_SRE_EL1 */
664
p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
665
}
666
667
return true;
668
}
669
670
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
671
struct sys_reg_params *p,
672
const struct sys_reg_desc *r)
673
{
674
if (p->is_write)
675
return ignore_write(vcpu, p);
676
else
677
return read_zero(vcpu, p);
678
}
679
680
/*
681
* ARMv8.1 mandates at least a trivial LORegion implementation, where all the
682
* RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
683
* system, these registers should UNDEF. LORID_EL1 being a RO register, we
684
* treat it separately.
685
*/
686
static bool trap_loregion(struct kvm_vcpu *vcpu,
687
struct sys_reg_params *p,
688
const struct sys_reg_desc *r)
689
{
690
u32 sr = reg_to_encoding(r);
691
692
if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
693
return undef_access(vcpu, p, r);
694
695
if (p->is_write && sr == SYS_LORID_EL1)
696
return write_to_read_only(vcpu, p, r);
697
698
return trap_raz_wi(vcpu, p, r);
699
}
700
701
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
702
struct sys_reg_params *p,
703
const struct sys_reg_desc *r)
704
{
705
if (!p->is_write)
706
return read_from_write_only(vcpu, p, r);
707
708
kvm_debug_handle_oslar(vcpu, p->regval);
709
return true;
710
}
711
712
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
713
struct sys_reg_params *p,
714
const struct sys_reg_desc *r)
715
{
716
if (p->is_write)
717
return write_to_read_only(vcpu, p, r);
718
719
p->regval = __vcpu_sys_reg(vcpu, r->reg);
720
return true;
721
}
722
723
static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
724
u64 val)
725
{
726
/*
727
* The only modifiable bit is the OSLK bit. Refuse the write if
728
* userspace attempts to change any other bit in the register.
729
*/
730
if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
731
return -EINVAL;
732
733
__vcpu_assign_sys_reg(vcpu, rd->reg, val);
734
return 0;
735
}
736
737
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
738
struct sys_reg_params *p,
739
const struct sys_reg_desc *r)
740
{
741
if (p->is_write) {
742
return ignore_write(vcpu, p);
743
} else {
744
p->regval = read_sysreg(dbgauthstatus_el1);
745
return true;
746
}
747
}
748
749
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
750
struct sys_reg_params *p,
751
const struct sys_reg_desc *r)
752
{
753
access_rw(vcpu, p, r);
754
755
kvm_debug_set_guest_ownership(vcpu);
756
return true;
757
}
758
759
/*
760
* reg_to_dbg/dbg_to_reg
761
*
762
* A 32 bit write to a debug register leave top bits alone
763
* A 32 bit read from a debug register only returns the bottom bits
764
*/
765
static void reg_to_dbg(struct kvm_vcpu *vcpu,
766
struct sys_reg_params *p,
767
const struct sys_reg_desc *rd,
768
u64 *dbg_reg)
769
{
770
u64 mask, shift, val;
771
772
get_access_mask(rd, &mask, &shift);
773
774
val = *dbg_reg;
775
val &= ~mask;
776
val |= (p->regval & (mask >> shift)) << shift;
777
*dbg_reg = val;
778
}
779
780
static void dbg_to_reg(struct kvm_vcpu *vcpu,
781
struct sys_reg_params *p,
782
const struct sys_reg_desc *rd,
783
u64 *dbg_reg)
784
{
785
u64 mask, shift;
786
787
get_access_mask(rd, &mask, &shift);
788
p->regval = (*dbg_reg & mask) >> shift;
789
}
790
791
static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
792
{
793
struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;
794
795
switch (rd->Op2) {
796
case 0b100:
797
return &dbg->dbg_bvr[rd->CRm];
798
case 0b101:
799
return &dbg->dbg_bcr[rd->CRm];
800
case 0b110:
801
return &dbg->dbg_wvr[rd->CRm];
802
case 0b111:
803
return &dbg->dbg_wcr[rd->CRm];
804
default:
805
KVM_BUG_ON(1, vcpu->kvm);
806
return NULL;
807
}
808
}
809
810
static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
811
const struct sys_reg_desc *rd)
812
{
813
u64 *reg = demux_wb_reg(vcpu, rd);
814
815
if (!reg)
816
return false;
817
818
if (p->is_write)
819
reg_to_dbg(vcpu, p, rd, reg);
820
else
821
dbg_to_reg(vcpu, p, rd, reg);
822
823
kvm_debug_set_guest_ownership(vcpu);
824
return true;
825
}
826
827
static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
828
u64 val)
829
{
830
u64 *reg = demux_wb_reg(vcpu, rd);
831
832
if (!reg)
833
return -EINVAL;
834
835
*reg = val;
836
return 0;
837
}
838
839
static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
840
u64 *val)
841
{
842
u64 *reg = demux_wb_reg(vcpu, rd);
843
844
if (!reg)
845
return -EINVAL;
846
847
*val = *reg;
848
return 0;
849
}
850
851
static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
852
{
853
u64 *reg = demux_wb_reg(vcpu, rd);
854
855
/*
856
* Bail early if we couldn't find storage for the register, the
857
* KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
858
* being run.
859
*/
860
if (!reg)
861
return 0;
862
863
*reg = rd->val;
864
return rd->val;
865
}
866
867
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
868
{
869
u64 amair = read_sysreg(amair_el1);
870
vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
871
return amair;
872
}
873
874
static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
875
{
876
u64 actlr = read_sysreg(actlr_el1);
877
vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
878
return actlr;
879
}
880
881
static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
882
{
883
u64 mpidr;
884
885
/*
886
* Map the vcpu_id into the first three affinity level fields of
887
* the MPIDR. We limit the number of VCPUs in level 0 due to a
888
* limitation to 16 CPUs in that level in the ICC_SGIxR registers
889
* of the GICv3 to be able to address each CPU directly when
890
* sending IPIs.
891
*/
892
mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
893
mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
894
mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
895
mpidr |= (1ULL << 31);
896
vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);
897
898
return mpidr;
899
}
900
901
static unsigned int hidden_visibility(const struct kvm_vcpu *vcpu,
902
const struct sys_reg_desc *r)
903
{
904
return REG_HIDDEN;
905
}
906
907
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
908
const struct sys_reg_desc *r)
909
{
910
if (kvm_vcpu_has_pmu(vcpu))
911
return 0;
912
913
return REG_HIDDEN;
914
}
915
916
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
917
{
918
u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
919
u8 n = vcpu->kvm->arch.nr_pmu_counters;
920
921
if (n)
922
mask |= GENMASK(n - 1, 0);
923
924
reset_unknown(vcpu, r);
925
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);
926
927
return __vcpu_sys_reg(vcpu, r->reg);
928
}
929
930
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
931
{
932
reset_unknown(vcpu, r);
933
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));
934
935
return __vcpu_sys_reg(vcpu, r->reg);
936
}
937
938
static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
939
{
940
/* This thing will UNDEF, who cares about the reset value? */
941
if (!kvm_vcpu_has_pmu(vcpu))
942
return 0;
943
944
reset_unknown(vcpu, r);
945
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));
946
947
return __vcpu_sys_reg(vcpu, r->reg);
948
}
949
950
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
951
{
952
reset_unknown(vcpu, r);
953
__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);
954
955
return __vcpu_sys_reg(vcpu, r->reg);
956
}
957
958
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
959
{
960
u64 pmcr = 0;
961
962
if (!kvm_supports_32bit_el0())
963
pmcr |= ARMV8_PMU_PMCR_LC;
964
965
/*
966
* The value of PMCR.N field is included when the
967
* vCPU register is read via kvm_vcpu_read_pmcr().
968
*/
969
__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);
970
971
return __vcpu_sys_reg(vcpu, r->reg);
972
}
973
974
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
975
{
976
u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
977
bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
978
979
if (!enabled)
980
kvm_inject_undefined(vcpu);
981
982
return !enabled;
983
}
984
985
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
986
{
987
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
988
}
989
990
static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
991
{
992
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
993
}
994
995
static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
996
{
997
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
998
}
999
1000
static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
1001
{
1002
return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
1003
}
1004
1005
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1006
const struct sys_reg_desc *r)
1007
{
1008
u64 val;
1009
1010
if (pmu_access_el0_disabled(vcpu))
1011
return false;
1012
1013
if (p->is_write) {
1014
/*
1015
* Only update writeable bits of PMCR (continuing into
1016
* kvm_pmu_handle_pmcr() as well)
1017
*/
1018
val = kvm_vcpu_read_pmcr(vcpu);
1019
val &= ~ARMV8_PMU_PMCR_MASK;
1020
val |= p->regval & ARMV8_PMU_PMCR_MASK;
1021
if (!kvm_supports_32bit_el0())
1022
val |= ARMV8_PMU_PMCR_LC;
1023
kvm_pmu_handle_pmcr(vcpu, val);
1024
} else {
1025
/* PMCR.P & PMCR.C are RAZ */
1026
val = kvm_vcpu_read_pmcr(vcpu)
1027
& ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
1028
p->regval = val;
1029
}
1030
1031
return true;
1032
}
1033
1034
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1035
const struct sys_reg_desc *r)
1036
{
1037
if (pmu_access_event_counter_el0_disabled(vcpu))
1038
return false;
1039
1040
if (p->is_write)
1041
__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
1042
else
1043
/* return PMSELR.SEL field */
1044
p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
1045
& PMSELR_EL0_SEL_MASK;
1046
1047
return true;
1048
}
1049
1050
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1051
const struct sys_reg_desc *r)
1052
{
1053
u64 pmceid, mask, shift;
1054
1055
BUG_ON(p->is_write);
1056
1057
if (pmu_access_el0_disabled(vcpu))
1058
return false;
1059
1060
get_access_mask(r, &mask, &shift);
1061
1062
pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
1063
pmceid &= mask;
1064
pmceid >>= shift;
1065
1066
p->regval = pmceid;
1067
1068
return true;
1069
}
1070
1071
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
1072
{
1073
u64 pmcr, val;
1074
1075
pmcr = kvm_vcpu_read_pmcr(vcpu);
1076
val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
1077
if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
1078
kvm_inject_undefined(vcpu);
1079
return false;
1080
}
1081
1082
return true;
1083
}
1084
1085
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1086
u64 *val)
1087
{
1088
u64 idx;
1089
1090
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1091
/* PMCCNTR_EL0 */
1092
idx = ARMV8_PMU_CYCLE_IDX;
1093
else
1094
/* PMEVCNTRn_EL0 */
1095
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1096
1097
*val = kvm_pmu_get_counter_value(vcpu, idx);
1098
return 0;
1099
}
1100
1101
static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1102
u64 val)
1103
{
1104
u64 idx;
1105
1106
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
1107
/* PMCCNTR_EL0 */
1108
idx = ARMV8_PMU_CYCLE_IDX;
1109
else
1110
/* PMEVCNTRn_EL0 */
1111
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1112
1113
kvm_pmu_set_counter_value_user(vcpu, idx, val);
1114
return 0;
1115
}
1116
1117
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
1118
struct sys_reg_params *p,
1119
const struct sys_reg_desc *r)
1120
{
1121
u64 idx = ~0UL;
1122
1123
if (r->CRn == 9 && r->CRm == 13) {
1124
if (r->Op2 == 2) {
1125
/* PMXEVCNTR_EL0 */
1126
if (pmu_access_event_counter_el0_disabled(vcpu))
1127
return false;
1128
1129
idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
1130
__vcpu_sys_reg(vcpu, PMSELR_EL0));
1131
} else if (r->Op2 == 0) {
1132
/* PMCCNTR_EL0 */
1133
if (pmu_access_cycle_counter_el0_disabled(vcpu))
1134
return false;
1135
1136
idx = ARMV8_PMU_CYCLE_IDX;
1137
}
1138
} else if (r->CRn == 0 && r->CRm == 9) {
1139
/* PMCCNTR */
1140
if (pmu_access_event_counter_el0_disabled(vcpu))
1141
return false;
1142
1143
idx = ARMV8_PMU_CYCLE_IDX;
1144
} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
1145
/* PMEVCNTRn_EL0 */
1146
if (pmu_access_event_counter_el0_disabled(vcpu))
1147
return false;
1148
1149
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1150
}
1151
1152
/* Catch any decoding mistake */
1153
WARN_ON(idx == ~0UL);
1154
1155
if (!pmu_counter_idx_valid(vcpu, idx))
1156
return false;
1157
1158
if (p->is_write) {
1159
if (pmu_access_el0_disabled(vcpu))
1160
return false;
1161
1162
kvm_pmu_set_counter_value(vcpu, idx, p->regval);
1163
} else {
1164
p->regval = kvm_pmu_get_counter_value(vcpu, idx);
1165
}
1166
1167
return true;
1168
}
1169
1170
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1171
const struct sys_reg_desc *r)
1172
{
1173
u64 idx, reg;
1174
1175
if (pmu_access_el0_disabled(vcpu))
1176
return false;
1177
1178
if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
1179
/* PMXEVTYPER_EL0 */
1180
idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
1181
reg = PMEVTYPER0_EL0 + idx;
1182
} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
1183
idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
1184
if (idx == ARMV8_PMU_CYCLE_IDX)
1185
reg = PMCCFILTR_EL0;
1186
else
1187
/* PMEVTYPERn_EL0 */
1188
reg = PMEVTYPER0_EL0 + idx;
1189
} else {
1190
BUG();
1191
}
1192
1193
if (!pmu_counter_idx_valid(vcpu, idx))
1194
return false;
1195
1196
if (p->is_write) {
1197
kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
1198
kvm_vcpu_pmu_restore_guest(vcpu);
1199
} else {
1200
p->regval = __vcpu_sys_reg(vcpu, reg);
1201
}
1202
1203
return true;
1204
}
1205
1206
static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
1207
{
1208
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1209
1210
__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
1211
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1212
1213
return 0;
1214
}
1215
1216
static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
1217
{
1218
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1219
1220
*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
1221
return 0;
1222
}
1223
1224
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1225
const struct sys_reg_desc *r)
1226
{
1227
u64 val, mask;
1228
1229
if (pmu_access_el0_disabled(vcpu))
1230
return false;
1231
1232
mask = kvm_pmu_accessible_counter_mask(vcpu);
1233
if (p->is_write) {
1234
val = p->regval & mask;
1235
if (r->Op2 & 0x1)
1236
/* accessing PMCNTENSET_EL0 */
1237
__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
1238
else
1239
/* accessing PMCNTENCLR_EL0 */
1240
__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);
1241
1242
kvm_pmu_reprogram_counter_mask(vcpu, val);
1243
} else {
1244
p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
1245
}
1246
1247
return true;
1248
}
1249
1250
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1251
const struct sys_reg_desc *r)
1252
{
1253
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1254
1255
if (check_pmu_access_disabled(vcpu, 0))
1256
return false;
1257
1258
if (p->is_write) {
1259
u64 val = p->regval & mask;
1260
1261
if (r->Op2 & 0x1)
1262
/* accessing PMINTENSET_EL1 */
1263
__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
1264
else
1265
/* accessing PMINTENCLR_EL1 */
1266
__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
1267
} else {
1268
p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
1269
}
1270
1271
return true;
1272
}
1273
1274
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1275
const struct sys_reg_desc *r)
1276
{
1277
u64 mask = kvm_pmu_accessible_counter_mask(vcpu);
1278
1279
if (pmu_access_el0_disabled(vcpu))
1280
return false;
1281
1282
if (p->is_write) {
1283
if (r->CRm & 0x2)
1284
/* accessing PMOVSSET_EL0 */
1285
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
1286
else
1287
/* accessing PMOVSCLR_EL0 */
1288
__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
1289
} else {
1290
p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
1291
}
1292
1293
return true;
1294
}
1295
1296
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1297
const struct sys_reg_desc *r)
1298
{
1299
u64 mask;
1300
1301
if (!p->is_write)
1302
return read_from_write_only(vcpu, p, r);
1303
1304
if (pmu_write_swinc_el0_disabled(vcpu))
1305
return false;
1306
1307
mask = kvm_pmu_accessible_counter_mask(vcpu);
1308
kvm_pmu_software_increment(vcpu, p->regval & mask);
1309
return true;
1310
}
1311
1312
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
1313
const struct sys_reg_desc *r)
1314
{
1315
if (p->is_write) {
1316
if (!vcpu_mode_priv(vcpu))
1317
return undef_access(vcpu, p, r);
1318
1319
__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
1320
(p->regval & ARMV8_PMU_USERENR_MASK));
1321
} else {
1322
p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
1323
& ARMV8_PMU_USERENR_MASK;
1324
}
1325
1326
return true;
1327
}
1328
1329
static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1330
u64 *val)
1331
{
1332
*val = kvm_vcpu_read_pmcr(vcpu);
1333
return 0;
1334
}
1335
1336
static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
1337
u64 val)
1338
{
1339
u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
1340
struct kvm *kvm = vcpu->kvm;
1341
1342
mutex_lock(&kvm->arch.config_lock);
1343
1344
/*
1345
* The vCPU can't have more counters than the PMU hardware
1346
* implements. Ignore this error to maintain compatibility
1347
* with the existing KVM behavior.
1348
*/
1349
if (!kvm_vm_has_ran_once(kvm) &&
1350
!vcpu_has_nv(vcpu) &&
1351
new_n <= kvm_arm_pmu_get_max_counters(kvm))
1352
kvm->arch.nr_pmu_counters = new_n;
1353
1354
mutex_unlock(&kvm->arch.config_lock);
1355
1356
/*
1357
* Ignore writes to RES0 bits, read only bits that are cleared on
1358
* vCPU reset, and writable bits that KVM doesn't support yet.
1359
* (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
1360
* The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
1361
* But, we leave the bit as it is here, as the vCPU's PMUver might
1362
* be changed later (NOTE: the bit will be cleared on first vCPU run
1363
* if necessary).
1364
*/
1365
val &= ARMV8_PMU_PMCR_MASK;
1366
1367
/* The LC bit is RES1 when AArch32 is not supported */
1368
if (!kvm_supports_32bit_el0())
1369
val |= ARMV8_PMU_PMCR_LC;
1370
1371
__vcpu_assign_sys_reg(vcpu, r->reg, val);
1372
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
1373
1374
return 0;
1375
}
1376
1377
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
1378
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
1379
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
1380
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1381
get_dbg_wb_reg, set_dbg_wb_reg }, \
1382
{ SYS_DESC(SYS_DBGBCRn_EL1(n)), \
1383
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1384
get_dbg_wb_reg, set_dbg_wb_reg }, \
1385
{ SYS_DESC(SYS_DBGWVRn_EL1(n)), \
1386
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1387
get_dbg_wb_reg, set_dbg_wb_reg }, \
1388
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
1389
trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0, \
1390
get_dbg_wb_reg, set_dbg_wb_reg }
1391
1392
#define PMU_SYS_REG(name) \
1393
SYS_DESC(SYS_##name), .reset = reset_pmu_reg, \
1394
.visibility = pmu_visibility
1395
1396
/* Macro to expand the PMEVCNTRn_EL0 register */
1397
#define PMU_PMEVCNTR_EL0(n) \
1398
{ PMU_SYS_REG(PMEVCNTRn_EL0(n)), \
1399
.reset = reset_pmevcntr, .get_user = get_pmu_evcntr, \
1400
.set_user = set_pmu_evcntr, \
1401
.access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
1402
1403
/* Macro to expand the PMEVTYPERn_EL0 register */
1404
#define PMU_PMEVTYPER_EL0(n) \
1405
{ PMU_SYS_REG(PMEVTYPERn_EL0(n)), \
1406
.reset = reset_pmevtyper, \
1407
.access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
1408
1409
/* Macro to expand the AMU counter and type registers*/
1410
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
1411
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
1412
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
1413
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
1414
1415
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
1416
const struct sys_reg_desc *rd)
1417
{
1418
return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
1419
}
1420
1421
/*
1422
* If we land here on a PtrAuth access, that is because we didn't
1423
* fixup the access on exit by allowing the PtrAuth sysregs. The only
1424
* way this happens is when the guest does not have PtrAuth support
1425
* enabled.
1426
*/
1427
#define __PTRAUTH_KEY(k) \
1428
{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k, \
1429
.visibility = ptrauth_visibility}
1430
1431
#define PTRAUTH_KEY(k) \
1432
__PTRAUTH_KEY(k ## KEYLO_EL1), \
1433
__PTRAUTH_KEY(k ## KEYHI_EL1)
1434
1435
static bool access_arch_timer(struct kvm_vcpu *vcpu,
1436
struct sys_reg_params *p,
1437
const struct sys_reg_desc *r)
1438
{
1439
enum kvm_arch_timers tmr;
1440
enum kvm_arch_timer_regs treg;
1441
u64 reg = reg_to_encoding(r);
1442
1443
switch (reg) {
1444
case SYS_CNTP_TVAL_EL0:
1445
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1446
tmr = TIMER_HPTIMER;
1447
else
1448
tmr = TIMER_PTIMER;
1449
treg = TIMER_REG_TVAL;
1450
break;
1451
1452
case SYS_CNTV_TVAL_EL0:
1453
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1454
tmr = TIMER_HVTIMER;
1455
else
1456
tmr = TIMER_VTIMER;
1457
treg = TIMER_REG_TVAL;
1458
break;
1459
1460
case SYS_AARCH32_CNTP_TVAL:
1461
case SYS_CNTP_TVAL_EL02:
1462
tmr = TIMER_PTIMER;
1463
treg = TIMER_REG_TVAL;
1464
break;
1465
1466
case SYS_CNTV_TVAL_EL02:
1467
tmr = TIMER_VTIMER;
1468
treg = TIMER_REG_TVAL;
1469
break;
1470
1471
case SYS_CNTHP_TVAL_EL2:
1472
tmr = TIMER_HPTIMER;
1473
treg = TIMER_REG_TVAL;
1474
break;
1475
1476
case SYS_CNTHV_TVAL_EL2:
1477
tmr = TIMER_HVTIMER;
1478
treg = TIMER_REG_TVAL;
1479
break;
1480
1481
case SYS_CNTP_CTL_EL0:
1482
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1483
tmr = TIMER_HPTIMER;
1484
else
1485
tmr = TIMER_PTIMER;
1486
treg = TIMER_REG_CTL;
1487
break;
1488
1489
case SYS_CNTV_CTL_EL0:
1490
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1491
tmr = TIMER_HVTIMER;
1492
else
1493
tmr = TIMER_VTIMER;
1494
treg = TIMER_REG_CTL;
1495
break;
1496
1497
case SYS_AARCH32_CNTP_CTL:
1498
case SYS_CNTP_CTL_EL02:
1499
tmr = TIMER_PTIMER;
1500
treg = TIMER_REG_CTL;
1501
break;
1502
1503
case SYS_CNTV_CTL_EL02:
1504
tmr = TIMER_VTIMER;
1505
treg = TIMER_REG_CTL;
1506
break;
1507
1508
case SYS_CNTHP_CTL_EL2:
1509
tmr = TIMER_HPTIMER;
1510
treg = TIMER_REG_CTL;
1511
break;
1512
1513
case SYS_CNTHV_CTL_EL2:
1514
tmr = TIMER_HVTIMER;
1515
treg = TIMER_REG_CTL;
1516
break;
1517
1518
case SYS_CNTP_CVAL_EL0:
1519
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1520
tmr = TIMER_HPTIMER;
1521
else
1522
tmr = TIMER_PTIMER;
1523
treg = TIMER_REG_CVAL;
1524
break;
1525
1526
case SYS_CNTV_CVAL_EL0:
1527
if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
1528
tmr = TIMER_HVTIMER;
1529
else
1530
tmr = TIMER_VTIMER;
1531
treg = TIMER_REG_CVAL;
1532
break;
1533
1534
case SYS_AARCH32_CNTP_CVAL:
1535
case SYS_CNTP_CVAL_EL02:
1536
tmr = TIMER_PTIMER;
1537
treg = TIMER_REG_CVAL;
1538
break;
1539
1540
case SYS_CNTV_CVAL_EL02:
1541
tmr = TIMER_VTIMER;
1542
treg = TIMER_REG_CVAL;
1543
break;
1544
1545
case SYS_CNTHP_CVAL_EL2:
1546
tmr = TIMER_HPTIMER;
1547
treg = TIMER_REG_CVAL;
1548
break;
1549
1550
case SYS_CNTHV_CVAL_EL2:
1551
tmr = TIMER_HVTIMER;
1552
treg = TIMER_REG_CVAL;
1553
break;
1554
1555
case SYS_CNTPCT_EL0:
1556
case SYS_CNTPCTSS_EL0:
1557
if (is_hyp_ctxt(vcpu))
1558
tmr = TIMER_HPTIMER;
1559
else
1560
tmr = TIMER_PTIMER;
1561
treg = TIMER_REG_CNT;
1562
break;
1563
1564
case SYS_AARCH32_CNTPCT:
1565
case SYS_AARCH32_CNTPCTSS:
1566
tmr = TIMER_PTIMER;
1567
treg = TIMER_REG_CNT;
1568
break;
1569
1570
case SYS_CNTVCT_EL0:
1571
case SYS_CNTVCTSS_EL0:
1572
if (is_hyp_ctxt(vcpu))
1573
tmr = TIMER_HVTIMER;
1574
else
1575
tmr = TIMER_VTIMER;
1576
treg = TIMER_REG_CNT;
1577
break;
1578
1579
case SYS_AARCH32_CNTVCT:
1580
case SYS_AARCH32_CNTVCTSS:
1581
tmr = TIMER_VTIMER;
1582
treg = TIMER_REG_CNT;
1583
break;
1584
1585
default:
1586
print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
1587
return undef_access(vcpu, p, r);
1588
}
1589
1590
if (p->is_write)
1591
kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
1592
else
1593
p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);
1594
1595
return true;
1596
}
1597
1598
static bool access_hv_timer(struct kvm_vcpu *vcpu,
1599
struct sys_reg_params *p,
1600
const struct sys_reg_desc *r)
1601
{
1602
if (!vcpu_el2_e2h_is_set(vcpu))
1603
return undef_access(vcpu, p, r);
1604
1605
return access_arch_timer(vcpu, p, r);
1606
}
1607
1608
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
1609
s64 new, s64 cur)
1610
{
1611
struct arm64_ftr_bits kvm_ftr = *ftrp;
1612
1613
/* Some features have different safe value type in KVM than host features */
1614
switch (id) {
1615
case SYS_ID_AA64DFR0_EL1:
1616
switch (kvm_ftr.shift) {
1617
case ID_AA64DFR0_EL1_PMUVer_SHIFT:
1618
kvm_ftr.type = FTR_LOWER_SAFE;
1619
break;
1620
case ID_AA64DFR0_EL1_DebugVer_SHIFT:
1621
kvm_ftr.type = FTR_LOWER_SAFE;
1622
break;
1623
}
1624
break;
1625
case SYS_ID_DFR0_EL1:
1626
if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
1627
kvm_ftr.type = FTR_LOWER_SAFE;
1628
break;
1629
}
1630
1631
return arm64_ftr_safe_value(&kvm_ftr, new, cur);
1632
}
1633
1634
/*
1635
* arm64_check_features() - Check if a feature register value constitutes
1636
* a subset of features indicated by the idreg's KVM sanitised limit.
1637
*
1638
* This function will check if each feature field of @val is the "safe" value
1639
* against idreg's KVM sanitised limit return from reset() callback.
1640
* If a field value in @val is the same as the one in limit, it is always
1641
* considered the safe value regardless For register fields that are not in
1642
* writable, only the value in limit is considered the safe value.
1643
*
1644
* Return: 0 if all the fields are safe. Otherwise, return negative errno.
1645
*/
1646
static int arm64_check_features(struct kvm_vcpu *vcpu,
1647
const struct sys_reg_desc *rd,
1648
u64 val)
1649
{
1650
const struct arm64_ftr_reg *ftr_reg;
1651
const struct arm64_ftr_bits *ftrp = NULL;
1652
u32 id = reg_to_encoding(rd);
1653
u64 writable_mask = rd->val;
1654
u64 limit = rd->reset(vcpu, rd);
1655
u64 mask = 0;
1656
1657
/*
1658
* Hidden and unallocated ID registers may not have a corresponding
1659
* struct arm64_ftr_reg. Of course, if the register is RAZ we know the
1660
* only safe value is 0.
1661
*/
1662
if (sysreg_visible_as_raz(vcpu, rd))
1663
return val ? -E2BIG : 0;
1664
1665
ftr_reg = get_arm64_ftr_reg(id);
1666
if (!ftr_reg)
1667
return -EINVAL;
1668
1669
ftrp = ftr_reg->ftr_bits;
1670
1671
for (; ftrp && ftrp->width; ftrp++) {
1672
s64 f_val, f_lim, safe_val;
1673
u64 ftr_mask;
1674
1675
ftr_mask = arm64_ftr_mask(ftrp);
1676
if ((ftr_mask & writable_mask) != ftr_mask)
1677
continue;
1678
1679
f_val = arm64_ftr_value(ftrp, val);
1680
f_lim = arm64_ftr_value(ftrp, limit);
1681
mask |= ftr_mask;
1682
1683
if (f_val == f_lim)
1684
safe_val = f_val;
1685
else
1686
safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);
1687
1688
if (safe_val != f_val)
1689
return -E2BIG;
1690
}
1691
1692
/* For fields that are not writable, values in limit are the safe values. */
1693
if ((val & ~mask) != (limit & ~mask))
1694
return -E2BIG;
1695
1696
return 0;
1697
}
1698
1699
static u8 pmuver_to_perfmon(u8 pmuver)
1700
{
1701
switch (pmuver) {
1702
case ID_AA64DFR0_EL1_PMUVer_IMP:
1703
return ID_DFR0_EL1_PerfMon_PMUv3;
1704
case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
1705
return ID_DFR0_EL1_PerfMon_IMPDEF;
1706
default:
1707
/* Anything ARMv8.1+ and NI have the same value. For now. */
1708
return pmuver;
1709
}
1710
}
1711
1712
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1713
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val);
1714
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
1715
1716
/* Read a sanitised cpufeature ID register by sys_reg_desc */
1717
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
1718
const struct sys_reg_desc *r)
1719
{
1720
u32 id = reg_to_encoding(r);
1721
u64 val;
1722
1723
if (sysreg_visible_as_raz(vcpu, r))
1724
return 0;
1725
1726
val = read_sanitised_ftr_reg(id);
1727
1728
switch (id) {
1729
case SYS_ID_AA64DFR0_EL1:
1730
val = sanitise_id_aa64dfr0_el1(vcpu, val);
1731
break;
1732
case SYS_ID_AA64PFR0_EL1:
1733
val = sanitise_id_aa64pfr0_el1(vcpu, val);
1734
break;
1735
case SYS_ID_AA64PFR1_EL1:
1736
val = sanitise_id_aa64pfr1_el1(vcpu, val);
1737
break;
1738
case SYS_ID_AA64PFR2_EL1:
1739
val &= ID_AA64PFR2_EL1_FPMR |
1740
(kvm_has_mte(vcpu->kvm) ?
1741
ID_AA64PFR2_EL1_MTEFAR | ID_AA64PFR2_EL1_MTESTOREONLY :
1742
0);
1743
break;
1744
case SYS_ID_AA64ISAR1_EL1:
1745
if (!vcpu_has_ptrauth(vcpu))
1746
val &= ~(ID_AA64ISAR1_EL1_APA |
1747
ID_AA64ISAR1_EL1_API |
1748
ID_AA64ISAR1_EL1_GPA |
1749
ID_AA64ISAR1_EL1_GPI);
1750
break;
1751
case SYS_ID_AA64ISAR2_EL1:
1752
if (!vcpu_has_ptrauth(vcpu))
1753
val &= ~(ID_AA64ISAR2_EL1_APA3 |
1754
ID_AA64ISAR2_EL1_GPA3);
1755
if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
1756
has_broken_cntvoff())
1757
val &= ~ID_AA64ISAR2_EL1_WFxT;
1758
break;
1759
case SYS_ID_AA64ISAR3_EL1:
1760
val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
1761
break;
1762
case SYS_ID_AA64MMFR2_EL1:
1763
val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
1764
val &= ~ID_AA64MMFR2_EL1_NV;
1765
break;
1766
case SYS_ID_AA64MMFR3_EL1:
1767
val &= ID_AA64MMFR3_EL1_TCRX |
1768
ID_AA64MMFR3_EL1_SCTLRX |
1769
ID_AA64MMFR3_EL1_S1POE |
1770
ID_AA64MMFR3_EL1_S1PIE;
1771
break;
1772
case SYS_ID_MMFR4_EL1:
1773
val &= ~ID_MMFR4_EL1_CCIDX;
1774
break;
1775
}
1776
1777
if (vcpu_has_nv(vcpu))
1778
val = limit_nv_id_reg(vcpu->kvm, id, val);
1779
1780
return val;
1781
}
1782
1783
static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
1784
const struct sys_reg_desc *r)
1785
{
1786
return __kvm_read_sanitised_id_reg(vcpu, r);
1787
}
1788
1789
static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
1790
{
1791
return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
1792
}
1793
1794
static bool is_feature_id_reg(u32 encoding)
1795
{
1796
return (sys_reg_Op0(encoding) == 3 &&
1797
(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
1798
sys_reg_CRn(encoding) == 0 &&
1799
sys_reg_CRm(encoding) <= 7);
1800
}
1801
1802
/*
1803
* Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
1804
* (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
1805
* registers KVM maintains on a per-VM basis.
1806
*
1807
* Additionally, the implementation ID registers and CTR_EL0 are handled as
1808
* per-VM registers.
1809
*/
1810
static inline bool is_vm_ftr_id_reg(u32 id)
1811
{
1812
switch (id) {
1813
case SYS_CTR_EL0:
1814
case SYS_MIDR_EL1:
1815
case SYS_REVIDR_EL1:
1816
case SYS_AIDR_EL1:
1817
return true;
1818
default:
1819
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1820
sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1821
sys_reg_CRm(id) < 8);
1822
1823
}
1824
}
1825
1826
static inline bool is_vcpu_ftr_id_reg(u32 id)
1827
{
1828
return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
1829
}
1830
1831
static inline bool is_aa32_id_reg(u32 id)
1832
{
1833
return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
1834
sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
1835
sys_reg_CRm(id) <= 3);
1836
}
1837
1838
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
1839
const struct sys_reg_desc *r)
1840
{
1841
u32 id = reg_to_encoding(r);
1842
1843
switch (id) {
1844
case SYS_ID_AA64ZFR0_EL1:
1845
if (!vcpu_has_sve(vcpu))
1846
return REG_RAZ;
1847
break;
1848
}
1849
1850
return 0;
1851
}
1852
1853
static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
1854
const struct sys_reg_desc *r)
1855
{
1856
/*
1857
* AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
1858
* EL. Promote to RAZ/WI in order to guarantee consistency between
1859
* systems.
1860
*/
1861
if (!kvm_supports_32bit_el0())
1862
return REG_RAZ | REG_USER_WI;
1863
1864
return id_visibility(vcpu, r);
1865
}
1866
1867
static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
1868
const struct sys_reg_desc *r)
1869
{
1870
return REG_RAZ;
1871
}
1872
1873
/* cpufeature ID register access trap handlers */
1874
1875
static bool access_id_reg(struct kvm_vcpu *vcpu,
1876
struct sys_reg_params *p,
1877
const struct sys_reg_desc *r)
1878
{
1879
if (p->is_write)
1880
return write_to_read_only(vcpu, p, r);
1881
1882
p->regval = read_id_reg(vcpu, r);
1883
1884
return true;
1885
}
1886
1887
/* Visibility overrides for SVE-specific control registers */
1888
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
1889
const struct sys_reg_desc *rd)
1890
{
1891
if (vcpu_has_sve(vcpu))
1892
return 0;
1893
1894
return REG_HIDDEN;
1895
}
1896
1897
static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
1898
const struct sys_reg_desc *rd)
1899
{
1900
if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
1901
return 0;
1902
1903
return REG_HIDDEN;
1904
}
1905
1906
static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
1907
const struct sys_reg_desc *rd)
1908
{
1909
if (kvm_has_fpmr(vcpu->kvm))
1910
return 0;
1911
1912
return REG_HIDDEN;
1913
}
1914
1915
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
1916
{
1917
if (!vcpu_has_sve(vcpu))
1918
val &= ~ID_AA64PFR0_EL1_SVE_MASK;
1919
1920
/*
1921
* The default is to expose CSV2 == 1 if the HW isn't affected.
1922
* Although this is a per-CPU feature, we make it global because
1923
* asymmetric systems are just a nuisance.
1924
*
1925
* Userspace can override this as long as it doesn't promise
1926
* the impossible.
1927
*/
1928
if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
1929
val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
1930
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
1931
}
1932
if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
1933
val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
1934
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
1935
}
1936
1937
if (vgic_is_v3(vcpu->kvm)) {
1938
val &= ~ID_AA64PFR0_EL1_GIC_MASK;
1939
val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
1940
}
1941
1942
val &= ~ID_AA64PFR0_EL1_AMU_MASK;
1943
1944
/*
1945
* MPAM is disabled by default as KVM also needs a set of PARTID to
1946
* program the MPAMVPMx_EL2 PARTID remapping registers with. But some
1947
* older kernels let the guest see the ID bit.
1948
*/
1949
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
1950
1951
return val;
1952
}
1953
1954
static u64 sanitise_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu, u64 val)
1955
{
1956
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
1957
1958
if (!kvm_has_mte(vcpu->kvm)) {
1959
val &= ~ID_AA64PFR1_EL1_MTE;
1960
val &= ~ID_AA64PFR1_EL1_MTE_frac;
1961
}
1962
1963
if (!(cpus_have_final_cap(ARM64_HAS_RASV1P1_EXTN) &&
1964
SYS_FIELD_GET(ID_AA64PFR0_EL1, RAS, pfr0) == ID_AA64PFR0_EL1_RAS_IMP))
1965
val &= ~ID_AA64PFR1_EL1_RAS_frac;
1966
1967
val &= ~ID_AA64PFR1_EL1_SME;
1968
val &= ~ID_AA64PFR1_EL1_RNDR_trap;
1969
val &= ~ID_AA64PFR1_EL1_NMI;
1970
val &= ~ID_AA64PFR1_EL1_GCS;
1971
val &= ~ID_AA64PFR1_EL1_THE;
1972
val &= ~ID_AA64PFR1_EL1_MTEX;
1973
val &= ~ID_AA64PFR1_EL1_PFAR;
1974
val &= ~ID_AA64PFR1_EL1_MPAM_frac;
1975
1976
return val;
1977
}
1978
1979
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
1980
{
1981
val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
1982
1983
/*
1984
* Only initialize the PMU version if the vCPU was configured with one.
1985
*/
1986
val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
1987
if (kvm_vcpu_has_pmu(vcpu))
1988
val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
1989
kvm_arm_pmu_get_pmuver_limit());
1990
1991
/* Hide SPE from guests */
1992
val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;
1993
1994
/* Hide BRBE from guests */
1995
val &= ~ID_AA64DFR0_EL1_BRBE_MASK;
1996
1997
return val;
1998
}
1999
2000
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	/*
	 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
	 * nonzero minimum safe value.
	 */
	if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}

static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	u8 perfmon;
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu)) {
		perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
	}

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);

	return val;
}

static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
	u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);

	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}

static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd, u64 user_val)
{
	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
	u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;

	/*
	 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
	 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
	 * guests, but didn't add trap handling. KVM doesn't support MPAM and
	 * always returns an UNDEF for these registers. The guest must see 0
	 * for this field.
	 *
	 * But KVM must also accept values from user-space that were provided
	 * by KVM. On CPUs that support MPAM, permit user-space to write
	 * the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
	 */
	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

	/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
	if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
	    !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
	    (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
		return -EINVAL;

	/*
	 * If we are running on a GICv5 host and support FEAT_GCIE_LEGACY, then
	 * we support GICv3. Fail attempts to do anything but set that to IMP.
	 */
	if (vgic_is_v3_compat(vcpu->kvm) &&
	    FIELD_GET(ID_AA64PFR0_EL1_GIC_MASK, user_val) != ID_AA64PFR0_EL1_GIC_IMP)
		return -EINVAL;

	return set_id_reg(vcpu, rd, user_val);
}

static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd, u64 user_val)
{
	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
	u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
	u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
	u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
	u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);

	/* See set_id_aa64pfr0_el1 for comment about MPAM */
	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
		user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;

	/*
	 * Previously MTE_frac was hidden from the guest. However, if the
	 * hardware supports MTE2 but not MTE_ASYM_FAULT then a value
	 * of 0 for this field indicates that the hardware supports
	 * MTE_ASYNC, whereas 0xf indicates MTE_ASYNC is not supported.
	 *
	 * As KVM must accept values from user-space that were previously
	 * provided by KVM, when ID_AA64PFR1_EL1.MTE is 2 allow user-space
	 * to set ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to
	 * avoid incorrectly claiming hardware support for MTE_ASYNC in
	 * the guest.
	 */

	if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
	    hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
	    user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
		user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
		user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
	}

	return set_id_reg(vcpu, rd, user_val);
}

static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd, u64 user_val)
{
	u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
	u64 tgran2_mask = ID_AA64MMFR0_EL1_TGRAN4_2_MASK |
			  ID_AA64MMFR0_EL1_TGRAN16_2_MASK |
			  ID_AA64MMFR0_EL1_TGRAN64_2_MASK;

	if (vcpu_has_nv(vcpu) &&
	    ((sanitized_val & tgran2_mask) != (user_val & tgran2_mask)))
		return -EINVAL;

	return set_id_reg(vcpu, rd, user_val);
}

static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd, u64 user_val)
{
	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
	u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;

	/*
	 * We made the mistake of exposing the now deprecated NV field,
	 * so allow userspace to write it, but silently ignore it.
	 */
	if ((hw_val & nv_mask) == (user_val & nv_mask))
		user_val &= ~nv_mask;

	return set_id_reg(vcpu, rd, user_val);
}

static int set_ctr_el0(struct kvm_vcpu *vcpu,
		       const struct sys_reg_desc *rd, u64 user_val)
{
	u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);

	/*
	 * Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
	 * Hence only allow setting VIPT (0b10) or PIPT (0b11) for L1Ip,
	 * based on what the hardware reports.
	 *
	 * Using a VIPT software model on PIPT hardware leads to
	 * over-invalidation, but is still correct. Hence, we can allow
	 * downgrading PIPT to VIPT, but not the other way around. This is
	 * handled via arm64_ftr_safe_value() as CTR_EL0 ftr_bits has the
	 * L1Ip field with type FTR_EXACT and the safe value set to VIPT.
	 */
	switch (user_L1Ip) {
	case CTR_EL0_L1Ip_RESERVED_VPIPT:
	case CTR_EL0_L1Ip_RESERVED_AIVIVT:
		return -EINVAL;
	case CTR_EL0_L1Ip_VIPT:
	case CTR_EL0_L1Ip_PIPT:
		return set_id_reg(vcpu, rd, user_val);
	default:
		return -ENOENT;
	}
}

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	/*
	 * Avoid locking if the VM has already started, as the ID registers are
	 * guaranteed to be invariant at that point.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		*val = read_id_reg(vcpu, rd);
		return 0;
	}

	mutex_lock(&vcpu->kvm->arch.config_lock);
	*val = read_id_reg(vcpu, rd);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return 0;
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u32 id = reg_to_encoding(rd);
	int ret;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	/*
	 * Once the VM has started the ID registers are immutable. Reject any
	 * write that does not match the final register value.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		if (val != read_id_reg(vcpu, rd))
			ret = -EBUSY;
		else
			ret = 0;

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		return ret;
	}

	ret = arm64_check_features(vcpu, rd, val);
	if (!ret)
		kvm_set_vm_id_reg(vcpu->kvm, id, val);

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	/*
	 * arm64_check_features() returns -E2BIG to indicate the register's
	 * feature set is a superset of the maximally-allowed register value.
	 * While it would be nice to precisely describe this to userspace, the
	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
	 * writes return -EINVAL.
	 */
	if (ret == -E2BIG)
		ret = -EINVAL;
	return ret;
}

void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
{
	u64 *p = __vm_id_reg(&kvm->arch, reg);

	lockdep_assert_held(&kvm->arch.config_lock);

	if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
		return;

	*p = val;
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}

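/*
 * Note on the accessor below: CTR_EL0 is emulated using the VM-wide value
 * so that all vCPUs observe the same cache type information; writes are
 * reported as accesses to a read-only register.
 */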
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}

/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 */
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, make the unified cache L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_assign_sys_reg(vcpu, r->reg, clidr);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		     u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_assign_sys_reg(vcpu, rd->reg, val);

	return 0;
}

static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}

static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}

static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}

static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}

static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	/*
	 * We really shouldn't be here, and this is likely the result
	 * of a misconfigured trap, as this register should target the
	 * VNCR page, and nothing else.
	 */
	return bad_trap(vcpu, p, r,
			"trap of VNCR-backed register");
}

static bool bad_redir_trap(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	/*
	 * We really shouldn't be here, and this is likely the result
	 * of a misconfigured trap, as this register should target the
	 * corresponding EL1, and nothing else.
	 */
	return bad_trap(vcpu, p, r,
			"trap of EL2 register redirected to EL1");
}

#define EL2_REG_FILTERED(name, acc, rst, v, filter) {	\
	SYS_DESC(SYS_##name),				\
	.access = acc,					\
	.reset = rst,					\
	.reg = name,					\
	.visibility = filter,				\
	.val = v,					\
}

#define EL2_REG(name, acc, rst, v)	\
	EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)

#define EL2_REG_VNCR(name, rst, v)	EL2_REG(name, bad_vncr_trap, rst, v)
#define EL2_REG_VNCR_FILT(name, vis)	\
	EL2_REG_FILTERED(name, bad_vncr_trap, reset_val, 0, vis)
#define EL2_REG_VNCR_GICv3(name)	\
	EL2_REG_VNCR_FILT(name, hidden_visibility)
#define EL2_REG_REDIR(name, rst, v)	EL2_REG(name, bad_redir_trap, rst, v)

/*
 * Since the reset() callback and the val field are not otherwise needed for
 * ID registers, they are repurposed for them: reset() returns the KVM
 * sanitised register value, which is the same as the host kernel sanitised
 * value if there is no KVM-specific sanitisation, and val is used as a mask
 * of the fields writable from userspace (only bits set to 1 are writable).
 * This mask may become unnecessary once all ID registers are writable from
 * userspace.
 */

#define ID_DESC_DEFAULT_CALLBACKS		\
	.access = access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg

#define ID_DESC(name)				\
	SYS_DESC(SYS_##name),			\
	ID_DESC_DEFAULT_CALLBACKS

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	ID_DESC(name),				\
	.val = 0,				\
}

/* sys_reg_desc initialiser for known cpufeature AArch32 ID registers */
#define AA32_ID_SANITISED(name) {		\
	ID_DESC(name),				\
	.visibility = aa32_id_visibility,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for writable ID registers */
#define ID_WRITABLE(name, mask) {		\
	ID_DESC(name),				\
	.val = mask,				\
}

/* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
#define ID_FILTERED(sysreg, name, mask) {	\
	ID_DESC(sysreg),			\
	.set_user = set_##name,			\
	.val = (mask),				\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	.name = "S3_0_0_" #crm "_" #op2,		\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	ID_DESC_DEFAULT_CALLBACKS,			\
	.visibility = raz_visibility,			\
	.val = 0,					\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	ID_DESC(name),				\
	.visibility = raz_visibility,		\
	.val = 0,				\
}

static bool access_sp_el1(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
	else
		p->regval = __vcpu_sys_reg(vcpu, SP_EL1);

	return true;
}

static bool access_elr(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
	else
		p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);

	return true;
}

static bool access_spsr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
	else
		p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);

	return true;
}

static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
				struct sys_reg_params *p,
				const struct sys_reg_desc *r)
{
	if (p->is_write)
		__vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
	else
		p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);

	return true;
}

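/*
 * Reset value of the guest's HCR_EL2: if the host lacks the
 * ARM64_HAS_HCR_NV1 capability, force E2H so that only a VHE (E2H=1)
 * view of EL2 is offered to the guest hypervisor.
 */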
static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 val = r->val;

	if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
		val |= HCR_E2H;

	__vcpu_assign_sys_reg(vcpu, r->reg, val);

	return __vcpu_sys_reg(vcpu, r->reg);
}

static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *rd,
				     unsigned int (*fn)(const struct kvm_vcpu *,
							const struct sys_reg_desc *))
{
	return el2_visibility(vcpu, rd) ?: fn(vcpu, rd);
}

static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return __el2_visibility(vcpu, rd, sve_visibility);
}

static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
					const struct sys_reg_desc *rd)
{
	if (el2_visibility(vcpu, rd) == 0 &&
	    kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
		return 0;

	return REG_HIDDEN;
}

static unsigned int sctlr2_visibility(const struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	if (kvm_has_sctlr2(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

static unsigned int sctlr2_el2_visibility(const struct kvm_vcpu *vcpu,
					  const struct sys_reg_desc *rd)
{
	return __el2_visibility(vcpu, rd, sctlr2_visibility);
}

static bool access_zcr_el2(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	unsigned int vq;

	if (guest_hyp_sve_traps_enabled(vcpu)) {
		kvm_inject_nested_sve_trap(vcpu);
		return true;
	}

	if (!p->is_write) {
		p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
		return true;
	}

	vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
	vq = min(vq, vcpu_sve_max_vq(vcpu));
	vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);

	return true;
}

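/*
 * Accessors for the EL2 view of the GICv3 hypervisor interface status
 * registers (ICH_VTR_EL2, ICH_MISR_EL2, ICH_EISR_EL2, ICH_ELRSR_EL2):
 * these are read-only, so writes are reported as bad accesses and reads
 * are serviced by the vgic code.
 */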
static bool access_gic_vtr(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = kvm_get_guest_vtr_el2();

	return true;
}

static bool access_gic_misr(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = vgic_v3_get_misr(vcpu);

	return true;
}

static bool access_gic_eisr(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = vgic_v3_get_eisr(vcpu);

	return true;
}

static bool access_gic_elrsr(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = vgic_v3_get_elrsr(vcpu);

	return true;
}

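/*
 * Visibility helpers for the remaining feature-conditional registers: each
 * register is hidden unless the corresponding feature (S1POE, TCR2, FGT,
 * FGT2, S1PIE) is exposed to the VM, with the _el2_ variants additionally
 * requiring nested virt.
 */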
static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *rd)
{
	if (kvm_has_s1poe(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu,
					 const struct sys_reg_desc *rd)
{
	return __el2_visibility(vcpu, rd, s1poe_visibility);
}

static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu,
				    const struct sys_reg_desc *rd)
{
	if (kvm_has_tcr2(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
					const struct sys_reg_desc *rd)
{
	return __el2_visibility(vcpu, rd, tcr2_visibility);
}

static unsigned int fgt2_visibility(const struct kvm_vcpu *vcpu,
				    const struct sys_reg_desc *rd)
{
	if (el2_visibility(vcpu, rd) == 0 &&
	    kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, FGT2))
		return 0;

	return REG_HIDDEN;
}

static unsigned int fgt_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (el2_visibility(vcpu, rd) == 0 &&
	    kvm_has_feat(vcpu->kvm, ID_AA64MMFR0_EL1, FGT, IMP))
		return 0;

	return REG_HIDDEN;
}

static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *rd)
{
	if (kvm_has_s1pie(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
					 const struct sys_reg_desc *rd)
{
	return __el2_visibility(vcpu, rd, s1pie_visibility);
}

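/*
 * Trap handler for the guest hypervisor's MDCR_EL2: writes clamp HPMN to
 * the number of counters exposed to the VM and request a PMU reload when
 * HPME changes, while reads simply return the stored value.
 */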
static bool access_mdcr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2);

	if (!p->is_write) {
		p->regval = old;
		return true;
	}

	val = p->regval;
	hpmn = FIELD_GET(MDCR_EL2_HPMN, val);

	/*
	 * If HPMN is out of bounds, limit it to what we actually
	 * support. This matches the UNKNOWN definition of the field
	 * in that case, and keeps the emulation simple. Sort of.
	 */
	if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
		hpmn = vcpu->kvm->arch.nr_pmu_counters;
		u64p_replace_bits(&val, hpmn, MDCR_EL2_HPMN);
	}

	__vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);

	/*
	 * Request a reload of the PMU to enable/disable the counters
	 * affected by HPME.
	 */
	if ((old ^ val) & MDCR_EL2_HPME)
		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return true;
}

static bool access_ras(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	struct kvm *kvm = vcpu->kvm;

	switch (reg_to_encoding(r)) {
	case SYS_ERXPFGCDN_EL1:
	case SYS_ERXPFGCTL_EL1:
	case SYS_ERXPFGF_EL1:
	case SYS_ERXMISC2_EL1:
	case SYS_ERXMISC3_EL1:
		if (!(kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1) ||
		      (kvm_has_feat_enum(kvm, ID_AA64PFR0_EL1, RAS, IMP) &&
		       kvm_has_feat(kvm, ID_AA64PFR1_EL1, RAS_frac, RASv1p1)))) {
			kvm_inject_undefined(vcpu);
			return false;
		}
		break;
	default:
		if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP)) {
			kvm_inject_undefined(vcpu);
			return false;
		}
	}

	return trap_raz_wi(vcpu, p, r);
}

/*
 * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
 * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
 * The values made visible to userspace were the register values of the boot
 * CPU.
 *
 * At the same time, reads from these registers at EL1 previously were not
 * trapped, allowing the guest to read the actual hardware value. On big-little
 * machines, this means the VM can see different values depending on where a
 * given vCPU got scheduled.
 *
 * These registers are now trapped as collateral damage from SME, and what
 * follows attempts to give a user / guest view consistent with the existing
 * ABI.
 */
static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	/*
	 * Return the VM-scoped implementation ID register values if userspace
	 * has made them writable.
	 */
	if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
		return access_id_reg(vcpu, p, r);

	/*
	 * Otherwise, fall back to the old behavior of returning the value of
	 * the current CPU.
	 */
	switch (reg_to_encoding(r)) {
	case SYS_REVIDR_EL1:
		p->regval = read_sysreg(revidr_el1);
		break;
	case SYS_AIDR_EL1:
		p->regval = read_sysreg(aidr_el1);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return true;
}

static u64 __ro_after_init boot_cpu_midr_val;
static u64 __ro_after_init boot_cpu_revidr_val;
static u64 __ro_after_init boot_cpu_aidr_val;

static void init_imp_id_regs(void)
{
	boot_cpu_midr_val = read_sysreg(midr_el1);
	boot_cpu_revidr_val = read_sysreg(revidr_el1);
	boot_cpu_aidr_val = read_sysreg(aidr_el1);
}

static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	switch (reg_to_encoding(r)) {
	case SYS_MIDR_EL1:
		return boot_cpu_midr_val;
	case SYS_REVIDR_EL1:
		return boot_cpu_revidr_val;
	case SYS_AIDR_EL1:
		return boot_cpu_aidr_val;
	default:
		KVM_BUG_ON(1, vcpu->kvm);
		return 0;
	}
}

static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct kvm *kvm = vcpu->kvm;
	u64 expected;

	guard(mutex)(&kvm->arch.config_lock);

	expected = read_id_reg(vcpu, r);
	if (expected == val)
		return 0;

	if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
		return -EINVAL;

	/*
	 * Once the VM has started the ID registers are immutable. Reject the
	 * write if userspace tries to change it.
	 */
	if (kvm_vm_has_ran_once(kvm))
		return -EBUSY;

	/*
	 * Any value is allowed for the implementation ID registers so long as
	 * it is within the writable mask.
	 */
	if ((val & r->val) != val)
		return -EINVAL;

	kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
	return 0;
}

#define IMPLEMENTATION_ID(reg, mask) {		\
	SYS_DESC(SYS_##reg),			\
	.access = access_imp_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_imp_id_reg,		\
	.reset = reset_imp_id_reg,		\
	.val = mask,				\
}

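/*
 * MDCR_EL2 resets with HPMN (bits [4:0]) set to the number of PMU counters
 * exposed to the VM and all other bits clear.
 */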
static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	__vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
	return vcpu->kvm->arch.nr_pmu_counters;
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
2995
DBG_BCR_BVR_WCR_WVR_EL1(0),
2996
DBG_BCR_BVR_WCR_WVR_EL1(1),
2997
{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
2998
{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
2999
DBG_BCR_BVR_WCR_WVR_EL1(2),
3000
DBG_BCR_BVR_WCR_WVR_EL1(3),
3001
DBG_BCR_BVR_WCR_WVR_EL1(4),
3002
DBG_BCR_BVR_WCR_WVR_EL1(5),
3003
DBG_BCR_BVR_WCR_WVR_EL1(6),
3004
DBG_BCR_BVR_WCR_WVR_EL1(7),
3005
DBG_BCR_BVR_WCR_WVR_EL1(8),
3006
DBG_BCR_BVR_WCR_WVR_EL1(9),
3007
DBG_BCR_BVR_WCR_WVR_EL1(10),
3008
DBG_BCR_BVR_WCR_WVR_EL1(11),
3009
DBG_BCR_BVR_WCR_WVR_EL1(12),
3010
DBG_BCR_BVR_WCR_WVR_EL1(13),
3011
DBG_BCR_BVR_WCR_WVR_EL1(14),
3012
DBG_BCR_BVR_WCR_WVR_EL1(15),
3013
3014
{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
3015
{ SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
3016
{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
3017
OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
3018
{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
3019
{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
3020
{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
3021
{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
3022
{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
3023
3024
{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
3025
{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
3026
// DBGDTR[TR]X_EL0 share the same encoding
3027
{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
3028
3029
{ SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
3030
3031
IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
3032
{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
3033
IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
3034
3035
/*
3036
* ID regs: all ID_SANITISED() entries here must have corresponding
3037
* entries in arm64_ftr_regs[].
3038
*/
3039
3040
/* AArch64 mappings of the AArch32 ID registers */
3041
/* CRm=1 */
3042
AA32_ID_SANITISED(ID_PFR0_EL1),
3043
AA32_ID_SANITISED(ID_PFR1_EL1),
3044
{ SYS_DESC(SYS_ID_DFR0_EL1),
3045
.access = access_id_reg,
3046
.get_user = get_id_reg,
3047
.set_user = set_id_dfr0_el1,
3048
.visibility = aa32_id_visibility,
3049
.reset = read_sanitised_id_dfr0_el1,
3050
.val = ID_DFR0_EL1_PerfMon_MASK |
3051
ID_DFR0_EL1_CopDbg_MASK, },
3052
ID_HIDDEN(ID_AFR0_EL1),
3053
AA32_ID_SANITISED(ID_MMFR0_EL1),
3054
AA32_ID_SANITISED(ID_MMFR1_EL1),
3055
AA32_ID_SANITISED(ID_MMFR2_EL1),
3056
AA32_ID_SANITISED(ID_MMFR3_EL1),
3057
3058
/* CRm=2 */
3059
AA32_ID_SANITISED(ID_ISAR0_EL1),
3060
AA32_ID_SANITISED(ID_ISAR1_EL1),
3061
AA32_ID_SANITISED(ID_ISAR2_EL1),
3062
AA32_ID_SANITISED(ID_ISAR3_EL1),
3063
AA32_ID_SANITISED(ID_ISAR4_EL1),
3064
AA32_ID_SANITISED(ID_ISAR5_EL1),
3065
AA32_ID_SANITISED(ID_MMFR4_EL1),
3066
AA32_ID_SANITISED(ID_ISAR6_EL1),
3067
3068
/* CRm=3 */
3069
AA32_ID_SANITISED(MVFR0_EL1),
3070
AA32_ID_SANITISED(MVFR1_EL1),
3071
AA32_ID_SANITISED(MVFR2_EL1),
3072
ID_UNALLOCATED(3,3),
3073
AA32_ID_SANITISED(ID_PFR2_EL1),
3074
ID_HIDDEN(ID_DFR1_EL1),
3075
AA32_ID_SANITISED(ID_MMFR5_EL1),
3076
ID_UNALLOCATED(3,7),
3077
3078
/* AArch64 ID registers */
3079
/* CRm=4 */
3080
ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
3081
~(ID_AA64PFR0_EL1_AMU |
3082
ID_AA64PFR0_EL1_MPAM |
3083
ID_AA64PFR0_EL1_SVE |
3084
ID_AA64PFR0_EL1_AdvSIMD |
3085
ID_AA64PFR0_EL1_FP)),
3086
ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
3087
~(ID_AA64PFR1_EL1_PFAR |
3088
ID_AA64PFR1_EL1_MTEX |
3089
ID_AA64PFR1_EL1_THE |
3090
ID_AA64PFR1_EL1_GCS |
3091
ID_AA64PFR1_EL1_MTE_frac |
3092
ID_AA64PFR1_EL1_NMI |
3093
ID_AA64PFR1_EL1_RNDR_trap |
3094
ID_AA64PFR1_EL1_SME |
3095
ID_AA64PFR1_EL1_RES0 |
3096
ID_AA64PFR1_EL1_MPAM_frac |
3097
ID_AA64PFR1_EL1_MTE)),
3098
ID_WRITABLE(ID_AA64PFR2_EL1,
3099
ID_AA64PFR2_EL1_FPMR |
3100
ID_AA64PFR2_EL1_MTEFAR |
3101
ID_AA64PFR2_EL1_MTESTOREONLY),
3102
ID_UNALLOCATED(4,3),
3103
ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
3104
ID_HIDDEN(ID_AA64SMFR0_EL1),
3105
ID_UNALLOCATED(4,6),
3106
ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
3107
3108
/* CRm=5 */
3109
/*
3110
* Prior to FEAT_Debugv8.9, the architecture defines context-aware
3111
* breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
3112
* KVM does not trap + emulate the breakpoint registers, and as such
3113
* cannot support a layout that misaligns with the underlying hardware.
3114
* While it may be possible to describe a subset that aligns with
3115
* hardware, just prevent changes to BRPs and CTX_CMPs altogether for
3116
* simplicity.
3117
*
3118
* See DDI0487K.a, section D2.8.3 Breakpoint types and linking
3119
* of breakpoints for more details.
3120
*/
3121
ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
3122
ID_AA64DFR0_EL1_DoubleLock_MASK |
3123
ID_AA64DFR0_EL1_WRPs_MASK |
3124
ID_AA64DFR0_EL1_PMUVer_MASK |
3125
ID_AA64DFR0_EL1_DebugVer_MASK),
3126
ID_SANITISED(ID_AA64DFR1_EL1),
3127
ID_UNALLOCATED(5,2),
3128
ID_UNALLOCATED(5,3),
3129
ID_HIDDEN(ID_AA64AFR0_EL1),
3130
ID_HIDDEN(ID_AA64AFR1_EL1),
3131
ID_UNALLOCATED(5,6),
3132
ID_UNALLOCATED(5,7),
3133
3134
/* CRm=6 */
3135
ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
3136
ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
3137
ID_AA64ISAR1_EL1_GPA |
3138
ID_AA64ISAR1_EL1_API |
3139
ID_AA64ISAR1_EL1_APA)),
3140
ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
3141
ID_AA64ISAR2_EL1_APA3 |
3142
ID_AA64ISAR2_EL1_GPA3)),
3143
ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
3144
ID_AA64ISAR3_EL1_FAMINMAX)),
3145
ID_UNALLOCATED(6,4),
3146
ID_UNALLOCATED(6,5),
3147
ID_UNALLOCATED(6,6),
3148
ID_UNALLOCATED(6,7),
3149
3150
/* CRm=7 */
3151
ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1,
3152
~(ID_AA64MMFR0_EL1_RES0 |
3153
ID_AA64MMFR0_EL1_ASIDBITS)),
3154
ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
3155
ID_AA64MMFR1_EL1_HCX |
3156
ID_AA64MMFR1_EL1_TWED |
3157
ID_AA64MMFR1_EL1_XNX |
3158
ID_AA64MMFR1_EL1_VH |
3159
ID_AA64MMFR1_EL1_VMIDBits)),
3160
ID_FILTERED(ID_AA64MMFR2_EL1,
3161
id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
3162
ID_AA64MMFR2_EL1_EVT |
3163
ID_AA64MMFR2_EL1_FWB |
3164
ID_AA64MMFR2_EL1_IDS |
3165
ID_AA64MMFR2_EL1_NV |
3166
ID_AA64MMFR2_EL1_CCIDX)),
3167
ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
3168
ID_AA64MMFR3_EL1_SCTLRX |
3169
ID_AA64MMFR3_EL1_S1PIE |
3170
ID_AA64MMFR3_EL1_S1POE)),
3171
ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
3172
ID_UNALLOCATED(7,5),
3173
ID_UNALLOCATED(7,6),
3174
ID_UNALLOCATED(7,7),
3175
3176
{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
3177
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
3178
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
3179
{ SYS_DESC(SYS_SCTLR2_EL1), access_vm_reg, reset_val, SCTLR2_EL1, 0,
3180
.visibility = sctlr2_visibility },
3181
3182
MTE_REG(RGSR_EL1),
3183
MTE_REG(GCR_EL1),
3184
3185
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
3186
{ SYS_DESC(SYS_TRFCR_EL1), undef_access },
3187
{ SYS_DESC(SYS_SMPRI_EL1), undef_access },
3188
{ SYS_DESC(SYS_SMCR_EL1), undef_access },
3189
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
3190
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
3191
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
3192
{ SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0,
3193
.visibility = tcr2_visibility },
3194
3195
PTRAUTH_KEY(APIA),
3196
PTRAUTH_KEY(APIB),
3197
PTRAUTH_KEY(APDA),
3198
PTRAUTH_KEY(APDB),
3199
PTRAUTH_KEY(APGA),
3200
3201
{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
3202
{ SYS_DESC(SYS_ELR_EL1), access_elr},
3203
3204
{ SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
3205
3206
{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
3207
{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
3208
{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
3209
3210
{ SYS_DESC(SYS_ERRIDR_EL1), access_ras },
3211
{ SYS_DESC(SYS_ERRSELR_EL1), access_ras },
3212
{ SYS_DESC(SYS_ERXFR_EL1), access_ras },
3213
{ SYS_DESC(SYS_ERXCTLR_EL1), access_ras },
3214
{ SYS_DESC(SYS_ERXSTATUS_EL1), access_ras },
3215
{ SYS_DESC(SYS_ERXADDR_EL1), access_ras },
3216
{ SYS_DESC(SYS_ERXPFGF_EL1), access_ras },
3217
{ SYS_DESC(SYS_ERXPFGCTL_EL1), access_ras },
3218
{ SYS_DESC(SYS_ERXPFGCDN_EL1), access_ras },
3219
{ SYS_DESC(SYS_ERXMISC0_EL1), access_ras },
3220
{ SYS_DESC(SYS_ERXMISC1_EL1), access_ras },
3221
{ SYS_DESC(SYS_ERXMISC2_EL1), access_ras },
3222
{ SYS_DESC(SYS_ERXMISC3_EL1), access_ras },
3223
3224
MTE_REG(TFSR_EL1),
3225
MTE_REG(TFSRE0_EL1),
3226
3227
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
3228
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
3229
3230
{ SYS_DESC(SYS_PMSCR_EL1), undef_access },
3231
{ SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
3232
{ SYS_DESC(SYS_PMSICR_EL1), undef_access },
3233
{ SYS_DESC(SYS_PMSIRR_EL1), undef_access },
3234
{ SYS_DESC(SYS_PMSFCR_EL1), undef_access },
3235
{ SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
3236
{ SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
3237
{ SYS_DESC(SYS_PMSIDR_EL1), undef_access },
3238
{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
3239
{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
3240
{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
3241
/* PMBIDR_EL1 is not trapped */
3242
3243
{ PMU_SYS_REG(PMINTENSET_EL1),
3244
.access = access_pminten, .reg = PMINTENSET_EL1,
3245
.get_user = get_pmreg, .set_user = set_pmreg },
3246
{ PMU_SYS_REG(PMINTENCLR_EL1),
3247
.access = access_pminten, .reg = PMINTENSET_EL1,
3248
.get_user = get_pmreg, .set_user = set_pmreg },
3249
{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
3250
3251
{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
3252
{ SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1,
3253
.visibility = s1pie_visibility },
3254
{ SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1,
3255
.visibility = s1pie_visibility },
3256
{ SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
3257
.visibility = s1poe_visibility },
3258
{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
3259
3260
{ SYS_DESC(SYS_LORSA_EL1), trap_loregion },
3261
{ SYS_DESC(SYS_LOREA_EL1), trap_loregion },
3262
{ SYS_DESC(SYS_LORN_EL1), trap_loregion },
3263
{ SYS_DESC(SYS_LORC_EL1), trap_loregion },
3264
{ SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
3265
{ SYS_DESC(SYS_LORID_EL1), trap_loregion },
3266
3267
{ SYS_DESC(SYS_MPAM1_EL1), undef_access },
3268
{ SYS_DESC(SYS_MPAM0_EL1), undef_access },
3269
{ SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
3270
{ SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
3271
3272
{ SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
3273
{ SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
3274
{ SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
3275
{ SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
3276
{ SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
3277
{ SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
3278
{ SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
3279
{ SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
3280
{ SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
3281
{ SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
3282
{ SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
3283
{ SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
3284
{ SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
3285
{ SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
3286
{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
3287
{ SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
3288
{ SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
3289
{ SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
3290
{ SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
3291
{ SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
3292
{ SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
3293
{ SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
3294
{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
3295
{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
3296
{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
3297
3298
{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
3299
{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
3300
3301
{ SYS_DESC(SYS_ACCDATA_EL1), undef_access },
3302
3303
{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
3304
3305
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
3306
3307
{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
3308
{ SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
3309
.set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
3310
{ SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
3311
{ SYS_DESC(SYS_SMIDR_EL1), undef_access },
3312
IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
3313
{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
3314
ID_FILTERED(CTR_EL0, ctr_el0,
3315
CTR_EL0_DIC_MASK |
3316
CTR_EL0_IDC_MASK |
3317
CTR_EL0_DminLine_MASK |
3318
CTR_EL0_L1Ip_MASK |
3319
CTR_EL0_IminLine_MASK),
3320
{ SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
3321
{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
3322
3323
{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
3324
.reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
3325
{ PMU_SYS_REG(PMCNTENSET_EL0),
3326
.access = access_pmcnten, .reg = PMCNTENSET_EL0,
3327
.get_user = get_pmreg, .set_user = set_pmreg },
3328
{ PMU_SYS_REG(PMCNTENCLR_EL0),
3329
.access = access_pmcnten, .reg = PMCNTENSET_EL0,
3330
.get_user = get_pmreg, .set_user = set_pmreg },
3331
{ PMU_SYS_REG(PMOVSCLR_EL0),
3332
.access = access_pmovs, .reg = PMOVSSET_EL0,
3333
.get_user = get_pmreg, .set_user = set_pmreg },
3334
/*
3335
* PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
3336
* previously (and pointlessly) advertised in the past...
3337
*/
3338
{ PMU_SYS_REG(PMSWINC_EL0),
3339
.get_user = get_raz_reg, .set_user = set_wi_reg,
3340
.access = access_pmswinc, .reset = NULL },
3341
{ PMU_SYS_REG(PMSELR_EL0),
3342
.access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
3343
{ PMU_SYS_REG(PMCEID0_EL0),
3344
.access = access_pmceid, .reset = NULL },
3345
{ PMU_SYS_REG(PMCEID1_EL0),
3346
.access = access_pmceid, .reset = NULL },
3347
{ PMU_SYS_REG(PMCCNTR_EL0),
3348
.access = access_pmu_evcntr, .reset = reset_unknown,
3349
.reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
3350
.set_user = set_pmu_evcntr },
3351
{ PMU_SYS_REG(PMXEVTYPER_EL0),
3352
.access = access_pmu_evtyper, .reset = NULL },
3353
{ PMU_SYS_REG(PMXEVCNTR_EL0),
3354
.access = access_pmu_evcntr, .reset = NULL },
3355
/*
3356
* PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
3357
* in 32bit mode. Here we choose to reset it as zero for consistency.
3358
*/
3359
{ PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
3360
.reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
3361
{ PMU_SYS_REG(PMOVSSET_EL0),
3362
.access = access_pmovs, .reg = PMOVSSET_EL0,
3363
.get_user = get_pmreg, .set_user = set_pmreg },
3364
3365
{ SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
3366
.visibility = s1poe_visibility },
3367
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
3368
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
3369
{ SYS_DESC(SYS_TPIDR2_EL0), undef_access },
3370
3371
{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
3372
3373
{ SYS_DESC(SYS_AMCR_EL0), undef_access },
3374
{ SYS_DESC(SYS_AMCFGR_EL0), undef_access },
3375
{ SYS_DESC(SYS_AMCGCR_EL0), undef_access },
3376
{ SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
3377
{ SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
3378
{ SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
3379
{ SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
3380
{ SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
3381
AMU_AMEVCNTR0_EL0(0),
3382
AMU_AMEVCNTR0_EL0(1),
3383
AMU_AMEVCNTR0_EL0(2),
3384
AMU_AMEVCNTR0_EL0(3),
3385
AMU_AMEVCNTR0_EL0(4),
3386
AMU_AMEVCNTR0_EL0(5),
3387
AMU_AMEVCNTR0_EL0(6),
3388
AMU_AMEVCNTR0_EL0(7),
3389
AMU_AMEVCNTR0_EL0(8),
3390
AMU_AMEVCNTR0_EL0(9),
3391
AMU_AMEVCNTR0_EL0(10),
3392
AMU_AMEVCNTR0_EL0(11),
3393
AMU_AMEVCNTR0_EL0(12),
3394
AMU_AMEVCNTR0_EL0(13),
3395
AMU_AMEVCNTR0_EL0(14),
3396
AMU_AMEVCNTR0_EL0(15),
3397
AMU_AMEVTYPER0_EL0(0),
3398
AMU_AMEVTYPER0_EL0(1),
3399
AMU_AMEVTYPER0_EL0(2),
3400
AMU_AMEVTYPER0_EL0(3),
3401
AMU_AMEVTYPER0_EL0(4),
3402
AMU_AMEVTYPER0_EL0(5),
3403
AMU_AMEVTYPER0_EL0(6),
3404
AMU_AMEVTYPER0_EL0(7),
3405
AMU_AMEVTYPER0_EL0(8),
3406
AMU_AMEVTYPER0_EL0(9),
3407
AMU_AMEVTYPER0_EL0(10),
3408
AMU_AMEVTYPER0_EL0(11),
3409
AMU_AMEVTYPER0_EL0(12),
3410
AMU_AMEVTYPER0_EL0(13),
3411
AMU_AMEVTYPER0_EL0(14),
3412
AMU_AMEVTYPER0_EL0(15),
3413
AMU_AMEVCNTR1_EL0(0),
3414
AMU_AMEVCNTR1_EL0(1),
3415
AMU_AMEVCNTR1_EL0(2),
3416
AMU_AMEVCNTR1_EL0(3),
3417
AMU_AMEVCNTR1_EL0(4),
3418
AMU_AMEVCNTR1_EL0(5),
3419
AMU_AMEVCNTR1_EL0(6),
3420
AMU_AMEVCNTR1_EL0(7),
3421
AMU_AMEVCNTR1_EL0(8),
3422
AMU_AMEVCNTR1_EL0(9),
3423
AMU_AMEVCNTR1_EL0(10),
3424
AMU_AMEVCNTR1_EL0(11),
3425
AMU_AMEVCNTR1_EL0(12),
3426
AMU_AMEVCNTR1_EL0(13),
3427
AMU_AMEVCNTR1_EL0(14),
3428
AMU_AMEVCNTR1_EL0(15),
3429
AMU_AMEVTYPER1_EL0(0),
3430
AMU_AMEVTYPER1_EL0(1),
3431
AMU_AMEVTYPER1_EL0(2),
3432
AMU_AMEVTYPER1_EL0(3),
3433
AMU_AMEVTYPER1_EL0(4),
3434
AMU_AMEVTYPER1_EL0(5),
3435
AMU_AMEVTYPER1_EL0(6),
3436
AMU_AMEVTYPER1_EL0(7),
3437
AMU_AMEVTYPER1_EL0(8),
3438
AMU_AMEVTYPER1_EL0(9),
3439
AMU_AMEVTYPER1_EL0(10),
3440
AMU_AMEVTYPER1_EL0(11),
3441
AMU_AMEVTYPER1_EL0(12),
3442
AMU_AMEVTYPER1_EL0(13),
3443
AMU_AMEVTYPER1_EL0(14),
3444
AMU_AMEVTYPER1_EL0(15),
3445
3446
{ SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
3447
{ SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer },
3448
{ SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
3449
{ SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
3450
{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
3451
{ SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
3452
{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
3453
3454
{ SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
3455
{ SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer },
3456
{ SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer },
3457
3458
/* PMEVCNTRn_EL0 */
3459
PMU_PMEVCNTR_EL0(0),
3460
PMU_PMEVCNTR_EL0(1),
3461
PMU_PMEVCNTR_EL0(2),
3462
PMU_PMEVCNTR_EL0(3),
3463
PMU_PMEVCNTR_EL0(4),
3464
PMU_PMEVCNTR_EL0(5),
3465
PMU_PMEVCNTR_EL0(6),
3466
PMU_PMEVCNTR_EL0(7),
3467
PMU_PMEVCNTR_EL0(8),
3468
PMU_PMEVCNTR_EL0(9),
3469
PMU_PMEVCNTR_EL0(10),
3470
PMU_PMEVCNTR_EL0(11),
3471
PMU_PMEVCNTR_EL0(12),
3472
PMU_PMEVCNTR_EL0(13),
3473
PMU_PMEVCNTR_EL0(14),
3474
PMU_PMEVCNTR_EL0(15),
3475
PMU_PMEVCNTR_EL0(16),
3476
PMU_PMEVCNTR_EL0(17),
3477
PMU_PMEVCNTR_EL0(18),
3478
PMU_PMEVCNTR_EL0(19),
3479
PMU_PMEVCNTR_EL0(20),
3480
PMU_PMEVCNTR_EL0(21),
3481
PMU_PMEVCNTR_EL0(22),
3482
PMU_PMEVCNTR_EL0(23),
3483
PMU_PMEVCNTR_EL0(24),
3484
PMU_PMEVCNTR_EL0(25),
3485
PMU_PMEVCNTR_EL0(26),
3486
PMU_PMEVCNTR_EL0(27),
3487
PMU_PMEVCNTR_EL0(28),
3488
PMU_PMEVCNTR_EL0(29),
3489
PMU_PMEVCNTR_EL0(30),
3490
/* PMEVTYPERn_EL0 */
3491
PMU_PMEVTYPER_EL0(0),
3492
PMU_PMEVTYPER_EL0(1),
3493
PMU_PMEVTYPER_EL0(2),
3494
PMU_PMEVTYPER_EL0(3),
3495
PMU_PMEVTYPER_EL0(4),
3496
PMU_PMEVTYPER_EL0(5),
3497
PMU_PMEVTYPER_EL0(6),
3498
PMU_PMEVTYPER_EL0(7),
3499
PMU_PMEVTYPER_EL0(8),
3500
PMU_PMEVTYPER_EL0(9),
3501
PMU_PMEVTYPER_EL0(10),
3502
PMU_PMEVTYPER_EL0(11),
3503
PMU_PMEVTYPER_EL0(12),
3504
PMU_PMEVTYPER_EL0(13),
3505
PMU_PMEVTYPER_EL0(14),
3506
PMU_PMEVTYPER_EL0(15),
3507
PMU_PMEVTYPER_EL0(16),
3508
PMU_PMEVTYPER_EL0(17),
3509
PMU_PMEVTYPER_EL0(18),
3510
PMU_PMEVTYPER_EL0(19),
3511
PMU_PMEVTYPER_EL0(20),
3512
PMU_PMEVTYPER_EL0(21),
3513
PMU_PMEVTYPER_EL0(22),
3514
PMU_PMEVTYPER_EL0(23),
3515
PMU_PMEVTYPER_EL0(24),
3516
PMU_PMEVTYPER_EL0(25),
3517
PMU_PMEVTYPER_EL0(26),
3518
PMU_PMEVTYPER_EL0(27),
3519
PMU_PMEVTYPER_EL0(28),
3520
PMU_PMEVTYPER_EL0(29),
3521
PMU_PMEVTYPER_EL0(30),
3522
/*
3523
* PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
3524
* in 32bit mode. Here we choose to reset it as zero for consistency.
3525
*/
3526
{ PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
3527
.reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
3528
3529
EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
3530
EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
3531
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
3532
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
3533
EL2_REG_FILTERED(SCTLR2_EL2, access_vm_reg, reset_val, 0,
3534
sctlr2_el2_visibility),
3535
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
3536
EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
3537
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
3538
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
3539
EL2_REG_VNCR_FILT(HFGRTR_EL2, fgt_visibility),
3540
EL2_REG_VNCR_FILT(HFGWTR_EL2, fgt_visibility),
3541
EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
3542
EL2_REG_VNCR(HACR_EL2, reset_val, 0),
3543
3544
EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0,
3545
sve_el2_visibility),
3546
3547
EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
3548
3549
EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
3550
EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
3551
EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
3552
EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1,
3553
tcr2_el2_visibility),
3554
EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
3555
EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
3556
EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0,
3557
vncr_el2_visibility),
3558
3559
{ SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
3560
EL2_REG_VNCR_FILT(HDFGRTR2_EL2, fgt2_visibility),
3561
EL2_REG_VNCR_FILT(HDFGWTR2_EL2, fgt2_visibility),
3562
EL2_REG_VNCR_FILT(HFGRTR2_EL2, fgt2_visibility),
3563
EL2_REG_VNCR_FILT(HFGWTR2_EL2, fgt2_visibility),
3564
EL2_REG_VNCR_FILT(HDFGRTR_EL2, fgt_visibility),
3565
EL2_REG_VNCR_FILT(HDFGWTR_EL2, fgt_visibility),
3566
EL2_REG_VNCR_FILT(HAFGRTR_EL2, fgt_visibility),
3567
EL2_REG_VNCR_FILT(HFGITR2_EL2, fgt2_visibility),
3568
EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
3569
EL2_REG_REDIR(ELR_EL2, reset_val, 0),
3570
{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
3571
3572
/* AArch32 SPSR_* are RES0 if trapped from a NV guest */
3573
{ SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
3574
{ SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
3575
{ SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
3576
{ SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
3577
3578
{ SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
3579
EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
3580
EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
3581
EL2_REG_REDIR(ESR_EL2, reset_val, 0),
3582
EL2_REG_VNCR(VSESR_EL2, reset_unknown, 0),
3583
{ SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
3584
3585
EL2_REG_REDIR(FAR_EL2, reset_val, 0),
3586
EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
3587
3588
EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
3589
EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0,
3590
s1pie_el2_visibility),
3591
EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0,
3592
s1pie_el2_visibility),
3593
EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
3594
s1poe_el2_visibility),
3595
EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
3596
{ SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
3597
{ SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
3598
{ SYS_DESC(SYS_MPAM2_EL2), undef_access },
3599
{ SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
3600
{ SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
3601
{ SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
3602
{ SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
3603
{ SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
3604
{ SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
3605
{ SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
3606
{ SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
3607
3608
EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
3609
{ SYS_DESC(SYS_RVBAR_EL2), undef_access },
3610
{ SYS_DESC(SYS_RMR_EL2), undef_access },
3611
EL2_REG_VNCR(VDISR_EL2, reset_unknown, 0),
3612
3613
EL2_REG_VNCR_GICv3(ICH_AP0R0_EL2),
3614
EL2_REG_VNCR_GICv3(ICH_AP0R1_EL2),
3615
EL2_REG_VNCR_GICv3(ICH_AP0R2_EL2),
3616
EL2_REG_VNCR_GICv3(ICH_AP0R3_EL2),
3617
EL2_REG_VNCR_GICv3(ICH_AP1R0_EL2),
3618
EL2_REG_VNCR_GICv3(ICH_AP1R1_EL2),
3619
EL2_REG_VNCR_GICv3(ICH_AP1R2_EL2),
3620
EL2_REG_VNCR_GICv3(ICH_AP1R3_EL2),
3621
3622
{ SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
3623
3624
EL2_REG_VNCR_GICv3(ICH_HCR_EL2),
3625
{ SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
3626
{ SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
3627
{ SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
3628
{ SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
3629
EL2_REG_VNCR_GICv3(ICH_VMCR_EL2),
3630
3631
EL2_REG_VNCR_GICv3(ICH_LR0_EL2),
3632
EL2_REG_VNCR_GICv3(ICH_LR1_EL2),
3633
EL2_REG_VNCR_GICv3(ICH_LR2_EL2),
3634
EL2_REG_VNCR_GICv3(ICH_LR3_EL2),
3635
EL2_REG_VNCR_GICv3(ICH_LR4_EL2),
3636
EL2_REG_VNCR_GICv3(ICH_LR5_EL2),
3637
EL2_REG_VNCR_GICv3(ICH_LR6_EL2),
3638
EL2_REG_VNCR_GICv3(ICH_LR7_EL2),
3639
EL2_REG_VNCR_GICv3(ICH_LR8_EL2),
3640
EL2_REG_VNCR_GICv3(ICH_LR9_EL2),
3641
EL2_REG_VNCR_GICv3(ICH_LR10_EL2),
3642
EL2_REG_VNCR_GICv3(ICH_LR11_EL2),
3643
EL2_REG_VNCR_GICv3(ICH_LR12_EL2),
3644
EL2_REG_VNCR_GICv3(ICH_LR13_EL2),
3645
EL2_REG_VNCR_GICv3(ICH_LR14_EL2),
3646
EL2_REG_VNCR_GICv3(ICH_LR15_EL2),
3647
3648
EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
3649
EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
3650
3651
EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
3652
EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
3653
{ SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
3654
EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
3655
EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
3656
3657
{ SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
3658
EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
3659
EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
3660
3661
{ SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
3662
3663
{ SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer },
3664
{ SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer },
3665
{ SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer },
3666
3667
{ SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer },
3668
{ SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer },
3669
{ SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer },
3670
3671
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
3672
};
3673
3674
static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3675
const struct sys_reg_desc *r)
3676
{
3677
u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3678
3679
__kvm_at_s1e01(vcpu, op, p->regval);
3680
3681
return true;
3682
}
3683
3684
static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3685
const struct sys_reg_desc *r)
3686
{
3687
u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3688
3689
/* There is no FGT associated with AT S1E2A :-( */
3690
if (op == OP_AT_S1E2A &&
3691
!kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
3692
kvm_inject_undefined(vcpu);
3693
return false;
3694
}
3695
3696
__kvm_at_s1e2(vcpu, op, p->regval);
3697
3698
return true;
3699
}
3700
3701
static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3702
const struct sys_reg_desc *r)
3703
{
3704
u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3705
3706
__kvm_at_s12(vcpu, op, p->regval);
3707
3708
return true;
3709
}
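/*
 * Reject trapped TLBI encodings that rely on features (FEAT_XS, FEAT_TLBIOS)
 * the guest's ID registers do not advertise; callers turn a 'false' return
 * into an UNDEF.
 */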
static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	return true;
}

static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);

	if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
		return undef_access(vcpu, p, r);

	write_lock(&vcpu->kvm->mmu_lock);

	/*
	 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
	 * corresponding VMIDs.
	 */
	kvm_nested_s2_unmap(vcpu->kvm, true);

	write_unlock(&vcpu->kvm->mmu_lock);

	return true;
}

static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);
	u8 Op2 = sys_reg_Op2(instr);

	if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
		return false;

	if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}
/* Only defined here as this is an internal "abstraction" */
3774
union tlbi_info {
3775
struct {
3776
u64 start;
3777
u64 size;
3778
} range;
3779
3780
struct {
3781
u64 addr;
3782
} ipa;
3783
3784
struct {
3785
u64 addr;
3786
u32 encoding;
3787
} va;
3788
};
static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
			       const union tlbi_info *info)
{
	/*
	 * The unmap operation is allowed to drop the MMU lock and block, which
	 * means that @mmu could be used for a different context than the one
	 * currently being invalidated.
	 *
	 * This behavior is still safe, as:
	 *
	 * 1) The vCPU(s) that recycled the MMU are responsible for invalidating
	 *    the entire MMU before reusing it, which still honors the intent
	 *    of a TLBI.
	 *
	 * 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC
	 *    and ERET to the guest), other vCPUs are allowed to use stale
	 *    translations.
	 *
	 * 3) Accidentally unmapping an unrelated MMU context is nonfatal, and
	 *    at worst may cause more aborts for shadow stage-2 fills.
	 *
	 * Dropping the MMU lock also implies that shadow stage-2 fills could
	 * happen behind the back of the TLBI. This is still safe, though, as
	 * the L1 needs to put its stage-2 in a consistent state before doing
	 * the TLBI.
	 */
	kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
}
static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3820
const struct sys_reg_desc *r)
3821
{
3822
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3823
u64 limit, vttbr;
3824
3825
if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3826
return undef_access(vcpu, p, r);
3827
3828
vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3829
limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
3830
3831
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3832
&(union tlbi_info) {
3833
.range = {
3834
.start = 0,
3835
.size = limit,
3836
},
3837
},
3838
s2_mmu_unmap_range);
3839
3840
return true;
3841
}
3842
3843
static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3844
const struct sys_reg_desc *r)
3845
{
3846
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3847
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3848
u64 base, range;
3849
3850
if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3851
return undef_access(vcpu, p, r);
3852
3853
/*
3854
* Because the shadow S2 structure doesn't necessarily reflect that
3855
* of the guest's S2 (different base granule size, for example), we
3856
* decide to ignore TTL and only use the described range.
3857
*/
3858
base = decode_range_tlbi(p->regval, &range, NULL);
3859
3860
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3861
&(union tlbi_info) {
3862
.range = {
3863
.start = base,
3864
.size = range,
3865
},
3866
},
3867
s2_mmu_unmap_range);
3868
3869
return true;
3870
}
static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
			     const union tlbi_info *info)
{
	unsigned long max_size;
	u64 base_addr;

	/*
	 * We drop a number of things from the supplied value:
	 *
	 * - NS bit: we're non-secure only.
	 *
	 * - IPA[51:48]: We don't support 52bit IPA just yet...
	 *
	 * And of course, adjust the IPA to be on an actual address.
	 */
	base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
	max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
	base_addr &= ~(max_size - 1);

	/*
	 * See comment in s2_mmu_unmap_range() for why this is allowed to
	 * reschedule.
	 */
	kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
}
static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3899
const struct sys_reg_desc *r)
3900
{
3901
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3902
u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3903
3904
if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3905
return undef_access(vcpu, p, r);
3906
3907
kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3908
&(union tlbi_info) {
3909
.ipa = {
3910
.addr = p->regval,
3911
},
3912
},
3913
s2_mmu_unmap_ipa);
3914
3915
return true;
3916
}
3917
3918
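/*
 * Per-MMU callback for kvm_s2_mmu_iterate_by_vmid(): replay the guest's
 * stage-1 TLBI against this shadow stage-2 context.
 */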
static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
3919
const union tlbi_info *info)
3920
{
3921
WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
3922
}
3923
3924
static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3925
const struct sys_reg_desc *r)
3926
{
3927
u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3928
3929
if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding))
3930
return undef_access(vcpu, p, r);
3931
3932
kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
3933
return true;
3934
}
static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);

	/*
	 * If we're here, this is because we've trapped on an EL1 TLBI
	 * instruction that affects the EL1 translation regime while
	 * we're running in a context that doesn't allow us to let the
	 * HW do its thing (aka vEL2):
	 *
	 * - HCR_EL2.E2H == 0 : a non-VHE guest
	 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
	 *
	 * Another possibility is that we are invalidating the EL2 context
	 * using EL1 instructions, but that we landed here because we need
	 * additional invalidation for structures that are not held in the
	 * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In
	 * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 }
	 * as we don't allow an NV-capable L1 in a nVHE configuration.
	 *
	 * We don't expect these helpers to ever be called when running
	 * in a vEL1 context.
	 */

	WARN_ON(!vcpu_is_el2(vcpu));

	if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
		return undef_access(vcpu, p, r);

	if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) {
		kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
		return true;
	}

	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm,
				   get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)),
				   &(union tlbi_info) {
					   .va = {
						   .addr = p->regval,
						   .encoding = sys_encoding,
					   },
				   },
				   s2_mmu_tlbi_s1e1);

	return true;
}
#define SYS_INSN(insn, access_fn) \
3985
{ \
3986
SYS_DESC(OP_##insn), \
3987
.access = (access_fn), \
3988
}
3989
3990
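/*
 * Trapped system instructions (DC by set/way, AT and TLBI). Entries must be
 * kept sorted by encoding, as with the other tables in this file.
 */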
static struct sys_reg_desc sys_insn_descs[] = {
3991
{ SYS_DESC(SYS_DC_ISW), access_dcsw },
3992
{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
3993
{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
3994
3995
SYS_INSN(AT_S1E1R, handle_at_s1e01),
3996
SYS_INSN(AT_S1E1W, handle_at_s1e01),
3997
SYS_INSN(AT_S1E0R, handle_at_s1e01),
3998
SYS_INSN(AT_S1E0W, handle_at_s1e01),
3999
SYS_INSN(AT_S1E1RP, handle_at_s1e01),
4000
SYS_INSN(AT_S1E1WP, handle_at_s1e01),
4001
4002
{ SYS_DESC(SYS_DC_CSW), access_dcsw },
4003
{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
4004
{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
4005
{ SYS_DESC(SYS_DC_CISW), access_dcsw },
4006
{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
4007
{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
4008
4009
SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
4010
SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
4011
SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
4012
SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
4013
SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
4014
SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
4015
4016
SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
4017
SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
4018
SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
4019
SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
4020
4021
SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
4022
SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
4023
SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
4024
SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
4025
SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
4026
SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
4027
4028
SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
4029
SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
4030
SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
4031
SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
4032
4033
SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
4034
SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
4035
SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
4036
SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
4037
4038
SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
4039
SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
4040
SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
4041
SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
4042
SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
4043
SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
4044
4045
SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
4046
SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
4047
SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
4048
SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
4049
SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
4050
SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
4051
4052
SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
4053
SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
4054
SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
4055
SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
4056
4057
SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
4058
SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
4059
SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
4060
SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
4061
SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
4062
SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
4063
4064
SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
4065
SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
4066
SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
4067
SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
4068
4069
SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
4070
SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
4071
SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
4072
SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
4073
4074
SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
4075
SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
4076
SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
4077
SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
4078
SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
4079
SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
4080
4081
SYS_INSN(AT_S1E2R, handle_at_s1e2),
4082
SYS_INSN(AT_S1E2W, handle_at_s1e2),
4083
SYS_INSN(AT_S12E1R, handle_at_s12),
4084
SYS_INSN(AT_S12E1W, handle_at_s12),
4085
SYS_INSN(AT_S12E0R, handle_at_s12),
4086
SYS_INSN(AT_S12E0W, handle_at_s12),
4087
SYS_INSN(AT_S1E2A, handle_at_s1e2),
4088
4089
SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
4090
SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
4091
SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
4092
SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
4093
4094
SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2),
4095
SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2),
4096
SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
4097
SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2),
4098
SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
4099
4100
SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2),
4101
SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2),
4102
SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2),
4103
SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2),
4104
4105
SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
4106
4107
SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2),
4108
4109
SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
4110
SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
4111
SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
4112
SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
4113
SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
4114
SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
4115
SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
4116
SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
4117
SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
4118
SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2),
4119
SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2),
4120
SYS_INSN(TLBI_RVAE2, handle_tlbi_el2),
4121
SYS_INSN(TLBI_RVALE2, handle_tlbi_el2),
4122
SYS_INSN(TLBI_ALLE2, handle_tlbi_el2),
4123
SYS_INSN(TLBI_VAE2, handle_tlbi_el2),
4124
4125
SYS_INSN(TLBI_ALLE1, handle_alle1is),
4126
4127
SYS_INSN(TLBI_VALE2, handle_tlbi_el2),
4128
4129
SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
4130
4131
SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
4132
SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
4133
SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
4134
SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
4135
4136
SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2),
4137
SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2),
4138
SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
4139
SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2),
4140
SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
4141
4142
SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2),
4143
SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2),
4144
SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2),
4145
SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2),
4146
4147
SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
4148
SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2),
4149
SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
4150
SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
4151
SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
4152
SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
4153
SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
4154
SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
4155
SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
4156
SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
4157
SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
4158
SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2),
4159
SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2),
4160
SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2),
4161
SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2),
4162
SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2),
4163
SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2),
4164
SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
4165
SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2),
4166
SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
4167
};
4168
4169
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
4170
struct sys_reg_params *p,
4171
const struct sys_reg_desc *r)
4172
{
4173
if (p->is_write) {
4174
return ignore_write(vcpu, p);
4175
} else {
4176
u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
4177
u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
4178
4179
p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
4180
(SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
4181
(SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
4182
(SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
4183
(1 << 15) | (el3 << 14) | (el3 << 12));
4184
return true;
4185
}
4186
}
4187
4188
/*
4189
* AArch32 debug register mappings
4190
*
4191
* AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
4192
* AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
4193
*
4194
* None of the other registers share their location, so treat them as
4195
* if they were 64bit.
4196
*/
4197
#define DBG_BCR_BVR_WCR_WVR(n) \
4198
/* DBGBVRn */ \
4199
{ AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \
4200
trap_dbg_wb_reg, NULL, n }, \
4201
/* DBGBCRn */ \
4202
{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \
4203
/* DBGWVRn */ \
4204
{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \
4205
/* DBGWCRn */ \
4206
{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n }
4207
4208
#define DBGBXVR(n) \
4209
{ AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \
4210
trap_dbg_wb_reg, NULL, n }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
4218
/* DBGDIDR */
4219
{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
4220
/* DBGDTRRXext */
4221
{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
4222
4223
DBG_BCR_BVR_WCR_WVR(0),
4224
/* DBGDSCRint */
4225
{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
4226
DBG_BCR_BVR_WCR_WVR(1),
4227
/* DBGDCCINT */
4228
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
4229
/* DBGDSCRext */
4230
{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
4231
DBG_BCR_BVR_WCR_WVR(2),
4232
/* DBGDTR[RT]Xint */
4233
{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
4234
/* DBGDTR[RT]Xext */
4235
{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
4236
DBG_BCR_BVR_WCR_WVR(3),
4237
DBG_BCR_BVR_WCR_WVR(4),
4238
DBG_BCR_BVR_WCR_WVR(5),
4239
/* DBGWFAR */
4240
{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
4241
/* DBGOSECCR */
4242
{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
4243
DBG_BCR_BVR_WCR_WVR(6),
4244
/* DBGVCR */
4245
{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
4246
DBG_BCR_BVR_WCR_WVR(7),
4247
DBG_BCR_BVR_WCR_WVR(8),
4248
DBG_BCR_BVR_WCR_WVR(9),
4249
DBG_BCR_BVR_WCR_WVR(10),
4250
DBG_BCR_BVR_WCR_WVR(11),
4251
DBG_BCR_BVR_WCR_WVR(12),
4252
DBG_BCR_BVR_WCR_WVR(13),
4253
DBG_BCR_BVR_WCR_WVR(14),
4254
DBG_BCR_BVR_WCR_WVR(15),
4255
4256
/* DBGDRAR (32bit) */
4257
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
4258
4259
DBGBXVR(0),
4260
/* DBGOSLAR */
4261
{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
4262
DBGBXVR(1),
4263
/* DBGOSLSR */
4264
{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
4265
DBGBXVR(2),
4266
DBGBXVR(3),
4267
/* DBGOSDLR */
4268
{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
4269
DBGBXVR(4),
4270
/* DBGPRCR */
4271
{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
4272
DBGBXVR(5),
4273
DBGBXVR(6),
4274
DBGBXVR(7),
4275
DBGBXVR(8),
4276
DBGBXVR(9),
4277
DBGBXVR(10),
4278
DBGBXVR(11),
4279
DBGBXVR(12),
4280
DBGBXVR(13),
4281
DBGBXVR(14),
4282
DBGBXVR(15),
4283
4284
/* DBGDSAR (32bit) */
4285
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
4286
4287
/* DBGDEVID2 */
4288
{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
4289
/* DBGDEVID1 */
4290
{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
4291
/* DBGDEVID */
4292
{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
4293
/* DBGCLAIMSET */
4294
{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
4295
/* DBGCLAIMCLR */
4296
{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
4297
/* DBGAUTHSTATUS */
4298
{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
4299
};
4300
4301
/* Trapped cp14 64bit registers */
4302
static const struct sys_reg_desc cp14_64_regs[] = {
4303
/* DBGDRAR (64bit) */
4304
{ Op1( 0), CRm( 1), .access = trap_raz_wi },
4305
4306
/* DBGDSAR (64bit) */
4307
{ Op1( 0), CRm( 2), .access = trap_raz_wi },
4308
};
4309
4310
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
4311
AA32(_map), \
4312
Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
4313
.visibility = pmu_visibility
4314
4315
/* Macro to expand the PMEVCNTRn register */
4316
#define PMU_PMEVCNTR(n) \
4317
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4318
(0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4319
.access = access_pmu_evcntr }
4320
4321
/* Macro to expand the PMEVTYPERn register */
4322
#define PMU_PMEVTYPER(n) \
4323
{ CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4324
(0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4325
.access = access_pmu_evtyper }
4326
/*
4327
* Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
4328
* depending on the way they are accessed (as a 32bit or a 64bit
4329
* register).
4330
*/
4331
static const struct sys_reg_desc cp15_regs[] = {
4332
{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
4333
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
4334
/* ACTLR */
4335
{ AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
4336
/* ACTLR2 */
4337
{ AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
4338
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4339
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
4340
/* TTBCR */
4341
{ AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
4342
/* TTBCR2 */
4343
{ AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
4344
{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
4345
{ CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
4346
/* DFSR */
4347
{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
4348
{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
4349
/* ADFSR */
4350
{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
4351
/* AIFSR */
4352
{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
4353
/* DFAR */
4354
{ AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
4355
/* IFAR */
4356
{ AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
4357
4358
/*
4359
* DC{C,I,CI}SW operations:
4360
*/
4361
{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
4362
{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
4363
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
4364
4365
/* PMU */
4366
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
4367
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
4368
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
4369
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
4370
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
4371
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
4372
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
4373
{ CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
4374
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
4375
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
4376
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
4377
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
4378
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
4379
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
4380
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
4381
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
4382
{ CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
4383
/* PMMIR */
4384
{ CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
4385
4386
/* PRRR/MAIR0 */
4387
{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
4388
/* NMRR/MAIR1 */
4389
{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
4390
/* AMAIR0 */
4391
{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
4392
/* AMAIR1 */
4393
{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
4394
4395
{ CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
4396
{ CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
4397
{ CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
4398
{ CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
4399
{ CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
4400
{ CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
4401
{ CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
4402
{ CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
4403
{ CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
4404
{ CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
4405
{ CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
4406
{ CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
4407
{ CP15_SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
4408
{ CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
4409
{ CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
4410
{ CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
4411
{ CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
4412
{ CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
4413
{ CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
4414
{ CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
4415
{ CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
4416
{ CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
4417
4418
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
4419
4420
/* Arch timers */
4421
{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
4422
{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
4423
4424
/* PMEVCNTRn */
4425
PMU_PMEVCNTR(0),
4426
PMU_PMEVCNTR(1),
4427
PMU_PMEVCNTR(2),
4428
PMU_PMEVCNTR(3),
4429
PMU_PMEVCNTR(4),
4430
PMU_PMEVCNTR(5),
4431
PMU_PMEVCNTR(6),
4432
PMU_PMEVCNTR(7),
4433
PMU_PMEVCNTR(8),
4434
PMU_PMEVCNTR(9),
4435
PMU_PMEVCNTR(10),
4436
PMU_PMEVCNTR(11),
4437
PMU_PMEVCNTR(12),
4438
PMU_PMEVCNTR(13),
4439
PMU_PMEVCNTR(14),
4440
PMU_PMEVCNTR(15),
4441
PMU_PMEVCNTR(16),
4442
PMU_PMEVCNTR(17),
4443
PMU_PMEVCNTR(18),
4444
PMU_PMEVCNTR(19),
4445
PMU_PMEVCNTR(20),
4446
PMU_PMEVCNTR(21),
4447
PMU_PMEVCNTR(22),
4448
PMU_PMEVCNTR(23),
4449
PMU_PMEVCNTR(24),
4450
PMU_PMEVCNTR(25),
4451
PMU_PMEVCNTR(26),
4452
PMU_PMEVCNTR(27),
4453
PMU_PMEVCNTR(28),
4454
PMU_PMEVCNTR(29),
4455
PMU_PMEVCNTR(30),
4456
/* PMEVTYPERn */
4457
PMU_PMEVTYPER(0),
4458
PMU_PMEVTYPER(1),
4459
PMU_PMEVTYPER(2),
4460
PMU_PMEVTYPER(3),
4461
PMU_PMEVTYPER(4),
4462
PMU_PMEVTYPER(5),
4463
PMU_PMEVTYPER(6),
4464
PMU_PMEVTYPER(7),
4465
PMU_PMEVTYPER(8),
4466
PMU_PMEVTYPER(9),
4467
PMU_PMEVTYPER(10),
4468
PMU_PMEVTYPER(11),
4469
PMU_PMEVTYPER(12),
4470
PMU_PMEVTYPER(13),
4471
PMU_PMEVTYPER(14),
4472
PMU_PMEVTYPER(15),
4473
PMU_PMEVTYPER(16),
4474
PMU_PMEVTYPER(17),
4475
PMU_PMEVTYPER(18),
4476
PMU_PMEVTYPER(19),
4477
PMU_PMEVTYPER(20),
4478
PMU_PMEVTYPER(21),
4479
PMU_PMEVTYPER(22),
4480
PMU_PMEVTYPER(23),
4481
PMU_PMEVTYPER(24),
4482
PMU_PMEVTYPER(25),
4483
PMU_PMEVTYPER(26),
4484
PMU_PMEVTYPER(27),
4485
PMU_PMEVTYPER(28),
4486
PMU_PMEVTYPER(29),
4487
PMU_PMEVTYPER(30),
4488
/* PMCCFILTR */
4489
{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
4490
4491
{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
4492
{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
4493
4494
/* CCSIDR2 */
4495
{ Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
4496
4497
{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
4498
};
4499
4500
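/* Trapped cp15 64bit registers */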
static const struct sys_reg_desc cp15_64_regs[] = {
4501
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4502
{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
4503
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
4504
{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
4505
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
4506
{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
4507
{ SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer },
4508
{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
4509
{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
4510
{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
4511
{ SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer },
4512
};
4513
4514
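/*
 * Sanity-check a trap table: entries must be sorted by encoding, and (when
 * reset_check is set) any entry backed by a vcpu register must provide a
 * reset handler.
 */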
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
4515
bool reset_check)
4516
{
4517
unsigned int i;
4518
4519
for (i = 0; i < n; i++) {
4520
if (reset_check && table[i].reg && !table[i].reset) {
4521
kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
4522
&table[i], i, table[i].name);
4523
return false;
4524
}
4525
4526
if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
4527
kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
4528
&table[i], i, table[i - 1].name, table[i].name);
4529
return false;
4530
}
4531
}
4532
4533
return true;
4534
}
4535
4536
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
4537
{
4538
kvm_inject_undefined(vcpu);
4539
return 1;
4540
}
4541
4542
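/*
 * Dispatch a trapped access to its handler, injecting an UNDEF if the
 * register is hidden by the runtime configuration. A handler returning
 * true means the trapped instruction can be retired (PC incremented).
 */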
static void perform_access(struct kvm_vcpu *vcpu,
4543
struct sys_reg_params *params,
4544
const struct sys_reg_desc *r)
4545
{
4546
trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
4547
4548
/* Check for regs disabled by runtime config */
4549
if (sysreg_hidden(vcpu, r)) {
4550
kvm_inject_undefined(vcpu);
4551
return;
4552
}
4553
4554
/*
4555
* Not having an accessor means that we have configured a trap
4556
* that we don't know how to handle. This certainly qualifies
4557
* as a gross bug that should be fixed right away.
4558
*/
4559
BUG_ON(!r->access);
4560
4561
/* Skip instruction if instructed so */
4562
if (likely(r->access(vcpu, params, r)))
4563
kvm_incr_pc(vcpu);
4564
}
/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 * calls the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
4577
struct sys_reg_params *params,
4578
const struct sys_reg_desc *table,
4579
size_t num)
4580
{
4581
const struct sys_reg_desc *r;
4582
4583
if (!table)
4584
return false; /* Not handled */
4585
4586
r = find_reg(params, table, num);
4587
4588
if (r) {
4589
perform_access(vcpu, params, r);
4590
return true;
4591
}
4592
4593
/* Not handled */
4594
return false;
4595
}
4596
4597
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
4598
struct sys_reg_params *params)
4599
{
4600
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
4601
int cp = -1;
4602
4603
switch (esr_ec) {
4604
case ESR_ELx_EC_CP15_32:
4605
case ESR_ELx_EC_CP15_64:
4606
cp = 15;
4607
break;
4608
case ESR_ELx_EC_CP14_MR:
4609
case ESR_ELx_EC_CP14_64:
4610
cp = 14;
4611
break;
4612
default:
4613
WARN_ON(1);
4614
}
4615
4616
print_sys_reg_msg(params,
4617
"Unsupported guest CP%d access at: %08lx [%08lx]\n",
4618
cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4619
kvm_inject_undefined(vcpu);
4620
}
4621
4622
/**
4623
* kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
4624
* @vcpu: The VCPU pointer
4625
* @global: &struct sys_reg_desc
4626
* @nr_global: size of the @global array
4627
*/
4628
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
4629
const struct sys_reg_desc *global,
4630
size_t nr_global)
4631
{
4632
struct sys_reg_params params;
4633
u64 esr = kvm_vcpu_get_esr(vcpu);
4634
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4635
int Rt2 = (esr >> 10) & 0x1f;
4636
4637
params.CRm = (esr >> 1) & 0xf;
4638
params.is_write = ((esr & 1) == 0);
4639
4640
params.Op0 = 0;
4641
params.Op1 = (esr >> 16) & 0xf;
4642
params.Op2 = 0;
4643
params.CRn = 0;
4644
4645
/*
4646
* Make a 64-bit value out of Rt and Rt2. As we use the same trap
4647
* backends between AArch32 and AArch64, we get away with it.
4648
*/
4649
if (params.is_write) {
4650
params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
4651
params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
4652
}
4653
4654
/*
4655
* If the table contains a handler, handle the
4656
* potential register operation in the case of a read and return
4657
* with success.
4658
*/
4659
if (emulate_cp(vcpu, &params, global, nr_global)) {
4660
/* Split up the value between registers for the read side */
4661
if (!params.is_write) {
4662
vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
4663
vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
4664
}
4665
4666
return 1;
4667
}
4668
4669
unhandled_cp_access(vcpu, &params);
4670
return 1;
4671
}
4672
4673
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);
4674
4675
/*
4676
* The CP10 ID registers are architecturally mapped to AArch64 feature
4677
* registers. Abuse that fact so we can rely on the AArch64 handler for accesses
4678
* from AArch32.
4679
*/
4680
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
4681
{
4682
u8 reg_id = (esr >> 10) & 0xf;
4683
bool valid;
4684
4685
params->is_write = ((esr & 1) == 0);
4686
params->Op0 = 3;
4687
params->Op1 = 0;
4688
params->CRn = 0;
4689
params->CRm = 3;
4690
4691
/* CP10 ID registers are read-only */
4692
valid = !params->is_write;
4693
4694
switch (reg_id) {
4695
/* MVFR0 */
4696
case 0b0111:
4697
params->Op2 = 0;
4698
break;
4699
/* MVFR1 */
4700
case 0b0110:
4701
params->Op2 = 1;
4702
break;
4703
/* MVFR2 */
4704
case 0b0101:
4705
params->Op2 = 2;
4706
break;
4707
default:
4708
valid = false;
4709
}
4710
4711
if (valid)
4712
return true;
4713
4714
kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
4715
str_write_read(params->is_write), reg_id);
4716
return false;
4717
}
4718
4719
/**
4720
* kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
4721
* VFP Register' from AArch32.
4722
* @vcpu: The vCPU pointer
4723
*
4724
* MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
4725
* Work out the correct AArch64 system register encoding and reroute to the
4726
* AArch64 system register emulation.
4727
*/
4728
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
4729
{
4730
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4731
u64 esr = kvm_vcpu_get_esr(vcpu);
4732
struct sys_reg_params params;
4733
4734
/* UNDEF on any unhandled register access */
4735
if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
4736
kvm_inject_undefined(vcpu);
4737
return 1;
4738
}
4739
4740
if (emulate_sys_reg(vcpu, &params))
4741
vcpu_set_reg(vcpu, Rt, params.regval);
4742
4743
return 1;
4744
}
/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       registers.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
 * treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
4764
struct sys_reg_params *params)
4765
{
4766
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4767
4768
/* Treat impossible writes to RO registers as UNDEFINED */
4769
if (params->is_write) {
4770
unhandled_cp_access(vcpu, params);
4771
return 1;
4772
}
4773
4774
params->Op0 = 3;
4775
4776
/*
4777
* All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
4778
* Avoid conflicting with future expansion of AArch64 feature registers
4779
* and simply treat them as RAZ here.
4780
*/
4781
if (params->CRm > 3)
4782
params->regval = 0;
4783
else if (!emulate_sys_reg(vcpu, params))
4784
return 1;
4785
4786
vcpu_set_reg(vcpu, Rt, params->regval);
4787
return 1;
4788
}
4789
4790
/**
4791
* kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
4792
* @vcpu: The VCPU pointer
4793
* @params: &struct sys_reg_params
4794
* @global: &struct sys_reg_desc
4795
* @nr_global: size of the @global array
4796
*/
4797
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
4798
struct sys_reg_params *params,
4799
const struct sys_reg_desc *global,
4800
size_t nr_global)
4801
{
4802
int Rt = kvm_vcpu_sys_get_rt(vcpu);
4803
4804
params->regval = vcpu_get_reg(vcpu, Rt);
4805
4806
if (emulate_cp(vcpu, params, global, nr_global)) {
4807
if (!params->is_write)
4808
vcpu_set_reg(vcpu, Rt, params->regval);
4809
return 1;
4810
}
4811
4812
unhandled_cp_access(vcpu, params);
4813
return 1;
4814
}
4815
4816
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
4817
{
4818
return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
4819
}
4820
4821
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
4822
{
4823
struct sys_reg_params params;
4824
4825
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4826
4827
/*
4828
* Certain AArch32 ID registers are handled by rerouting to the AArch64
4829
* system register table. Registers in the ID range where CRm=0 are
4830
* excluded from this scheme as they do not trivially map into AArch64
4831
* system register encodings, except for AIDR/REVIDR.
4832
*/
4833
if (params.Op1 == 0 && params.CRn == 0 &&
4834
(params.CRm || params.Op2 == 6 /* REVIDR */))
4835
return kvm_emulate_cp15_id_reg(vcpu, &params);
4836
if (params.Op1 == 1 && params.CRn == 0 &&
4837
params.CRm == 0 && params.Op2 == 7 /* AIDR */)
4838
return kvm_emulate_cp15_id_reg(vcpu, &params);
4839
4840
return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
4841
}
4842
4843
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
4844
{
4845
return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
4846
}
4847
4848
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
4849
{
4850
struct sys_reg_params params;
4851
4852
params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));
4853
4854
return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
4855
}
4856
4857
/**
4858
* emulate_sys_reg - Emulate a guest access to an AArch64 system register
4859
* @vcpu: The VCPU pointer
4860
* @params: Decoded system register parameters
4861
*
4862
* Return: true if the system register access was successful, false otherwise.
4863
*/
4864
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
4865
struct sys_reg_params *params)
4866
{
4867
const struct sys_reg_desc *r;
4868
4869
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
4870
if (likely(r)) {
4871
perform_access(vcpu, params, r);
4872
return true;
4873
}
4874
4875
print_sys_reg_msg(params,
4876
"Unsupported guest sys_reg access at: %lx [%08lx]\n",
4877
*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
4878
kvm_inject_undefined(vcpu);
4879
4880
return false;
4881
}
4882
4883
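/*
 * debugfs 'idregs' interface: iterate over the VM-wide feature ID registers
 * in sys_reg_descs order and dump their current values.
 */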
static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
4884
{
4885
unsigned long i, idreg_idx = 0;
4886
4887
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
4888
const struct sys_reg_desc *r = &sys_reg_descs[i];
4889
4890
if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
4891
continue;
4892
4893
if (idreg_idx == pos)
4894
return r;
4895
4896
idreg_idx++;
4897
}
4898
4899
return NULL;
4900
}
4901
4902
static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
4903
{
4904
struct kvm *kvm = s->private;
4905
u8 *iter;
4906
4907
mutex_lock(&kvm->arch.config_lock);
4908
4909
iter = &kvm->arch.idreg_debugfs_iter;
4910
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
4911
*iter == (u8)~0) {
4912
*iter = *pos;
4913
if (!idregs_debug_find(kvm, *iter))
4914
iter = NULL;
4915
} else {
4916
iter = ERR_PTR(-EBUSY);
4917
}
4918
4919
mutex_unlock(&kvm->arch.config_lock);
4920
4921
return iter;
4922
}
4923
4924
static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
4925
{
4926
struct kvm *kvm = s->private;
4927
4928
(*pos)++;
4929
4930
if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
4931
kvm->arch.idreg_debugfs_iter++;
4932
4933
return &kvm->arch.idreg_debugfs_iter;
4934
}
4935
4936
return NULL;
4937
}
4938
4939
static void idregs_debug_stop(struct seq_file *s, void *v)
4940
{
4941
struct kvm *kvm = s->private;
4942
4943
if (IS_ERR(v))
4944
return;
4945
4946
mutex_lock(&kvm->arch.config_lock);
4947
4948
kvm->arch.idreg_debugfs_iter = ~0;
4949
4950
mutex_unlock(&kvm->arch.config_lock);
4951
}
4952
4953
static int idregs_debug_show(struct seq_file *s, void *v)
4954
{
4955
const struct sys_reg_desc *desc;
4956
struct kvm *kvm = s->private;
4957
4958
desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);
4959
4960
if (!desc->name)
4961
return 0;
4962
4963
seq_printf(s, "%20s:\t%016llx\n",
4964
desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));
4965
4966
return 0;
4967
}
4968
4969
static const struct seq_operations idregs_debug_sops = {
4970
.start = idregs_debug_start,
4971
.next = idregs_debug_next,
4972
.stop = idregs_debug_stop,
4973
.show = idregs_debug_show,
4974
};
4975
4976
DEFINE_SEQ_ATTRIBUTE(idregs_debug);
4977
4978
void kvm_sys_regs_create_debugfs(struct kvm *kvm)
4979
{
4980
kvm->arch.idreg_debugfs_iter = ~0;
4981
4982
debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
4983
&idregs_debug_fops);
4984
}
4985
4986
static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
4987
{
4988
u32 id = reg_to_encoding(reg);
4989
struct kvm *kvm = vcpu->kvm;
4990
4991
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
4992
return;
4993
4994
kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
4995
}
4996
4997
static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
4998
const struct sys_reg_desc *reg)
4999
{
5000
if (kvm_vcpu_initialized(vcpu))
5001
return;
5002
5003
reg->reset(vcpu, reg);
5004
}
5005
5006
/**
5007
* kvm_reset_sys_regs - sets system registers to reset value
5008
* @vcpu: The VCPU pointer
5009
*
5010
* This function finds the right table above and sets the registers on the
5011
* virtual CPU struct to their architecturally defined reset values.
5012
*/
5013
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
5014
{
5015
struct kvm *kvm = vcpu->kvm;
5016
unsigned long i;
5017
5018
for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5019
const struct sys_reg_desc *r = &sys_reg_descs[i];
5020
5021
if (!r->reset)
5022
continue;
5023
5024
if (is_vm_ftr_id_reg(reg_to_encoding(r)))
5025
reset_vm_ftr_id_reg(vcpu, r);
5026
else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
5027
reset_vcpu_ftr_id_reg(vcpu, r);
5028
else
5029
r->reset(vcpu, r);
5030
5031
if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
5032
__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
5033
}
5034
5035
set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
5036
5037
if (kvm_vcpu_has_pmu(vcpu))
5038
kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
5039
}
5040
5041
/**
5042
* kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
5043
* trap on a guest execution
5044
* @vcpu: The VCPU pointer
5045
*/
5046
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
5047
{
5048
const struct sys_reg_desc *desc = NULL;
5049
struct sys_reg_params params;
5050
unsigned long esr = kvm_vcpu_get_esr(vcpu);
5051
int Rt = kvm_vcpu_sys_get_rt(vcpu);
5052
int sr_idx;
5053
5054
trace_kvm_handle_sys_reg(esr);
5055
5056
if (triage_sysreg_trap(vcpu, &sr_idx))
5057
return 1;
5058
5059
params = esr_sys64_to_params(esr);
5060
params.regval = vcpu_get_reg(vcpu, Rt);
5061
5062
/* System registers have Op0=={2,3}, as per DDI 0487J.a C5.1.2 */
5063
if (params.Op0 == 2 || params.Op0 == 3)
5064
desc = &sys_reg_descs[sr_idx];
5065
else
5066
desc = &sys_insn_descs[sr_idx];
5067
5068
perform_access(vcpu, &params, desc);
5069
5070
/* Read from system register? */
5071
if (!params.is_write &&
5072
(params.Op0 == 2 || params.Op0 == 3))
5073
vcpu_set_reg(vcpu, Rt, params.regval);
5074
5075
return 1;
5076
}
5077
5078
/******************************************************************************
5079
* Userspace API
5080
*****************************************************************************/
5081
5082
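/*
 * Decode a KVM_REG_ARM64_SYSREG index from userspace into the
 * Op0/Op1/CRn/CRm/Op2 encoding, rejecting indices with unknown bits set.
 */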
static bool index_to_params(u64 id, struct sys_reg_params *params)
5083
{
5084
switch (id & KVM_REG_SIZE_MASK) {
5085
case KVM_REG_SIZE_U64:
5086
/* Any unused index bits means it's not valid. */
5087
if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
5088
| KVM_REG_ARM_COPROC_MASK
5089
| KVM_REG_ARM64_SYSREG_OP0_MASK
5090
| KVM_REG_ARM64_SYSREG_OP1_MASK
5091
| KVM_REG_ARM64_SYSREG_CRN_MASK
5092
| KVM_REG_ARM64_SYSREG_CRM_MASK
5093
| KVM_REG_ARM64_SYSREG_OP2_MASK))
5094
return false;
5095
params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
5096
>> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
5097
params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
5098
>> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
5099
params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
5100
>> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
5101
params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
5102
>> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
5103
params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
5104
>> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
5105
return true;
5106
default:
5107
return false;
5108
}
5109
}
5110
5111
const struct sys_reg_desc *get_reg_by_id(u64 id,
5112
const struct sys_reg_desc table[],
5113
unsigned int num)
5114
{
5115
struct sys_reg_params params;
5116
5117
if (!index_to_params(id, &params))
5118
return NULL;
5119
5120
return find_reg(&params, table, num);
5121
}
5122
5123
/* Decode an index value, and find the sys_reg_desc entry. */
5124
static const struct sys_reg_desc *
5125
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
5126
const struct sys_reg_desc table[], unsigned int num)
5127
5128
{
5129
const struct sys_reg_desc *r;
5130
5131
/* We only do sys_reg for now. */
5132
if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
5133
return NULL;
5134
5135
r = get_reg_by_id(id, table, num);
5136
5137
/* Not saved in the sys_reg array and not otherwise accessible? */
5138
if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
5139
r = NULL;
5140
5141
return r;
5142
}
5143
5144
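/* Userspace accessors for the demultiplexed cache ID registers (CCSIDRs). */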
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5145
{
5146
u32 val;
5147
u32 __user *uval = uaddr;
5148
5149
/* Fail if we have unknown bits set. */
5150
if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
5151
| ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
5152
return -ENOENT;
5153
5154
switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5155
case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5156
if (KVM_REG_SIZE(id) != 4)
5157
return -ENOENT;
5158
val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5159
>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5160
if (val >= CSSELR_MAX)
5161
return -ENOENT;
5162
5163
return put_user(get_ccsidr(vcpu, val), uval);
5164
default:
5165
return -ENOENT;
5166
}
5167
}
5168
5169
static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
5170
{
5171
u32 val, newval;
5172
u32 __user *uval = uaddr;
5173
5174
/* Fail if we have unknown bits set. */
5175
if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
5176
| ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
5177
return -ENOENT;
5178
5179
switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
5180
case KVM_REG_ARM_DEMUX_ID_CCSIDR:
5181
if (KVM_REG_SIZE(id) != 4)
5182
return -ENOENT;
5183
val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
5184
>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
5185
if (val >= CSSELR_MAX)
5186
return -ENOENT;
5187
5188
if (get_user(newval, uval))
5189
return -EFAULT;
5190
5191
return set_ccsidr(vcpu, val, newval);
5192
default:
5193
return -ENOENT;
5194
}
5195
}
5196
5197
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5198
const struct sys_reg_desc table[], unsigned int num)
5199
{
5200
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5201
const struct sys_reg_desc *r;
5202
u64 val;
5203
int ret;
5204
5205
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
5206
if (!r || sysreg_hidden(vcpu, r))
5207
return -ENOENT;
5208
5209
if (r->get_user) {
5210
ret = (r->get_user)(vcpu, r, &val);
5211
} else {
5212
val = __vcpu_sys_reg(vcpu, r->reg);
5213
ret = 0;
5214
}
5215
5216
if (!ret)
5217
ret = put_user(val, uaddr);
5218
5219
return ret;
5220
}
5221
5222
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5223
{
5224
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5225
5226
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5227
return demux_c15_get(vcpu, reg->id, uaddr);
5228
5229
return kvm_sys_reg_get_user(vcpu, reg,
5230
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5231
}
5232
5233
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
5234
const struct sys_reg_desc table[], unsigned int num)
5235
{
5236
u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
5237
const struct sys_reg_desc *r;
5238
u64 val;
5239
int ret;
5240
5241
if (get_user(val, uaddr))
5242
return -EFAULT;
5243
5244
r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
5245
if (!r || sysreg_hidden(vcpu, r))
5246
return -ENOENT;
5247
5248
if (sysreg_user_write_ignore(vcpu, r))
5249
return 0;
5250
5251
if (r->set_user) {
5252
ret = (r->set_user)(vcpu, r, val);
5253
} else {
5254
__vcpu_assign_sys_reg(vcpu, r->reg, val);
5255
ret = 0;
5256
}
5257
5258
return ret;
5259
}
5260
5261
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
5262
{
5263
void __user *uaddr = (void __user *)(unsigned long)reg->addr;
5264
5265
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
5266
return demux_c15_set(vcpu, reg->id, uaddr);
5267
5268
return kvm_sys_reg_set_user(vcpu, reg,
5269
sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
5270
}
5271
5272
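/* One userspace index per possible CSSELR value, all demuxed as CCSIDRs. */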
static unsigned int num_demux_regs(void)
5273
{
5274
return CSSELR_MAX;
5275
}
5276
5277
static int write_demux_regids(u64 __user *uindices)
5278
{
5279
u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
5280
unsigned int i;
5281
5282
val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
5283
for (i = 0; i < CSSELR_MAX; i++) {
5284
if (put_user(val | i, uindices))
5285
return -EFAULT;
5286
uindices++;
5287
}
5288
return 0;
5289
}
5290
5291
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
5292
{
5293
return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
5294
KVM_REG_ARM64_SYSREG |
5295
(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
5296
(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
5297
(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
5298
(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
5299
(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
5300
}
5301
5302
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
5303
{
5304
if (!*uind)
5305
return true;
5306
5307
if (put_user(sys_reg_to_index(reg), *uind))
5308
return false;
5309
5310
(*uind)++;
5311
return true;
5312
}
5313
5314
static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
5315
const struct sys_reg_desc *rd,
5316
u64 __user **uind,
5317
unsigned int *total)
5318
{
5319
/*
5320
* Ignore registers we trap but don't save,
5321
* and for which no custom user accessor is provided.
5322
*/
5323
if (!(rd->reg || rd->get_user))
5324
return 0;
5325
5326
if (sysreg_hidden(vcpu, rd))
5327
return 0;
5328
5329
if (!copy_reg_to_user(rd, uind))
5330
return -EFAULT;
5331
5332
(*total)++;
5333
return 0;
5334
}
5335
5336
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
5337
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
5338
{
5339
const struct sys_reg_desc *i2, *end2;
5340
unsigned int total = 0;
5341
int err;
5342
5343
i2 = sys_reg_descs;
5344
end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
5345
5346
while (i2 != end2) {
5347
err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
5348
if (err)
5349
return err;
5350
}
5351
return total;
5352
}
5353
5354
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
5355
{
5356
return num_demux_regs()
5357
+ walk_sys_regs(vcpu, (u64 __user *)NULL);
5358
}
5359
5360
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
5361
{
5362
int err;
5363
5364
err = walk_sys_regs(vcpu, uindices);
5365
if (err < 0)
5366
return err;
5367
uindices += err;
5368
5369
return write_demux_regids(uindices);
5370
}
5371
5372
#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \
5373
KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r), \
5374
sys_reg_Op1(r), \
5375
sys_reg_CRn(r), \
5376
sys_reg_CRm(r), \
5377
sys_reg_Op2(r))
5378
5379
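/*
 * Report which bits of each feature ID register userspace may change, as one
 * 64bit mask per register in the feature ID range.
 */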
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
5380
{
5381
const void *zero_page = page_to_virt(ZERO_PAGE(0));
5382
u64 __user *masks = (u64 __user *)range->addr;
5383
5384
/* Only feature id range is supported, reserved[13] must be zero. */
5385
if (range->range ||
5386
memcmp(range->reserved, zero_page, sizeof(range->reserved)))
5387
return -EINVAL;
5388
5389
/* Wipe the whole thing first */
5390
if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
5391
return -EFAULT;
5392
5393
for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
5394
const struct sys_reg_desc *reg = &sys_reg_descs[i];
5395
u32 encoding = reg_to_encoding(reg);
5396
u64 val;
5397
5398
if (!is_feature_id_reg(encoding) || !reg->set_user)
5399
continue;
5400
5401
if (!reg->val ||
5402
(is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
5403
continue;
5404
}
5405
val = reg->val;
5406
5407
if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
5408
return -EFAULT;
5409
}
5410
5411
return 0;
5412
}
5413
5414
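/*
 * Compute the baseline HCR_EL2 value for this vCPU from the host
 * capabilities and the VM configuration.
 */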
static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
5415
{
5416
struct kvm *kvm = vcpu->kvm;
5417
5418
if (has_vhe() || has_hvhe())
5419
vcpu->arch.hcr_el2 |= HCR_E2H;
5420
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
5421
/* route synchronous external abort exceptions to EL2 */
5422
vcpu->arch.hcr_el2 |= HCR_TEA;
5423
/* trap error record accesses */
5424
vcpu->arch.hcr_el2 |= HCR_TERR;
5425
}
5426
5427
if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
5428
vcpu->arch.hcr_el2 |= HCR_FWB;
5429
5430
if (cpus_have_final_cap(ARM64_HAS_EVT) &&
5431
!cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
5432
kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
5433
vcpu->arch.hcr_el2 |= HCR_TID4;
5434
else
5435
vcpu->arch.hcr_el2 |= HCR_TID2;
5436
5437
if (vcpu_el1_is_32bit(vcpu))
5438
vcpu->arch.hcr_el2 &= ~HCR_RW;
5439
5440
if (kvm_has_mte(vcpu->kvm))
5441
vcpu->arch.hcr_el2 |= HCR_ATA;
5442
5443
/*
5444
* In the absence of FGT, we cannot independently trap TLBI
5445
* Range instructions. This isn't great, but trapping all
5446
* TLBIs would be far worse. Live with it...
5447
*/
5448
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
5449
vcpu->arch.hcr_el2 |= HCR_TTLBOS;
5450
}
5451
5452
void kvm_calculate_traps(struct kvm_vcpu *vcpu)
5453
{
5454
struct kvm *kvm = vcpu->kvm;
5455
5456
mutex_lock(&kvm->arch.config_lock);
5457
vcpu_set_hcr(vcpu);
5458
vcpu_set_ich_hcr(vcpu);
5459
vcpu_set_hcrx(vcpu);
5460
5461
if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
5462
goto out;
5463
5464
compute_fgu(kvm, HFGRTR_GROUP);
5465
compute_fgu(kvm, HFGITR_GROUP);
5466
compute_fgu(kvm, HDFGRTR_GROUP);
5467
compute_fgu(kvm, HAFGRTR_GROUP);
5468
compute_fgu(kvm, HFGRTR2_GROUP);
5469
compute_fgu(kvm, HFGITR2_GROUP);
5470
compute_fgu(kvm, HDFGRTR2_GROUP);
5471
5472
set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
5473
out:
5474
mutex_unlock(&kvm->arch.config_lock);
5475
}
/*
 * Perform last adjustments to the ID registers that are implied by the
 * configuration outside of the ID regs themselves, as well as any
 * initialisation that directly depends on these ID registers (such as
 * RES0/RES1 behaviours). This is not the place to configure traps though.
 *
 * Because this can be called once per CPU, changes must be idempotent.
 */
int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
5486
{
5487
struct kvm *kvm = vcpu->kvm;
5488
5489
guard(mutex)(&kvm->arch.config_lock);
5490
5491
if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
5492
irqchip_in_kernel(kvm) &&
5493
kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) {
5494
kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK;
5495
kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK;
5496
}
5497
5498
if (vcpu_has_nv(vcpu)) {
5499
int ret = kvm_init_nv_sysregs(vcpu);
5500
if (ret)
5501
return ret;
5502
}
5503
5504
return 0;
5505
}
5506
5507
int __init kvm_sys_reg_table_init(void)
5508
{
5509
const struct sys_reg_desc *gicv3_regs;
5510
bool valid = true;
5511
unsigned int i, sz;
5512
int ret = 0;
5513
5514
/* Make sure tables are unique and in order. */
5515
valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), true);
5516
valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), false);
5517
valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), false);
5518
valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), false);
5519
valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), false);
5520
valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);
5521
5522
gicv3_regs = vgic_v3_get_sysreg_table(&sz);
5523
valid &= check_sysreg_table(gicv3_regs, sz, false);
5524
5525
if (!valid)
5526
return -EINVAL;
5527
5528
init_imp_id_regs();
5529
5530
ret = populate_nv_trap_config();
5531
5532
check_feature_map();
5533
5534
for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
5535
ret = populate_sysreg_config(sys_reg_descs + i, i);
5536
5537
for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
5538
ret = populate_sysreg_config(sys_insn_descs + i, i);
5539
5540
return ret;
5541
}