GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm64/vmm/vmm_hyp.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Andrew Turner
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/proc.h>

#include <machine/armreg.h>

#include "arm64.h"
#include "hyp.h"

struct hypctx;

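/*
 * Note: this file appears to be built twice, with VMM_VHE defined for VHE
 * hosts and undefined for the standalone non-VHE hypervisor code, with
 * VMM_HYP_FUNC() and VMM_STATIC giving each build distinct symbol names
 * and linkage.  do_call_guest is the low-level guest entry/exception
 * return path, implemented outside this file.
 */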
uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);

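/*
 * Save the EL0/EL1 system register state that the guest (or, on a non-VHE
 * system, the host kernel) may have modified into *hypctx.  "guest"
 * selects the guest-only state; "ecv_poff" is true when FEAT_ECV provides
 * a physical counter offset (CNTPOFF_EL2).  The EL1_REG()/EL0_REG()
 * wrappers are assumed to expand to the plain _el1/_el0 names on non-VHE
 * builds and to the _el12/_el02 aliases on VHE builds, where the
 * unsuffixed names are redirected to EL2 registers; guest_or_nonvhe() is
 * likewise assumed to be false only for the host on a VHE system.
 */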
static void
vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest,
    bool ecv_poff)
{
	uint64_t dfr0;

	if (guest) {
		/* Store the timer registers */
		hypctx->vtimer_cpu.cntkctl_el1 =
		    READ_SPECIALREG(EL1_REG(CNTKCTL));
		hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CVAL));
		hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CTL));
	}
	if (guest_or_nonvhe(guest) && ecv_poff) {
		/*
		 * If we have ECV then the guest could modify these registers.
		 * If VHE is enabled then the kernel will see a different view
		 * of the registers, so it doesn't need to handle them.
		 */
		hypctx->vtimer_cpu.phys_timer.cntx_cval_el0 =
		    READ_SPECIALREG(EL0_REG(CNTP_CVAL));
		hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(EL0_REG(CNTP_CTL));
	}

	if (guest) {
		/* Store the GICv3 registers */
		hypctx->vgic_v3_regs.ich_eisr_el2 =
		    READ_SPECIALREG(ich_eisr_el2);
		hypctx->vgic_v3_regs.ich_elrsr_el2 =
		    READ_SPECIALREG(ich_elrsr_el2);
		hypctx->vgic_v3_regs.ich_hcr_el2 =
		    READ_SPECIALREG(ich_hcr_el2);
		hypctx->vgic_v3_regs.ich_misr_el2 =
		    READ_SPECIALREG(ich_misr_el2);
		hypctx->vgic_v3_regs.ich_vmcr_el2 =
		    READ_SPECIALREG(ich_vmcr_el2);
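		/*
		 * System register names must be literal, so STORE_LR() pastes
		 * the index into the register name.  The switch dispatches on
		 * the highest implemented list register and relies on case
		 * fall-through to store it and every lower one, leaving
		 * unimplemented ICH_LR<n>_EL2 registers untouched.
		 */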
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	STORE_LR(x) \
		case x: \
			hypctx->vgic_v3_regs.ich_lr_el2[x] = \
			    READ_SPECIALREG(ich_lr ## x ## _el2)
		STORE_LR(15);
		STORE_LR(14);
		STORE_LR(13);
		STORE_LR(12);
		STORE_LR(11);
		STORE_LR(10);
		STORE_LR(9);
		STORE_LR(8);
		STORE_LR(7);
		STORE_LR(6);
		STORE_LR(5);
		STORE_LR(4);
		STORE_LR(3);
		STORE_LR(2);
		STORE_LR(1);
		default:
		STORE_LR(0);
#undef STORE_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	STORE_APR(x) \
		case x: \
			hypctx->vgic_v3_regs.ich_ap0r_el2[x] = \
			    READ_SPECIALREG(ich_ap0r ## x ## _el2); \
			hypctx->vgic_v3_regs.ich_ap1r_el2[x] = \
			    READ_SPECIALREG(ich_ap1r ## x ## _el2)
		STORE_APR(3);
		STORE_APR(2);
		STORE_APR(1);
		default:
		STORE_APR(0);
#undef STORE_APR
		}
	}

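	/*
	 * Save the self-hosted debug state: the CLAIM tag bits, then the
	 * hardware breakpoint and watchpoint register pairs.  The BRPs and
	 * WRPs fields of ID_AA64DFR0_EL1 describe how many pairs this CPU
	 * implements, and the same fall-through switch trick stores only
	 * those.
	 */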
	hypctx->dbgclaimset_el1 = READ_SPECIALREG(dbgclaimset_el1);

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_BRP(x) \
	case x: \
		hypctx->dbgbcr_el1[x] = \
		    READ_SPECIALREG(dbgbcr ## x ## _el1); \
		hypctx->dbgbvr_el1[x] = \
		    READ_SPECIALREG(dbgbvr ## x ## _el1)
	STORE_DBG_BRP(15);
	STORE_DBG_BRP(14);
	STORE_DBG_BRP(13);
	STORE_DBG_BRP(12);
	STORE_DBG_BRP(11);
	STORE_DBG_BRP(10);
	STORE_DBG_BRP(9);
	STORE_DBG_BRP(8);
	STORE_DBG_BRP(7);
	STORE_DBG_BRP(6);
	STORE_DBG_BRP(5);
	STORE_DBG_BRP(4);
	STORE_DBG_BRP(3);
	STORE_DBG_BRP(2);
	STORE_DBG_BRP(1);
	default:
	STORE_DBG_BRP(0);
#undef STORE_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_WRP(x) \
	case x: \
		hypctx->dbgwcr_el1[x] = \
		    READ_SPECIALREG(dbgwcr ## x ## _el1); \
		hypctx->dbgwvr_el1[x] = \
		    READ_SPECIALREG(dbgwvr ## x ## _el1)
	STORE_DBG_WRP(15);
	STORE_DBG_WRP(14);
	STORE_DBG_WRP(13);
	STORE_DBG_WRP(12);
	STORE_DBG_WRP(11);
	STORE_DBG_WRP(10);
	STORE_DBG_WRP(9);
	STORE_DBG_WRP(8);
	STORE_DBG_WRP(7);
	STORE_DBG_WRP(6);
	STORE_DBG_WRP(5);
	STORE_DBG_WRP(4);
	STORE_DBG_WRP(3);
	STORE_DBG_WRP(2);
	STORE_DBG_WRP(1);
	default:
	STORE_DBG_WRP(0);
#undef STORE_DBG_WRP
	}

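	/*
	 * PMCR_EL0.N is the number of implemented event counters.  The case
	 * labels below are (x + 1), so a value of N lands on STORE_PMU(N - 1)
	 * and falls through to counter 0; N == 0 hits the default, as only
	 * the cycle counter PMCCNTR_EL0 exists then.
	 */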
	/* Store the PMU registers */
	hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
	hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
	hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
	hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
	hypctx->pmselr_el0 = READ_SPECIALREG(pmselr_el0);
	hypctx->pmxevcntr_el0 = READ_SPECIALREG(pmxevcntr_el0);
	hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
	hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
	hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);

	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	STORE_PMU(x) \
	case (x + 1): \
		hypctx->pmevcntr_el0[x] = \
		    READ_SPECIALREG(pmevcntr ## x ## _el0); \
		hypctx->pmevtyper_el0[x] = \
		    READ_SPECIALREG(pmevtyper ## x ## _el0)
	STORE_PMU(30);
	STORE_PMU(29);
	STORE_PMU(28);
	STORE_PMU(27);
	STORE_PMU(26);
	STORE_PMU(25);
	STORE_PMU(24);
	STORE_PMU(23);
	STORE_PMU(22);
	STORE_PMU(21);
	STORE_PMU(20);
	STORE_PMU(19);
	STORE_PMU(18);
	STORE_PMU(17);
	STORE_PMU(16);
	STORE_PMU(15);
	STORE_PMU(14);
	STORE_PMU(13);
	STORE_PMU(12);
	STORE_PMU(11);
	STORE_PMU(10);
	STORE_PMU(9);
	STORE_PMU(8);
	STORE_PMU(7);
	STORE_PMU(6);
	STORE_PMU(5);
	STORE_PMU(4);
	STORE_PMU(3);
	STORE_PMU(2);
	STORE_PMU(1);
	STORE_PMU(0);
	default:	/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef STORE_PMU
	}

	/* Store the special registers to the trapframe */
	hypctx->tf.tf_sp = READ_SPECIALREG(sp_el1);
	hypctx->tf.tf_elr = READ_SPECIALREG(elr_el2);
	hypctx->tf.tf_spsr = READ_SPECIALREG(spsr_el2);
	if (guest) {
		hypctx->tf.tf_esr = READ_SPECIALREG(esr_el2);
		hypctx->par_el1 = READ_SPECIALREG(par_el1);
	}

	/* Store the guest special registers */
	hypctx->sp_el0 = READ_SPECIALREG(sp_el0);
	hypctx->tpidr_el0 = READ_SPECIALREG(tpidr_el0);
	hypctx->tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
	hypctx->tpidr_el1 = READ_SPECIALREG(tpidr_el1);

	hypctx->actlr_el1 = READ_SPECIALREG(actlr_el1);
	hypctx->csselr_el1 = READ_SPECIALREG(csselr_el1);
	hypctx->mdccint_el1 = READ_SPECIALREG(mdccint_el1);
	hypctx->mdscr_el1 = READ_SPECIALREG(mdscr_el1);

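	/*
	 * For a guest, or for the host on a non-VHE system, the registers
	 * below hold live EL1 state that must be preserved.  The host on a
	 * VHE system runs at EL2 and is assumed not to depend on this EL1
	 * state, so it is skipped there.
	 */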
	if (guest_or_nonvhe(guest)) {
		hypctx->elr_el1 = READ_SPECIALREG(EL1_REG(ELR));
		hypctx->vbar_el1 = READ_SPECIALREG(EL1_REG(VBAR));

		hypctx->afsr0_el1 = READ_SPECIALREG(EL1_REG(AFSR0));
		hypctx->afsr1_el1 = READ_SPECIALREG(EL1_REG(AFSR1));
		hypctx->amair_el1 = READ_SPECIALREG(EL1_REG(AMAIR));
		hypctx->contextidr_el1 = READ_SPECIALREG(EL1_REG(CONTEXTIDR));
		hypctx->cpacr_el1 = READ_SPECIALREG(EL1_REG(CPACR));
		hypctx->esr_el1 = READ_SPECIALREG(EL1_REG(ESR));
		hypctx->far_el1 = READ_SPECIALREG(EL1_REG(FAR));
		hypctx->mair_el1 = READ_SPECIALREG(EL1_REG(MAIR));
		hypctx->sctlr_el1 = READ_SPECIALREG(EL1_REG(SCTLR));
		hypctx->spsr_el1 = READ_SPECIALREG(EL1_REG(SPSR));
		hypctx->tcr_el1 = READ_SPECIALREG(EL1_REG(TCR));
		/* TODO: Support when this is not res0 */
		hypctx->tcr2_el1 = 0;
		hypctx->ttbr0_el1 = READ_SPECIALREG(EL1_REG(TTBR0));
		hypctx->ttbr1_el1 = READ_SPECIALREG(EL1_REG(TTBR1));
	}

	hypctx->cptr_el2 = READ_SPECIALREG(cptr_el2);
	hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
	hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
	hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
}

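/*
 * Load the saved state back into the CPU.  hcr_el2 (and hcrx_el2 when
 * FEAT_HCX is present) is written first, followed by an isb(), presumably
 * because the E2H/TGE routing it selects determines which physical
 * registers the remaining writes reach.
 */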
static void
vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest,
    bool ecv_poff)
{
	uint64_t dfr0;

	/* Restore the special registers */
	WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);

	if (guest) {
		if ((hyp->feats & HYP_FEAT_HCX) != 0)
			WRITE_SPECIALREG(HCRX_EL2_REG, hypctx->hcrx_el2);
	}
	isb();

	WRITE_SPECIALREG(sp_el0, hypctx->sp_el0);
	WRITE_SPECIALREG(tpidr_el0, hypctx->tpidr_el0);
	WRITE_SPECIALREG(tpidrro_el0, hypctx->tpidrro_el0);
	WRITE_SPECIALREG(tpidr_el1, hypctx->tpidr_el1);

	WRITE_SPECIALREG(actlr_el1, hypctx->actlr_el1);
	WRITE_SPECIALREG(csselr_el1, hypctx->csselr_el1);
	WRITE_SPECIALREG(mdccint_el1, hypctx->mdccint_el1);
	WRITE_SPECIALREG(mdscr_el1, hypctx->mdscr_el1);

	if (guest_or_nonvhe(guest)) {
		WRITE_SPECIALREG(EL1_REG(ELR), hypctx->elr_el1);
		WRITE_SPECIALREG(EL1_REG(VBAR), hypctx->vbar_el1);

		WRITE_SPECIALREG(EL1_REG(AFSR0), hypctx->afsr0_el1);
		WRITE_SPECIALREG(EL1_REG(AFSR1), hypctx->afsr1_el1);
		WRITE_SPECIALREG(EL1_REG(AMAIR), hypctx->amair_el1);
		WRITE_SPECIALREG(EL1_REG(CONTEXTIDR), hypctx->contextidr_el1);
		WRITE_SPECIALREG(EL1_REG(CPACR), hypctx->cpacr_el1);
		WRITE_SPECIALREG(EL1_REG(ESR), hypctx->esr_el1);
		WRITE_SPECIALREG(EL1_REG(FAR), hypctx->far_el1);
		WRITE_SPECIALREG(EL1_REG(MAIR), hypctx->mair_el1);
		WRITE_SPECIALREG(EL1_REG(SCTLR), hypctx->sctlr_el1);
		WRITE_SPECIALREG(EL1_REG(SPSR), hypctx->spsr_el1);
		WRITE_SPECIALREG(EL1_REG(TCR), hypctx->tcr_el1);
		/* TODO: tcr2_el1 */
		WRITE_SPECIALREG(EL1_REG(TTBR0), hypctx->ttbr0_el1);
		WRITE_SPECIALREG(EL1_REG(TTBR1), hypctx->ttbr1_el1);
	}

	if (guest) {
		WRITE_SPECIALREG(par_el1, hypctx->par_el1);
	}

	WRITE_SPECIALREG(cptr_el2, hypctx->cptr_el2);
	WRITE_SPECIALREG(vpidr_el2, hypctx->vpidr_el2);
	WRITE_SPECIALREG(vmpidr_el2, hypctx->vmpidr_el2);

	/* Load the special regs from the trapframe */
	WRITE_SPECIALREG(sp_el1, hypctx->tf.tf_sp);
	WRITE_SPECIALREG(elr_el2, hypctx->tf.tf_elr);
	WRITE_SPECIALREG(spsr_el2, hypctx->tf.tf_spsr);

	/* Restore the PMU registers */
	WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
	WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
	WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
	WRITE_SPECIALREG(pmuserenr_el0, hypctx->pmuserenr_el0);
	WRITE_SPECIALREG(pmselr_el0, hypctx->pmselr_el0);
	WRITE_SPECIALREG(pmxevcntr_el0, hypctx->pmxevcntr_el0);
	/* Clear all events/interrupts then enable them */
	WRITE_SPECIALREG(pmcntenclr_el0, ~0ul);
	WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
	WRITE_SPECIALREG(pmintenclr_el1, ~0ul);
	WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
	WRITE_SPECIALREG(pmovsclr_el0, ~0ul);
	WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);

	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	LOAD_PMU(x) \
	case (x + 1): \
		WRITE_SPECIALREG(pmevcntr ## x ## _el0, \
		    hypctx->pmevcntr_el0[x]); \
		WRITE_SPECIALREG(pmevtyper ## x ## _el0, \
		    hypctx->pmevtyper_el0[x])
	LOAD_PMU(30);
	LOAD_PMU(29);
	LOAD_PMU(28);
	LOAD_PMU(27);
	LOAD_PMU(26);
	LOAD_PMU(25);
	LOAD_PMU(24);
	LOAD_PMU(23);
	LOAD_PMU(22);
	LOAD_PMU(21);
	LOAD_PMU(20);
	LOAD_PMU(19);
	LOAD_PMU(18);
	LOAD_PMU(17);
	LOAD_PMU(16);
	LOAD_PMU(15);
	LOAD_PMU(14);
	LOAD_PMU(13);
	LOAD_PMU(12);
	LOAD_PMU(11);
	LOAD_PMU(10);
	LOAD_PMU(9);
	LOAD_PMU(8);
	LOAD_PMU(7);
	LOAD_PMU(6);
	LOAD_PMU(5);
	LOAD_PMU(4);
	LOAD_PMU(3);
	LOAD_PMU(2);
	LOAD_PMU(1);
	LOAD_PMU(0);
	default:	/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef LOAD_PMU
	}

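	/*
	 * The CLAIM tags only have set/clear write interfaces, so write an
	 * exact value by clearing every tag and then setting the saved ones.
	 */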
	WRITE_SPECIALREG(dbgclaimclr_el1, ~0ul);
	WRITE_SPECIALREG(dbgclaimset_el1, hypctx->dbgclaimset_el1);

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_BRP(x) \
	case x: \
		WRITE_SPECIALREG(dbgbcr ## x ## _el1, \
		    hypctx->dbgbcr_el1[x]); \
		WRITE_SPECIALREG(dbgbvr ## x ## _el1, \
		    hypctx->dbgbvr_el1[x])
	LOAD_DBG_BRP(15);
	LOAD_DBG_BRP(14);
	LOAD_DBG_BRP(13);
	LOAD_DBG_BRP(12);
	LOAD_DBG_BRP(11);
	LOAD_DBG_BRP(10);
	LOAD_DBG_BRP(9);
	LOAD_DBG_BRP(8);
	LOAD_DBG_BRP(7);
	LOAD_DBG_BRP(6);
	LOAD_DBG_BRP(5);
	LOAD_DBG_BRP(4);
	LOAD_DBG_BRP(3);
	LOAD_DBG_BRP(2);
	LOAD_DBG_BRP(1);
	default:
	LOAD_DBG_BRP(0);
#undef LOAD_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_WRP(x) \
	case x: \
		WRITE_SPECIALREG(dbgwcr ## x ## _el1, \
		    hypctx->dbgwcr_el1[x]); \
		WRITE_SPECIALREG(dbgwvr ## x ## _el1, \
		    hypctx->dbgwvr_el1[x])
	LOAD_DBG_WRP(15);
	LOAD_DBG_WRP(14);
	LOAD_DBG_WRP(13);
	LOAD_DBG_WRP(12);
	LOAD_DBG_WRP(11);
	LOAD_DBG_WRP(10);
	LOAD_DBG_WRP(9);
	LOAD_DBG_WRP(8);
	LOAD_DBG_WRP(7);
	LOAD_DBG_WRP(6);
	LOAD_DBG_WRP(5);
	LOAD_DBG_WRP(4);
	LOAD_DBG_WRP(3);
	LOAD_DBG_WRP(2);
	LOAD_DBG_WRP(1);
	default:
	LOAD_DBG_WRP(0);
#undef LOAD_DBG_WRP
	}

	if (guest) {
		/* Load the timer registers */
		WRITE_SPECIALREG(EL1_REG(CNTKCTL),
		    hypctx->vtimer_cpu.cntkctl_el1);
		WRITE_SPECIALREG(EL0_REG(CNTV_CVAL),
		    hypctx->vtimer_cpu.virt_timer.cntx_cval_el0);
		WRITE_SPECIALREG(EL0_REG(CNTV_CTL),
		    hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0);
		WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
		WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);

		if (ecv_poff) {
			/*
			 * Load the same offset as the virtual timer
			 * to keep in sync.
			 */
			WRITE_SPECIALREG(CNTPOFF_EL2_REG,
			    hyp->vtimer.cntvoff_el2);
			isb();
		}
	}
	if (guest_or_nonvhe(guest) && ecv_poff) {
		/*
		 * If we have ECV then the guest could modify these registers.
		 * If VHE is enabled then the kernel will see a different view
		 * of the registers, so it doesn't need to handle them.
		 */
		WRITE_SPECIALREG(EL0_REG(CNTP_CVAL),
		    hypctx->vtimer_cpu.phys_timer.cntx_cval_el0);
		WRITE_SPECIALREG(EL0_REG(CNTP_CTL),
		    hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0);
	}

	if (guest) {
		/* Load the GICv3 registers */
		WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
		WRITE_SPECIALREG(ich_vmcr_el2,
		    hypctx->vgic_v3_regs.ich_vmcr_el2);
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	LOAD_LR(x) \
		case x: \
			WRITE_SPECIALREG(ich_lr ## x ## _el2, \
			    hypctx->vgic_v3_regs.ich_lr_el2[x])
		LOAD_LR(15);
		LOAD_LR(14);
		LOAD_LR(13);
		LOAD_LR(12);
		LOAD_LR(11);
		LOAD_LR(10);
		LOAD_LR(9);
		LOAD_LR(8);
		LOAD_LR(7);
		LOAD_LR(6);
		LOAD_LR(5);
		LOAD_LR(4);
		LOAD_LR(3);
		LOAD_LR(2);
		LOAD_LR(1);
		default:
		LOAD_LR(0);
#undef LOAD_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	LOAD_APR(x) \
		case x: \
			WRITE_SPECIALREG(ich_ap0r ## x ## _el2, \
			    hypctx->vgic_v3_regs.ich_ap0r_el2[x]); \
			WRITE_SPECIALREG(ich_ap1r ## x ## _el2, \
			    hypctx->vgic_v3_regs.ich_ap1r_el2[x])
		LOAD_APR(3);
		LOAD_APR(2);
		LOAD_APR(1);
		default:
		LOAD_APR(0);
#undef LOAD_APR
		}
	}
}

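/*
 * Perform one host-to-guest-and-back world switch: save the host state,
 * load the guest state, run the guest via do_call_guest() until it traps
 * to EL2, capture the exit information, and restore the host state.
 * Returns the exception type so the caller can decide how to handle the
 * exit.
 */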
static uint64_t
vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
{
	struct hypctx host_hypctx;
	uint64_t cntvoff_el2;
	uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
#ifndef VMM_VHE
	uint64_t hcrx_el2;
#endif
	uint64_t ret;
	uint64_t s1e1r, hpfar_el2;
	bool ecv_poff, hpfar_valid;

	ecv_poff = (hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0;
	vmm_hyp_reg_store(&host_hypctx, NULL, false, ecv_poff);
#ifndef VMM_VHE
	if ((hyp->feats & HYP_FEAT_HCX) != 0)
		hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
#endif

	/* Save the host special registers */
	cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
	cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
	cntvoff_el2 = READ_SPECIALREG(cntvoff_el2);

	ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
	ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);

	vmm_hyp_reg_restore(hypctx, hyp, true, ecv_poff);

	/* Load the common hypervisor registers */
	WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);

	host_hypctx.mdcr_el2 = READ_SPECIALREG(mdcr_el2);
	WRITE_SPECIALREG(mdcr_el2, hypctx->mdcr_el2);

	/* Call into the guest */
	ret = VMM_HYP_FUNC(do_call_guest)(hypctx);

	WRITE_SPECIALREG(mdcr_el2, host_hypctx.mdcr_el2);
	isb();

	/* Store the exit info */
	hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
	vmm_hyp_reg_store(hypctx, hyp, true, ecv_poff);

	hpfar_valid = true;
	if (ret == EXCP_TYPE_EL1_SYNC) {
		switch (ESR_ELx_EXCEPTION(hypctx->tf.tf_esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_DATA_ABORT_L:
			/*
			 * The hpfar_el2 register is valid for:
			 * - Translation and Access faults.
			 * - Translation, Access, and permission faults on
			 *   the translation table walk on the stage 1 tables.
			 * - A stage 2 Address size fault.
			 *
			 * As we only need it in the first 2 cases we can just
			 * exclude it on permission faults that are not from
			 * the stage 1 table walk.
			 *
			 * TODO: Add a case for Arm erratum 834220.
			 */
			if ((hypctx->tf.tf_esr & ISS_DATA_S1PTW) != 0)
				break;
			switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) {
			case ISS_DATA_DFSC_PF_L1:
			case ISS_DATA_DFSC_PF_L2:
			case ISS_DATA_DFSC_PF_L3:
				hpfar_valid = false;
				break;
			}
			break;
		}
	}
	if (hpfar_valid) {
		hypctx->exit_info.hpfar_el2 = READ_SPECIALREG(hpfar_el2);
	} else {
		/*
		 * TODO: There is a risk the at instruction could cause an
		 * exception here. We should handle it & return a failure.
		 */
		s1e1r =
		    arm64_address_translate_s1e1r(hypctx->exit_info.far_el2);
		if (PAR_SUCCESS(s1e1r)) {
			hpfar_el2 = (s1e1r & PAR_PA_MASK) >> PAR_PA_SHIFT;
			hpfar_el2 <<= HPFAR_EL2_FIPA_SHIFT;
			hypctx->exit_info.hpfar_el2 = hpfar_el2;
		} else {
			ret = EXCP_TYPE_REENTER;
		}
	}

	vmm_hyp_reg_restore(&host_hypctx, NULL, false, ecv_poff);

#ifndef VMM_VHE
	if ((hyp->feats & HYP_FEAT_HCX) != 0)
		WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hcrx_el2);
#endif

	/* Restore the host special registers */
	WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
	WRITE_SPECIALREG(ich_vmcr_el2, ich_vmcr_el2);

	WRITE_SPECIALREG(cnthctl_el2, cnthctl_el2);
	WRITE_SPECIALREG(cntkctl_el1, cntkctl_el1);
	WRITE_SPECIALREG(cntvoff_el2, cntvoff_el2);

	return (ret);
}

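/*
 * Run the guest until it exits for a reason the host must handle.
 * EXCP_TYPE_REENTER is returned when vmm_hyp_call_guest() could not
 * reconstruct the faulting IPA (the AT lookup above failed, presumably
 * because the stage 1 tables changed underneath it), so the guest is
 * re-entered to take the fault again and produce fresh exit state.
 */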
VMM_STATIC uint64_t
VMM_HYP_FUNC(enter_guest)(struct hyp *hyp, struct hypctx *hypctx)
{
	uint64_t ret;

	do {
		ret = vmm_hyp_call_guest(hyp, hypctx);
	} while (ret == EXCP_TYPE_REENTER);

	return (ret);
}

VMM_STATIC uint64_t
VMM_HYP_FUNC(read_reg)(uint64_t reg)
{
	switch (reg) {
	case HYP_REG_ICH_VTR:
		return (READ_SPECIALREG(ich_vtr_el2));
	}

	return (0);
}

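/*
 * Flush all EL1&0 regime TLB entries: dsb(ishst) makes prior page table
 * stores visible before the invalidate, "tlbi alle1is" broadcasts the
 * invalidation across the inner shareable domain, and the final dsb(ish)
 * waits for it to complete.
 */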
VMM_STATIC void
VMM_HYP_FUNC(clean_s2_tlbi)(void)
{
	dsb(ishst);
	__asm __volatile("tlbi alle1is");
	dsb(ish);
}

VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
    bool final_only)
{
	uint64_t end, r, start;
	uint64_t host_vttbr;
#ifdef VMM_VHE
	uint64_t host_hcr;
#endif

#ifdef VMM_VHE
	dsb(ishst);
#endif

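	/*
	 * Invalidate the stage 2 TLB entries covering [sva, eva).  The
	 * operand of "tlbi ipas2e1is"/"tlbi ipas2le1is" carries the IPA
	 * right-shifted by 12 in bits [43:0], which is what the TLBI_VA*
	 * macros encode; the loop advances one last-level (L3) page per
	 * operation.  On VHE the host runs with HCR_EL2.TGE set, which
	 * would redirect the "tlbi vmalle1is" below to the host's EL2&0
	 * regime, so TGE is cleared around the sequence.
	 */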
#define	TLBI_VA_SHIFT	12
#define	TLBI_VA_MASK	((1ul << 44) - 1)
#define	TLBI_VA(addr)	(((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
#define	TLBI_VA_L3_INCR	(L3_SIZE >> TLBI_VA_SHIFT)

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

#ifdef VMM_VHE
	host_hcr = READ_SPECIALREG(hcr_el2);
	WRITE_SPECIALREG(hcr_el2, host_hcr & ~HCR_TGE);
	isb();
#endif

	/*
	 * The CPU can cache the stage 1 + 2 combination so we need to ensure
	 * the stage 2 is invalidated first, then when this has completed we
	 * invalidate the stage 1 TLB. As we don't know which stage 1 virtual
	 * addresses point at the stage 2 IPA we need to invalidate the entire
	 * stage 1 TLB.
	 */

	start = TLBI_VA(sva);
	end = TLBI_VA(eva);
	for (r = start; r < end; r += TLBI_VA_L3_INCR) {
		/* Invalidate the stage 2 TLB entry */
		if (final_only)
			__asm __volatile("tlbi ipas2le1is, %0" : : "r"(r));
		else
			__asm __volatile("tlbi ipas2e1is, %0" : : "r"(r));
	}
	/* Ensure the entry has been invalidated */
	dsb(ish);
	/* Invalidate the stage 1 TLB. */
	__asm __volatile("tlbi vmalle1is");
	dsb(ish);
	isb();

#ifdef VMM_VHE
	WRITE_SPECIALREG(hcr_el2, host_hcr);
	isb();
#endif

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}

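/*
 * Invalidate all stage 1 and stage 2 TLB entries for the guest named by
 * vttbr: "tlbi vmalls12e1is" covers both stages for the current VMID
 * across the inner shareable domain.
 */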
VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
{
	uint64_t host_vttbr;

#ifdef VMM_VHE
	dsb(ishst);
#endif

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

	__asm __volatile("tlbi vmalls12e1is");
	dsb(ish);
	isb();

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}